| problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.71k-18.9k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 465-23.6k chars) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
|---|---|---|---|---|---|---|---|---|
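For orientation, the rows below can also be consumed programmatically. A minimal sketch using the Hugging Face `datasets` library follows; the dataset id is taken from the `source` column and the `train` split name is an assumption, not something confirmed by this page.

```python
# Illustrative only: dataset id and split name are assumptions.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:300])        # issue statement + partial code base shown to the model
print(row["golden_diff"][:300])   # reference patch used for verification
```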
gh_patches_debug_61378 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1288 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
inplace operation in pairwise_cosine_similarity
## 🐛 Bug
Hello !
The x, y values are modified inplace in the `pairwise_cosine_similarity` function.
This is not documented and may cause bugs that are difficult to find.
Thank you.
### To Reproduce
```python
import torch
from torchmetrics.functional import pairwise_cosine_similarity
x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
print("Result:", pairwise_cosine_similarity(x, y))
print("X:", x)
print("Y:", y)
"""Out[0]
Result: tensor([[0.5547, 0.8682],
[0.5145, 0.8437],
[0.5300, 0.8533]])
X: tensor([[0.5547, 0.8321],
[0.5145, 0.8575],
[0.5300, 0.8480]])
Y: tensor([[1.0000, 0.0000],
[0.8944, 0.4472]])
"""
```
### Environment
torchmetrics==0.10.0
</issue>
<code>
[start of src/torchmetrics/functional/pairwise/cosine.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
21 from torchmetrics.utilities.compute import _safe_matmul
22
23
24 def _pairwise_cosine_similarity_update(
25 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
26 ) -> Tensor:
27 """Calculates the pairwise cosine similarity matrix.
28
29 Args:
30 x: tensor of shape ``[N,d]``
31 y: tensor of shape ``[M,d]``
32 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
33 """
34 x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
35
36 norm = torch.norm(x, p=2, dim=1)
37 x /= norm.unsqueeze(1)
38 norm = torch.norm(y, p=2, dim=1)
39 y /= norm.unsqueeze(1)
40
41 distance = _safe_matmul(x, y)
42 if zero_diagonal:
43 distance.fill_diagonal_(0)
44 return distance
45
46
47 def pairwise_cosine_similarity(
48 x: Tensor,
49 y: Optional[Tensor] = None,
50 reduction: Literal["mean", "sum", "none", None] = None,
51 zero_diagonal: Optional[bool] = None,
52 ) -> Tensor:
53 r"""Calculates pairwise cosine similarity:
54
55 .. math::
56 s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
57 = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D y_i^2}}
58
59 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
60 between the rows of :math:`x` and :math:`y`.
61 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
62
63 Args:
64 x: Tensor with shape ``[N, d]``
65 y: Tensor with shape ``[M, d]``, optional
66 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
67 (applied along column dimension) or `'none'`, `None` for no reduction
68 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given
69 this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``
70
71 Returns:
72 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
73
74 Example:
75 >>> import torch
76 >>> from torchmetrics.functional import pairwise_cosine_similarity
77 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
78 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
79 >>> pairwise_cosine_similarity(x, y)
80 tensor([[0.5547, 0.8682],
81 [0.5145, 0.8437],
82 [0.5300, 0.8533]])
83 >>> pairwise_cosine_similarity(x)
84 tensor([[0.0000, 0.9989, 0.9996],
85 [0.9989, 0.0000, 0.9998],
86 [0.9996, 0.9998, 0.0000]])
87 """
88 distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
89 return _reduce_distance_matrix(distance, reduction)
90
[end of src/torchmetrics/functional/pairwise/cosine.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py
--- a/src/torchmetrics/functional/pairwise/cosine.py
+++ b/src/torchmetrics/functional/pairwise/cosine.py
@@ -34,9 +34,9 @@
x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
norm = torch.norm(x, p=2, dim=1)
- x /= norm.unsqueeze(1)
+ x = x / norm.unsqueeze(1)
norm = torch.norm(y, p=2, dim=1)
- y /= norm.unsqueeze(1)
+ y = y / norm.unsqueeze(1)
distance = _safe_matmul(x, y)
if zero_diagonal:
| {"golden_diff": "diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py\n--- a/src/torchmetrics/functional/pairwise/cosine.py\n+++ b/src/torchmetrics/functional/pairwise/cosine.py\n@@ -34,9 +34,9 @@\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n \n norm = torch.norm(x, p=2, dim=1)\n- x /= norm.unsqueeze(1)\n+ x = x / norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n- y /= norm.unsqueeze(1)\n+ y = y / norm.unsqueeze(1)\n \n distance = _safe_matmul(x, y)\n if zero_diagonal:\n", "issue": "inplace operation in pairwise_cosine_similarity\n## \ud83d\udc1b Bug\r\nHello !\r\nThe x, y values are modified inplace in the `pairwise_cosine_similarity` function. \r\nThis is not documented and may cause bugs that are difficult to find. \r\nThank you.\r\n\r\n### To Reproduce\r\n\r\n```python\r\nimport torch\r\nfrom torchmetrics.functional import pairwise_cosine_similarity\r\nx = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\r\ny = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\r\nprint(\"Result:\", pairwise_cosine_similarity(x, y))\r\nprint(\"X:\", x)\r\nprint(\"Y:\", y)\r\n\"\"\"Out[0]\r\nResult: tensor([[0.5547, 0.8682],\r\n [0.5145, 0.8437],\r\n [0.5300, 0.8533]])\r\nX: tensor([[0.5547, 0.8321],\r\n [0.5145, 0.8575],\r\n [0.5300, 0.8480]])\r\nY: tensor([[1.0000, 0.0000],\r\n [0.8944, 0.4472]])\r\n\"\"\"\r\n```\r\n\r\n### Environment\r\ntorchmetrics==0.10.0\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\nfrom torchmetrics.utilities.compute import _safe_matmul\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. 
math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D y_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/cosine.py"}]} | 2,044 | 185 |
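As an editorial aside to the row above: a minimal sketch of a regression check for the in-place mutation bug, assuming a torchmetrics build that already includes the golden diff (variable names and assertion messages are invented here):

```python
import torch
from torchmetrics.functional import pairwise_cosine_similarity

# Same tensors as the issue's reproduction snippet.
x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
x_before, y_before = x.clone(), y.clone()

result = pairwise_cosine_similarity(x, y)

# With the fix (x = x / norm instead of x /= norm) the inputs stay untouched.
assert torch.equal(x, x_before), "x was modified in place"
assert torch.equal(y, y_before), "y was modified in place"
print(result)
```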
gh_patches_debug_1038 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-341 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Individually run API tests don't build tables database
## Description
Running a individual test in `mathesar` that doesn't use the `engine` or `test_db` fixture will not have the tables databases built for the test. As a result, many will error when trying to access the tables database.
## Expected behavior
The tables database should always be built.
## To Reproduce
Run any test in `mathesar` that doesn't use `engine` or `test_db`. Ex:
```
docker exec mathesar_web_1 pytest mathesar/tests/views/api/test_schema_api.py::test_schema_update
```
## Additional context
Introduced due to the changes in #329, since `pytest-django` no longer creates the tables db for us.
</issue>
<code>
[start of conftest.py]
1 """
2 This file should provide utilities for setting up test DBs and the like. It's
3 intended to be the containment zone for anything specific about the testing
4 environment (e.g., the login info for the Postgres instance for testing)
5 """
6 import pytest
7 from sqlalchemy import create_engine, text
8 from config.settings import DATABASES
9
10 TEST_DB = "mathesar_db_test"
11
12
13 @pytest.fixture(scope="session")
14 def test_db_name():
15 return TEST_DB
16
17
18 @pytest.fixture(scope="session")
19 def test_db():
20 superuser_engine = _get_superuser_engine()
21 with superuser_engine.connect() as conn:
22 conn.execution_options(isolation_level="AUTOCOMMIT")
23 conn.execute(text(f"DROP DATABASE IF EXISTS {TEST_DB} WITH (FORCE)"))
24 conn.execute(text(f"CREATE DATABASE {TEST_DB}"))
25 yield TEST_DB
26 with superuser_engine.connect() as conn:
27 conn.execution_options(isolation_level="AUTOCOMMIT")
28 conn.execute(text(f"DROP DATABASE {TEST_DB} WITH (FORCE)"))
29
30
31 @pytest.fixture(scope="session")
32 def engine(test_db):
33 return create_engine(
34 _get_connection_string(
35 DATABASES["default"]["USER"],
36 DATABASES["default"]["PASSWORD"],
37 DATABASES["default"]["HOST"],
38 test_db,
39 ),
40 future=True,
41 )
42
43
44 def _get_superuser_engine():
45 return create_engine(
46 _get_connection_string(
47 username=DATABASES["default"]["USER"],
48 password=DATABASES["default"]["PASSWORD"],
49 hostname=DATABASES["default"]["HOST"],
50 database=DATABASES["default"]["NAME"],
51 ),
52 future=True,
53 )
54
55
56 def _get_connection_string(username, password, hostname, database):
57 return f"postgresql://{username}:{password}@{hostname}/{database}"
58
[end of conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -15,7 +15,7 @@
return TEST_DB
[email protected](scope="session")
[email protected](scope="session", autouse=True)
def test_db():
superuser_engine = _get_superuser_engine()
with superuser_engine.connect() as conn:
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -15,7 +15,7 @@\n return TEST_DB\n \n \[email protected](scope=\"session\")\[email protected](scope=\"session\", autouse=True)\n def test_db():\n superuser_engine = _get_superuser_engine()\n with superuser_engine.connect() as conn:\n", "issue": "Individually run API tests don't build tables database\n## Description\r\nRunning a individual test in `mathesar` that doesn't use the `engine` or `test_db` fixture will not have the tables databases built for the test. As a result, many will error when trying to access the tables database.\r\n\r\n## Expected behavior\r\nThe tables database should always be built.\r\n\r\n## To Reproduce\r\nRun any test in `mathesar` that doesn't use `engine` or `test_db`. Ex:\r\n```\r\ndocker exec mathesar_web_1 pytest mathesar/tests/views/api/test_schema_api.py::test_schema_update\r\n```\r\n\r\n## Additional context\r\nIntroduced due to the changes in #329, since `pytest-django` no longer creates the tables db for us.\r\n\n", "before_files": [{"content": "\"\"\"\nThis file should provide utilities for setting up test DBs and the like. It's\nintended to be the containment zone for anything specific about the testing\nenvironment (e.g., the login info for the Postgres instance for testing)\n\"\"\"\nimport pytest\nfrom sqlalchemy import create_engine, text\nfrom config.settings import DATABASES\n\nTEST_DB = \"mathesar_db_test\"\n\n\[email protected](scope=\"session\")\ndef test_db_name():\n return TEST_DB\n\n\[email protected](scope=\"session\")\ndef test_db():\n superuser_engine = _get_superuser_engine()\n with superuser_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"DROP DATABASE IF EXISTS {TEST_DB} WITH (FORCE)\"))\n conn.execute(text(f\"CREATE DATABASE {TEST_DB}\"))\n yield TEST_DB\n with superuser_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"DROP DATABASE {TEST_DB} WITH (FORCE)\"))\n\n\[email protected](scope=\"session\")\ndef engine(test_db):\n return create_engine(\n _get_connection_string(\n DATABASES[\"default\"][\"USER\"],\n DATABASES[\"default\"][\"PASSWORD\"],\n DATABASES[\"default\"][\"HOST\"],\n test_db,\n ),\n future=True,\n )\n\n\ndef _get_superuser_engine():\n return create_engine(\n _get_connection_string(\n username=DATABASES[\"default\"][\"USER\"],\n password=DATABASES[\"default\"][\"PASSWORD\"],\n hostname=DATABASES[\"default\"][\"HOST\"],\n database=DATABASES[\"default\"][\"NAME\"],\n ),\n future=True,\n )\n\n\ndef _get_connection_string(username, password, hostname, database):\n return f\"postgresql://{username}:{password}@{hostname}/{database}\"\n", "path": "conftest.py"}]} | 1,179 | 90 |
gh_patches_debug_23746 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-3114 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tcp_message script not working
Hi,
I tried to execute the TCP message replace script from the doc but it seems is not working. I don't know if this is a issue with the doc script or with mitmproxy.
The script was unchanged.
##### Steps to reproduce the problem:
1. mitmdump --mode transparent --tcp-host ".*" -k -s examples/complex/tcp_message.py
Loading script: examples/tcp_message.py
Proxy server listening at http://*:8080
192.168.1.241:37604: clientconnect
::ffff:192.168.1.241:37604: Certificate verification error for None: hostname 'no-hostname' doesn't match either of '*.local.org', 'local.org'
::ffff:192.168.1.241:37604: Ignoring server verification error, continuing with connection
Addon error: Traceback (most recent call last):
File "examples/tcp_message.py", line 16, in tcp_message
modified_msg = tcp_msg.message.replace("foo", "bar")
AttributeError: 'TCPFlow' object has no attribute 'message'
192.168.1.241:37604 -> tcp -> 10.0.0.2:5443
Addon error: Traceback (most recent call last):
File "examples/tcp_message.py", line 16, in tcp_message
modified_msg = tcp_msg.message.replace("foo", "bar")
AttributeError: 'TCPFlow' object has no attribute 'message'
192.168.1.241:37604 <- tcp <- 10.0.0.2:5443
##### System information
<!-- Paste the output of "mitmproxy --version" here. -->
mitmdump --version
Mitmproxy: 3.0.4
Python: 3.6.0
OpenSSL: OpenSSL 1.1.0h 27 Mar 2018
Platform: Linux-3.19.0-65-generic-x86_64-with-debian-jessie-sid
<!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! :) -->
</issue>
<code>
[start of examples/complex/tcp_message.py]
1 """
2 tcp_message Inline Script Hook API Demonstration
3 ------------------------------------------------
4
5 * modifies packets containing "foo" to "bar"
6 * prints various details for each packet.
7
8 example cmdline invocation:
9 mitmdump -T --host --tcp ".*" -q -s examples/tcp_message.py
10 """
11 from mitmproxy.utils import strutils
12 from mitmproxy import ctx
13
14
15 def tcp_message(tcp_msg):
16 modified_msg = tcp_msg.message.replace("foo", "bar")
17
18 is_modified = False if modified_msg == tcp_msg.message else True
19 tcp_msg.message = modified_msg
20
21 ctx.log.info(
22 "[tcp_message{}] from {} {} to {} {}:\r\n{}".format(
23 " (modified)" if is_modified else "",
24 "client" if tcp_msg.sender == tcp_msg.client_conn else "server",
25 tcp_msg.sender.address,
26 "server" if tcp_msg.receiver == tcp_msg.server_conn else "client",
27 tcp_msg.receiver.address, strutils.bytes_to_escaped_str(tcp_msg.message))
28 )
29
[end of examples/complex/tcp_message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/complex/tcp_message.py b/examples/complex/tcp_message.py
--- a/examples/complex/tcp_message.py
+++ b/examples/complex/tcp_message.py
@@ -6,23 +6,22 @@
* prints various details for each packet.
example cmdline invocation:
-mitmdump -T --host --tcp ".*" -q -s examples/tcp_message.py
+mitmdump --rawtcp --tcp-host ".*" -s examples/complex/tcp_message.py
"""
from mitmproxy.utils import strutils
from mitmproxy import ctx
+from mitmproxy import tcp
-def tcp_message(tcp_msg):
- modified_msg = tcp_msg.message.replace("foo", "bar")
-
- is_modified = False if modified_msg == tcp_msg.message else True
- tcp_msg.message = modified_msg
+def tcp_message(flow: tcp.TCPFlow):
+ message = flow.messages[-1]
+ old_content = message.content
+ message.content = old_content.replace(b"foo", b"bar")
ctx.log.info(
- "[tcp_message{}] from {} {} to {} {}:\r\n{}".format(
- " (modified)" if is_modified else "",
- "client" if tcp_msg.sender == tcp_msg.client_conn else "server",
- tcp_msg.sender.address,
- "server" if tcp_msg.receiver == tcp_msg.server_conn else "client",
- tcp_msg.receiver.address, strutils.bytes_to_escaped_str(tcp_msg.message))
+ "[tcp_message{}] from {} to {}:\n{}".format(
+ " (modified)" if message.content != old_content else "",
+ "client" if message.from_client else "server",
+ "server" if message.from_client else "client",
+ strutils.bytes_to_escaped_str(message.content))
)
| {"golden_diff": "diff --git a/examples/complex/tcp_message.py b/examples/complex/tcp_message.py\n--- a/examples/complex/tcp_message.py\n+++ b/examples/complex/tcp_message.py\n@@ -6,23 +6,22 @@\n * prints various details for each packet.\n \n example cmdline invocation:\n-mitmdump -T --host --tcp \".*\" -q -s examples/tcp_message.py\n+mitmdump --rawtcp --tcp-host \".*\" -s examples/complex/tcp_message.py\n \"\"\"\n from mitmproxy.utils import strutils\n from mitmproxy import ctx\n+from mitmproxy import tcp\n \n \n-def tcp_message(tcp_msg):\n- modified_msg = tcp_msg.message.replace(\"foo\", \"bar\")\n-\n- is_modified = False if modified_msg == tcp_msg.message else True\n- tcp_msg.message = modified_msg\n+def tcp_message(flow: tcp.TCPFlow):\n+ message = flow.messages[-1]\n+ old_content = message.content\n+ message.content = old_content.replace(b\"foo\", b\"bar\")\n \n ctx.log.info(\n- \"[tcp_message{}] from {} {} to {} {}:\\r\\n{}\".format(\n- \" (modified)\" if is_modified else \"\",\n- \"client\" if tcp_msg.sender == tcp_msg.client_conn else \"server\",\n- tcp_msg.sender.address,\n- \"server\" if tcp_msg.receiver == tcp_msg.server_conn else \"client\",\n- tcp_msg.receiver.address, strutils.bytes_to_escaped_str(tcp_msg.message))\n+ \"[tcp_message{}] from {} to {}:\\n{}\".format(\n+ \" (modified)\" if message.content != old_content else \"\",\n+ \"client\" if message.from_client else \"server\",\n+ \"server\" if message.from_client else \"client\",\n+ strutils.bytes_to_escaped_str(message.content))\n )\n", "issue": "tcp_message script not working\nHi,\r\n\r\nI tried to execute the TCP message replace script from the doc but it seems is not working. I don't know if this is a issue with the doc script or with mitmproxy.\r\n\r\nThe script was unchanged.\r\n\r\n##### Steps to reproduce the problem:\r\n\r\n1. mitmdump --mode transparent --tcp-host \".*\" -k -s examples/complex/tcp_message.py\r\n\r\nLoading script: examples/tcp_message.py\r\nProxy server listening at http://*:8080\r\n192.168.1.241:37604: clientconnect\r\n::ffff:192.168.1.241:37604: Certificate verification error for None: hostname 'no-hostname' doesn't match either of '*.local.org', 'local.org'\r\n::ffff:192.168.1.241:37604: Ignoring server verification error, continuing with connection\r\nAddon error: Traceback (most recent call last):\r\n File \"examples/tcp_message.py\", line 16, in tcp_message\r\n modified_msg = tcp_msg.message.replace(\"foo\", \"bar\")\r\nAttributeError: 'TCPFlow' object has no attribute 'message'\r\n\r\n192.168.1.241:37604 -> tcp -> 10.0.0.2:5443\r\nAddon error: Traceback (most recent call last):\r\n File \"examples/tcp_message.py\", line 16, in tcp_message\r\n modified_msg = tcp_msg.message.replace(\"foo\", \"bar\")\r\nAttributeError: 'TCPFlow' object has no attribute 'message'\r\n\r\n192.168.1.241:37604 <- tcp <- 10.0.0.2:5443\r\n\r\n##### System information\r\n\r\n<!-- Paste the output of \"mitmproxy --version\" here. -->\r\n\r\nmitmdump --version\r\nMitmproxy: 3.0.4 \r\nPython: 3.6.0\r\nOpenSSL: OpenSSL 1.1.0h 27 Mar 2018\r\nPlatform: Linux-3.19.0-65-generic-x86_64-with-debian-jessie-sid\r\n\r\n<!-- Please use the mitmproxy forums (https://discourse.mitmproxy.org/) for support/how-to questions. Thanks! 
:) -->\r\n\n", "before_files": [{"content": "\"\"\"\ntcp_message Inline Script Hook API Demonstration\n------------------------------------------------\n\n* modifies packets containing \"foo\" to \"bar\"\n* prints various details for each packet.\n\nexample cmdline invocation:\nmitmdump -T --host --tcp \".*\" -q -s examples/tcp_message.py\n\"\"\"\nfrom mitmproxy.utils import strutils\nfrom mitmproxy import ctx\n\n\ndef tcp_message(tcp_msg):\n modified_msg = tcp_msg.message.replace(\"foo\", \"bar\")\n\n is_modified = False if modified_msg == tcp_msg.message else True\n tcp_msg.message = modified_msg\n\n ctx.log.info(\n \"[tcp_message{}] from {} {} to {} {}:\\r\\n{}\".format(\n \" (modified)\" if is_modified else \"\",\n \"client\" if tcp_msg.sender == tcp_msg.client_conn else \"server\",\n tcp_msg.sender.address,\n \"server\" if tcp_msg.receiver == tcp_msg.server_conn else \"client\",\n tcp_msg.receiver.address, strutils.bytes_to_escaped_str(tcp_msg.message))\n )\n", "path": "examples/complex/tcp_message.py"}]} | 1,321 | 387 |
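One detail of the golden diff above that is easy to miss: raw TCP content is `bytes`, so the replacement arguments are byte strings (`b"foo"`, `b"bar"`). A tiny standalone illustration with a made-up payload:

```python
payload = b"GET /foo HTTP/1.1"
print(payload.replace(b"foo", b"bar"))   # b'GET /bar HTTP/1.1'

try:
    payload.replace("foo", "bar")        # str arguments are rejected for bytes
except TypeError as exc:
    print("str arguments fail:", exc)
```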
gh_patches_debug_13366 | rasdani/github-patches | git_diff | ansible__awx-13022 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Genrated certificate in install bundle for Execution node is valid only for 10 days
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
### Bug Summary
The certificate for receptor in install bundle (`*.tar.gz`) for execution node that generated by AWX is valid only for 10 days.
In my understanding currently there is no automated certificate renewal feature in both AWX and Receptor, so I guess the execution environment will be invalidated after 10 days.
Therefore frequently renewal (generating bundle and invoke playbook again) by hand might be required.
```bash
$ ls -l
total 24
-rw-rw-r--. 1 kuro kuro 11278 Oct 7 05:12 ansible.log
drwxrwxr-x. 2 kuro kuro 21 Oct 7 05:09 group_vars
-rw-r--r--. 1 kuro kuro 406 Jan 1 1970 install_receptor.yml
-rw-r--r--. 1 kuro kuro 159 Oct 7 05:10 inventory.yml
drwxrwxr-x. 3 kuro kuro 44 Oct 7 05:09 receptor
-rw-r--r--. 1 kuro kuro 137 Jan 1 1970 requirements.yml
$ openssl x509 -text -in receptor/tls/receptor.crt -noout | grep Not
Not Before: Oct 6 20:09:21 2022 GMT
Not After : Oct 16 20:09:21 2022 GMT 👈👈👈
```
Installer (playbook) just place certificate on execution node without any modification, so deployed execution node uses certificate that included in the install bandle as is.
I have no idea if this is a _bug_ or _designed behavior_, but just in case I'm creating this issue.
### AWX version
21.7.0
### Select the relevant components
- [ ] UI
- [X] API
- [ ] Docs
- [ ] Collection
- [ ] CLI
- [ ] Other
### Installation method
kubernetes
### Modifications
no
### Ansible version
N/A
### Operating system
CentOS 8 Stream
### Web browser
Chrome
### Steps to reproduce
1. Deploy AWX 21.7.0 on any platform
2. Add new instance and generate install bundle for the instance
3. Extract install bundle and ensure `Not After` in the `receptor.crt` by `openssl x509 -text -in receptor/tls/receptor.crt -noout`
### Expected results
Certificate and execution environment are valid for enough life time.
### Actual results
Not tested enough, I guess my execution environment will be expired in 10 days if I won't make any manual renewal for the certs.
### Additional information
@TheRealHaoLiu @jbradberry
Thanks for excellent feature; execution node! It works as expected in my environment in this few days :)
Sorry for annoying you by this mention, but I want to confirm that this is designed or not.
I found that there is hard-coded `days=10` in your commit.
https://github.com/ansible/awx/blob/150c55c72a4d1d896474c9d3aaaceeb1b69ee253/awx/api/views/instance_install_bundle.py#L181
According to your comment on `awx-operator` repository, it seems that you've designed it as _10 years_ instead of _10 days_ I guess...?; https://github.com/ansible/awx-operator/pull/1012#issuecomment-1208423340
Please let me know it's not recommended that trying to use generated certs in the long term, or if you have an easier way to renew certs.
</issue>
<code>
[start of awx/api/views/instance_install_bundle.py]
1 # Copyright (c) 2018 Red Hat, Inc.
2 # All Rights Reserved.
3
4 import datetime
5 import io
6 import ipaddress
7 import os
8 import tarfile
9
10 import asn1
11 from awx.api import serializers
12 from awx.api.generics import GenericAPIView, Response
13 from awx.api.permissions import IsSystemAdminOrAuditor
14 from awx.main import models
15 from cryptography import x509
16 from cryptography.hazmat.primitives import hashes, serialization
17 from cryptography.hazmat.primitives.asymmetric import rsa
18 from cryptography.x509 import DNSName, IPAddress, ObjectIdentifier, OtherName
19 from cryptography.x509.oid import NameOID
20 from django.http import HttpResponse
21 from django.template.loader import render_to_string
22 from django.utils.translation import gettext_lazy as _
23 from rest_framework import status
24
25 # Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.
26 RECEPTOR_OID = "1.3.6.1.4.1.2312.19.1"
27
28 # generate install bundle for the instance
29 # install bundle directory structure
30 # ├── install_receptor.yml (playbook)
31 # ├── inventory.yml
32 # ├── group_vars
33 # │ └── all.yml
34 # ├── receptor
35 # │ ├── tls
36 # │ │ ├── ca
37 # │ │ │ └── receptor-ca.crt
38 # │ │ ├── receptor.crt
39 # │ │ └── receptor.key
40 # │ └── work-public-key.pem
41 # └── requirements.yml
42 class InstanceInstallBundle(GenericAPIView):
43
44 name = _('Install Bundle')
45 model = models.Instance
46 serializer_class = serializers.InstanceSerializer
47 permission_classes = (IsSystemAdminOrAuditor,)
48
49 def get(self, request, *args, **kwargs):
50 instance_obj = self.get_object()
51
52 if instance_obj.node_type not in ('execution',):
53 return Response(
54 data=dict(msg=_('Install bundle can only be generated for execution nodes.')),
55 status=status.HTTP_400_BAD_REQUEST,
56 )
57
58 with io.BytesIO() as f:
59 with tarfile.open(fileobj=f, mode='w:gz') as tar:
60 # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file
61 tar.add(
62 os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt"
63 )
64
65 # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem
66 tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem")
67
68 # generate and write the receptor key to receptor/tls/receptor.key in the tar file
69 key, cert = generate_receptor_tls(instance_obj)
70
71 key_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key")
72 key_tarinfo.size = len(key)
73 tar.addfile(key_tarinfo, io.BytesIO(key))
74
75 cert_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt")
76 cert_tarinfo.size = len(cert)
77 tar.addfile(cert_tarinfo, io.BytesIO(cert))
78
79 # generate and write install_receptor.yml to the tar file
80 playbook = generate_playbook().encode('utf-8')
81 playbook_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/install_receptor.yml")
82 playbook_tarinfo.size = len(playbook)
83 tar.addfile(playbook_tarinfo, io.BytesIO(playbook))
84
85 # generate and write inventory.yml to the tar file
86 inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')
87 inventory_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/inventory.yml")
88 inventory_yml_tarinfo.size = len(inventory_yml)
89 tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))
90
91 # generate and write group_vars/all.yml to the tar file
92 group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')
93 group_vars_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/group_vars/all.yml")
94 group_vars_tarinfo.size = len(group_vars)
95 tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))
96
97 # generate and write requirements.yml to the tar file
98 requirements_yml = generate_requirements_yml().encode('utf-8')
99 requirements_yml_tarinfo = tarfile.TarInfo(f"{instance_obj.hostname}_install_bundle/requirements.yml")
100 requirements_yml_tarinfo.size = len(requirements_yml)
101 tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))
102
103 # respond with the tarfile
104 f.seek(0)
105 response = HttpResponse(f.read(), status=status.HTTP_200_OK)
106 response['Content-Disposition'] = f"attachment; filename={instance_obj.hostname}_install_bundle.tar.gz"
107 return response
108
109
110 def generate_playbook():
111 return render_to_string("instance_install_bundle/install_receptor.yml")
112
113
114 def generate_requirements_yml():
115 return render_to_string("instance_install_bundle/requirements.yml")
116
117
118 def generate_inventory_yml(instance_obj):
119 return render_to_string("instance_install_bundle/inventory.yml", context=dict(instance=instance_obj))
120
121
122 def generate_group_vars_all_yml(instance_obj):
123 return render_to_string("instance_install_bundle/group_vars/all.yml", context=dict(instance=instance_obj))
124
125
126 def generate_receptor_tls(instance_obj):
127 # generate private key for the receptor
128 key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
129
130 # encode receptor hostname to asn1
131 hostname = instance_obj.hostname
132 encoder = asn1.Encoder()
133 encoder.start()
134 encoder.write(hostname.encode(), nr=asn1.Numbers.UTF8String)
135 hostname_asn1 = encoder.output()
136
137 san_params = [
138 DNSName(hostname),
139 OtherName(ObjectIdentifier(RECEPTOR_OID), hostname_asn1),
140 ]
141
142 try:
143 san_params.append(IPAddress(ipaddress.IPv4Address(hostname)))
144 except ipaddress.AddressValueError:
145 pass
146
147 # generate certificate for the receptor
148 csr = (
149 x509.CertificateSigningRequestBuilder()
150 .subject_name(
151 x509.Name(
152 [
153 x509.NameAttribute(NameOID.COMMON_NAME, hostname),
154 ]
155 )
156 )
157 .add_extension(
158 x509.SubjectAlternativeName(san_params),
159 critical=False,
160 )
161 .sign(key, hashes.SHA256())
162 )
163
164 # sign csr with the receptor ca key from /etc/receptor/ca/receptor-ca.key
165 with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:
166 ca_key = serialization.load_pem_private_key(
167 f.read(),
168 password=None,
169 )
170
171 with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:
172 ca_cert = x509.load_pem_x509_certificate(f.read())
173
174 cert = (
175 x509.CertificateBuilder()
176 .subject_name(csr.subject)
177 .issuer_name(ca_cert.issuer)
178 .public_key(csr.public_key())
179 .serial_number(x509.random_serial_number())
180 .not_valid_before(datetime.datetime.utcnow())
181 .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10))
182 .add_extension(
183 csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,
184 critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,
185 )
186 .sign(ca_key, hashes.SHA256())
187 )
188
189 key = key.private_bytes(
190 encoding=serialization.Encoding.PEM,
191 format=serialization.PrivateFormat.TraditionalOpenSSL,
192 encryption_algorithm=serialization.NoEncryption(),
193 )
194
195 cert = cert.public_bytes(
196 encoding=serialization.Encoding.PEM,
197 )
198
199 return key, cert
200
[end of awx/api/views/instance_install_bundle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awx/api/views/instance_install_bundle.py b/awx/api/views/instance_install_bundle.py
--- a/awx/api/views/instance_install_bundle.py
+++ b/awx/api/views/instance_install_bundle.py
@@ -178,7 +178,7 @@
.public_key(csr.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
- .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10))
+ .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
.add_extension(
csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,
critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,
| {"golden_diff": "diff --git a/awx/api/views/instance_install_bundle.py b/awx/api/views/instance_install_bundle.py\n--- a/awx/api/views/instance_install_bundle.py\n+++ b/awx/api/views/instance_install_bundle.py\n@@ -178,7 +178,7 @@\n .public_key(csr.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(datetime.datetime.utcnow())\n- .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10))\n+ .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))\n .add_extension(\n csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,\n critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,\n", "issue": "Genrated certificate in install bundle for Execution node is valid only for 10 days\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n\r\n### Bug Summary\r\n\r\nThe certificate for receptor in install bundle (`*.tar.gz`) for execution node that generated by AWX is valid only for 10 days.\r\n\r\nIn my understanding currently there is no automated certificate renewal feature in both AWX and Receptor, so I guess the execution environment will be invalidated after 10 days. \r\n\r\nTherefore frequently renewal (generating bundle and invoke playbook again) by hand might be required.\r\n\r\n```bash\r\n$ ls -l\r\ntotal 24\r\n-rw-rw-r--. 1 kuro kuro 11278 Oct 7 05:12 ansible.log\r\ndrwxrwxr-x. 2 kuro kuro 21 Oct 7 05:09 group_vars\r\n-rw-r--r--. 1 kuro kuro 406 Jan 1 1970 install_receptor.yml\r\n-rw-r--r--. 1 kuro kuro 159 Oct 7 05:10 inventory.yml\r\ndrwxrwxr-x. 3 kuro kuro 44 Oct 7 05:09 receptor\r\n-rw-r--r--. 1 kuro kuro 137 Jan 1 1970 requirements.yml\r\n\r\n$ openssl x509 -text -in receptor/tls/receptor.crt -noout | grep Not\r\n Not Before: Oct 6 20:09:21 2022 GMT\r\n Not After : Oct 16 20:09:21 2022 GMT \ud83d\udc48\ud83d\udc48\ud83d\udc48\r\n```\r\n\r\nInstaller (playbook) just place certificate on execution node without any modification, so deployed execution node uses certificate that included in the install bandle as is. \r\n\r\nI have no idea if this is a _bug_ or _designed behavior_, but just in case I'm creating this issue.\r\n\r\n### AWX version\r\n\r\n21.7.0\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [X] API\r\n- [ ] Docs\r\n- [ ] Collection\r\n- [ ] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nkubernetes\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\nN/A\r\n\r\n### Operating system\r\n\r\nCentOS 8 Stream\r\n\r\n### Web browser\r\n\r\nChrome\r\n\r\n### Steps to reproduce\r\n\r\n1. Deploy AWX 21.7.0 on any platform\r\n2. Add new instance and generate install bundle for the instance\r\n3. Extract install bundle and ensure `Not After` in the `receptor.crt` by `openssl x509 -text -in receptor/tls/receptor.crt -noout`\r\n\r\n### Expected results\r\n\r\nCertificate and execution environment are valid for enough life time.\r\n\r\n### Actual results\r\n\r\nNot tested enough, I guess my execution environment will be expired in 10 days if I won't make any manual renewal for the certs.\r\n\r\n### Additional information\r\n\r\n@TheRealHaoLiu @jbradberry \r\nThanks for excellent feature; execution node! 
It works as expected in my environment in this few days :)\r\nSorry for annoying you by this mention, but I want to confirm that this is designed or not.\r\n\r\nI found that there is hard-coded `days=10` in your commit.\r\nhttps://github.com/ansible/awx/blob/150c55c72a4d1d896474c9d3aaaceeb1b69ee253/awx/api/views/instance_install_bundle.py#L181\r\n\r\nAccording to your comment on `awx-operator` repository, it seems that you've designed it as _10 years_ instead of _10 days_ I guess...?; https://github.com/ansible/awx-operator/pull/1012#issuecomment-1208423340\r\n\r\nPlease let me know it's not recommended that trying to use generated certs in the long term, or if you have an easier way to renew certs.\n", "before_files": [{"content": "# Copyright (c) 2018 Red Hat, Inc.\n# All Rights Reserved.\n\nimport datetime\nimport io\nimport ipaddress\nimport os\nimport tarfile\n\nimport asn1\nfrom awx.api import serializers\nfrom awx.api.generics import GenericAPIView, Response\nfrom awx.api.permissions import IsSystemAdminOrAuditor\nfrom awx.main import models\nfrom cryptography import x509\nfrom cryptography.hazmat.primitives import hashes, serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.x509 import DNSName, IPAddress, ObjectIdentifier, OtherName\nfrom cryptography.x509.oid import NameOID\nfrom django.http import HttpResponse\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework import status\n\n# Red Hat has an OID namespace (RHANANA). Receptor has its own designation under that.\nRECEPTOR_OID = \"1.3.6.1.4.1.2312.19.1\"\n\n# generate install bundle for the instance\n# install bundle directory structure\n# \u251c\u2500\u2500 install_receptor.yml (playbook)\n# \u251c\u2500\u2500 inventory.yml\n# \u251c\u2500\u2500 group_vars\n# \u2502 \u2514\u2500\u2500 all.yml\n# \u251c\u2500\u2500 receptor\n# \u2502 \u251c\u2500\u2500 tls\n# \u2502 \u2502 \u251c\u2500\u2500 ca\n# \u2502 \u2502 \u2502 \u2514\u2500\u2500 receptor-ca.crt\n# \u2502 \u2502 \u251c\u2500\u2500 receptor.crt\n# \u2502 \u2502 \u2514\u2500\u2500 receptor.key\n# \u2502 \u2514\u2500\u2500 work-public-key.pem\n# \u2514\u2500\u2500 requirements.yml\nclass InstanceInstallBundle(GenericAPIView):\n\n name = _('Install Bundle')\n model = models.Instance\n serializer_class = serializers.InstanceSerializer\n permission_classes = (IsSystemAdminOrAuditor,)\n\n def get(self, request, *args, **kwargs):\n instance_obj = self.get_object()\n\n if instance_obj.node_type not in ('execution',):\n return Response(\n data=dict(msg=_('Install bundle can only be generated for execution nodes.')),\n status=status.HTTP_400_BAD_REQUEST,\n )\n\n with io.BytesIO() as f:\n with tarfile.open(fileobj=f, mode='w:gz') as tar:\n # copy /etc/receptor/tls/ca/receptor-ca.crt to receptor/tls/ca in the tar file\n tar.add(\n os.path.realpath('/etc/receptor/tls/ca/receptor-ca.crt'), arcname=f\"{instance_obj.hostname}_install_bundle/receptor/tls/ca/receptor-ca.crt\"\n )\n\n # copy /etc/receptor/signing/work-public-key.pem to receptor/work-public-key.pem\n tar.add('/etc/receptor/signing/work-public-key.pem', arcname=f\"{instance_obj.hostname}_install_bundle/receptor/work-public-key.pem\")\n\n # generate and write the receptor key to receptor/tls/receptor.key in the tar file\n key, cert = generate_receptor_tls(instance_obj)\n\n key_tarinfo = tarfile.TarInfo(f\"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.key\")\n key_tarinfo.size = len(key)\n 
tar.addfile(key_tarinfo, io.BytesIO(key))\n\n cert_tarinfo = tarfile.TarInfo(f\"{instance_obj.hostname}_install_bundle/receptor/tls/receptor.crt\")\n cert_tarinfo.size = len(cert)\n tar.addfile(cert_tarinfo, io.BytesIO(cert))\n\n # generate and write install_receptor.yml to the tar file\n playbook = generate_playbook().encode('utf-8')\n playbook_tarinfo = tarfile.TarInfo(f\"{instance_obj.hostname}_install_bundle/install_receptor.yml\")\n playbook_tarinfo.size = len(playbook)\n tar.addfile(playbook_tarinfo, io.BytesIO(playbook))\n\n # generate and write inventory.yml to the tar file\n inventory_yml = generate_inventory_yml(instance_obj).encode('utf-8')\n inventory_yml_tarinfo = tarfile.TarInfo(f\"{instance_obj.hostname}_install_bundle/inventory.yml\")\n inventory_yml_tarinfo.size = len(inventory_yml)\n tar.addfile(inventory_yml_tarinfo, io.BytesIO(inventory_yml))\n\n # generate and write group_vars/all.yml to the tar file\n group_vars = generate_group_vars_all_yml(instance_obj).encode('utf-8')\n group_vars_tarinfo = tarfile.TarInfo(f\"{instance_obj.hostname}_install_bundle/group_vars/all.yml\")\n group_vars_tarinfo.size = len(group_vars)\n tar.addfile(group_vars_tarinfo, io.BytesIO(group_vars))\n\n # generate and write requirements.yml to the tar file\n requirements_yml = generate_requirements_yml().encode('utf-8')\n requirements_yml_tarinfo = tarfile.TarInfo(f\"{instance_obj.hostname}_install_bundle/requirements.yml\")\n requirements_yml_tarinfo.size = len(requirements_yml)\n tar.addfile(requirements_yml_tarinfo, io.BytesIO(requirements_yml))\n\n # respond with the tarfile\n f.seek(0)\n response = HttpResponse(f.read(), status=status.HTTP_200_OK)\n response['Content-Disposition'] = f\"attachment; filename={instance_obj.hostname}_install_bundle.tar.gz\"\n return response\n\n\ndef generate_playbook():\n return render_to_string(\"instance_install_bundle/install_receptor.yml\")\n\n\ndef generate_requirements_yml():\n return render_to_string(\"instance_install_bundle/requirements.yml\")\n\n\ndef generate_inventory_yml(instance_obj):\n return render_to_string(\"instance_install_bundle/inventory.yml\", context=dict(instance=instance_obj))\n\n\ndef generate_group_vars_all_yml(instance_obj):\n return render_to_string(\"instance_install_bundle/group_vars/all.yml\", context=dict(instance=instance_obj))\n\n\ndef generate_receptor_tls(instance_obj):\n # generate private key for the receptor\n key = rsa.generate_private_key(public_exponent=65537, key_size=2048)\n\n # encode receptor hostname to asn1\n hostname = instance_obj.hostname\n encoder = asn1.Encoder()\n encoder.start()\n encoder.write(hostname.encode(), nr=asn1.Numbers.UTF8String)\n hostname_asn1 = encoder.output()\n\n san_params = [\n DNSName(hostname),\n OtherName(ObjectIdentifier(RECEPTOR_OID), hostname_asn1),\n ]\n\n try:\n san_params.append(IPAddress(ipaddress.IPv4Address(hostname)))\n except ipaddress.AddressValueError:\n pass\n\n # generate certificate for the receptor\n csr = (\n x509.CertificateSigningRequestBuilder()\n .subject_name(\n x509.Name(\n [\n x509.NameAttribute(NameOID.COMMON_NAME, hostname),\n ]\n )\n )\n .add_extension(\n x509.SubjectAlternativeName(san_params),\n critical=False,\n )\n .sign(key, hashes.SHA256())\n )\n\n # sign csr with the receptor ca key from /etc/receptor/ca/receptor-ca.key\n with open('/etc/receptor/tls/ca/receptor-ca.key', 'rb') as f:\n ca_key = serialization.load_pem_private_key(\n f.read(),\n password=None,\n )\n\n with open('/etc/receptor/tls/ca/receptor-ca.crt', 'rb') as f:\n ca_cert = 
x509.load_pem_x509_certificate(f.read())\n\n cert = (\n x509.CertificateBuilder()\n .subject_name(csr.subject)\n .issuer_name(ca_cert.issuer)\n .public_key(csr.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(datetime.datetime.utcnow())\n .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=10))\n .add_extension(\n csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).value,\n critical=csr.extensions.get_extension_for_class(x509.SubjectAlternativeName).critical,\n )\n .sign(ca_key, hashes.SHA256())\n )\n\n key = key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption(),\n )\n\n cert = cert.public_bytes(\n encoding=serialization.Encoding.PEM,\n )\n\n return key, cert\n", "path": "awx/api/views/instance_install_bundle.py"}]} | 3,783 | 175 |
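To double-check the certificate lifetime discussed in the issue above, the validity window can also be read with the same `cryptography` package the view imports; the file path below is an assumption about where the install bundle was extracted:

```python
from cryptography import x509

with open("receptor/tls/receptor.crt", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

print("Not before:", cert.not_valid_before)
print("Not after: ", cert.not_valid_after)
print("Lifetime:  ", cert.not_valid_after - cert.not_valid_before)  # ~10 days before the fix, ~10 years after
```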
gh_patches_debug_758 | rasdani/github-patches | git_diff | vllm-project__vllm-2337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v0.2.7] Release Tracker
**ETA**: Jan 3rd - 4th
## Major changes
TBD
## PRs to be merged before the release
- [x] #2221
- [ ] ~~#2293~~ (deferred)
</issue>
<code>
[start of vllm/__init__.py]
1 """vLLM: a high-throughput and memory-efficient inference engine for LLMs"""
2
3 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
4 from vllm.engine.async_llm_engine import AsyncLLMEngine
5 from vllm.engine.llm_engine import LLMEngine
6 from vllm.engine.ray_utils import initialize_cluster
7 from vllm.entrypoints.llm import LLM
8 from vllm.outputs import CompletionOutput, RequestOutput
9 from vllm.sampling_params import SamplingParams
10
11 __version__ = "0.2.6"
12
13 __all__ = [
14 "LLM",
15 "SamplingParams",
16 "RequestOutput",
17 "CompletionOutput",
18 "LLMEngine",
19 "EngineArgs",
20 "AsyncLLMEngine",
21 "AsyncEngineArgs",
22 "initialize_cluster",
23 ]
24
[end of vllm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vllm/__init__.py b/vllm/__init__.py
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
-__version__ = "0.2.6"
+__version__ = "0.2.7"
__all__ = [
"LLM",
| {"golden_diff": "diff --git a/vllm/__init__.py b/vllm/__init__.py\n--- a/vllm/__init__.py\n+++ b/vllm/__init__.py\n@@ -8,7 +8,7 @@\n from vllm.outputs import CompletionOutput, RequestOutput\n from vllm.sampling_params import SamplingParams\n \n-__version__ = \"0.2.6\"\n+__version__ = \"0.2.7\"\n \n __all__ = [\n \"LLM\",\n", "issue": "[v0.2.7] Release Tracker\n**ETA**: Jan 3rd - 4th\r\n\r\n## Major changes\r\n\r\nTBD\r\n\r\n## PRs to be merged before the release\r\n\r\n- [x] #2221 \r\n- [ ] ~~#2293~~ (deferred)\n", "before_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.2.6\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}]} | 819 | 108 |
gh_patches_debug_12904 | rasdani/github-patches | git_diff | netket__netket-611 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Floating Point Error in Lattice.py
I tried generating a periodic Kagome lattice using lattice.py:
>>> kagome = nk.graph.Lattice(basis_vectors=[[2.,0.],[1.,np.sqrt(3)]],extent=[2,2],atoms_coord=[[0.,0.],[1./2.,np.sqrt(3)/2.],[1.,0.]])
Only half of the edges appeared:
>>>kagome.n_edges()
12
The bug is related to floating points, some distances are registered as 0.99999... and some are registered as 1.0
</issue>
<code>
[start of netket/graph/lattice.py]
1 # Copyright 2020, 2021 The NetKet Authors - All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from .graph import NetworkX
16 from scipy.spatial import cKDTree
17 from scipy.sparse import find, triu
18 import numpy as _np
19 import itertools
20 import networkx as _nx
21 import warnings
22
23
24 def get_edges(atoms_positions, cutoff):
25 kdtree = cKDTree(atoms_positions)
26 dist_matrix = kdtree.sparse_distance_matrix(kdtree, cutoff)
27 id1, id2, values = find(triu(dist_matrix))
28 pairs = []
29 min_dists = {} # keys are nodes, values are min dists
30 for node in _np.unique(_np.concatenate((id1, id2))):
31 min_dist = _np.min(values[(id1 == node) | (id2 == node)])
32 min_dists[node] = min_dist
33 for node in _np.unique(id1):
34 min_dist = _np.min(values[id1 == node])
35 mask = (id1 == node) & (values == min_dist)
36 first = id1[mask]
37 second = id2[mask]
38 for pair in zip(first, second):
39 if min_dist == min_dists[pair[0]] and min_dist == min_dists[pair[1]]:
40 pairs.append(pair)
41 return pairs
42
43
44 def create_points(basis_vectors, extent, atom_coords, pbc):
45 shell_vec = _np.zeros(extent.size, dtype=int)
46 shift_vec = _np.zeros(extent.size, dtype=int)
47 # note: by modifying these, the number of shells can be tuned.
48 shell_vec[pbc] = 2
49 shift_vec[pbc] = 1
50 ranges = tuple([list(range(ex)) for ex in extent + shell_vec])
51 atoms = []
52 cellANDlabel_to_site = {}
53 for s_cell in itertools.product(*ranges):
54 s_coord_cell = _np.asarray(s_cell) - shift_vec
55 if _np.any(s_coord_cell < 0) or _np.any(s_coord_cell > (extent - 1)):
56 inside = False
57 else:
58 inside = True
59 atom_count = len(atoms)
60 for i, atom_coord in enumerate(atom_coords):
61 s_coord_atom = s_coord_cell + atom_coord
62 r_coord_atom = _np.matmul(basis_vectors.T, s_coord_atom)
63 atoms.append(
64 {
65 "Label": i,
66 "cell": s_coord_cell,
67 "r_coord": r_coord_atom,
68 "inside": inside,
69 }
70 )
71 if tuple(s_coord_cell) not in cellANDlabel_to_site.keys():
72 cellANDlabel_to_site[tuple(s_coord_cell)] = {}
73 cellANDlabel_to_site[tuple(s_coord_cell)][i] = atom_count + i
74 return atoms, cellANDlabel_to_site
75
76
77 def get_true_edges(basis_vectors, atoms, cellANDlabel_to_site, extent):
78 atoms_positions = dicts_to_array(atoms, "r_coord")
79 naive_edges = get_edges(
80 atoms_positions, _np.linalg.norm(basis_vectors, axis=1).max()
81 )
82 true_edges = []
83 for node1, node2 in naive_edges:
84 atom1 = atoms[node1]
85 atom2 = atoms[node2]
86 if atom1["inside"] and atom2["inside"]:
87 true_edges.append((node1, node2))
88 elif atom1["inside"] or atom2["inside"]:
89 cell1 = atom1["cell"] % extent
90 cell2 = atom2["cell"] % extent
91 node1 = cellANDlabel_to_site[tuple(cell1)][atom1["Label"]]
92 node2 = cellANDlabel_to_site[tuple(cell2)][atom2["Label"]]
93 edge = (node1, node2)
94 if edge not in true_edges and (node2, node1) not in true_edges:
95 true_edges.append(edge)
96 return true_edges
97
98
99 def dicts_to_array(dicts, key):
100 result = []
101 for d in dicts:
102 result.append(d[key])
103 return _np.asarray(result)
104
105
106 class Lattice(NetworkX):
107 """A lattice built translating a unit cell and adding edges between nearest neighbours sites.
108
109 The unit cell is defined by the ``basis_vectors`` and it can contain an arbitrary number of atoms.
110 Each atom is located at an arbitrary position and is labelled by an integer number,
111 meant to distinguish between the different atoms within the unit cell.
112 Periodic boundary conditions can also be imposed along the desired directions.
113 There are three different ways to refer to the lattice sites. A site can be labelled
114 by a simple integer number (the site index) or by its coordinates (actual position in space).
115 """
116
117 def __init__(self, basis_vectors, extent, *, pbc: bool = True, atoms_coord=[]):
118 """
119 Constructs a new ``Lattice`` given its side length and the features of the unit cell.
120
121 Args:
122 basis_vectors: The basis vectors of the unit cell.
123 extent: The number of copies of the unit cell.
124 pbc: If ``True`` then the constructed lattice
125 will have periodic boundary conditions, otherwise
126 open boundary conditions are imposed (default=``True``).
127 atoms_coord: The coordinates of different atoms in the unit cell (default=one atom at the origin).
128
129 Examples:
130 Constructs a rectangular 3X4 lattice with periodic boundary conditions.
131
132 >>> import netket
133 >>> g=netket.graph.Lattice(basis_vectors=[[1,0],[0,1]],extent=[3,4])
134 >>> print(g.n_nodes)
135 12
136
137 """
138
139 self._basis_vectors = _np.asarray(basis_vectors)
140 if self._basis_vectors.ndim != 2:
141 raise ValueError("Every vector must have the same dimension.")
142 if self._basis_vectors.shape[0] != self._basis_vectors.shape[1]:
143 raise ValueError(
144 "basis_vectors must be a basis for the N-dimensional vector space you chose"
145 )
146
147 if not atoms_coord:
148 atoms_coord = [_np.zeros(self._basis_vectors.shape[0])]
149 atoms_coord = _np.asarray(atoms_coord)
150 atoms_coord_fractional = _np.asarray(
151 [
152 _np.matmul(_np.linalg.inv(self._basis_vectors.T), atom_coord)
153 for atom_coord in atoms_coord
154 ]
155 )
156 if atoms_coord_fractional.min() < 0 or atoms_coord_fractional.max() >= 1:
157 # Maybe there is another way to state this. I want to avoid that there exists the possibility that two atoms from different cells are at the same position:
158 raise ValueError(
159 "atoms must reside inside their corresponding unit cell, which includes only the 0-faces in fractional coordinates."
160 )
161 uniques = _np.unique(atoms_coord, axis=0)
162 if len(atoms_coord) != uniques.shape[0]:
163 atoms_coord = _np.asarray(uniques)
164 warnings.warn(
165 f"Some atom positions are not unique. Duplicates were dropped, and now atom positions are {atoms_coord}",
166 UserWarning,
167 )
168
169 self._atoms_coord = atoms_coord
170
171 if isinstance(pbc, bool):
172 self._pbc = [pbc] * self._basis_vectors.shape[1]
173 elif (
174 not isinstance(pbc, list)
175 or len(pbc) != self._basis_vectors.shape[1]
176 or sum([1 for pbci in pbc if isinstance(pbci, bool)])
177 != self._basis_vectors.shape[1]
178 ):
179 raise ValueError(
180 "pbc must be either a boolean or a list of booleans with the same dimension as the vector space you chose."
181 )
182 else:
183 self._pbc = pbc
184
185 extent = _np.asarray(extent)
186 self.extent = extent
187
188 atoms, cellANDlabel_to_site = create_points(
189 self._basis_vectors, extent, atoms_coord_fractional, pbc
190 )
191 edges = get_true_edges(self._basis_vectors, atoms, cellANDlabel_to_site, extent)
192 graph = _nx.MultiGraph(edges)
193
194 # Rename atoms
195 old_nodes = sorted(set([node for edge in edges for node in edge]))
196 self._atoms = [atoms[old_node] for old_node in old_nodes]
197 self._coord_to_site = {
198 tuple(atom["r_coord"]): new_site
199 for new_site, atom in enumerate(self._atoms)
200 }
201 new_nodes = {old_node: new_node for new_node, old_node in enumerate(old_nodes)}
202 graph = _nx.relabel_nodes(graph, new_nodes)
203
204 # Order node names
205 nodes = sorted(graph.nodes())
206 edges = list(graph.edges())
207 graph = _nx.MultiGraph()
208 graph.add_nodes_from(nodes)
209 graph.add_edges_from(edges)
210
211 super().__init__(graph)
212
213 @property
214 def basis_vectors(self):
215 return self._basis_vectors
216
217 @property
218 def atoms_coord(self):
219 """
220 Coordinates of atoms in the unit cell.
221 """
222 return self._atoms_coord
223
224 def atom_label(self, site):
225 return self._atoms[site]["Label"]
226
227 def site_to_coord(self, site):
228 return self._atoms[site]["r_coord"]
229
230 def coord_to_site(self, coord):
231 return self._coord_to_site[tuple(coord)]
232
233 def site_to_vector(self, site):
234 return self._atoms[site]["cell"]
235
236 def vector_to_coord(self, vector):
237 return _np.matmul(self._basis_vectors, vector)
238
239 def __repr__(self):
240 return "Lattice(n_nodes={})\n extent={}\n basis_vectors={}".format(
241 self.n_nodes, self.extent.tolist(), self.basis_vectors.tolist()
242 )
243
[end of netket/graph/lattice.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netket/graph/lattice.py b/netket/graph/lattice.py
--- a/netket/graph/lattice.py
+++ b/netket/graph/lattice.py
@@ -32,11 +32,13 @@
min_dists[node] = min_dist
for node in _np.unique(id1):
min_dist = _np.min(values[id1 == node])
- mask = (id1 == node) & (values == min_dist)
+ mask = (id1 == node) & (_np.isclose(values, min_dist))
first = id1[mask]
second = id2[mask]
for pair in zip(first, second):
- if min_dist == min_dists[pair[0]] and min_dist == min_dists[pair[1]]:
+ if _np.isclose(min_dist, min_dists[pair[0]]) and _np.isclose(
+ min_dist, min_dists[pair[1]]
+ ):
pairs.append(pair)
return pairs
| {"golden_diff": "diff --git a/netket/graph/lattice.py b/netket/graph/lattice.py\n--- a/netket/graph/lattice.py\n+++ b/netket/graph/lattice.py\n@@ -32,11 +32,13 @@\n min_dists[node] = min_dist\n for node in _np.unique(id1):\n min_dist = _np.min(values[id1 == node])\n- mask = (id1 == node) & (values == min_dist)\n+ mask = (id1 == node) & (_np.isclose(values, min_dist))\n first = id1[mask]\n second = id2[mask]\n for pair in zip(first, second):\n- if min_dist == min_dists[pair[0]] and min_dist == min_dists[pair[1]]:\n+ if _np.isclose(min_dist, min_dists[pair[0]]) and _np.isclose(\n+ min_dist, min_dists[pair[1]]\n+ ):\n pairs.append(pair)\n return pairs\n", "issue": "Floating Point Error in Lattice.py\nI tried generating a periodic Kagome lattice using lattice.py:\r\n\r\n>>> kagome = nk.graph.Lattice(basis_vectors=[[2.,0.],[1.,np.sqrt(3)]],extent=[2,2],atoms_coord=[[0.,0.],[1./2.,np.sqrt(3)/2.],[1.,0.]])\r\n\r\nOnly half of the edges appeared:\r\n\r\n>>>kagome.n_edges()\r\n12\r\n\r\nThe bug is related to floating points, some distances are registered as 0.99999... and some are registered as 1.0\r\n\n", "before_files": [{"content": "# Copyright 2020, 2021 The NetKet Authors - All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .graph import NetworkX\nfrom scipy.spatial import cKDTree\nfrom scipy.sparse import find, triu\nimport numpy as _np\nimport itertools\nimport networkx as _nx\nimport warnings\n\n\ndef get_edges(atoms_positions, cutoff):\n kdtree = cKDTree(atoms_positions)\n dist_matrix = kdtree.sparse_distance_matrix(kdtree, cutoff)\n id1, id2, values = find(triu(dist_matrix))\n pairs = []\n min_dists = {} # keys are nodes, values are min dists\n for node in _np.unique(_np.concatenate((id1, id2))):\n min_dist = _np.min(values[(id1 == node) | (id2 == node)])\n min_dists[node] = min_dist\n for node in _np.unique(id1):\n min_dist = _np.min(values[id1 == node])\n mask = (id1 == node) & (values == min_dist)\n first = id1[mask]\n second = id2[mask]\n for pair in zip(first, second):\n if min_dist == min_dists[pair[0]] and min_dist == min_dists[pair[1]]:\n pairs.append(pair)\n return pairs\n\n\ndef create_points(basis_vectors, extent, atom_coords, pbc):\n shell_vec = _np.zeros(extent.size, dtype=int)\n shift_vec = _np.zeros(extent.size, dtype=int)\n # note: by modifying these, the number of shells can be tuned.\n shell_vec[pbc] = 2\n shift_vec[pbc] = 1\n ranges = tuple([list(range(ex)) for ex in extent + shell_vec])\n atoms = []\n cellANDlabel_to_site = {}\n for s_cell in itertools.product(*ranges):\n s_coord_cell = _np.asarray(s_cell) - shift_vec\n if _np.any(s_coord_cell < 0) or _np.any(s_coord_cell > (extent - 1)):\n inside = False\n else:\n inside = True\n atom_count = len(atoms)\n for i, atom_coord in enumerate(atom_coords):\n s_coord_atom = s_coord_cell + atom_coord\n r_coord_atom = _np.matmul(basis_vectors.T, s_coord_atom)\n atoms.append(\n {\n \"Label\": i,\n \"cell\": s_coord_cell,\n \"r_coord\": r_coord_atom,\n \"inside\": inside,\n }\n )\n if 
tuple(s_coord_cell) not in cellANDlabel_to_site.keys():\n cellANDlabel_to_site[tuple(s_coord_cell)] = {}\n cellANDlabel_to_site[tuple(s_coord_cell)][i] = atom_count + i\n return atoms, cellANDlabel_to_site\n\n\ndef get_true_edges(basis_vectors, atoms, cellANDlabel_to_site, extent):\n atoms_positions = dicts_to_array(atoms, \"r_coord\")\n naive_edges = get_edges(\n atoms_positions, _np.linalg.norm(basis_vectors, axis=1).max()\n )\n true_edges = []\n for node1, node2 in naive_edges:\n atom1 = atoms[node1]\n atom2 = atoms[node2]\n if atom1[\"inside\"] and atom2[\"inside\"]:\n true_edges.append((node1, node2))\n elif atom1[\"inside\"] or atom2[\"inside\"]:\n cell1 = atom1[\"cell\"] % extent\n cell2 = atom2[\"cell\"] % extent\n node1 = cellANDlabel_to_site[tuple(cell1)][atom1[\"Label\"]]\n node2 = cellANDlabel_to_site[tuple(cell2)][atom2[\"Label\"]]\n edge = (node1, node2)\n if edge not in true_edges and (node2, node1) not in true_edges:\n true_edges.append(edge)\n return true_edges\n\n\ndef dicts_to_array(dicts, key):\n result = []\n for d in dicts:\n result.append(d[key])\n return _np.asarray(result)\n\n\nclass Lattice(NetworkX):\n \"\"\"A lattice built translating a unit cell and adding edges between nearest neighbours sites.\n\n The unit cell is defined by the ``basis_vectors`` and it can contain an arbitrary number of atoms.\n Each atom is located at an arbitrary position and is labelled by an integer number,\n meant to distinguish between the different atoms within the unit cell.\n Periodic boundary conditions can also be imposed along the desired directions.\n There are three different ways to refer to the lattice sites. A site can be labelled\n by a simple integer number (the site index) or by its coordinates (actual position in space).\n \"\"\"\n\n def __init__(self, basis_vectors, extent, *, pbc: bool = True, atoms_coord=[]):\n \"\"\"\n Constructs a new ``Lattice`` given its side length and the features of the unit cell.\n\n Args:\n basis_vectors: The basis vectors of the unit cell.\n extent: The number of copies of the unit cell.\n pbc: If ``True`` then the constructed lattice\n will have periodic boundary conditions, otherwise\n open boundary conditions are imposed (default=``True``).\n atoms_coord: The coordinates of different atoms in the unit cell (default=one atom at the origin).\n\n Examples:\n Constructs a rectangular 3X4 lattice with periodic boundary conditions.\n\n >>> import netket\n >>> g=netket.graph.Lattice(basis_vectors=[[1,0],[0,1]],extent=[3,4])\n >>> print(g.n_nodes)\n 12\n\n \"\"\"\n\n self._basis_vectors = _np.asarray(basis_vectors)\n if self._basis_vectors.ndim != 2:\n raise ValueError(\"Every vector must have the same dimension.\")\n if self._basis_vectors.shape[0] != self._basis_vectors.shape[1]:\n raise ValueError(\n \"basis_vectors must be a basis for the N-dimensional vector space you chose\"\n )\n\n if not atoms_coord:\n atoms_coord = [_np.zeros(self._basis_vectors.shape[0])]\n atoms_coord = _np.asarray(atoms_coord)\n atoms_coord_fractional = _np.asarray(\n [\n _np.matmul(_np.linalg.inv(self._basis_vectors.T), atom_coord)\n for atom_coord in atoms_coord\n ]\n )\n if atoms_coord_fractional.min() < 0 or atoms_coord_fractional.max() >= 1:\n # Maybe there is another way to state this. 
I want to avoid that there exists the possibility that two atoms from different cells are at the same position:\n raise ValueError(\n \"atoms must reside inside their corresponding unit cell, which includes only the 0-faces in fractional coordinates.\"\n )\n uniques = _np.unique(atoms_coord, axis=0)\n if len(atoms_coord) != uniques.shape[0]:\n atoms_coord = _np.asarray(uniques)\n warnings.warn(\n f\"Some atom positions are not unique. Duplicates were dropped, and now atom positions are {atoms_coord}\",\n UserWarning,\n )\n\n self._atoms_coord = atoms_coord\n\n if isinstance(pbc, bool):\n self._pbc = [pbc] * self._basis_vectors.shape[1]\n elif (\n not isinstance(pbc, list)\n or len(pbc) != self._basis_vectors.shape[1]\n or sum([1 for pbci in pbc if isinstance(pbci, bool)])\n != self._basis_vectors.shape[1]\n ):\n raise ValueError(\n \"pbc must be either a boolean or a list of booleans with the same dimension as the vector space you chose.\"\n )\n else:\n self._pbc = pbc\n\n extent = _np.asarray(extent)\n self.extent = extent\n\n atoms, cellANDlabel_to_site = create_points(\n self._basis_vectors, extent, atoms_coord_fractional, pbc\n )\n edges = get_true_edges(self._basis_vectors, atoms, cellANDlabel_to_site, extent)\n graph = _nx.MultiGraph(edges)\n\n # Rename atoms\n old_nodes = sorted(set([node for edge in edges for node in edge]))\n self._atoms = [atoms[old_node] for old_node in old_nodes]\n self._coord_to_site = {\n tuple(atom[\"r_coord\"]): new_site\n for new_site, atom in enumerate(self._atoms)\n }\n new_nodes = {old_node: new_node for new_node, old_node in enumerate(old_nodes)}\n graph = _nx.relabel_nodes(graph, new_nodes)\n\n # Order node names\n nodes = sorted(graph.nodes())\n edges = list(graph.edges())\n graph = _nx.MultiGraph()\n graph.add_nodes_from(nodes)\n graph.add_edges_from(edges)\n\n super().__init__(graph)\n\n @property\n def basis_vectors(self):\n return self._basis_vectors\n\n @property\n def atoms_coord(self):\n \"\"\"\n Coordinates of atoms in the unit cell.\n \"\"\"\n return self._atoms_coord\n\n def atom_label(self, site):\n return self._atoms[site][\"Label\"]\n\n def site_to_coord(self, site):\n return self._atoms[site][\"r_coord\"]\n\n def coord_to_site(self, coord):\n return self._coord_to_site[tuple(coord)]\n\n def site_to_vector(self, site):\n return self._atoms[site][\"cell\"]\n\n def vector_to_coord(self, vector):\n return _np.matmul(self._basis_vectors, vector)\n\n def __repr__(self):\n return \"Lattice(n_nodes={})\\n extent={}\\n basis_vectors={}\".format(\n self.n_nodes, self.extent.tolist(), self.basis_vectors.tolist()\n )\n", "path": "netket/graph/lattice.py"}]} | 3,495 | 218 |
gh_patches_debug_107 | rasdani/github-patches | git_diff | getsentry__sentry-5098 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MYSQL_PWD not recognized as sensitive field
In Sentry 8.11.0, the data key `MYSQL_PWD` is not treated as sensitive and is transmitted in cleartext and shown in the UI, while things that look like mysql connection string are rendered as `mysql://readonly:[Filtered]@db1.example.com/`
MYSQL_PWD is the standard way of providing a password to mysql cli tools, and I'd argue any field that ends in _PWD is unsafe.
</issue>
<code>
[start of src/sentry/constants.py]
1 """
2 sentry.constants
3 ~~~~~~~~~~~~~~~~
4
5 These settings act as the default (base) settings for the Sentry-provided
6 web-server
7
8 :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
9 :license: BSD, see LICENSE for more details.
10 """
11 from __future__ import absolute_import, print_function
12
13 import logging
14 import os.path
15 import six
16
17 from collections import OrderedDict
18 from django.conf import settings
19 from django.utils.translation import ugettext_lazy as _
20 from operator import attrgetter
21
22
23 def get_all_languages():
24 results = []
25 for path in os.listdir(os.path.join(MODULE_ROOT, 'locale')):
26 if path.startswith('.'):
27 continue
28 if '_' in path:
29 pre, post = path.split('_', 1)
30 path = '{}-{}'.format(pre, post.lower())
31 results.append(path)
32 return results
33
34 MODULE_ROOT = os.path.dirname(__import__('sentry').__file__)
35 DATA_ROOT = os.path.join(MODULE_ROOT, 'data')
36
37 SORT_OPTIONS = OrderedDict((
38 ('priority', _('Priority')),
39 ('date', _('Last Seen')),
40 ('new', _('First Seen')),
41 ('freq', _('Frequency')),
42 ))
43
44 SEARCH_SORT_OPTIONS = OrderedDict((
45 ('score', _('Score')),
46 ('date', _('Last Seen')),
47 ('new', _('First Seen')),
48 ))
49
50 # XXX: Deprecated: use GroupStatus instead
51 STATUS_UNRESOLVED = 0
52 STATUS_RESOLVED = 1
53 STATUS_IGNORED = 2
54
55 STATUS_CHOICES = {
56 'resolved': STATUS_RESOLVED,
57 'unresolved': STATUS_UNRESOLVED,
58 'ignored': STATUS_IGNORED,
59
60 # TODO(dcramer): remove in 9.0
61 'muted': STATUS_IGNORED,
62 }
63
64 # Normalize counts to the 15 minute marker. This value MUST be less than 60. A
65 # value of 0 would store counts for every minute, and is the lowest level of
66 # accuracy provided.
67 MINUTE_NORMALIZATION = 15
68
69 MAX_TAG_KEY_LENGTH = 32
70 MAX_TAG_VALUE_LENGTH = 200
71 MAX_CULPRIT_LENGTH = 200
72 MAX_EMAIL_FIELD_LENGTH = 75
73
74 # Team slugs which may not be used. Generally these are top level URL patterns
75 # which we don't want to worry about conflicts on.
76 RESERVED_ORGANIZATION_SLUGS = frozenset((
77 'admin', 'manage', 'login', 'account', 'register', 'api',
78 'accept', 'organizations', 'teams', 'projects', 'help',
79 'docs', 'logout', '404', '500', '_static', 'out', 'debug',
80 'remote', 'get-cli', 'blog', 'welcome', 'features',
81 'customers', 'integrations', 'signup', 'pricing',
82 'subscribe', 'enterprise', 'about', 'jobs', 'thanks', 'guide',
83 'privacy', 'security', 'terms', 'from', 'sponsorship', 'for',
84 'at', 'platforms', 'branding', 'vs', 'answers', '_admin',
85 'support',
86 ))
87
88 LOG_LEVELS = {
89 logging.DEBUG: 'debug',
90 logging.INFO: 'info',
91 logging.WARNING: 'warning',
92 logging.ERROR: 'error',
93 logging.FATAL: 'fatal',
94 }
95 DEFAULT_LOG_LEVEL = 'error'
96 DEFAULT_LOGGER_NAME = ''
97 LOG_LEVELS_MAP = {v: k for k, v in six.iteritems(LOG_LEVELS)}
98
99
100 # Default alerting threshold values
101 DEFAULT_ALERT_PROJECT_THRESHOLD = (500, 25) # 500%, 25 events
102 DEFAULT_ALERT_GROUP_THRESHOLD = (1000, 25) # 1000%, 25 events
103
104 # Default paginator value
105 EVENTS_PER_PAGE = 15
106
107 # Default sort option for the group stream
108 DEFAULT_SORT_OPTION = 'date'
109
110 # Setup languages for only available locales
111 LANGUAGE_MAP = dict(settings.LANGUAGES)
112 LANGUAGES = [(k, LANGUAGE_MAP[k]) for k in get_all_languages() if k in LANGUAGE_MAP]
113
114 # TODO(dcramer): We eventually want to make this user-editable
115 TAG_LABELS = {
116 'exc_type': 'Exception Type',
117 'sentry:user': 'User',
118 'sentry:filename': 'File',
119 'sentry:function': 'Function',
120 'sentry:release': 'Release',
121 'os': 'OS',
122 'url': 'URL',
123 'server_name': 'Server',
124 }
125
126 # TODO(dcramer): once this is more flushed out we want this to be extendable
127 SENTRY_RULES = (
128 'sentry.rules.actions.notify_event.NotifyEventAction',
129 'sentry.rules.actions.notify_event_service.NotifyEventServiceAction',
130 'sentry.rules.conditions.every_event.EveryEventCondition',
131 'sentry.rules.conditions.first_seen_event.FirstSeenEventCondition',
132 'sentry.rules.conditions.regression_event.RegressionEventCondition',
133 'sentry.rules.conditions.tagged_event.TaggedEventCondition',
134 'sentry.rules.conditions.event_frequency.EventFrequencyCondition',
135 'sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyCondition',
136 'sentry.rules.conditions.event_attribute.EventAttributeCondition',
137 'sentry.rules.conditions.level.LevelCondition',
138 )
139
140 # methods as defined by http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html + PATCH
141 HTTP_METHODS = ('GET', 'POST', 'PUT', 'OPTIONS', 'HEAD', 'DELETE', 'TRACE', 'CONNECT', 'PATCH')
142
143 CLIENT_RESERVED_ATTRS = (
144 'project',
145 'errors',
146 'event_id',
147 'message',
148 'checksum',
149 'culprit',
150 'fingerprint',
151 'level',
152 'time_spent',
153 'logger',
154 'server_name',
155 'site',
156 'received',
157 'timestamp',
158 'extra',
159 'modules',
160 'tags',
161 'platform',
162 'release',
163 'environment',
164 )
165
166 DEFAULT_SCRUBBED_FIELDS = (
167 'password',
168 'secret',
169 'passwd',
170 'authorization',
171 'api_key',
172 'apikey',
173 'access_token',
174 'auth',
175 'credentials',
176 )
177
178 VALID_PLATFORMS = set([
179 'as3',
180 'c',
181 'cfml',
182 'cocoa',
183 'csharp',
184 'go',
185 'java',
186 'javascript',
187 'node',
188 'objc',
189 'other',
190 'perl',
191 'php',
192 'python',
193 'ruby',
194 'elixir',
195 'haskell',
196 'groovy',
197 ])
198
199 OK_PLUGIN_ENABLED = _("The {name} integration has been enabled.")
200
201 OK_PLUGIN_DISABLED = _("The {name} integration has been disabled.")
202
203 OK_PLUGIN_SAVED = _('Configuration for the {name} integration has been saved.')
204
205 WARN_SESSION_EXPIRED = 'Your session has expired.' # TODO: translate this
206
207 # Key to use when ordering a list of events manually
208 EVENT_ORDERING_KEY = attrgetter('datetime', 'id')
209
210 FILTER_MASK = '[Filtered]'
211
212 # Maximum length of a symbol
213 MAX_SYM = 256
214
215 # Known dsym mimetypes
216 KNOWN_DSYM_TYPES = {
217 'application/x-mach-binary': 'macho'
218 }
219
220 NATIVE_UNKNOWN_STRING = '<unknown>'
221
222
223 class ObjectStatus(object):
224 VISIBLE = 0
225 HIDDEN = 1
226 PENDING_DELETION = 2
227 DELETION_IN_PROGRESS = 3
228
229 @classmethod
230 def as_choices(cls):
231 return (
232 (cls.VISIBLE, 'visible'),
233 (cls.HIDDEN, 'hidden'),
234 (cls.PENDING_DELETION, 'pending_deletion'),
235 (cls.DELETION_IN_PROGRESS, 'deletion_in_progress'),
236 )
237
[end of src/sentry/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/sentry/constants.py b/src/sentry/constants.py
--- a/src/sentry/constants.py
+++ b/src/sentry/constants.py
@@ -173,6 +173,7 @@
'access_token',
'auth',
'credentials',
+ 'mysql_pwd',
)
VALID_PLATFORMS = set([
| {"golden_diff": "diff --git a/src/sentry/constants.py b/src/sentry/constants.py\n--- a/src/sentry/constants.py\n+++ b/src/sentry/constants.py\n@@ -173,6 +173,7 @@\n 'access_token',\n 'auth',\n 'credentials',\n+ 'mysql_pwd',\n )\n \n VALID_PLATFORMS = set([\n", "issue": "MYSQL_PWD not recognized as sensitive field\nIn Sentry 8.11.0, the data key `MYSQL_PWD` is not treated as sensitive and is transmitted in cleartext and shown in the UI, while things that look like mysql connection string are rendered as `mysql://readonly:[Filtered]@db1.example.com/`\r\n\r\nMYSQL_PWD is the standard way of providing a password to mysql cli tools, and I'd argue any field that ends in _PWD is unsafe.\n", "before_files": [{"content": "\"\"\"\nsentry.constants\n~~~~~~~~~~~~~~~~\n\nThese settings act as the default (base) settings for the Sentry-provided\nweb-server\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import, print_function\n\nimport logging\nimport os.path\nimport six\n\nfrom collections import OrderedDict\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom operator import attrgetter\n\n\ndef get_all_languages():\n results = []\n for path in os.listdir(os.path.join(MODULE_ROOT, 'locale')):\n if path.startswith('.'):\n continue\n if '_' in path:\n pre, post = path.split('_', 1)\n path = '{}-{}'.format(pre, post.lower())\n results.append(path)\n return results\n\nMODULE_ROOT = os.path.dirname(__import__('sentry').__file__)\nDATA_ROOT = os.path.join(MODULE_ROOT, 'data')\n\nSORT_OPTIONS = OrderedDict((\n ('priority', _('Priority')),\n ('date', _('Last Seen')),\n ('new', _('First Seen')),\n ('freq', _('Frequency')),\n))\n\nSEARCH_SORT_OPTIONS = OrderedDict((\n ('score', _('Score')),\n ('date', _('Last Seen')),\n ('new', _('First Seen')),\n))\n\n# XXX: Deprecated: use GroupStatus instead\nSTATUS_UNRESOLVED = 0\nSTATUS_RESOLVED = 1\nSTATUS_IGNORED = 2\n\nSTATUS_CHOICES = {\n 'resolved': STATUS_RESOLVED,\n 'unresolved': STATUS_UNRESOLVED,\n 'ignored': STATUS_IGNORED,\n\n # TODO(dcramer): remove in 9.0\n 'muted': STATUS_IGNORED,\n}\n\n# Normalize counts to the 15 minute marker. This value MUST be less than 60. A\n# value of 0 would store counts for every minute, and is the lowest level of\n# accuracy provided.\nMINUTE_NORMALIZATION = 15\n\nMAX_TAG_KEY_LENGTH = 32\nMAX_TAG_VALUE_LENGTH = 200\nMAX_CULPRIT_LENGTH = 200\nMAX_EMAIL_FIELD_LENGTH = 75\n\n# Team slugs which may not be used. 
Generally these are top level URL patterns\n# which we don't want to worry about conflicts on.\nRESERVED_ORGANIZATION_SLUGS = frozenset((\n 'admin', 'manage', 'login', 'account', 'register', 'api',\n 'accept', 'organizations', 'teams', 'projects', 'help',\n 'docs', 'logout', '404', '500', '_static', 'out', 'debug',\n 'remote', 'get-cli', 'blog', 'welcome', 'features',\n 'customers', 'integrations', 'signup', 'pricing',\n 'subscribe', 'enterprise', 'about', 'jobs', 'thanks', 'guide',\n 'privacy', 'security', 'terms', 'from', 'sponsorship', 'for',\n 'at', 'platforms', 'branding', 'vs', 'answers', '_admin',\n 'support',\n))\n\nLOG_LEVELS = {\n logging.DEBUG: 'debug',\n logging.INFO: 'info',\n logging.WARNING: 'warning',\n logging.ERROR: 'error',\n logging.FATAL: 'fatal',\n}\nDEFAULT_LOG_LEVEL = 'error'\nDEFAULT_LOGGER_NAME = ''\nLOG_LEVELS_MAP = {v: k for k, v in six.iteritems(LOG_LEVELS)}\n\n\n# Default alerting threshold values\nDEFAULT_ALERT_PROJECT_THRESHOLD = (500, 25) # 500%, 25 events\nDEFAULT_ALERT_GROUP_THRESHOLD = (1000, 25) # 1000%, 25 events\n\n# Default paginator value\nEVENTS_PER_PAGE = 15\n\n# Default sort option for the group stream\nDEFAULT_SORT_OPTION = 'date'\n\n# Setup languages for only available locales\nLANGUAGE_MAP = dict(settings.LANGUAGES)\nLANGUAGES = [(k, LANGUAGE_MAP[k]) for k in get_all_languages() if k in LANGUAGE_MAP]\n\n# TODO(dcramer): We eventually want to make this user-editable\nTAG_LABELS = {\n 'exc_type': 'Exception Type',\n 'sentry:user': 'User',\n 'sentry:filename': 'File',\n 'sentry:function': 'Function',\n 'sentry:release': 'Release',\n 'os': 'OS',\n 'url': 'URL',\n 'server_name': 'Server',\n}\n\n# TODO(dcramer): once this is more flushed out we want this to be extendable\nSENTRY_RULES = (\n 'sentry.rules.actions.notify_event.NotifyEventAction',\n 'sentry.rules.actions.notify_event_service.NotifyEventServiceAction',\n 'sentry.rules.conditions.every_event.EveryEventCondition',\n 'sentry.rules.conditions.first_seen_event.FirstSeenEventCondition',\n 'sentry.rules.conditions.regression_event.RegressionEventCondition',\n 'sentry.rules.conditions.tagged_event.TaggedEventCondition',\n 'sentry.rules.conditions.event_frequency.EventFrequencyCondition',\n 'sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyCondition',\n 'sentry.rules.conditions.event_attribute.EventAttributeCondition',\n 'sentry.rules.conditions.level.LevelCondition',\n)\n\n# methods as defined by http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html + PATCH\nHTTP_METHODS = ('GET', 'POST', 'PUT', 'OPTIONS', 'HEAD', 'DELETE', 'TRACE', 'CONNECT', 'PATCH')\n\nCLIENT_RESERVED_ATTRS = (\n 'project',\n 'errors',\n 'event_id',\n 'message',\n 'checksum',\n 'culprit',\n 'fingerprint',\n 'level',\n 'time_spent',\n 'logger',\n 'server_name',\n 'site',\n 'received',\n 'timestamp',\n 'extra',\n 'modules',\n 'tags',\n 'platform',\n 'release',\n 'environment',\n)\n\nDEFAULT_SCRUBBED_FIELDS = (\n 'password',\n 'secret',\n 'passwd',\n 'authorization',\n 'api_key',\n 'apikey',\n 'access_token',\n 'auth',\n 'credentials',\n)\n\nVALID_PLATFORMS = set([\n 'as3',\n 'c',\n 'cfml',\n 'cocoa',\n 'csharp',\n 'go',\n 'java',\n 'javascript',\n 'node',\n 'objc',\n 'other',\n 'perl',\n 'php',\n 'python',\n 'ruby',\n 'elixir',\n 'haskell',\n 'groovy',\n])\n\nOK_PLUGIN_ENABLED = _(\"The {name} integration has been enabled.\")\n\nOK_PLUGIN_DISABLED = _(\"The {name} integration has been disabled.\")\n\nOK_PLUGIN_SAVED = _('Configuration for the {name} integration has been saved.')\n\nWARN_SESSION_EXPIRED = 'Your 
session has expired.' # TODO: translate this\n\n# Key to use when ordering a list of events manually\nEVENT_ORDERING_KEY = attrgetter('datetime', 'id')\n\nFILTER_MASK = '[Filtered]'\n\n# Maximum length of a symbol\nMAX_SYM = 256\n\n# Known dsym mimetypes\nKNOWN_DSYM_TYPES = {\n 'application/x-mach-binary': 'macho'\n}\n\nNATIVE_UNKNOWN_STRING = '<unknown>'\n\n\nclass ObjectStatus(object):\n VISIBLE = 0\n HIDDEN = 1\n PENDING_DELETION = 2\n DELETION_IN_PROGRESS = 3\n\n @classmethod\n def as_choices(cls):\n return (\n (cls.VISIBLE, 'visible'),\n (cls.HIDDEN, 'hidden'),\n (cls.PENDING_DELETION, 'pending_deletion'),\n (cls.DELETION_IN_PROGRESS, 'deletion_in_progress'),\n )\n", "path": "src/sentry/constants.py"}]} | 2,925 | 73 |
gh_patches_debug_7314 | rasdani/github-patches | git_diff | chainer__chainer-552 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
where doesn't work with int16 and float64 on cuda6.5
On only cuda 6.5, this code doesn't work:
```
x = cupy.array([1,2,3], dtype=cupy.int16)
y = cupy.array([1,2,3], dtype=cupy.float64)
c = cupy.array([1,0,1], dtype=cupy.bool_)
cupy.where(c, x, y)
```
Other combinations such as (int16, float32) and (int32, float64) correctly work.
Maybe this is a bug on cuda 6.5, and fixed on 7.0.
Note that `cupy.where(c, y, x)` can work.
</issue>
<code>
[start of cupy/sorting/search.py]
1 from cupy import elementwise
2 from cupy import reduction
3
4
5 def argmax(a, axis=None, dtype=None, out=None, keepdims=False):
6 """Returns the indices of the maximum along an axis.
7
8 Args:
9 a (cupy.ndarray): Array to take argmax.
10 axis (int): Along which axis to find the maximum. ``a`` is flattened by
11 default.
12 dtype: Data type specifier.
13 out (cupy.ndarray): Output array.
14 keepdims (bool): If True, the axis ``axis`` is preserved as an axis of
15 length one.
16
17 Returns:
18 cupy.ndarray: The indices of the maximum of ``a`` along an axis.
19
20 .. seealso:: :func:`numpy.argmax`
21
22 """
23 return reduction.argmax(a, axis=axis, dtype=dtype, out=out,
24 keepdims=keepdims)
25
26
27 # TODO(okuta): Implement nanargmax
28
29
30 def argmin(a, axis=None, dtype=None, out=None, keepdims=False):
31 """Returns the indices of the minimum along an axis.
32
33 Args:
34 a (cupy.ndarray): Array to take argmin.
35 axis (int): Along which axis to find the minimum. ``a`` is flattened by
36 default.
37 dtype: Data type specifier.
38 out (cupy.ndarray): Output array.
39 keepdims (bool): If True, the axis ``axis`` is preserved as an axis of
40 length one.
41
42 Returns:
43 cupy.ndarray: The indices of the minimum of ``a`` along an axis.
44
45 .. seealso:: :func:`numpy.argmin`
46
47 """
48 return reduction.argmin(a, axis=axis, dtype=dtype, out=out,
49 keepdims=keepdims)
50
51
52 # TODO(okuta): Implement nanargmin
53
54
55 # TODO(okuta): Implement argwhere
56
57
58 # TODO(okuta): Implement nonzero
59
60
61 # TODO(okuta): Implement flatnonzero
62
63
64 def where(condition, x=None, y=None):
65 """Return elements, either from x or y, depending on condition.
66
67 .. note::
68
69 Currently Cupy doesn't support ``where(condition)``, that Numpy
70 supports.
71
72 Args:
73 condition (cupy.ndarray): When True, take x, otherwise take y.
74 x (cupy.ndarray): Values from which to choose on ``True``.
75 y (cupy.ndarray): Values from which to choose on ``False``.
76
77 Returns:
78 cupy.ndarray: Each element of output contains elements of ``x`` when
79 ``condition`` is ``True``, otherwise elements of ``y``.
80
81 """
82
83 missing = (x is None, y is None).count(True)
84
85 if missing == 1:
86 raise ValueError("Must provide both 'x' and 'y' or neither.")
87 if missing == 2:
88 # TODO(unno): return nonzero(cond)
89 return NotImplementedError()
90
91 return _where_ufunc(condition.astype('?'), x, y)
92
93 _where_ufunc = elementwise.create_ufunc(
94 'cupy_where',
95 ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',
96 '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f', '?dd->d'),
97 'out0 = in0 ? in1 : in2')
98
99
100 # TODO(okuta): Implement searchsorted
101
102
103 # TODO(okuta): Implement extract
104
[end of cupy/sorting/search.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py
--- a/cupy/sorting/search.py
+++ b/cupy/sorting/search.py
@@ -93,7 +93,12 @@
_where_ufunc = elementwise.create_ufunc(
'cupy_where',
('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',
- '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f', '?dd->d'),
+ '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',
+ # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it
+ # works).
+ # See issue #551.
+ '?hd->d', '?Hd->d',
+ '?dd->d'),
'out0 = in0 ? in1 : in2')
| {"golden_diff": "diff --git a/cupy/sorting/search.py b/cupy/sorting/search.py\n--- a/cupy/sorting/search.py\n+++ b/cupy/sorting/search.py\n@@ -93,7 +93,12 @@\n _where_ufunc = elementwise.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n- '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f', '?dd->d'),\n+ '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f',\n+ # On CUDA 6.5 these combinations don't work correctly (on CUDA >=7.0, it\n+ # works).\n+ # See issue #551.\n+ '?hd->d', '?Hd->d',\n+ '?dd->d'),\n 'out0 = in0 ? in1 : in2')\n", "issue": "where doesn't work with int16 and float64 on cuda6.5\nOn only cuda 6.5, this code doesn't work:\n\n```\nx = cupy.array([1,2,3], dtype=cupy.int16)\ny = cupy.array([1,2,3], dtype=cupy.float64)\nc = cupy.array([1,0,1], dtype=cupy.bool_)\ncupy.where(c, x, y)\n```\n\nOther combinations such as (int16, float32) and (int32, float64) correctly work.\nMaybe this is a bug on cuda 6.5, and fixed on 7.0.\n\nNote that `cupy.where(c, y, x)` can work.\n\n", "before_files": [{"content": "from cupy import elementwise\nfrom cupy import reduction\n\n\ndef argmax(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the maximum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmax.\n axis (int): Along which axis to find the maximum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If True, the axis ``axis`` is preserved as an axis of\n length one.\n\n Returns:\n cupy.ndarray: The indices of the maximum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmax`\n\n \"\"\"\n return reduction.argmax(a, axis=axis, dtype=dtype, out=out,\n keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmax\n\n\ndef argmin(a, axis=None, dtype=None, out=None, keepdims=False):\n \"\"\"Returns the indices of the minimum along an axis.\n\n Args:\n a (cupy.ndarray): Array to take argmin.\n axis (int): Along which axis to find the minimum. ``a`` is flattened by\n default.\n dtype: Data type specifier.\n out (cupy.ndarray): Output array.\n keepdims (bool): If True, the axis ``axis`` is preserved as an axis of\n length one.\n\n Returns:\n cupy.ndarray: The indices of the minimum of ``a`` along an axis.\n\n .. seealso:: :func:`numpy.argmin`\n\n \"\"\"\n return reduction.argmin(a, axis=axis, dtype=dtype, out=out,\n keepdims=keepdims)\n\n\n# TODO(okuta): Implement nanargmin\n\n\n# TODO(okuta): Implement argwhere\n\n\n# TODO(okuta): Implement nonzero\n\n\n# TODO(okuta): Implement flatnonzero\n\n\ndef where(condition, x=None, y=None):\n \"\"\"Return elements, either from x or y, depending on condition.\n\n .. 
note::\n\n Currently Cupy doesn't support ``where(condition)``, that Numpy\n supports.\n\n Args:\n condition (cupy.ndarray): When True, take x, otherwise take y.\n x (cupy.ndarray): Values from which to choose on ``True``.\n y (cupy.ndarray): Values from which to choose on ``False``.\n\n Returns:\n cupy.ndarray: Each element of output contains elements of ``x`` when\n ``condition`` is ``True``, otherwise elements of ``y``.\n\n \"\"\"\n\n missing = (x is None, y is None).count(True)\n\n if missing == 1:\n raise ValueError(\"Must provide both 'x' and 'y' or neither.\")\n if missing == 2:\n # TODO(unno): return nonzero(cond)\n return NotImplementedError()\n\n return _where_ufunc(condition.astype('?'), x, y)\n\n_where_ufunc = elementwise.create_ufunc(\n 'cupy_where',\n ('???->?', '?bb->b', '?BB->B', '?hh->h', '?HH->H', '?ii->i', '?II->I',\n '?ll->l', '?LL->L', '?qq->q', '?QQ->Q', '?ee->e', '?ff->f', '?dd->d'),\n 'out0 = in0 ? in1 : in2')\n\n\n# TODO(okuta): Implement searchsorted\n\n\n# TODO(okuta): Implement extract\n", "path": "cupy/sorting/search.py"}]} | 1,674 | 246 |
gh_patches_debug_7069 | rasdani/github-patches | git_diff | pytorch__pytorch-928 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing scipy.misc after torch causes segfault
As shown in below. Is this something expected?
This seems most closely related to https://github.com/pytorch/pytorch/issues/595, but it is closed without any specific solutions.
```
(venv) $ pip freeze
appdirs==1.4.0
numpy==1.12.0
packaging==16.8
pyparsing==2.1.10
PyYAML==3.12
scipy==0.18.1
six==1.10.0
torch==0.1.9.post2
(venv) $ python -c 'import torch, scipy.misc'
Segmentation fault (core dumped)
(venv) $ python -c 'import scipy.misc, torch'
(venv) $ echo 'import torch, scipy.misc' > spam.py
(venv) $ python spam.py
Segmentation fault (core dumped)
(venv) $ gdb --args python spam.py
GNU gdb (Ubuntu 7.7.1-0ubuntu5~14.04.2) 7.7.1
Copyright (C) 2014 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law. Type "show copying"
and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<http://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from python...(no debugging symbols found)...done.
(gdb) run
Starting program: /tmp/venv/bin/python spam.py
[Thread debugging using libthread_db enabled]
Using host libthread_db library "/lib/x86_64-linux-gnu/libthread_db.so.1".
[New Thread 0x7fffc7088700 (LWP 5549)]
[New Thread 0x7fffc6887700 (LWP 5550)]
[New Thread 0x7fffc4086700 (LWP 5551)]
[New Thread 0x7fffc1885700 (LWP 5552)]
[New Thread 0x7fffbf084700 (LWP 5553)]
[New Thread 0x7fffbc883700 (LWP 5554)]
[New Thread 0x7fffba082700 (LWP 5555)]
[New Thread 0x7fffb7881700 (LWP 5556)]
[New Thread 0x7fffb5080700 (LWP 5557)]
[New Thread 0x7fffb287f700 (LWP 5558)]
[New Thread 0x7fffb007e700 (LWP 5559)]
Program received signal SIGSEGV, Segmentation fault.
0x00007fffc9d65fc0 in PyArray_API () from /tmp/venv/local/lib/python2.7/site-packages/numpy/core/multiarray.so
```
</issue>
<code>
[start of torch/__init__.py]
1 """
2 The torch package contains data structures for multi-dimensional
3 tensors and mathematical operations over these are defined.
4 Additionally, it provides many utilities for efficient serializing of
5 Tensors and arbitrary types, and other useful utilities.
6
7 It has a CUDA counterpart, that enables you to run your tensor computations
8 on an NVIDIA GPU with compute capability >= 2.0.
9 """
10
11 import sys
12 from ._utils import _import_dotted_name
13 from .version import __version__
14
15 __all__ = [
16 'typename', 'is_tensor', 'is_storage', 'set_default_tensor_type',
17 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed',
18 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack',
19 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
20 'ShortStorage', 'CharStorage', 'ByteStorage',
21 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
22 'ShortTensor', 'CharTensor', 'ByteTensor',
23 ]
24
25 ################################################################################
26 # Load the extension module
27 ################################################################################
28
29 # Loading the extension with RTLD_GLOBAL option allows to not link extension
30 # modules against the _C shared object. Their missing THP symbols will be
31 # automatically filled by the dynamic loader.
32 import os as _dl_flags
33
34 # first check if the os package has the required flags
35 if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_NOW'):
36 try:
37 # next try if DLFCN exists
38 import DLFCN as _dl_flags
39 except ImportError:
40 # as a last attempt, use compile-time constants
41 import torch._dl as _dl_flags
42
43 old_flags = sys.getdlopenflags()
44 sys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_NOW)
45
46 from torch._C import *
47
48 __all__ += [name for name in dir(_C)
49 if name[0] != '_' and
50 not name.endswith('Base')]
51
52 sys.setdlopenflags(old_flags)
53 del _dl_flags
54 del old_flags
55
56 ################################################################################
57 # Define basic utilities
58 ################################################################################
59
60
61 def typename(o):
62 module = ''
63 class_name = ''
64 if hasattr(o, '__module__') and o.__module__ != 'builtins' \
65 and o.__module__ != '__builtin__' and o.__module__ is not None:
66 module = o.__module__ + '.'
67
68 if hasattr(o, '__qualname__'):
69 class_name = o.__qualname__
70 elif hasattr(o, '__name__'):
71 class_name = o.__name__
72 else:
73 class_name = o.__class__.__name__
74
75 return module + class_name
76
77
78 def is_tensor(obj):
79 r"""Returns True if `obj` is a pytorch tensor.
80
81 Args:
82 obj (Object): Object to test
83 """
84 return obj.__class__ in _tensor_classes
85
86
87 def is_storage(obj):
88 r"""Returns True if `obj` is a pytorch storage object.
89
90 Args:
91 obj (Object): Object to test
92 """
93 return obj.__class__ in _storage_classes
94
95
96 def set_default_tensor_type(t):
97 global Tensor
98 global Storage
99 Tensor = _import_dotted_name(t)
100 Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))
101 _C._set_default_tensor_type(Tensor)
102
103
104 def set_rng_state(new_state):
105 r"""Sets the random number generator state.
106
107 Args:
108 new_state (torch.ByteTensor): The desired state
109 """
110 default_generator.set_state(new_state)
111
112
113 def get_rng_state():
114 r"""Returns the random number generator state as a ByteTensor."""
115 return default_generator.get_state()
116
117
118 def manual_seed(seed):
119 r"""Sets the seed for generating random numbers. And returns a
120 `torch._C.Generator` object.
121
122 Args:
123 seed (int or long): The desired seed.
124 """
125 return default_generator.manual_seed(seed)
126
127
128 def initial_seed():
129 r"""Returns the initial seed for generating random numbers as a
130 python `long`.
131 """
132 return default_generator.initial_seed()
133
134
135 from .serialization import save, load
136 from ._tensor_str import set_printoptions
137
138 ################################################################################
139 # Define Storage and Tensor classes
140 ################################################################################
141
142 from .storage import _StorageBase
143 from .tensor import _TensorBase
144
145
146 class DoubleStorage(_C.DoubleStorageBase, _StorageBase):
147 pass
148
149
150 class FloatStorage(_C.FloatStorageBase, _StorageBase):
151 pass
152
153
154 class HalfStorage(_C.HalfStorageBase, _StorageBase):
155 pass
156
157
158 class LongStorage(_C.LongStorageBase, _StorageBase):
159 pass
160
161
162 class IntStorage(_C.IntStorageBase, _StorageBase):
163 pass
164
165
166 class ShortStorage(_C.ShortStorageBase, _StorageBase):
167 pass
168
169
170 class CharStorage(_C.CharStorageBase, _StorageBase):
171 pass
172
173
174 class ByteStorage(_C.ByteStorageBase, _StorageBase):
175 pass
176
177
178 class DoubleTensor(_C.DoubleTensorBase, _TensorBase):
179
180 def is_signed(self):
181 return True
182
183 @classmethod
184 def storage_type(cls):
185 return DoubleStorage
186
187
188 class FloatTensor(_C.FloatTensorBase, _TensorBase):
189
190 def is_signed(self):
191 return True
192
193 @classmethod
194 def storage_type(cls):
195 return FloatStorage
196
197
198 class HalfTensor(_C.HalfTensorBase, _TensorBase):
199
200 def is_signed(self):
201 return True
202
203 @classmethod
204 def storage_type(cls):
205 return HalfStorage
206
207
208 class LongTensor(_C.LongTensorBase, _TensorBase):
209
210 def is_signed(self):
211 return True
212
213 @classmethod
214 def storage_type(cls):
215 return LongStorage
216
217
218 class IntTensor(_C.IntTensorBase, _TensorBase):
219
220 def is_signed(self):
221 return True
222
223 @classmethod
224 def storage_type(cls):
225 return IntStorage
226
227
228 class ShortTensor(_C.ShortTensorBase, _TensorBase):
229
230 def is_signed(self):
231 return True
232
233 @classmethod
234 def storage_type(cls):
235 return ShortStorage
236
237
238 class CharTensor(_C.CharTensorBase, _TensorBase):
239
240 def is_signed(self):
241 # TODO
242 return False
243
244 @classmethod
245 def storage_type(cls):
246 return CharStorage
247
248
249 class ByteTensor(_C.ByteTensorBase, _TensorBase):
250
251 def is_signed(self):
252 return False
253
254 @classmethod
255 def storage_type(cls):
256 return ByteStorage
257
258
259 _storage_classes = {
260 DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,
261 CharStorage, ByteStorage,
262 }
263
264 _tensor_classes = {
265 DoubleTensor, FloatTensor, LongTensor, IntTensor, ShortTensor,
266 CharTensor, ByteTensor,
267 }
268
269
270 set_default_tensor_type('torch.FloatTensor')
271
272 ################################################################################
273 # Import interface functions defined in Python
274 ################################################################################
275
276 from .functional import *
277
278 ################################################################################
279 # Initialize extension
280 ################################################################################
281
282 # Shared memory manager needs to know the exact location of manager executable
283 import os
284 manager_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')
285 if sys.version_info[0] >= 3:
286 manager_path = bytes(manager_path, 'ascii')
287
288 _C._initExtension(manager_path)
289
290 del os
291 del manager_path
292
293 ################################################################################
294 # Remove unnecessary members
295 ################################################################################
296
297 del DoubleStorageBase
298 del FloatStorageBase
299 del LongStorageBase
300 del IntStorageBase
301 del ShortStorageBase
302 del CharStorageBase
303 del ByteStorageBase
304 del DoubleTensorBase
305 del FloatTensorBase
306 del LongTensorBase
307 del IntTensorBase
308 del ShortTensorBase
309 del CharTensorBase
310 del ByteTensorBase
311
312 del SparseDoubleTensorBase
313 del SparseFloatTensorBase
314 del SparseLongTensorBase
315 del SparseIntTensorBase
316 del SparseShortTensorBase
317 del SparseCharTensorBase
318 del SparseByteTensorBase
319
320 ################################################################################
321 # Import most common subpackages
322 ################################################################################
323
324 import torch.cuda
325 import torch.autograd
326 import torch.nn
327 import torch.optim
328 import torch.multiprocessing
329
330 # attach docstrings to torch and tensor functions
331 from . import _torch_docs, _tensor_docs
332 del _torch_docs, _tensor_docs
333
[end of torch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torch/__init__.py b/torch/__init__.py
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -31,6 +31,13 @@
# automatically filled by the dynamic loader.
import os as _dl_flags
+# if we have numpy, it *must* be imported before the call to setdlopenflags()
+# or there is risk that later c modules will segfault when importing numpy
+try:
+ import numpy as np
+except:
+ pass
+
# first check if the os package has the required flags
if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_NOW'):
try:
| {"golden_diff": "diff --git a/torch/__init__.py b/torch/__init__.py\n--- a/torch/__init__.py\n+++ b/torch/__init__.py\n@@ -31,6 +31,13 @@\n # automatically filled by the dynamic loader.\n import os as _dl_flags\n \n+# if we have numpy, it *must* be imported before the call to setdlopenflags()\n+# or there is risk that later c modules will segfault when importing numpy\n+try:\n+ import numpy as np\n+except:\n+ pass\n+\n # first check if the os package has the required flags\n if not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_NOW'):\n try:\n", "issue": "Importing scipy.misc after torch causes segfault\nAs shown in below. Is this something expected?\r\nThis seems most closely related to https://github.com/pytorch/pytorch/issues/595, but it is closed without any specific solutions.\r\n```\r\n(venv) $ pip freeze\r\nappdirs==1.4.0\r\nnumpy==1.12.0\r\npackaging==16.8\r\npyparsing==2.1.10\r\nPyYAML==3.12\r\nscipy==0.18.1\r\nsix==1.10.0\r\ntorch==0.1.9.post2\r\n\r\n(venv) $ python -c 'import torch, scipy.misc'\r\nSegmentation fault (core dumped)\r\n\r\n(venv) $ python -c 'import scipy.misc, torch'\r\n\r\n(venv) $ echo 'import torch, scipy.misc' > spam.py\r\n\r\n(venv) $ python spam.py\r\nSegmentation fault (core dumped)\r\n\r\n(venv) $ gdb --args python spam.py\r\nGNU gdb (Ubuntu 7.7.1-0ubuntu5~14.04.2) 7.7.1\r\nCopyright (C) 2014 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law. Type \"show copying\"\r\nand \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<http://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n<http://www.gnu.org/software/gdb/documentation/>.\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\"...\r\nReading symbols from python...(no debugging symbols found)...done.\r\n(gdb) run\r\nStarting program: /tmp/venv/bin/python spam.py\r\n[Thread debugging using libthread_db enabled]\r\nUsing host libthread_db library \"/lib/x86_64-linux-gnu/libthread_db.so.1\".\r\n[New Thread 0x7fffc7088700 (LWP 5549)]\r\n[New Thread 0x7fffc6887700 (LWP 5550)]\r\n[New Thread 0x7fffc4086700 (LWP 5551)]\r\n[New Thread 0x7fffc1885700 (LWP 5552)]\r\n[New Thread 0x7fffbf084700 (LWP 5553)]\r\n[New Thread 0x7fffbc883700 (LWP 5554)]\r\n[New Thread 0x7fffba082700 (LWP 5555)]\r\n[New Thread 0x7fffb7881700 (LWP 5556)]\r\n[New Thread 0x7fffb5080700 (LWP 5557)]\r\n[New Thread 0x7fffb287f700 (LWP 5558)]\r\n[New Thread 0x7fffb007e700 (LWP 5559)]\r\n\r\nProgram received signal SIGSEGV, Segmentation fault.\r\n0x00007fffc9d65fc0 in PyArray_API () from /tmp/venv/local/lib/python2.7/site-packages/numpy/core/multiarray.so\r\n```\n", "before_files": [{"content": "\"\"\"\nThe torch package contains data structures for multi-dimensional\ntensors and mathematical operations over these are defined.\nAdditionally, it provides many utilities for efficient serializing of\nTensors and arbitrary types, and other useful utilities.\n\nIt has a CUDA counterpart, that enables you to run your tensor computations\non an NVIDIA GPU with compute capability >= 2.0.\n\"\"\"\n\nimport sys\nfrom ._utils import _import_dotted_name\nfrom .version import __version__\n\n__all__ = [\n 'typename', 
'is_tensor', 'is_storage', 'set_default_tensor_type',\n 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed',\n 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack',\n 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',\n 'ShortStorage', 'CharStorage', 'ByteStorage',\n 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',\n 'ShortTensor', 'CharTensor', 'ByteTensor',\n]\n\n################################################################################\n# Load the extension module\n################################################################################\n\n# Loading the extension with RTLD_GLOBAL option allows to not link extension\n# modules against the _C shared object. Their missing THP symbols will be\n# automatically filled by the dynamic loader.\nimport os as _dl_flags\n\n# first check if the os package has the required flags\nif not hasattr(_dl_flags, 'RTLD_GLOBAL') or not hasattr(_dl_flags, 'RTLD_NOW'):\n try:\n # next try if DLFCN exists\n import DLFCN as _dl_flags\n except ImportError:\n # as a last attempt, use compile-time constants\n import torch._dl as _dl_flags\n\nold_flags = sys.getdlopenflags()\nsys.setdlopenflags(_dl_flags.RTLD_GLOBAL | _dl_flags.RTLD_NOW)\n\nfrom torch._C import *\n\n__all__ += [name for name in dir(_C)\n if name[0] != '_' and\n not name.endswith('Base')]\n\nsys.setdlopenflags(old_flags)\ndel _dl_flags\ndel old_flags\n\n################################################################################\n# Define basic utilities\n################################################################################\n\n\ndef typename(o):\n module = ''\n class_name = ''\n if hasattr(o, '__module__') and o.__module__ != 'builtins' \\\n and o.__module__ != '__builtin__' and o.__module__ is not None:\n module = o.__module__ + '.'\n\n if hasattr(o, '__qualname__'):\n class_name = o.__qualname__\n elif hasattr(o, '__name__'):\n class_name = o.__name__\n else:\n class_name = o.__class__.__name__\n\n return module + class_name\n\n\ndef is_tensor(obj):\n r\"\"\"Returns True if `obj` is a pytorch tensor.\n\n Args:\n obj (Object): Object to test\n \"\"\"\n return obj.__class__ in _tensor_classes\n\n\ndef is_storage(obj):\n r\"\"\"Returns True if `obj` is a pytorch storage object.\n\n Args:\n obj (Object): Object to test\n \"\"\"\n return obj.__class__ in _storage_classes\n\n\ndef set_default_tensor_type(t):\n global Tensor\n global Storage\n Tensor = _import_dotted_name(t)\n Storage = _import_dotted_name(t.replace('Tensor', 'Storage'))\n _C._set_default_tensor_type(Tensor)\n\n\ndef set_rng_state(new_state):\n r\"\"\"Sets the random number generator state.\n\n Args:\n new_state (torch.ByteTensor): The desired state\n \"\"\"\n default_generator.set_state(new_state)\n\n\ndef get_rng_state():\n r\"\"\"Returns the random number generator state as a ByteTensor.\"\"\"\n return default_generator.get_state()\n\n\ndef manual_seed(seed):\n r\"\"\"Sets the seed for generating random numbers. 
And returns a\n `torch._C.Generator` object.\n\n Args:\n seed (int or long): The desired seed.\n \"\"\"\n return default_generator.manual_seed(seed)\n\n\ndef initial_seed():\n r\"\"\"Returns the initial seed for generating random numbers as a\n python `long`.\n \"\"\"\n return default_generator.initial_seed()\n\n\nfrom .serialization import save, load\nfrom ._tensor_str import set_printoptions\n\n################################################################################\n# Define Storage and Tensor classes\n################################################################################\n\nfrom .storage import _StorageBase\nfrom .tensor import _TensorBase\n\n\nclass DoubleStorage(_C.DoubleStorageBase, _StorageBase):\n pass\n\n\nclass FloatStorage(_C.FloatStorageBase, _StorageBase):\n pass\n\n\nclass HalfStorage(_C.HalfStorageBase, _StorageBase):\n pass\n\n\nclass LongStorage(_C.LongStorageBase, _StorageBase):\n pass\n\n\nclass IntStorage(_C.IntStorageBase, _StorageBase):\n pass\n\n\nclass ShortStorage(_C.ShortStorageBase, _StorageBase):\n pass\n\n\nclass CharStorage(_C.CharStorageBase, _StorageBase):\n pass\n\n\nclass ByteStorage(_C.ByteStorageBase, _StorageBase):\n pass\n\n\nclass DoubleTensor(_C.DoubleTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return DoubleStorage\n\n\nclass FloatTensor(_C.FloatTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return FloatStorage\n\n\nclass HalfTensor(_C.HalfTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return HalfStorage\n\n\nclass LongTensor(_C.LongTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return LongStorage\n\n\nclass IntTensor(_C.IntTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return IntStorage\n\n\nclass ShortTensor(_C.ShortTensorBase, _TensorBase):\n\n def is_signed(self):\n return True\n\n @classmethod\n def storage_type(cls):\n return ShortStorage\n\n\nclass CharTensor(_C.CharTensorBase, _TensorBase):\n\n def is_signed(self):\n # TODO\n return False\n\n @classmethod\n def storage_type(cls):\n return CharStorage\n\n\nclass ByteTensor(_C.ByteTensorBase, _TensorBase):\n\n def is_signed(self):\n return False\n\n @classmethod\n def storage_type(cls):\n return ByteStorage\n\n\n_storage_classes = {\n DoubleStorage, FloatStorage, LongStorage, IntStorage, ShortStorage,\n CharStorage, ByteStorage,\n}\n\n_tensor_classes = {\n DoubleTensor, FloatTensor, LongTensor, IntTensor, ShortTensor,\n CharTensor, ByteTensor,\n}\n\n\nset_default_tensor_type('torch.FloatTensor')\n\n################################################################################\n# Import interface functions defined in Python\n################################################################################\n\nfrom .functional import *\n\n################################################################################\n# Initialize extension\n################################################################################\n\n# Shared memory manager needs to know the exact location of manager executable\nimport os\nmanager_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib', 'torch_shm_manager')\nif sys.version_info[0] >= 3:\n manager_path = bytes(manager_path, 'ascii')\n\n_C._initExtension(manager_path)\n\ndel os\ndel 
manager_path\n\n################################################################################\n# Remove unnecessary members\n################################################################################\n\ndel DoubleStorageBase\ndel FloatStorageBase\ndel LongStorageBase\ndel IntStorageBase\ndel ShortStorageBase\ndel CharStorageBase\ndel ByteStorageBase\ndel DoubleTensorBase\ndel FloatTensorBase\ndel LongTensorBase\ndel IntTensorBase\ndel ShortTensorBase\ndel CharTensorBase\ndel ByteTensorBase\n\ndel SparseDoubleTensorBase\ndel SparseFloatTensorBase\ndel SparseLongTensorBase\ndel SparseIntTensorBase\ndel SparseShortTensorBase\ndel SparseCharTensorBase\ndel SparseByteTensorBase\n\n################################################################################\n# Import most common subpackages\n################################################################################\n\nimport torch.cuda\nimport torch.autograd\nimport torch.nn\nimport torch.optim\nimport torch.multiprocessing\n\n# attach docstrings to torch and tensor functions\nfrom . import _torch_docs, _tensor_docs\ndel _torch_docs, _tensor_docs\n", "path": "torch/__init__.py"}]} | 4,039 | 158 |
gh_patches_debug_41259 | rasdani/github-patches | git_diff | rucio__rucio-2603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Optimize the update of the final states in the necromancer
Motivation
----------
Currently the necromancer updates up to 10,000,000 bad replica states in one transaction.
Modification
------------
Update by chunks
</issue>
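For orientation, the requested change amounts to splitting the per-RSE history update into fixed-size batches. The snippet below is only a rough sketch of that idea, assuming the `chunks` helper from `rucio.common.utils` (which yields successive fixed-size slices of a list) and an arbitrary `chunk_size` of 1000; it is not the actual patch.

```python
import logging
from math import ceil

from rucio.common.utils import chunks  # yields successive fixed-size slices of a list


def update_history_in_chunks(bad_replicas, update_bad_replicas_history, chunk_size=1000):
    """Update bad-replica history per RSE in small batches instead of one big transaction.

    ``bad_replicas`` maps an rse_id to its list of bad replica entries; ``chunk_size``
    is an assumed tuning value, not a number taken from the repository.
    """
    for rse_id, replicas in bad_replicas.items():
        nchunk = int(ceil(len(replicas) / float(chunk_size)))
        for cnt, chunk in enumerate(chunks(replicas, chunk_size), start=1):
            logging.debug('History for rse_id %s : chunk %i/%i', rse_id, cnt, nchunk)
            update_bad_replicas_history(chunk, rse_id)
```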
<code>
[start of lib/rucio/rse/protocols/__init__.py]
1 # Copyright European Organization for Nuclear Research (CERN)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # You may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
6
7 # These are all protocols which are considered in good health
8 # based on generally available support libraries, native dependencies,
9 # and implementations in Rucio
10 supported_protocols = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 's3', 's3+rucio', 's3+https', 'storm']
11
[end of lib/rucio/rse/protocols/__init__.py]
[start of lib/rucio/daemons/badreplicas/necromancer.py]
1 # Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 # Authors:
16 # - Cedric Serfon <[email protected]>, 2014-2017
17 # - Vincent Garonne <[email protected]>, 2015-2018
18 # - Mario Lassnig <[email protected]>, 2015
19 # - Wen Guan <[email protected]>, 2015
20 # - Hannes Hansen <[email protected]>, 2018-2019
21 #
22 # PY3K COMPATIBLE
23
24 import logging
25 import os
26 import socket
27 import threading
28 import time
29
30 from sys import exc_info, stdout, argv
31 from traceback import format_exception
32
33 from rucio.db.sqla.constants import ReplicaState
34 from rucio.common.config import config_get
35 from rucio.common.exception import DatabaseException
36 from rucio.core import monitor, heartbeat
37 from rucio.core.replica import list_bad_replicas, get_replicas_state, list_bad_replicas_history, update_bad_replicas_history
38 from rucio.core.rule import update_rules_for_lost_replica, update_rules_for_bad_replica
39
40
41 logging.basicConfig(stream=stdout,
42 level=getattr(logging,
43 config_get('common', 'loglevel',
44 raise_exception=False,
45 default='DEBUG').upper()),
46 format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
47
48 graceful_stop = threading.Event()
49
50
51 def necromancer(thread=0, bulk=5, once=False):
52 """
53 Creates a Necromancer Worker that gets a list of bad replicas for a given hash,
54 identify lost DIDs and for non-lost ones, set the locks and rules for reevaluation.
55
56 :param thread: Thread number at startup.
57 :param bulk: The number of requests to process.
58 :param once: Run only once.
59 """
60
61 sleep_time = 60
62 update_history_threshold = 3600
63 update_history_time = time.time()
64
65 executable = ' '.join(argv)
66 hostname = socket.getfqdn()
67 pid = os.getpid()
68 hb_thread = threading.current_thread()
69 heartbeat.sanity_check(executable=executable, hostname=hostname)
70
71 while not graceful_stop.is_set():
72
73 heart_beat = heartbeat.live(executable, hostname, pid, hb_thread)
74 prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'] + 1, heart_beat['nr_threads'])
75
76 stime = time.time()
77 replicas = []
78 try:
79 replicas = list_bad_replicas(limit=bulk, thread=heart_beat['assign_thread'], total_threads=heart_beat['nr_threads'])
80
81 for replica in replicas:
82 scope, name, rse_id, rse = replica['scope'], replica['name'], replica['rse_id'], replica['rse']
83 logging.info(prepend_str + 'Working on %s:%s on %s' % (scope, name, rse))
84
85 list_replicas = get_replicas_state(scope=scope, name=name)
86 if ReplicaState.AVAILABLE not in list_replicas and ReplicaState.TEMPORARY_UNAVAILABLE not in list_replicas:
87 logging.info(prepend_str + 'File %s:%s has no other available or temporary available replicas, it will be marked as lost' % (scope, name))
88 try:
89 update_rules_for_lost_replica(scope=scope, name=name, rse_id=rse_id, nowait=True)
90 monitor.record_counter(counters='necromancer.badfiles.lostfile', delta=1)
91 except DatabaseException as error:
92 logging.info(prepend_str + '%s' % (str(error)))
93
94 else:
95 rep = list_replicas.get(ReplicaState.AVAILABLE, [])
96 unavailable_rep = list_replicas.get(ReplicaState.TEMPORARY_UNAVAILABLE, [])
97 logging.info(prepend_str + 'File %s:%s can be recovered. Available sources : %s + Unavailable sources : %s' % (scope, name, str(rep), str(unavailable_rep)))
98 try:
99 update_rules_for_bad_replica(scope=scope, name=name, rse_id=rse_id, nowait=True)
100 monitor.record_counter(counters='necromancer.badfiles.recovering', delta=1)
101 except DatabaseException as error:
102 logging.info(prepend_str + '%s' % (str(error)))
103
104 logging.info(prepend_str + 'It took %s seconds to process %s replicas' % (str(time.time() - stime), str(len(replicas))))
105 except Exception:
106 exc_type, exc_value, exc_traceback = exc_info()
107 logging.critical(prepend_str + ''.join(format_exception(exc_type, exc_value, exc_traceback)).strip())
108
109 if once:
110 break
111 else:
112 now = time.time()
113 if (now - update_history_time) > update_history_threshold:
114 logging.info(prepend_str + 'Last update of history table %s seconds ago. Running update.' % (now - update_history_time))
115 bad_replicas = list_bad_replicas_history(limit=10000000,
116 thread=heart_beat['assign_thread'],
117 total_threads=heart_beat['nr_threads'])
118 for rse_id in bad_replicas:
119 update_bad_replicas_history(bad_replicas[rse_id], rse_id)
120 logging.info(prepend_str + 'History table updated in %s seconds' % (time.time() - now))
121 update_history_time = time.time()
122
123 tottime = time.time() - stime
124 if len(replicas) == bulk:
125 logging.info(prepend_str + 'Processed maximum number of replicas according to the bulk size. Restart immediately next cycle')
126 elif tottime < sleep_time:
127 logging.info(prepend_str + 'Will sleep for %s seconds' % (str(sleep_time - tottime)))
128 time.sleep(sleep_time - tottime)
129 continue
130
131 logging.info(prepend_str + 'Graceful stop requested')
132 heartbeat.die(executable, hostname, pid, hb_thread)
133 logging.info(prepend_str + 'Graceful stop done')
134
135
136 def run(threads=1, bulk=100, once=False):
137 """
138 Starts up the necromancer threads.
139 """
140
141 if once:
142 logging.info('Will run only one iteration in a single threaded mode')
143 necromancer(bulk=bulk, once=once)
144 else:
145 logging.info('starting necromancer threads')
146 thread_list = [threading.Thread(target=necromancer, kwargs={'once': once,
147 'thread': i,
148 'bulk': bulk}) for i in range(0, threads)]
149 [t.start() for t in thread_list]
150
151 logging.info('waiting for interrupts')
152
153 # Interruptible joins require a timeout.
154 while thread_list:
155 thread_list = [thread.join(timeout=3.14) for thread in thread_list if thread and thread.isAlive()]
156
157
158 def stop(signum=None, frame=None):
159 """
160 Graceful exit.
161 """
162 graceful_stop.set()
163
[end of lib/rucio/daemons/badreplicas/necromancer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/rucio/daemons/badreplicas/necromancer.py b/lib/rucio/daemons/badreplicas/necromancer.py
--- a/lib/rucio/daemons/badreplicas/necromancer.py
+++ b/lib/rucio/daemons/badreplicas/necromancer.py
@@ -13,7 +13,7 @@
# limitations under the License.
#
# Authors:
-# - Cedric Serfon <[email protected]>, 2014-2017
+# - Cedric Serfon <[email protected]>, 2014-2019
# - Vincent Garonne <[email protected]>, 2015-2018
# - Mario Lassnig <[email protected]>, 2015
# - Wen Guan <[email protected]>, 2015
@@ -21,17 +21,21 @@
#
# PY3K COMPATIBLE
+from __future__ import division
+
import logging
import os
import socket
import threading
import time
+from math import ceil
from sys import exc_info, stdout, argv
from traceback import format_exception
from rucio.db.sqla.constants import ReplicaState
from rucio.common.config import config_get
+from rucio.common.utils import chunks
from rucio.common.exception import DatabaseException
from rucio.core import monitor, heartbeat
from rucio.core.replica import list_bad_replicas, get_replicas_state, list_bad_replicas_history, update_bad_replicas_history
@@ -112,11 +116,18 @@
now = time.time()
if (now - update_history_time) > update_history_threshold:
logging.info(prepend_str + 'Last update of history table %s seconds ago. Running update.' % (now - update_history_time))
- bad_replicas = list_bad_replicas_history(limit=10000000,
+ bad_replicas = list_bad_replicas_history(limit=1000000,
thread=heart_beat['assign_thread'],
total_threads=heart_beat['nr_threads'])
for rse_id in bad_replicas:
- update_bad_replicas_history(bad_replicas[rse_id], rse_id)
+ chunk_size = 1000
+ nchunk = int(ceil(len(bad_replicas[rse_id]) / chunk_size))
+ logging.debug(prepend_str + 'Update history for rse_id %s' % (rse_id))
+ cnt = 0
+ for chunk in chunks(bad_replicas[rse_id], chunk_size):
+ logging.debug(prepend_str + ' History for rse_id %s : chunk %i/%i' % (rse_id, cnt, nchunk))
+ cnt += 1
+ update_bad_replicas_history(chunk, rse_id)
logging.info(prepend_str + 'History table updated in %s seconds' % (time.time() - now))
update_history_time = time.time()
diff --git a/lib/rucio/rse/protocols/__init__.py b/lib/rucio/rse/protocols/__init__.py
--- a/lib/rucio/rse/protocols/__init__.py
+++ b/lib/rucio/rse/protocols/__init__.py
@@ -7,4 +7,4 @@
# These are all protocols which are considered in good health
# based on generally available support libraries, native dependencies,
# and implementations in Rucio
-supported_protocols = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 's3', 's3+rucio', 's3+https']
+supported_protocols = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 's3', 's3+rucio', 's3+https', 'storm']
| {"golden_diff": "diff --git a/lib/rucio/daemons/badreplicas/necromancer.py b/lib/rucio/daemons/badreplicas/necromancer.py\n--- a/lib/rucio/daemons/badreplicas/necromancer.py\n+++ b/lib/rucio/daemons/badreplicas/necromancer.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n #\n # Authors:\n-# - Cedric Serfon <[email protected]>, 2014-2017\n+# - Cedric Serfon <[email protected]>, 2014-2019\n # - Vincent Garonne <[email protected]>, 2015-2018\n # - Mario Lassnig <[email protected]>, 2015\n # - Wen Guan <[email protected]>, 2015\n@@ -21,17 +21,21 @@\n #\n # PY3K COMPATIBLE\n \n+from __future__ import division\n+\n import logging\n import os\n import socket\n import threading\n import time\n \n+from math import ceil\n from sys import exc_info, stdout, argv\n from traceback import format_exception\n \n from rucio.db.sqla.constants import ReplicaState\n from rucio.common.config import config_get\n+from rucio.common.utils import chunks\n from rucio.common.exception import DatabaseException\n from rucio.core import monitor, heartbeat\n from rucio.core.replica import list_bad_replicas, get_replicas_state, list_bad_replicas_history, update_bad_replicas_history\n@@ -112,11 +116,18 @@\n now = time.time()\n if (now - update_history_time) > update_history_threshold:\n logging.info(prepend_str + 'Last update of history table %s seconds ago. Running update.' % (now - update_history_time))\n- bad_replicas = list_bad_replicas_history(limit=10000000,\n+ bad_replicas = list_bad_replicas_history(limit=1000000,\n thread=heart_beat['assign_thread'],\n total_threads=heart_beat['nr_threads'])\n for rse_id in bad_replicas:\n- update_bad_replicas_history(bad_replicas[rse_id], rse_id)\n+ chunk_size = 1000\n+ nchunk = int(ceil(len(bad_replicas[rse_id]) / chunk_size))\n+ logging.debug(prepend_str + 'Update history for rse_id %s' % (rse_id))\n+ cnt = 0\n+ for chunk in chunks(bad_replicas[rse_id], chunk_size):\n+ logging.debug(prepend_str + ' History for rse_id %s : chunk %i/%i' % (rse_id, cnt, nchunk))\n+ cnt += 1\n+ update_bad_replicas_history(chunk, rse_id)\n logging.info(prepend_str + 'History table updated in %s seconds' % (time.time() - now))\n update_history_time = time.time()\n \ndiff --git a/lib/rucio/rse/protocols/__init__.py b/lib/rucio/rse/protocols/__init__.py\n--- a/lib/rucio/rse/protocols/__init__.py\n+++ b/lib/rucio/rse/protocols/__init__.py\n@@ -7,4 +7,4 @@\n # These are all protocols which are considered in good health\n # based on generally available support libraries, native dependencies,\n # and implementations in Rucio\n-supported_protocols = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 's3', 's3+rucio', 's3+https']\n+supported_protocols = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 's3', 's3+rucio', 's3+https', 'storm']\n", "issue": "Optimize the update of the final states in the necromancer\nMotivation\r\n----------\r\nCurrently the necromancer update up-to 10000000 bad replicas states in one transaction\r\n\r\n\r\nModification\r\n------------\r\nUpdate by chunks\r\n\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# These are all protocols which are considered in good health\n# based on generally available support libraries, native dependencies,\n# and implementations in 
Rucio\nsupported_protocols = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 's3', 's3+rucio', 's3+https', 'storm']\n", "path": "lib/rucio/rse/protocols/__init__.py"}, {"content": "# Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Cedric Serfon <[email protected]>, 2014-2017\n# - Vincent Garonne <[email protected]>, 2015-2018\n# - Mario Lassnig <[email protected]>, 2015\n# - Wen Guan <[email protected]>, 2015\n# - Hannes Hansen <[email protected]>, 2018-2019\n#\n# PY3K COMPATIBLE\n\nimport logging\nimport os\nimport socket\nimport threading\nimport time\n\nfrom sys import exc_info, stdout, argv\nfrom traceback import format_exception\n\nfrom rucio.db.sqla.constants import ReplicaState\nfrom rucio.common.config import config_get\nfrom rucio.common.exception import DatabaseException\nfrom rucio.core import monitor, heartbeat\nfrom rucio.core.replica import list_bad_replicas, get_replicas_state, list_bad_replicas_history, update_bad_replicas_history\nfrom rucio.core.rule import update_rules_for_lost_replica, update_rules_for_bad_replica\n\n\nlogging.basicConfig(stream=stdout,\n level=getattr(logging,\n config_get('common', 'loglevel',\n raise_exception=False,\n default='DEBUG').upper()),\n format='%(asctime)s\\t%(process)d\\t%(levelname)s\\t%(message)s')\n\ngraceful_stop = threading.Event()\n\n\ndef necromancer(thread=0, bulk=5, once=False):\n \"\"\"\n Creates a Necromancer Worker that gets a list of bad replicas for a given hash,\n identify lost DIDs and for non-lost ones, set the locks and rules for reevaluation.\n\n :param thread: Thread number at startup.\n :param bulk: The number of requests to process.\n :param once: Run only once.\n \"\"\"\n\n sleep_time = 60\n update_history_threshold = 3600\n update_history_time = time.time()\n\n executable = ' '.join(argv)\n hostname = socket.getfqdn()\n pid = os.getpid()\n hb_thread = threading.current_thread()\n heartbeat.sanity_check(executable=executable, hostname=hostname)\n\n while not graceful_stop.is_set():\n\n heart_beat = heartbeat.live(executable, hostname, pid, hb_thread)\n prepend_str = 'Thread [%i/%i] : ' % (heart_beat['assign_thread'] + 1, heart_beat['nr_threads'])\n\n stime = time.time()\n replicas = []\n try:\n replicas = list_bad_replicas(limit=bulk, thread=heart_beat['assign_thread'], total_threads=heart_beat['nr_threads'])\n\n for replica in replicas:\n scope, name, rse_id, rse = replica['scope'], replica['name'], replica['rse_id'], replica['rse']\n logging.info(prepend_str + 'Working on %s:%s on %s' % (scope, name, rse))\n\n list_replicas = get_replicas_state(scope=scope, name=name)\n if ReplicaState.AVAILABLE not in list_replicas and ReplicaState.TEMPORARY_UNAVAILABLE not in list_replicas:\n logging.info(prepend_str + 'File %s:%s has no other available or temporary available replicas, it will be marked as lost' % (scope, name))\n try:\n update_rules_for_lost_replica(scope=scope, name=name, rse_id=rse_id, nowait=True)\n 
monitor.record_counter(counters='necromancer.badfiles.lostfile', delta=1)\n except DatabaseException as error:\n logging.info(prepend_str + '%s' % (str(error)))\n\n else:\n rep = list_replicas.get(ReplicaState.AVAILABLE, [])\n unavailable_rep = list_replicas.get(ReplicaState.TEMPORARY_UNAVAILABLE, [])\n logging.info(prepend_str + 'File %s:%s can be recovered. Available sources : %s + Unavailable sources : %s' % (scope, name, str(rep), str(unavailable_rep)))\n try:\n update_rules_for_bad_replica(scope=scope, name=name, rse_id=rse_id, nowait=True)\n monitor.record_counter(counters='necromancer.badfiles.recovering', delta=1)\n except DatabaseException as error:\n logging.info(prepend_str + '%s' % (str(error)))\n\n logging.info(prepend_str + 'It took %s seconds to process %s replicas' % (str(time.time() - stime), str(len(replicas))))\n except Exception:\n exc_type, exc_value, exc_traceback = exc_info()\n logging.critical(prepend_str + ''.join(format_exception(exc_type, exc_value, exc_traceback)).strip())\n\n if once:\n break\n else:\n now = time.time()\n if (now - update_history_time) > update_history_threshold:\n logging.info(prepend_str + 'Last update of history table %s seconds ago. Running update.' % (now - update_history_time))\n bad_replicas = list_bad_replicas_history(limit=10000000,\n thread=heart_beat['assign_thread'],\n total_threads=heart_beat['nr_threads'])\n for rse_id in bad_replicas:\n update_bad_replicas_history(bad_replicas[rse_id], rse_id)\n logging.info(prepend_str + 'History table updated in %s seconds' % (time.time() - now))\n update_history_time = time.time()\n\n tottime = time.time() - stime\n if len(replicas) == bulk:\n logging.info(prepend_str + 'Processed maximum number of replicas according to the bulk size. Restart immediately next cycle')\n elif tottime < sleep_time:\n logging.info(prepend_str + 'Will sleep for %s seconds' % (str(sleep_time - tottime)))\n time.sleep(sleep_time - tottime)\n continue\n\n logging.info(prepend_str + 'Graceful stop requested')\n heartbeat.die(executable, hostname, pid, hb_thread)\n logging.info(prepend_str + 'Graceful stop done')\n\n\ndef run(threads=1, bulk=100, once=False):\n \"\"\"\n Starts up the necromancer threads.\n \"\"\"\n\n if once:\n logging.info('Will run only one iteration in a single threaded mode')\n necromancer(bulk=bulk, once=once)\n else:\n logging.info('starting necromancer threads')\n thread_list = [threading.Thread(target=necromancer, kwargs={'once': once,\n 'thread': i,\n 'bulk': bulk}) for i in range(0, threads)]\n [t.start() for t in thread_list]\n\n logging.info('waiting for interrupts')\n\n # Interruptible joins require a timeout.\n while thread_list:\n thread_list = [thread.join(timeout=3.14) for thread in thread_list if thread and thread.isAlive()]\n\n\ndef stop(signum=None, frame=None):\n \"\"\"\n Graceful exit.\n \"\"\"\n graceful_stop.set()\n", "path": "lib/rucio/daemons/badreplicas/necromancer.py"}]} | 2,863 | 892 |
gh_patches_debug_43198 | rasdani/github-patches | git_diff | pallets__werkzeug-1284 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feature request] werkzeug.contrib.profiler.ProfilerMiddleware custom file name formatting
It would be nice to allow users to customize the output file name format when using the ProfilerMiddleware.
Currently, the file name format is `'%(method)s.%(path)s.%(elapsed)06dms.%(time)d'`, which, when multiple invocations have occurred, leads to the profile files being ordered by their respective elapsed times. Having the ability to customize this format lets profiling users make use of the resulting profile files in additional ways.
I propose that `profiler.ProfilerMiddleware` be changed to allow users to customize the format of the profile filenames for their specific needs.
</issue>
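As a rough illustration of the idea only — the format string and its field names below are assumptions made up for this sketch, not an existing Werkzeug API — the naming logic could be driven by a user-supplied format:

```python
import time

# Illustrative only: a user-chosen format string; the {time}/{method}/{path}/{elapsed}
# fields are assumptions for this sketch, not part of the released ProfilerMiddleware.
FILENAME_FORMAT = '{time:.0f}.{method}.{path}.{elapsed:.0f}ms'


def build_profile_filename(environ, elapsed, fmt=FILENAME_FORMAT):
    """Build a profile file name from the WSGI environ, mirroring the middleware's fields."""
    path = environ.get('PATH_INFO', '').strip('/').replace('/', '.') or 'root'
    return fmt.format(
        method=environ['REQUEST_METHOD'],
        path=path,
        elapsed=elapsed * 1000.0,
        time=time.time(),
    ) + '.prof'


# Putting the timestamp first makes repeated runs sort chronologically.
print(build_profile_filename({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/api/users'}, 0.042))
```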
<code>
[start of werkzeug/contrib/profiler.py]
1 # -*- coding: utf-8 -*-
2 """
3 werkzeug.contrib.profiler
4 ~~~~~~~~~~~~~~~~~~~~~~~~~
5
6 This module provides a simple WSGI profiler middleware for finding
7 bottlenecks in web application. It uses the :mod:`profile` or
8 :mod:`cProfile` module to do the profiling and writes the stats to the
9 stream provided (defaults to stderr).
10
11 Example usage::
12
13 from werkzeug.contrib.profiler import ProfilerMiddleware
14 app = ProfilerMiddleware(app)
15
16 :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
17 :license: BSD, see LICENSE for more details.
18 """
19 import sys
20 import time
21 import os.path
22 try:
23 try:
24 from cProfile import Profile
25 except ImportError:
26 from profile import Profile
27 from pstats import Stats
28 available = True
29 except ImportError:
30 available = False
31
32
33 class MergeStream(object):
34
35 """An object that redirects `write` calls to multiple streams.
36 Use this to log to both `sys.stdout` and a file::
37
38 f = open('profiler.log', 'w')
39 stream = MergeStream(sys.stdout, f)
40 profiler = ProfilerMiddleware(app, stream)
41 """
42
43 def __init__(self, *streams):
44 if not streams:
45 raise TypeError('at least one stream must be given')
46 self.streams = streams
47
48 def write(self, data):
49 for stream in self.streams:
50 stream.write(data)
51
52
53 class ProfilerMiddleware(object):
54
55 """Simple profiler middleware. Wraps a WSGI application and profiles
56 a request. This intentionally buffers the response so that timings are
57 more exact.
58
59 By giving the `profile_dir` argument, pstat.Stats files are saved to that
60 directory, one file per request. Without it, a summary is printed to
61 `stream` instead.
62
63 For the exact meaning of `sort_by` and `restrictions` consult the
64 :mod:`profile` documentation.
65
66 .. versionadded:: 0.9
67 Added support for `restrictions` and `profile_dir`.
68
69 :param app: the WSGI application to profile.
70 :param stream: the stream for the profiled stats. defaults to stderr.
71 :param sort_by: a tuple of columns to sort the result by.
72 :param restrictions: a tuple of profiling strictions, not used if dumping
73 to `profile_dir`.
74 :param profile_dir: directory name to save pstat files
75 """
76
77 def __init__(self, app, stream=None,
78 sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
79 if not available:
80 raise RuntimeError('the profiler is not available because '
81 'profile or pstat is not installed.')
82 self._app = app
83 self._stream = stream or sys.stdout
84 self._sort_by = sort_by
85 self._restrictions = restrictions
86 self._profile_dir = profile_dir
87
88 def __call__(self, environ, start_response):
89 response_body = []
90
91 def catching_start_response(status, headers, exc_info=None):
92 start_response(status, headers, exc_info)
93 return response_body.append
94
95 def runapp():
96 appiter = self._app(environ, catching_start_response)
97 response_body.extend(appiter)
98 if hasattr(appiter, 'close'):
99 appiter.close()
100
101 p = Profile()
102 start = time.time()
103 p.runcall(runapp)
104 body = b''.join(response_body)
105 elapsed = time.time() - start
106
107 if self._profile_dir is not None:
108 prof_filename = os.path.join(self._profile_dir,
109 '%s.%s.%06dms.%d.prof' % (
110 environ['REQUEST_METHOD'],
111 environ.get('PATH_INFO').strip(
112 '/').replace('/', '.') or 'root',
113 elapsed * 1000.0,
114 time.time()
115 ))
116 p.dump_stats(prof_filename)
117
118 else:
119 stats = Stats(p, stream=self._stream)
120 stats.sort_stats(*self._sort_by)
121
122 self._stream.write('-' * 80)
123 self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
124 stats.print_stats(*self._restrictions)
125 self._stream.write('-' * 80 + '\n\n')
126
127 return [body]
128
129
130 def make_action(app_factory, hostname='localhost', port=5000,
131 threaded=False, processes=1, stream=None,
132 sort_by=('time', 'calls'), restrictions=()):
133 """Return a new callback for :mod:`werkzeug.script` that starts a local
134 server with the profiler enabled.
135
136 ::
137
138 from werkzeug.contrib import profiler
139 action_profile = profiler.make_action(make_app)
140 """
141 def action(hostname=('h', hostname), port=('p', port),
142 threaded=threaded, processes=processes):
143 """Start a new development server."""
144 from werkzeug.serving import run_simple
145 app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
146 run_simple(hostname, port, app, False, None, threaded, processes)
147 return action
148
[end of werkzeug/contrib/profiler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/werkzeug/contrib/profiler.py b/werkzeug/contrib/profiler.py
--- a/werkzeug/contrib/profiler.py
+++ b/werkzeug/contrib/profiler.py
@@ -9,7 +9,6 @@
stream provided (defaults to stderr).
Example usage::
-
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
@@ -60,22 +59,43 @@
directory, one file per request. Without it, a summary is printed to
`stream` instead.
+ The file name format can be customized by passing
+ ``filename_format``. If it is a string, it will be formatted using
+ :meth:`str.format` with the following fields available:
+
+ - ``{method}`` - the request method; GET, POST, etc
+ - ``{path}`` - the request path or 'root' should one not exist
+ - ``{elapsed}`` - the elapsed time of the request
+ - ``{time}`` - the time of the request
+
+ If it is a callable, it will be called with the WSGI ``environ``
+ dict and should return a filename. Either way, the ``'.prof'``
+ extension will be appended to the name. The default format is
+ ``'{method}.{path}.{elapsed:06d}ms.{time:d}'``.
+
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
+ .. versionadded:: 0.15
+ Added ``profile_file_name_format``.
+
:param app: the WSGI application to profile.
:param stream: the stream for the profiled stats. defaults to stderr.
:param sort_by: a tuple of columns to sort the result by.
- :param restrictions: a tuple of profiling strictions, not used if dumping
+ :param restrictions: a tuple of profiling restrictions, not used if dumping
to `profile_dir`.
:param profile_dir: directory name to save pstat files
+ :param filename_format: format of the filename excluding the extension.
"""
- def __init__(self, app, stream=None,
- sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
+ def __init__(
+ self, app, stream=None,
+ sort_by=('time', 'calls'), restrictions=(), profile_dir=None,
+ filename_format='%(method)s.%(path)s.%(elapsed)06dms.%(time)d'
+ ):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
@@ -84,6 +104,7 @@
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
+ self._filename_format = filename_format
def __call__(self, environ, start_response):
response_body = []
@@ -105,14 +126,18 @@
elapsed = time.time() - start
if self._profile_dir is not None:
- prof_filename = os.path.join(self._profile_dir,
- '%s.%s.%06dms.%d.prof' % (
- environ['REQUEST_METHOD'],
- environ.get('PATH_INFO').strip(
- '/').replace('/', '.') or 'root',
- elapsed * 1000.0,
- time.time()
- ))
+ if callable(self._filename_format):
+ filename = self._filename_format(environ)
+ else:
+ filename = self._filename_format.format(
+ method=environ['REQUEST_METHOD'],
+ path=(
+ environ.get('PATH_INFO').strip('/').replace('/', '.')
+ or 'root'),
+ elapsed=elapsed * 1000.0,
+ time=time.time(),
+ )
+ prof_filename = os.path.join(self._profile_dir, filename + '.prof')
p.dump_stats(prof_filename)
else:
| {"golden_diff": "diff --git a/werkzeug/contrib/profiler.py b/werkzeug/contrib/profiler.py\n--- a/werkzeug/contrib/profiler.py\n+++ b/werkzeug/contrib/profiler.py\n@@ -9,7 +9,6 @@\n stream provided (defaults to stderr).\n \n Example usage::\n-\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n \n@@ -60,22 +59,43 @@\n directory, one file per request. Without it, a summary is printed to\n `stream` instead.\n \n+ The file name format can be customized by passing\n+ ``filename_format``. If it is a string, it will be formatted using\n+ :meth:`str.format` with the following fields available:\n+\n+ - ``{method}`` - the request method; GET, POST, etc\n+ - ``{path}`` - the request path or 'root' should one not exist\n+ - ``{elapsed}`` - the elapsed time of the request\n+ - ``{time}`` - the time of the request\n+\n+ If it is a callable, it will be called with the WSGI ``environ``\n+ dict and should return a filename. Either way, the ``'.prof'``\n+ extension will be appended to the name. The default format is\n+ ``'{method}.{path}.{elapsed:06d}ms.{time:d}'``.\n+\n For the exact meaning of `sort_by` and `restrictions` consult the\n :mod:`profile` documentation.\n \n .. versionadded:: 0.9\n Added support for `restrictions` and `profile_dir`.\n \n+ .. versionadded:: 0.15\n+ Added ``profile_file_name_format``.\n+\n :param app: the WSGI application to profile.\n :param stream: the stream for the profiled stats. defaults to stderr.\n :param sort_by: a tuple of columns to sort the result by.\n- :param restrictions: a tuple of profiling strictions, not used if dumping\n+ :param restrictions: a tuple of profiling restrictions, not used if dumping\n to `profile_dir`.\n :param profile_dir: directory name to save pstat files\n+ :param filename_format: format of the filename excluding the extension.\n \"\"\"\n \n- def __init__(self, app, stream=None,\n- sort_by=('time', 'calls'), restrictions=(), profile_dir=None):\n+ def __init__(\n+ self, app, stream=None,\n+ sort_by=('time', 'calls'), restrictions=(), profile_dir=None,\n+ filename_format='%(method)s.%(path)s.%(elapsed)06dms.%(time)d'\n+ ):\n if not available:\n raise RuntimeError('the profiler is not available because '\n 'profile or pstat is not installed.')\n@@ -84,6 +104,7 @@\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n+ self._filename_format = filename_format\n \n def __call__(self, environ, start_response):\n response_body = []\n@@ -105,14 +126,18 @@\n elapsed = time.time() - start\n \n if self._profile_dir is not None:\n- prof_filename = os.path.join(self._profile_dir,\n- '%s.%s.%06dms.%d.prof' % (\n- environ['REQUEST_METHOD'],\n- environ.get('PATH_INFO').strip(\n- '/').replace('/', '.') or 'root',\n- elapsed * 1000.0,\n- time.time()\n- ))\n+ if callable(self._filename_format):\n+ filename = self._filename_format(environ)\n+ else:\n+ filename = self._filename_format.format(\n+ method=environ['REQUEST_METHOD'],\n+ path=(\n+ environ.get('PATH_INFO').strip('/').replace('/', '.')\n+ or 'root'),\n+ elapsed=elapsed * 1000.0,\n+ time=time.time(),\n+ )\n+ prof_filename = os.path.join(self._profile_dir, filename + '.prof')\n p.dump_stats(prof_filename)\n \n else:\n", "issue": "[feature request] werkzeug.contrib.profiler.ProfilerMiddleware custom file name formatting\nIt would be nice to allow users to customize the output file name format when using the ProfilerMiddleware.\r\n\r\nCurrently, the file name format is `'%(method)s.%(path)s.%(elapsed)06dms.%(time)d'`, which 
when multiple invocations have occurred leads to the profile files being ordered by their respective elapsed times. Having the ability to customize this format allows profiling users to make use of the resultant profile files in additional ways.\r\n\r\nI propose the `profiler.ProfilerMiddleware` is changed to allow users to customize the format of the profile filenames for their specific needs.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.contrib.profiler\n ~~~~~~~~~~~~~~~~~~~~~~~~~\n\n This module provides a simple WSGI profiler middleware for finding\n bottlenecks in web application. It uses the :mod:`profile` or\n :mod:`cProfile` module to do the profiling and writes the stats to the\n stream provided (defaults to stderr).\n\n Example usage::\n\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport sys\nimport time\nimport os.path\ntry:\n try:\n from cProfile import Profile\n except ImportError:\n from profile import Profile\n from pstats import Stats\n available = True\nexcept ImportError:\n available = False\n\n\nclass MergeStream(object):\n\n \"\"\"An object that redirects `write` calls to multiple streams.\n Use this to log to both `sys.stdout` and a file::\n\n f = open('profiler.log', 'w')\n stream = MergeStream(sys.stdout, f)\n profiler = ProfilerMiddleware(app, stream)\n \"\"\"\n\n def __init__(self, *streams):\n if not streams:\n raise TypeError('at least one stream must be given')\n self.streams = streams\n\n def write(self, data):\n for stream in self.streams:\n stream.write(data)\n\n\nclass ProfilerMiddleware(object):\n\n \"\"\"Simple profiler middleware. Wraps a WSGI application and profiles\n a request. This intentionally buffers the response so that timings are\n more exact.\n\n By giving the `profile_dir` argument, pstat.Stats files are saved to that\n directory, one file per request. Without it, a summary is printed to\n `stream` instead.\n\n For the exact meaning of `sort_by` and `restrictions` consult the\n :mod:`profile` documentation.\n\n .. versionadded:: 0.9\n Added support for `restrictions` and `profile_dir`.\n\n :param app: the WSGI application to profile.\n :param stream: the stream for the profiled stats. 
defaults to stderr.\n :param sort_by: a tuple of columns to sort the result by.\n :param restrictions: a tuple of profiling strictions, not used if dumping\n to `profile_dir`.\n :param profile_dir: directory name to save pstat files\n \"\"\"\n\n def __init__(self, app, stream=None,\n sort_by=('time', 'calls'), restrictions=(), profile_dir=None):\n if not available:\n raise RuntimeError('the profiler is not available because '\n 'profile or pstat is not installed.')\n self._app = app\n self._stream = stream or sys.stdout\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n\n def __call__(self, environ, start_response):\n response_body = []\n\n def catching_start_response(status, headers, exc_info=None):\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp():\n appiter = self._app(environ, catching_start_response)\n response_body.extend(appiter)\n if hasattr(appiter, 'close'):\n appiter.close()\n\n p = Profile()\n start = time.time()\n p.runcall(runapp)\n body = b''.join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n prof_filename = os.path.join(self._profile_dir,\n '%s.%s.%06dms.%d.prof' % (\n environ['REQUEST_METHOD'],\n environ.get('PATH_INFO').strip(\n '/').replace('/', '.') or 'root',\n elapsed * 1000.0,\n time.time()\n ))\n p.dump_stats(prof_filename)\n\n else:\n stats = Stats(p, stream=self._stream)\n stats.sort_stats(*self._sort_by)\n\n self._stream.write('-' * 80)\n self._stream.write('\\nPATH: %r\\n' % environ.get('PATH_INFO'))\n stats.print_stats(*self._restrictions)\n self._stream.write('-' * 80 + '\\n\\n')\n\n return [body]\n\n\ndef make_action(app_factory, hostname='localhost', port=5000,\n threaded=False, processes=1, stream=None,\n sort_by=('time', 'calls'), restrictions=()):\n \"\"\"Return a new callback for :mod:`werkzeug.script` that starts a local\n server with the profiler enabled.\n\n ::\n\n from werkzeug.contrib import profiler\n action_profile = profiler.make_action(make_app)\n \"\"\"\n def action(hostname=('h', hostname), port=('p', port),\n threaded=threaded, processes=processes):\n \"\"\"Start a new development server.\"\"\"\n from werkzeug.serving import run_simple\n app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)\n run_simple(hostname, port, app, False, None, threaded, processes)\n return action\n", "path": "werkzeug/contrib/profiler.py"}]} | 2,142 | 935 |
gh_patches_debug_3226 | rasdani/github-patches | git_diff | conan-io__conan-center-index-352 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[spdlog] spdlog/1.4.2: header_only=True is broken.
This configuration was never tested for 1.4.2, neither here nor in the previous bincrafters edition.
1. (This is the error seen in the attached log.) Not an expert on how conandata.yml trick-shots work, but I think on conanfile.py:77 `tools.patch(tools.get(**self.conan_data["patches"][self.version]))` should be `tools.patch(**self.conan_data["patches"][self.version])`. Not sure how that one happened (controls Cee and Vee...). If I fix that, we get past this, but
2. The patch file it uses no longer works, and this is the case as well in the original `bincrafters/stable` edition, because everyone just bumps up version numbers without checking. 🙄👏
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **spdlog/1.4.2**
* Operating System+version: **Mac OS Mojave**
* Compiler+version: **n/a**
* Docker image: **n/a**
* Conan version: **conan 1.20.0**
* Python version: **Python 3.7.5**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
compiler=apple-clang
compiler.libcxx=libc++
compiler.version=10.0
os=Macos
os_build=Macos
[options]
header_only=True
[build_requires]
[env]
```
### Steps to reproduce (Include if Applicable)
$ conan install spdlog/1.4.2@ -o header_only=True --build missing
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
Configuration:
[settings]
arch=x86_64
arch_build=x86_64
build_type=Release
compiler=apple-clang
compiler.libcxx=libc++
compiler.version=10.0
os=Macos
os_build=Macos
[options]
header_only=True
[build_requires]
[env]
Installing package: spdlog/1.4.2
Requirements
fmt/6.0.0 from 'conan-center' - Cache
spdlog/1.4.2 from 'conan-center' - Cache
Packages
fmt/6.0.0:853c4b61e2571e98cd7b854c1cda6bc111b8b32c - Cache
spdlog/1.4.2:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Build
fmt/6.0.0: Already installed!
Downloading conan_sources.tgz: 100%|##########| 1.34k/1.34k [00:00<00:00, 2.73MB/s]
Decompressing conan_sources.tgz: 100%|##########| 1.34k/1.34k [00:00<00:00, 641kB/s]
spdlog/1.4.2: Configuring sources in /Users/martin/.conan/data/spdlog/1.4.2/_/_/source
Downloading v1.4.2.tar.gz: 100%|##########| 254k/254k [00:00<00:00, 3.37MB/s]
spdlog/1.4.2: Copying sources to build folder
spdlog/1.4.2: Building your package in /Users/martin/.conan/data/spdlog/1.4.2/_/_/build/5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
spdlog/1.4.2: Generator cmake created conanbuildinfo.cmake
spdlog/1.4.2: Generator cmake_find_package created Findfmt.cmake
spdlog/1.4.2: Calling build()
spdlog/1.4.2:
spdlog/1.4.2: ERROR: Package '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9' build failed
spdlog/1.4.2: WARN: Build folder /Users/martin/.conan/data/spdlog/1.4.2/_/_/build/5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9
ERROR: spdlog/1.4.2: Error in build() method, line 77
tools.patch(tools.get(**self.conan_data["patches"][self.version]))
TypeError: get() got an unexpected keyword argument 'base_path'
```
</details>
</issue>
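To make point 1 concrete: each `conan_data["patches"][<version>]` entry is a dict of keyword arguments intended for `tools.patch` (such as `patch_file` and `base_path`), so it should be unpacked directly rather than routed through `tools.get`. A minimal sketch of the corrected method — the example entry in the comment is illustrative, not the recipe's real conandata.yml:

```python
def build(self):
    self._disable_werror()
    if self.options.header_only:
        # Each conandata.yml entry is a dict of tools.patch kwargs,
        # e.g. {"patch_file": "patches/...", "base_path": "source_subfolder"},
        # so it must not be forwarded through tools.get.
        tools.patch(**self.conan_data["patches"][self.version])
    cmake = self._configure_cmake()
    cmake.build()
```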
<code>
[start of recipes/spdlog/1.4.x/conanfile.py]
1 import os
2 from conans import CMake, ConanFile, tools
3 from conans.errors import ConanInvalidConfiguration
4
5
6 class SpdlogConan(ConanFile):
7 name = "spdlog"
8 description = "Fast C++ logging library"
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://github.com/gabime/spdlog"
11 topics = ("conan", "spdlog", "logging", "header-only")
12 license = "MIT"
13 exports_sources = ["CMakeLists.txt", "patches/*"]
14 generators = "cmake", "cmake_find_package"
15 settings = "os", "arch", "compiler", "build_type"
16 options = {"shared": [True, False],
17 "fPIC": [True, False],
18 "header_only": [True, False],
19 "wchar_support": [True, False],
20 "wchar_filenames": [True, False],
21 "no_exceptions": [True, False]}
22 default_options = {"shared": False,
23 "fPIC": True,
24 "header_only": False,
25 "wchar_support": False,
26 "wchar_filenames": False,
27 "no_exceptions": False}
28
29 @property
30 def _source_subfolder(self):
31 return "source_subfolder"
32
33 def config_options(self):
34 if self.settings.os == "Windows":
35 del self.options.fPIC
36
37 def configure(self):
38 if self.options.header_only:
39 del self.options.shared
40 del self.options.fPIC
41 elif self.settings.os == "Windows" and self.options.shared:
42 raise ConanInvalidConfiguration("spdlog shared lib is not yet supported under windows")
43 if self.settings.os != "Windows" and \
44 (self.options.wchar_support or self.options.wchar_filenames):
45 raise ConanInvalidConfiguration("wchar is not yet supported under windows")
46
47 def requirements(self):
48 self.requires("fmt/6.0.0")
49
50 def source(self):
51 tools.get(**self.conan_data["sources"][self.version])
52 extracted_dir = self.name + "-" + self.version
53 os.rename(extracted_dir, self._source_subfolder)
54
55 def _configure_cmake(self):
56 cmake = CMake(self)
57 cmake.definitions["SPDLOG_BUILD_EXAMPLE"] = False
58 cmake.definitions["SPDLOG_BUILD_EXAMPLE_HO"] = False
59 cmake.definitions["SPDLOG_BUILD_TESTS"] = False
60 cmake.definitions["SPDLOG_BUILD_TESTS_HO"] = False
61 cmake.definitions["SPDLOG_BUILD_BENCH"] = False
62 cmake.definitions["SPDLOG_FMT_EXTERNAL"] = True
63 cmake.definitions["SPDLOG_BUILD_SHARED"] = not self.options.header_only and self.options.shared
64 cmake.definitions["SPDLOG_WCHAR_SUPPORT"] = self.options.wchar_support
65 cmake.definitions["SPDLOG_WCHAR_FILENAMES"] = self.options.wchar_filenames
66 cmake.definitions["SPDLOG_INSTALL"] = True
67 cmake.definitions["SPDLOG_NO_EXCEPTIONS"] = self.options.no_exceptions
68 cmake.configure()
69 return cmake
70
71 def _disable_werror(self):
72 tools.replace_in_file(os.path.join(self._source_subfolder, "cmake", "utils.cmake"), "/WX", "")
73
74 def build(self):
75 self._disable_werror()
76 if self.options.header_only:
77 tools.patch(tools.get(**self.conan_data["patches"][self.version]))
78 cmake = self._configure_cmake()
79 cmake.build()
80
81 def package(self):
82 self.copy("LICENSE", dst='licenses', src=self._source_subfolder)
83 cmake = self._configure_cmake()
84 cmake.install()
85 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
86 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
87 tools.rmdir(os.path.join(self.package_folder, "lib", "spdlog", "cmake"))
88
89 def package_id(self):
90 if self.options.header_only:
91 self.info.header_only()
92
93 def package_info(self):
94 if self.options.header_only:
95 self.cpp_info.defines = ["SPDLOG_HEADER_ONLY", "SPDLOG_FMT_EXTERNAL"]
96 else:
97 self.cpp_info.libs = tools.collect_libs(self)
98 self.cpp_info.defines = ["SPDLOG_COMPILED_LIB", "SPDLOG_FMT_EXTERNAL"]
99 if self.options.wchar_support:
100 self.cpp_info.defines.append("SPDLOG_WCHAR_TO_UTF8_SUPPORT")
101 if self.options.wchar_filenames:
102 self.cpp_info.defines.append("SPDLOG_WCHAR_FILENAMES")
103 if self.options.no_exceptions:
104 self.cpp_info.defines.append("SPDLOG_NO_EXCEPTIONS")
105 if tools.os_info.is_linux:
106 self.cpp_info.libs.append("pthread")
107
[end of recipes/spdlog/1.4.x/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/spdlog/1.4.x/conanfile.py b/recipes/spdlog/1.4.x/conanfile.py
--- a/recipes/spdlog/1.4.x/conanfile.py
+++ b/recipes/spdlog/1.4.x/conanfile.py
@@ -74,7 +74,7 @@
def build(self):
self._disable_werror()
if self.options.header_only:
- tools.patch(tools.get(**self.conan_data["patches"][self.version]))
+ tools.patch(**self.conan_data["patches"][self.version])
cmake = self._configure_cmake()
cmake.build()
| {"golden_diff": "diff --git a/recipes/spdlog/1.4.x/conanfile.py b/recipes/spdlog/1.4.x/conanfile.py\n--- a/recipes/spdlog/1.4.x/conanfile.py\n+++ b/recipes/spdlog/1.4.x/conanfile.py\n@@ -74,7 +74,7 @@\n def build(self):\n self._disable_werror()\n if self.options.header_only:\n- tools.patch(tools.get(**self.conan_data[\"patches\"][self.version]))\n+ tools.patch(**self.conan_data[\"patches\"][self.version])\n cmake = self._configure_cmake()\n cmake.build()\n", "issue": "[spdlog] spdlog/1.4.2: header_only=True is broken.\nThis configuration was never tested for 1.4.2, neither here nor in the previous bincrafters edition.\r\n\r\n1. (This is the error seen in the attached log.) Not an expert on how conandata.yml trick-shots work, but I think on conanfile.py:77 `tools.patch(tools.get(**self.conan_data[\"patches\"][self.version]))` should be `tools.patch(**self.conan_data[\"patches\"][self.version])`. Not sure how that one happened (controls Cee and Vee...). If I fix that, we get past this, but\r\n2. The patch file it uses no longer works, and this is the case as well in the original `bincrafters/stable` edition, because everyone just bumps up version numbers without checking. \ud83d\ude44\ud83d\udc4f\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **spdlog/1.4.2**\r\n * Operating System+version: **Mac OS Mojave**\r\n * Compiler+version: **n/a**\r\n * Docker image: **n/a**\r\n * Conan version: **conan 1.20.0**\r\n * Python version: **Python 3.7.5**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Release\r\ncompiler=apple-clang\r\ncompiler.libcxx=libc++\r\ncompiler.version=10.0\r\nos=Macos\r\nos_build=Macos\r\n[options]\r\nheader_only=True\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\n$ conan install spdlog/1.4.2@ -o header_only=True --build missing\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nConfiguration:\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Release\r\ncompiler=apple-clang\r\ncompiler.libcxx=libc++\r\ncompiler.version=10.0\r\nos=Macos\r\nos_build=Macos\r\n[options]\r\nheader_only=True\r\n[build_requires]\r\n[env]\r\n\r\nInstalling package: spdlog/1.4.2\r\nRequirements\r\n fmt/6.0.0 from 'conan-center' - Cache\r\n spdlog/1.4.2 from 'conan-center' - Cache\r\nPackages\r\n fmt/6.0.0:853c4b61e2571e98cd7b854c1cda6bc111b8b32c - Cache\r\n spdlog/1.4.2:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Build\r\n\r\nfmt/6.0.0: Already installed!\r\nDownloading conan_sources.tgz: 100%|##########| 1.34k/1.34k [00:00<00:00, 2.73MB/s]\r\nDecompressing conan_sources.tgz: 100%|##########| 1.34k/1.34k [00:00<00:00, 641kB/s]\r\nspdlog/1.4.2: Configuring sources in /Users/martin/.conan/data/spdlog/1.4.2/_/_/source\r\nDownloading v1.4.2.tar.gz: 100%|##########| 254k/254k [00:00<00:00, 3.37MB/s]\r\n\r\nspdlog/1.4.2: Copying sources to build folder\r\nspdlog/1.4.2: Building your package in /Users/martin/.conan/data/spdlog/1.4.2/_/_/build/5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9\r\nspdlog/1.4.2: Generator cmake created conanbuildinfo.cmake\r\nspdlog/1.4.2: Generator cmake_find_package created Findfmt.cmake\r\nspdlog/1.4.2: Calling build()\r\nspdlog/1.4.2: \r\nspdlog/1.4.2: ERROR: Package '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9' build failed\r\nspdlog/1.4.2: WARN: 
Build folder /Users/martin/.conan/data/spdlog/1.4.2/_/_/build/5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9\r\nERROR: spdlog/1.4.2: Error in build() method, line 77\r\n\ttools.patch(tools.get(**self.conan_data[\"patches\"][self.version]))\r\n\tTypeError: get() got an unexpected keyword argument 'base_path'\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "import os\nfrom conans import CMake, ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass SpdlogConan(ConanFile):\n name = \"spdlog\"\n description = \"Fast C++ logging library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/gabime/spdlog\"\n topics = (\"conan\", \"spdlog\", \"logging\", \"header-only\")\n license = \"MIT\"\n exports_sources = [\"CMakeLists.txt\", \"patches/*\"]\n generators = \"cmake\", \"cmake_find_package\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False],\n \"fPIC\": [True, False],\n \"header_only\": [True, False],\n \"wchar_support\": [True, False],\n \"wchar_filenames\": [True, False],\n \"no_exceptions\": [True, False]}\n default_options = {\"shared\": False,\n \"fPIC\": True,\n \"header_only\": False,\n \"wchar_support\": False,\n \"wchar_filenames\": False,\n \"no_exceptions\": False}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.header_only:\n del self.options.shared\n del self.options.fPIC\n elif self.settings.os == \"Windows\" and self.options.shared:\n raise ConanInvalidConfiguration(\"spdlog shared lib is not yet supported under windows\")\n if self.settings.os != \"Windows\" and \\\n (self.options.wchar_support or self.options.wchar_filenames):\n raise ConanInvalidConfiguration(\"wchar is not yet supported under windows\")\n\n def requirements(self):\n self.requires(\"fmt/6.0.0\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.definitions[\"SPDLOG_BUILD_EXAMPLE\"] = False\n cmake.definitions[\"SPDLOG_BUILD_EXAMPLE_HO\"] = False\n cmake.definitions[\"SPDLOG_BUILD_TESTS\"] = False\n cmake.definitions[\"SPDLOG_BUILD_TESTS_HO\"] = False\n cmake.definitions[\"SPDLOG_BUILD_BENCH\"] = False\n cmake.definitions[\"SPDLOG_FMT_EXTERNAL\"] = True\n cmake.definitions[\"SPDLOG_BUILD_SHARED\"] = not self.options.header_only and self.options.shared\n cmake.definitions[\"SPDLOG_WCHAR_SUPPORT\"] = self.options.wchar_support\n cmake.definitions[\"SPDLOG_WCHAR_FILENAMES\"] = self.options.wchar_filenames\n cmake.definitions[\"SPDLOG_INSTALL\"] = True\n cmake.definitions[\"SPDLOG_NO_EXCEPTIONS\"] = self.options.no_exceptions\n cmake.configure()\n return cmake\n\n def _disable_werror(self):\n tools.replace_in_file(os.path.join(self._source_subfolder, \"cmake\", \"utils.cmake\"), \"/WX\", \"\")\n\n def build(self):\n self._disable_werror()\n if self.options.header_only:\n tools.patch(tools.get(**self.conan_data[\"patches\"][self.version]))\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(\"LICENSE\", dst='licenses', src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, 
\"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"spdlog\", \"cmake\"))\n\n def package_id(self):\n if self.options.header_only:\n self.info.header_only()\n\n def package_info(self):\n if self.options.header_only:\n self.cpp_info.defines = [\"SPDLOG_HEADER_ONLY\", \"SPDLOG_FMT_EXTERNAL\"]\n else:\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.defines = [\"SPDLOG_COMPILED_LIB\", \"SPDLOG_FMT_EXTERNAL\"]\n if self.options.wchar_support:\n self.cpp_info.defines.append(\"SPDLOG_WCHAR_TO_UTF8_SUPPORT\")\n if self.options.wchar_filenames:\n self.cpp_info.defines.append(\"SPDLOG_WCHAR_FILENAMES\")\n if self.options.no_exceptions:\n self.cpp_info.defines.append(\"SPDLOG_NO_EXCEPTIONS\")\n if tools.os_info.is_linux:\n self.cpp_info.libs.append(\"pthread\")\n", "path": "recipes/spdlog/1.4.x/conanfile.py"}]} | 2,978 | 145 |
gh_patches_debug_603 | rasdani/github-patches | git_diff | pex-tool__pex-1761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.87
On the docket:
+ [ ] A relative --tmpdir foils pex3 lock create. #1758
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.86"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.86"
+__version__ = "2.1.87"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.86\"\n+__version__ = \"2.1.87\"\n", "issue": "Release 2.1.87\nOn the docket:\r\n+ [ ] A relative --tmpdir foils pex3 lock create. #1758\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.86\"\n", "path": "pex/version.py"}]} | 618 | 96 |
gh_patches_debug_6507 | rasdani/github-patches | git_diff | aws__aws-sam-cli-2007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect inline help in "sam local generate-event" command
<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed). -->
### Describe your idea/feature/enhancement
Using the CLI I had some problems with the inline help of the command "sam local generate-event". I was trying to pipe the event generated by that command into "sam local invoke" and it failed. The part of the inline help that is incorrect is this:
`{...} After you generate a sample event, you can use it to test your Lambda function locally
$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id> {...}`
In the web documentation here (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-cli-command-reference-sam-local-generate-event.html) the help is correct:
`After you generate a sample event, you can use it to test your Lambda function locally
sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke -e - <function logical id>`
### Proposal
Replace the incorrect help by the correct one.
</issue>
<code>
[start of samcli/commands/local/generate_event/cli.py]
1 """
2 Sets up the cli for generate-event
3 """
4
5 import click
6
7 from samcli.cli.main import pass_context
8 from samcli.commands.local.generate_event.event_generation import GenerateEventCommand
9
10 HELP_TEXT = """
11 You can use this command to generate sample payloads from different event sources
12 such as S3, API Gateway, and SNS. These payloads contain the information that the
13 event sources send to your Lambda functions.\n
14 \b
15 Generate the event that S3 sends to your Lambda function when a new object is uploaded
16 $ sam local generate-event s3 [put/delete]\n
17 \b
18 You can even customize the event by adding parameter flags. To find which flags apply to your command,
19 run:\n
20 $ sam local generate-event s3 [put/delete] --help\n
21 Then you can add in those flags that you wish to customize using\n
22 $ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key>\n
23 \b
24 After you generate a sample event, you can use it to test your Lambda function locally
25 $ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id>
26 """
27
28
29 @click.command(name="generate-event", cls=GenerateEventCommand, help=HELP_TEXT)
30 @pass_context
31 def cli(self):
32 """
33 Generate an event for one of the services listed below:
34 """
35
[end of samcli/commands/local/generate_event/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/samcli/commands/local/generate_event/cli.py b/samcli/commands/local/generate_event/cli.py
--- a/samcli/commands/local/generate_event/cli.py
+++ b/samcli/commands/local/generate_event/cli.py
@@ -22,7 +22,7 @@
$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key>\n
\b
After you generate a sample event, you can use it to test your Lambda function locally
-$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id>
+$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke -e - <function logical id>
"""
| {"golden_diff": "diff --git a/samcli/commands/local/generate_event/cli.py b/samcli/commands/local/generate_event/cli.py\n--- a/samcli/commands/local/generate_event/cli.py\n+++ b/samcli/commands/local/generate_event/cli.py\n@@ -22,7 +22,7 @@\n $ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key>\\n\n \\b\n After you generate a sample event, you can use it to test your Lambda function locally\n-$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id>\n+$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke -e - <function logical id>\n \"\"\"\n", "issue": "Incorrect inline help in \"sam local generate-event\" command\n<!-- Make sure we don't have an existing Issue that reports the bug you are seeing (both open and closed). -->\r\n\r\n### Describe your idea/feature/enhancement\r\n\r\nUsing the CLI I had some problems with the inline help of the command \"sam local generate-event\". I was trying to pipe the event generated by that command with \"sam local invoke\" and it failed. The part of the inline help that it is incorrect is this:\r\n\r\n`{...} After you generate a sample event, you can use it to test your Lambda function locally\r\n $ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id> {...}`\r\n\r\nIn the web documentation here (https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/sam-cli-command-reference-sam-local-generate-event.html) the help is correct:\r\n\r\n`After you generate a sample event, you can use it to test your Lambda function locally\r\nsam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke -e - <function logical id>`\r\n### Proposal\r\n\r\nReplace the incorrect help by the correct one.\r\n\n", "before_files": [{"content": "\"\"\"\nSets up the cli for generate-event\n\"\"\"\n\nimport click\n\nfrom samcli.cli.main import pass_context\nfrom samcli.commands.local.generate_event.event_generation import GenerateEventCommand\n\nHELP_TEXT = \"\"\"\nYou can use this command to generate sample payloads from different event sources\nsuch as S3, API Gateway, and SNS. These payloads contain the information that the\nevent sources send to your Lambda functions.\\n\n\\b\nGenerate the event that S3 sends to your Lambda function when a new object is uploaded\n$ sam local generate-event s3 [put/delete]\\n\n\\b\nYou can even customize the event by adding parameter flags. To find which flags apply to your command,\nrun:\\n\n$ sam local generate-event s3 [put/delete] --help\\n\nThen you can add in those flags that you wish to customize using\\n\n$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key>\\n\n\\b\nAfter you generate a sample event, you can use it to test your Lambda function locally\n$ sam local generate-event s3 [put/delete] --bucket <bucket> --key <key> | sam local invoke <function logical id>\n\"\"\"\n\n\[email protected](name=\"generate-event\", cls=GenerateEventCommand, help=HELP_TEXT)\n@pass_context\ndef cli(self):\n \"\"\"\n Generate an event for one of the services listed below:\n \"\"\"\n", "path": "samcli/commands/local/generate_event/cli.py"}]} | 1,165 | 176 |
gh_patches_debug_31006 | rasdani/github-patches | git_diff | pystiche__pystiche-397 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typing for vgg_multi_layer_encoder is broken
#392 changed the generation of `vgg_(11|13|16|19)(_bn)?_multi_layer_encoder` from static to dynamic. While this reduces repetition, it also breaks the type checks with `mypy`:
```python
from pystiche.enc import vgg11_multi_layer_encoder
```
```
error: Module 'pystiche.enc' has no attribute 'vgg11_multi_layer_encoder'; maybe "alexnet_multi_layer_encoder"? [attr-defined]
```
Since `mypy` only reads but does not import `pystiche.enc.models.vgg`, it has no knowledge that the functions will exist after import.
</issue>
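For illustration, here is a minimal, self-contained sketch (not pystiche code; every name below is made up) of why a static checker cannot see attributes that are only created when a module is executed:

```python
# sketch.py -- hypothetical module, not part of pystiche
from typing import Callable


def _make_loader(arch: str) -> Callable[[], str]:
    def loader() -> str:
        return arch
    return loader


# These names only come into existence when the module is *executed*.
for _arch in ("vgg11", "vgg19"):
    globals()[f"{_arch}_multi_layer_encoder"] = _make_loader(_arch)
```

At runtime `from sketch import vgg11_multi_layer_encoder` works, but `mypy` only parses the source and never runs the loop, so it reports the same `attr-defined` error shown above.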
<code>
[start of pystiche/enc/models/vgg.py]
1 import functools
2 import re
3 import warnings
4 from copy import copy
5 from typing import Any, Callable, Dict, List, Optional, Tuple, cast
6
7 import torchvision
8 from torch import hub, nn
9
10 from pystiche.misc import build_deprecation_message
11
12 from .utils import ModelMultiLayerEncoder
13 from .utils import select_url as _select_url
14
15 __all__ = [
16 "VGGMultiLayerEncoder",
17 "vgg11_multi_layer_encoder",
18 "vgg11_bn_multi_layer_encoder",
19 "vgg13_multi_layer_encoder",
20 "vgg13_bn_multi_layer_encoder",
21 "vgg16_multi_layer_encoder",
22 "vgg16_bn_multi_layer_encoder",
23 "vgg19_multi_layer_encoder",
24 "vgg19_bn_multi_layer_encoder",
25 ]
26
27 ARCH_PATTERN = re.compile(r"^vgg(?P<num_layers>(11|13|16|19)+)(?P<batch_norm>_bn)?$")
28
29 NUM_LAYERS_TO_CONFIGURATION = {
30 11: "A",
31 13: "B",
32 16: "D",
33 19: "E",
34 }
35
36
37 def _parse_arch(arch: str) -> Tuple[int, bool]:
38 match = ARCH_PATTERN.match(arch)
39 if match is None:
40 raise ValueError(
41 f"Unknown arch '{arch}'. It has to match 'vgg_(11|13|16|19)(_bn)?'"
42 )
43
44 num_layers = int(match.group("num_layers"))
45 batch_norm = match.group("batch_norm") is not None
46
47 return num_layers, batch_norm
48
49
50 def _make_description(arch: str, multi_layer_encoder: bool) -> str:
51 num_layers, batch_norm = _parse_arch(arch)
52
53 if multi_layer_encoder:
54 short = (
55 f"Multi-layer encoder based on :class:`~torchvision.models.VGG` "
56 f"{num_layers}{' with batch normalization' if batch_norm else ''}."
57 )
58 else:
59 short = f":class:`~torchvision.models.VGG` {num_layers} model"
60
61 long = (
62 f" The :class:`~torchvision.models.VGG` architecture was introduced by "
63 f"Krizhevsky, Sutskever, and Hinton in :cite:`KSH2012`. VGG{num_layers} "
64 f"corresponds to configuration ``{NUM_LAYERS_TO_CONFIGURATION[num_layers]}`` "
65 f"in the paper."
66 )
67 return "\n".join((short, "", long))
68
69
70 def _make_vgg_docstring(arch: str) -> str:
71 description = _make_description(arch, multi_layer_encoder=False)
72 args = r"""Args:
73 pretrained: If ``True``, loads weights from training on
74 :class:`~torchvision.models.ImageNet`. Defaults to ``False``.
75 framework: Framework that was used to train the model. Can be one of
76 ``"torch"`` (default) or ``"caffe"``.
77 .. note::
78
79 The weights for ``"caffe"`` were generated by Karen Simonyan and
80 Andrew Zisserman. See https://download.pystiche.org/models/LICENSE for
81 details.
82 progress: If ``True``, displays a progress bar to STDERR during download of
83 pretrained weights. Defaults to ``True``.
84 num_classes: Size of the output layer. Defaults to ``1000``.
85 .. note::
86
87 Pretrained weights are only available for ``num_classes == 1000``.
88 """
89 return "\n".join((description, "", args))
90
91
92 def select_url(arch: str, framework: str) -> str:
93 def format(key: Tuple[str, str]) -> str:
94 arch, framework = key
95 return "\n".join((f"arch={arch}", f"framework={framework}"))
96
97 return _select_url(MODEL_URLS, (arch, framework), format=format)
98
99
100 def _vgg_loader(arch: str) -> Callable[..., torchvision.models.VGG]:
101 loader = cast(
102 Callable[..., torchvision.models.VGG], getattr(torchvision.models, arch)
103 )
104
105 def vgg(
106 pretrained: bool = False,
107 framework: str = "torch",
108 progress: bool = True,
109 num_classes: int = 1000,
110 ) -> torchvision.models.VGG:
111 if pretrained and num_classes != 1000:
112 raise RuntimeError
113
114 model = loader(pretrained=False, num_classes=num_classes)
115
116 if not pretrained:
117 return model
118
119 state_dict = hub.load_state_dict_from_url(
120 select_url(arch, framework), progress=progress, check_hash=True,
121 )
122 model.load_state_dict(state_dict)
123 return model
124
125 vgg.__doc__ = _make_vgg_docstring(arch)
126
127 return vgg
128
129
130 TORCH_MODEL_URLS = torchvision.models.vgg.model_urls
131 ARCHS = tuple(TORCH_MODEL_URLS.keys())
132 MODEL_URLS = {(arch, "torch"): TORCH_MODEL_URLS[arch] for arch in ARCHS}
133 MODEL_URLS.update(
134 {
135 ("vgg16", "caffe"): "https://download.pystiche.org/models/vgg16-781be684.pth",
136 ("vgg19", "caffe"): "https://download.pystiche.org/models/vgg19-74e45263.pth",
137 }
138 )
139 MODELS = {arch: _vgg_loader(arch) for arch in ARCHS}
140
141
142 class VGGMultiLayerEncoder(ModelMultiLayerEncoder):
143 r"""Multi-layer encoder based on :class:`~torchvision.models.VGG`.
144
145 The :class:`~torchvision.models.VGG` architecture was introduced by Krizhevsky,
146 Sutskever, and Hinton in :cite:`KSH2012`
147
148 Args:
149 arch: :class:`~torchvision.models.VGG` architecture. Has to match
150 ``"vgg(11|13|16|19)(_bn)?"``.
151 pretrained: If ``True``, loads builtin weights. Defaults to ``True``.
152 framework: Name of the framework that was used to train the builtin weights.
153 Defaults to ``"torch"``.
154 kwargs: Optional arguments of :class:`ModelMultiLayerEncoder` .
155
156 Raises:
157 RuntimeError: If ``pretrained is True`` and no weights are available for the
158 combination of ``arch`` and ``framework``.
159 """
160
161 def __init__(self, arch: str, weights: Optional[str] = None, **kwargs: Any) -> None:
162 if weights is not None:
163 msg = build_deprecation_message(
164 "The parameter weights", "0.6.0", info="It was renamed to framework"
165 )
166 warnings.warn(msg, UserWarning)
167 kwargs["framework"] = weights
168
169 _parse_arch(arch)
170 self.arch = arch
171 super().__init__(**kwargs)
172
173 def state_dict_url(self, framework: str) -> str:
174 return select_url(self.arch, framework)
175
176 def collect_modules(
177 self, inplace: bool
178 ) -> Tuple[List[Tuple[str, nn.Module]], Dict[str, str]]:
179 model = MODELS[self.arch](pretrained=False)
180
181 modules = []
182 state_dict_key_map = {}
183 block = depth = 1
184 for idx, module in model.features.named_children():
185 if isinstance(module, nn.Conv2d):
186 name = f"conv{block}_{depth}"
187 elif isinstance(module, nn.BatchNorm2d):
188 name = f"bn{block}_{depth}"
189 elif isinstance(module, nn.ReLU):
190 module = nn.ReLU(inplace=inplace)
191 name = f"relu{block}_{depth}"
192 # each ReLU layer increases the depth of the current block
193 depth += 1
194 else: # isinstance(module, nn.MaxPool2d):
195 name = f"pool{block}"
196 # each pooling layer marks the end of the current block
197 block += 1
198 depth = 1
199
200 modules.append((name, module))
201 state_dict_key_map.update(
202 {
203 f"features.{idx}.{key}": f"{name}.{key}"
204 for key in module.state_dict().keys()
205 }
206 )
207
208 return modules, state_dict_key_map
209
210 def _properties(self) -> Dict[str, Any]:
211 dct = super()._properties()
212
213 dct["arch"] = self.arch
214 dct.move_to_end("arch", last=False) # type: ignore[attr-defined]
215
216 return dct
217
218
219 def _make_vgg_multi_layer_encoder_docstring(arch: str) -> str:
220 description = _make_description(arch, multi_layer_encoder=True)
221 args = r""" Args:
222 kwargs: Optional arguments of :class:`VGGMultiLayerEncoder` .
223 """
224 return "\n".join((description, "", args))
225
226
227 def _update_loader_magic(loader: Callable, name: str, doc: str) -> None:
228 loader.__module__ = VGGMultiLayerEncoder.__module__
229 loader.__name__ = loader.__qualname__ = name
230
231 annotations = copy(VGGMultiLayerEncoder.__init__.__annotations__)
232 del annotations["arch"]
233 annotations["return"] = VGGMultiLayerEncoder
234 loader.__annotations__ = annotations
235 loader.__doc__ = doc
236
237
238 for arch in ARCHS:
239 name = f"{arch}_multi_layer_encoder"
240 doc = _make_vgg_multi_layer_encoder_docstring(arch)
241 loader = functools.partial(VGGMultiLayerEncoder, arch)
242 _update_loader_magic(loader, name, doc)
243 locals()[name] = loader
244
[end of pystiche/enc/models/vgg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pystiche/enc/models/vgg.py b/pystiche/enc/models/vgg.py
--- a/pystiche/enc/models/vgg.py
+++ b/pystiche/enc/models/vgg.py
@@ -1,7 +1,5 @@
-import functools
import re
import warnings
-from copy import copy
from typing import Any, Callable, Dict, List, Optional, Tuple, cast
import torchvision
@@ -224,20 +222,39 @@
return "\n".join((description, "", args))
-def _update_loader_magic(loader: Callable, name: str, doc: str) -> None:
- loader.__module__ = VGGMultiLayerEncoder.__module__
- loader.__name__ = loader.__qualname__ = name
+def vgg11_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg11", **kwargs)
- annotations = copy(VGGMultiLayerEncoder.__init__.__annotations__)
- del annotations["arch"]
- annotations["return"] = VGGMultiLayerEncoder
- loader.__annotations__ = annotations
- loader.__doc__ = doc
+
+def vgg11_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg11_bn", **kwargs)
+
+
+def vgg13_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg13", **kwargs)
+
+
+def vgg13_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg13_bn", **kwargs)
+
+
+def vgg16_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg16", **kwargs)
+
+
+def vgg16_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg16_bn", **kwargs)
+
+
+def vgg19_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg19", **kwargs)
+
+
+def vgg19_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:
+ return VGGMultiLayerEncoder("vgg19_bn", **kwargs)
for arch in ARCHS:
name = f"{arch}_multi_layer_encoder"
doc = _make_vgg_multi_layer_encoder_docstring(arch)
- loader = functools.partial(VGGMultiLayerEncoder, arch)
- _update_loader_magic(loader, name, doc)
- locals()[name] = loader
+ globals()[name].__doc__ = doc
| {"golden_diff": "diff --git a/pystiche/enc/models/vgg.py b/pystiche/enc/models/vgg.py\n--- a/pystiche/enc/models/vgg.py\n+++ b/pystiche/enc/models/vgg.py\n@@ -1,7 +1,5 @@\n-import functools\n import re\n import warnings\n-from copy import copy\n from typing import Any, Callable, Dict, List, Optional, Tuple, cast\n \n import torchvision\n@@ -224,20 +222,39 @@\n return \"\\n\".join((description, \"\", args))\n \n \n-def _update_loader_magic(loader: Callable, name: str, doc: str) -> None:\n- loader.__module__ = VGGMultiLayerEncoder.__module__\n- loader.__name__ = loader.__qualname__ = name\n+def vgg11_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg11\", **kwargs)\n \n- annotations = copy(VGGMultiLayerEncoder.__init__.__annotations__)\n- del annotations[\"arch\"]\n- annotations[\"return\"] = VGGMultiLayerEncoder\n- loader.__annotations__ = annotations\n- loader.__doc__ = doc\n+\n+def vgg11_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg11_bn\", **kwargs)\n+\n+\n+def vgg13_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg13\", **kwargs)\n+\n+\n+def vgg13_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg13_bn\", **kwargs)\n+\n+\n+def vgg16_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg16\", **kwargs)\n+\n+\n+def vgg16_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg16_bn\", **kwargs)\n+\n+\n+def vgg19_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg19\", **kwargs)\n+\n+\n+def vgg19_bn_multi_layer_encoder(**kwargs: Any) -> VGGMultiLayerEncoder:\n+ return VGGMultiLayerEncoder(\"vgg19_bn\", **kwargs)\n \n \n for arch in ARCHS:\n name = f\"{arch}_multi_layer_encoder\"\n doc = _make_vgg_multi_layer_encoder_docstring(arch)\n- loader = functools.partial(VGGMultiLayerEncoder, arch)\n- _update_loader_magic(loader, name, doc)\n- locals()[name] = loader\n+ globals()[name].__doc__ = doc\n", "issue": "Typing for vgg_multi_layer_encoder is broken\n#392 changed the generation of `vgg_(11|13|16|19)(_bn)?_multi_layer_encoder` from static to dynamic. While this reduces repetition, it also breaks the type checks with `mypy`:\r\n\r\n```python\r\nfrom pystiche.enc import vgg11_multi_layer_encoder\r\n```\r\n\r\n```\r\nerror: Module 'pystiche.enc' has no attribute 'vgg11_multi_layer_encoder'; maybe \"alexnet_multi_layer_encoder\"? 
[attr-defined]\r\n```\r\n\r\nSince `mypy` only reads but not imports `pystiche.enc.models.vgg` it has no knowledge that the functions will exist after import.\n", "before_files": [{"content": "import functools\nimport re\nimport warnings\nfrom copy import copy\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, cast\n\nimport torchvision\nfrom torch import hub, nn\n\nfrom pystiche.misc import build_deprecation_message\n\nfrom .utils import ModelMultiLayerEncoder\nfrom .utils import select_url as _select_url\n\n__all__ = [\n \"VGGMultiLayerEncoder\",\n \"vgg11_multi_layer_encoder\",\n \"vgg11_bn_multi_layer_encoder\",\n \"vgg13_multi_layer_encoder\",\n \"vgg13_bn_multi_layer_encoder\",\n \"vgg16_multi_layer_encoder\",\n \"vgg16_bn_multi_layer_encoder\",\n \"vgg19_multi_layer_encoder\",\n \"vgg19_bn_multi_layer_encoder\",\n]\n\nARCH_PATTERN = re.compile(r\"^vgg(?P<num_layers>(11|13|16|19)+)(?P<batch_norm>_bn)?$\")\n\nNUM_LAYERS_TO_CONFIGURATION = {\n 11: \"A\",\n 13: \"B\",\n 16: \"D\",\n 19: \"E\",\n}\n\n\ndef _parse_arch(arch: str) -> Tuple[int, bool]:\n match = ARCH_PATTERN.match(arch)\n if match is None:\n raise ValueError(\n f\"Unknown arch '{arch}'. It has to match 'vgg_(11|13|16|19)(_bn)?'\"\n )\n\n num_layers = int(match.group(\"num_layers\"))\n batch_norm = match.group(\"batch_norm\") is not None\n\n return num_layers, batch_norm\n\n\ndef _make_description(arch: str, multi_layer_encoder: bool) -> str:\n num_layers, batch_norm = _parse_arch(arch)\n\n if multi_layer_encoder:\n short = (\n f\"Multi-layer encoder based on :class:`~torchvision.models.VGG` \"\n f\"{num_layers}{' with batch normalization' if batch_norm else ''}.\"\n )\n else:\n short = f\":class:`~torchvision.models.VGG` {num_layers} model\"\n\n long = (\n f\" The :class:`~torchvision.models.VGG` architecture was introduced by \"\n f\"Krizhevsky, Sutskever, and Hinton in :cite:`KSH2012`. VGG{num_layers} \"\n f\"corresponds to configuration ``{NUM_LAYERS_TO_CONFIGURATION[num_layers]}`` \"\n f\"in the paper.\"\n )\n return \"\\n\".join((short, \"\", long))\n\n\ndef _make_vgg_docstring(arch: str) -> str:\n description = _make_description(arch, multi_layer_encoder=False)\n args = r\"\"\"Args:\n pretrained: If ``True``, loads weights from training on\n :class:`~torchvision.models.ImageNet`. Defaults to ``False``.\n framework: Framework that was used to train the model. Can be one of\n ``\"torch\"`` (default) or ``\"caffe\"``.\n .. note::\n\n The weights for ``\"caffe\"`` were generated by Karen Simonyan and\n Andrew Zisserman. See https://download.pystiche.org/models/LICENSE for\n details.\n progress: If ``True``, displays a progress bar to STDERR during download of\n pretrained weights. Defaults to ``True``.\n num_classes: Size of the output layer. Defaults to ``1000``.\n .. 
note::\n\n Pretrained weights are only available for ``num_classes == 1000``.\n \"\"\"\n return \"\\n\".join((description, \"\", args))\n\n\ndef select_url(arch: str, framework: str) -> str:\n def format(key: Tuple[str, str]) -> str:\n arch, framework = key\n return \"\\n\".join((f\"arch={arch}\", f\"framework={framework}\"))\n\n return _select_url(MODEL_URLS, (arch, framework), format=format)\n\n\ndef _vgg_loader(arch: str) -> Callable[..., torchvision.models.VGG]:\n loader = cast(\n Callable[..., torchvision.models.VGG], getattr(torchvision.models, arch)\n )\n\n def vgg(\n pretrained: bool = False,\n framework: str = \"torch\",\n progress: bool = True,\n num_classes: int = 1000,\n ) -> torchvision.models.VGG:\n if pretrained and num_classes != 1000:\n raise RuntimeError\n\n model = loader(pretrained=False, num_classes=num_classes)\n\n if not pretrained:\n return model\n\n state_dict = hub.load_state_dict_from_url(\n select_url(arch, framework), progress=progress, check_hash=True,\n )\n model.load_state_dict(state_dict)\n return model\n\n vgg.__doc__ = _make_vgg_docstring(arch)\n\n return vgg\n\n\nTORCH_MODEL_URLS = torchvision.models.vgg.model_urls\nARCHS = tuple(TORCH_MODEL_URLS.keys())\nMODEL_URLS = {(arch, \"torch\"): TORCH_MODEL_URLS[arch] for arch in ARCHS}\nMODEL_URLS.update(\n {\n (\"vgg16\", \"caffe\"): \"https://download.pystiche.org/models/vgg16-781be684.pth\",\n (\"vgg19\", \"caffe\"): \"https://download.pystiche.org/models/vgg19-74e45263.pth\",\n }\n)\nMODELS = {arch: _vgg_loader(arch) for arch in ARCHS}\n\n\nclass VGGMultiLayerEncoder(ModelMultiLayerEncoder):\n r\"\"\"Multi-layer encoder based on :class:`~torchvision.models.VGG`.\n\n The :class:`~torchvision.models.VGG` architecture was introduced by Krizhevsky,\n Sutskever, and Hinton in :cite:`KSH2012`\n\n Args:\n arch: :class:`~torchvision.models.VGG` architecture. Has to match\n ``\"vgg(11|13|16|19)(_bn)?\"``.\n pretrained: If ``True``, loads builtin weights. 
Defaults to ``True``.\n framework: Name of the framework that was used to train the builtin weights.\n Defaults to ``\"torch\"``.\n kwargs: Optional arguments of :class:`ModelMultiLayerEncoder` .\n\n Raises:\n RuntimeError: If ``pretrained is True`` and no weights are available for the\n combination of ``arch`` and ``framework``.\n \"\"\"\n\n def __init__(self, arch: str, weights: Optional[str] = None, **kwargs: Any) -> None:\n if weights is not None:\n msg = build_deprecation_message(\n \"The parameter weights\", \"0.6.0\", info=\"It was renamed to framework\"\n )\n warnings.warn(msg, UserWarning)\n kwargs[\"framework\"] = weights\n\n _parse_arch(arch)\n self.arch = arch\n super().__init__(**kwargs)\n\n def state_dict_url(self, framework: str) -> str:\n return select_url(self.arch, framework)\n\n def collect_modules(\n self, inplace: bool\n ) -> Tuple[List[Tuple[str, nn.Module]], Dict[str, str]]:\n model = MODELS[self.arch](pretrained=False)\n\n modules = []\n state_dict_key_map = {}\n block = depth = 1\n for idx, module in model.features.named_children():\n if isinstance(module, nn.Conv2d):\n name = f\"conv{block}_{depth}\"\n elif isinstance(module, nn.BatchNorm2d):\n name = f\"bn{block}_{depth}\"\n elif isinstance(module, nn.ReLU):\n module = nn.ReLU(inplace=inplace)\n name = f\"relu{block}_{depth}\"\n # each ReLU layer increases the depth of the current block\n depth += 1\n else: # isinstance(module, nn.MaxPool2d):\n name = f\"pool{block}\"\n # each pooling layer marks the end of the current block\n block += 1\n depth = 1\n\n modules.append((name, module))\n state_dict_key_map.update(\n {\n f\"features.{idx}.{key}\": f\"{name}.{key}\"\n for key in module.state_dict().keys()\n }\n )\n\n return modules, state_dict_key_map\n\n def _properties(self) -> Dict[str, Any]:\n dct = super()._properties()\n\n dct[\"arch\"] = self.arch\n dct.move_to_end(\"arch\", last=False) # type: ignore[attr-defined]\n\n return dct\n\n\ndef _make_vgg_multi_layer_encoder_docstring(arch: str) -> str:\n description = _make_description(arch, multi_layer_encoder=True)\n args = r\"\"\" Args:\n kwargs: Optional arguments of :class:`VGGMultiLayerEncoder` .\n \"\"\"\n return \"\\n\".join((description, \"\", args))\n\n\ndef _update_loader_magic(loader: Callable, name: str, doc: str) -> None:\n loader.__module__ = VGGMultiLayerEncoder.__module__\n loader.__name__ = loader.__qualname__ = name\n\n annotations = copy(VGGMultiLayerEncoder.__init__.__annotations__)\n del annotations[\"arch\"]\n annotations[\"return\"] = VGGMultiLayerEncoder\n loader.__annotations__ = annotations\n loader.__doc__ = doc\n\n\nfor arch in ARCHS:\n name = f\"{arch}_multi_layer_encoder\"\n doc = _make_vgg_multi_layer_encoder_docstring(arch)\n loader = functools.partial(VGGMultiLayerEncoder, arch)\n _update_loader_magic(loader, name, doc)\n locals()[name] = loader\n", "path": "pystiche/enc/models/vgg.py"}]} | 3,425 | 630 |
gh_patches_debug_13175 | rasdani/github-patches | git_diff | plotly__plotly.py-1411 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Annotated heatmaps: font colour blends with the colour of the square.
When using the default color scheme, the font color contrasts normally with the fill of each heatmap square. But if you try other standard color schemes with `reversescale=True`, the colors merge when values are close to zero. I think this is the wrong default behavior.
</issue>
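A hypothetical reproduction sketch of the configuration described in the report, mirroring the docstring example already used in the module below; the `z` values and the `'Viridis'` colorscale are assumptions, not taken from the report:

```python
import plotly.plotly as py
import plotly.figure_factory as FF

z = [[0.02, -0.01, 0.60, -0.50],
     [0.01, -0.02, -0.60, 0.50]]

# With a reversed non-default colorscale, annotations on cells whose values
# are close to zero can end up with a font color close to the cell fill.
figure = FF.create_annotated_heatmap(z, colorscale='Viridis', reversescale=True)
py.iplot(figure)
```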
<code>
[start of plotly/figure_factory/_annotated_heatmap.py]
1 from __future__ import absolute_import
2
3 from plotly import exceptions, optional_imports
4 import plotly.colors as clrs
5 from plotly.figure_factory import utils
6 from plotly.graph_objs import graph_objs
7 from plotly.validators.heatmap import ColorscaleValidator
8
9 # Optional imports, may be None for users that only use our core functionality.
10 np = optional_imports.get_module('numpy')
11
12
13 def validate_annotated_heatmap(z, x, y, annotation_text):
14 """
15 Annotated-heatmap-specific validations
16
17 Check that if a text matrix is supplied, it has the same
18 dimensions as the z matrix.
19
20 See FigureFactory.create_annotated_heatmap() for params
21
22 :raises: (PlotlyError) If z and text matrices do not have the same
23 dimensions.
24 """
25 if annotation_text is not None and isinstance(annotation_text, list):
26 utils.validate_equal_length(z, annotation_text)
27 for lst in range(len(z)):
28 if len(z[lst]) != len(annotation_text[lst]):
29 raise exceptions.PlotlyError("z and text should have the "
30 "same dimensions")
31
32 if x:
33 if len(x) != len(z[0]):
34 raise exceptions.PlotlyError("oops, the x list that you "
35 "provided does not match the "
36 "width of your z matrix ")
37
38 if y:
39 if len(y) != len(z):
40 raise exceptions.PlotlyError("oops, the y list that you "
41 "provided does not match the "
42 "length of your z matrix ")
43
44
45 def create_annotated_heatmap(z, x=None, y=None, annotation_text=None,
46 colorscale='RdBu', font_colors=None,
47 showscale=False, reversescale=False,
48 **kwargs):
49 """
50 BETA function that creates annotated heatmaps
51
52 This function adds annotations to each cell of the heatmap.
53
54 :param (list[list]|ndarray) z: z matrix to create heatmap.
55 :param (list) x: x axis labels.
56 :param (list) y: y axis labels.
57 :param (list[list]|ndarray) annotation_text: Text strings for
58 annotations. Should have the same dimensions as the z matrix. If no
59 text is added, the values of the z matrix are annotated. Default =
60 z matrix values.
61 :param (list|str) colorscale: heatmap colorscale.
62 :param (list) font_colors: List of two color strings: [min_text_color,
63 max_text_color] where min_text_color is applied to annotations for
64 heatmap values < (max_value - min_value)/2. If font_colors is not
65 defined, the colors are defined logically as black or white
66 depending on the heatmap's colorscale.
67 :param (bool) showscale: Display colorscale. Default = False
68 :param (bool) reversescale: Reverse colorscale. Default = False
69 :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.
70 These kwargs describe other attributes about the annotated Heatmap
71 trace such as the colorscale. For more information on valid kwargs
72 call help(plotly.graph_objs.Heatmap)
73
74 Example 1: Simple annotated heatmap with default configuration
75 ```
76 import plotly.plotly as py
77 import plotly.figure_factory as FF
78
79 z = [[0.300000, 0.00000, 0.65, 0.300000],
80 [1, 0.100005, 0.45, 0.4300],
81 [0.300000, 0.00000, 0.65, 0.300000],
82 [1, 0.100005, 0.45, 0.00000]]
83
84 figure = FF.create_annotated_heatmap(z)
85 py.iplot(figure)
86 ```
87 """
88
89 # Avoiding mutables in the call signature
90 font_colors = font_colors if font_colors is not None else []
91 validate_annotated_heatmap(z, x, y, annotation_text)
92
93 # validate colorscale
94 colorscale_validator = ColorscaleValidator()
95 colorscale = colorscale_validator.validate_coerce(colorscale)
96
97 annotations = _AnnotatedHeatmap(z, x, y, annotation_text,
98 colorscale, font_colors, reversescale,
99 **kwargs).make_annotations()
100
101 if x or y:
102 trace = dict(type='heatmap', z=z, x=x, y=y, colorscale=colorscale,
103 showscale=showscale, reversescale=reversescale, **kwargs)
104 layout = dict(annotations=annotations,
105 xaxis=dict(ticks='', dtick=1, side='top',
106 gridcolor='rgb(0, 0, 0)'),
107 yaxis=dict(ticks='', dtick=1, ticksuffix=' '))
108 else:
109 trace = dict(type='heatmap', z=z, colorscale=colorscale,
110 showscale=showscale, reversescale=reversescale, **kwargs)
111 layout = dict(annotations=annotations,
112 xaxis=dict(ticks='', side='top',
113 gridcolor='rgb(0, 0, 0)',
114 showticklabels=False),
115 yaxis=dict(ticks='', ticksuffix=' ',
116 showticklabels=False))
117
118 data = [trace]
119
120 return graph_objs.Figure(data=data, layout=layout)
121
122
123 def to_rgb_color_list(color_str, default):
124 if 'rgb' in color_str:
125 return [int(v) for v in color_str.strip('rgb()').split(',')]
126 elif '#' in color_str:
127 return clrs.hex_to_rgb(color_str)
128 else:
129 return default
130
131
132 def should_use_black_text(background_color):
133 return (background_color[0] * 0.299 +
134 background_color[1] * 0.587 +
135 background_color[2] * 0.114) > 186
136
137
138 class _AnnotatedHeatmap(object):
139 """
140 Refer to TraceFactory.create_annotated_heatmap() for docstring
141 """
142 def __init__(self, z, x, y, annotation_text, colorscale,
143 font_colors, reversescale, **kwargs):
144
145 self.z = z
146 if x:
147 self.x = x
148 else:
149 self.x = range(len(z[0]))
150 if y:
151 self.y = y
152 else:
153 self.y = range(len(z))
154 if annotation_text is not None:
155 self.annotation_text = annotation_text
156 else:
157 self.annotation_text = self.z
158 self.colorscale = colorscale
159 self.reversescale = reversescale
160 self.font_colors = font_colors
161
162 def get_text_color(self):
163 """
164 Get font color for annotations.
165
166 The annotated heatmap can feature two text colors: min_text_color and
167 max_text_color. The min_text_color is applied to annotations for
168 heatmap values < (max_value - min_value)/2. The user can define these
169 two colors. Otherwise the colors are defined logically as black or
170 white depending on the heatmap's colorscale.
171
172 :rtype (string, string) min_text_color, max_text_color: text
173 color for annotations for heatmap values <
174 (max_value - min_value)/2 and text color for annotations for
175 heatmap values >= (max_value - min_value)/2
176 """
177 # Plotly colorscales ranging from a lighter shade to a darker shade
178 colorscales = ['Greys', 'Greens', 'Blues',
179 'YIGnBu', 'YIOrRd', 'RdBu',
180 'Picnic', 'Jet', 'Hot', 'Blackbody',
181 'Earth', 'Electric', 'Viridis', 'Cividis']
182 # Plotly colorscales ranging from a darker shade to a lighter shade
183 colorscales_reverse = ['Reds']
184
185 white = '#FFFFFF'
186 black = '#000000'
187 if self.font_colors:
188 min_text_color = self.font_colors[0]
189 max_text_color = self.font_colors[-1]
190 elif self.colorscale in colorscales and self.reversescale:
191 min_text_color = black
192 max_text_color = white
193 elif self.colorscale in colorscales:
194 min_text_color = white
195 max_text_color = black
196 elif self.colorscale in colorscales_reverse and self.reversescale:
197 min_text_color = white
198 max_text_color = black
199 elif self.colorscale in colorscales_reverse:
200 min_text_color = black
201 max_text_color = white
202 elif isinstance(self.colorscale, list):
203
204 min_col = to_rgb_color_list(self.colorscale[0][1],
205 [255, 255, 255])
206 max_col = to_rgb_color_list(self.colorscale[-1][1],
207 [255, 255, 255])
208
209 # swap min/max colors if reverse scale
210 if self.reversescale:
211 min_col, max_col = max_col, min_col
212
213 if should_use_black_text(min_col):
214 min_text_color = black
215 else:
216 min_text_color = white
217
218 if should_use_black_text(max_col):
219 max_text_color = black
220 else:
221 max_text_color = white
222 else:
223 min_text_color = black
224 max_text_color = black
225 return min_text_color, max_text_color
226
227 def get_z_mid(self):
228 """
229 Get the mid value of z matrix
230
231 :rtype (float) z_avg: average val from z matrix
232 """
233 if np and isinstance(self.z, np.ndarray):
234 z_min = np.amin(self.z)
235 z_max = np.amax(self.z)
236 else:
237 z_min = min(min(self.z))
238 z_max = max(max(self.z))
239 z_mid = (z_max+z_min) / 2
240 return z_mid
241
242 def make_annotations(self):
243 """
244 Get annotations for each cell of the heatmap with graph_objs.Annotation
245
246 :rtype (list[dict]) annotations: list of annotations for each cell of
247 the heatmap
248 """
249 min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)
250 z_mid = _AnnotatedHeatmap.get_z_mid(self)
251 annotations = []
252 for n, row in enumerate(self.z):
253 for m, val in enumerate(row):
254 font_color = min_text_color if val < z_mid else max_text_color
255 annotations.append(
256 graph_objs.layout.Annotation(
257 text=str(self.annotation_text[n][m]),
258 x=self.x[m],
259 y=self.y[n],
260 xref='x1',
261 yref='y1',
262 font=dict(color=font_color),
263 showarrow=False))
264 return annotations
265
[end of plotly/figure_factory/_annotated_heatmap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plotly/figure_factory/_annotated_heatmap.py b/plotly/figure_factory/_annotated_heatmap.py
--- a/plotly/figure_factory/_annotated_heatmap.py
+++ b/plotly/figure_factory/_annotated_heatmap.py
@@ -1,4 +1,4 @@
-from __future__ import absolute_import
+from __future__ import absolute_import, division
from plotly import exceptions, optional_imports
import plotly.colors as clrs
@@ -234,8 +234,8 @@
z_min = np.amin(self.z)
z_max = np.amax(self.z)
else:
- z_min = min(min(self.z))
- z_max = max(max(self.z))
+ z_min = min([v for row in self.z for v in row])
+ z_max = max([v for row in self.z for v in row])
z_mid = (z_max+z_min) / 2
return z_mid
| {"golden_diff": "diff --git a/plotly/figure_factory/_annotated_heatmap.py b/plotly/figure_factory/_annotated_heatmap.py\n--- a/plotly/figure_factory/_annotated_heatmap.py\n+++ b/plotly/figure_factory/_annotated_heatmap.py\n@@ -1,4 +1,4 @@\n-from __future__ import absolute_import\n+from __future__ import absolute_import, division\n \n from plotly import exceptions, optional_imports\n import plotly.colors as clrs\n@@ -234,8 +234,8 @@\n z_min = np.amin(self.z)\n z_max = np.amax(self.z)\n else:\n- z_min = min(min(self.z))\n- z_max = max(max(self.z))\n+ z_min = min([v for row in self.z for v in row])\n+ z_max = max([v for row in self.z for v in row])\n z_mid = (z_max+z_min) / 2\n return z_mid\n", "issue": "Annotated heatmaps: font colour blends with the colour of the square.\nWhen using the default color scheme, the font color contrasts normally with the fill of the each heatmap square. But if you try other standard color schemes with `reversescale=True`, when values are close to zero, the colors will merge. I think this is the wrong default behavior.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom plotly import exceptions, optional_imports\nimport plotly.colors as clrs\nfrom plotly.figure_factory import utils\nfrom plotly.graph_objs import graph_objs\nfrom plotly.validators.heatmap import ColorscaleValidator\n\n# Optional imports, may be None for users that only use our core functionality.\nnp = optional_imports.get_module('numpy')\n\n\ndef validate_annotated_heatmap(z, x, y, annotation_text):\n \"\"\"\n Annotated-heatmap-specific validations\n\n Check that if a text matrix is supplied, it has the same\n dimensions as the z matrix.\n\n See FigureFactory.create_annotated_heatmap() for params\n\n :raises: (PlotlyError) If z and text matrices do not have the same\n dimensions.\n \"\"\"\n if annotation_text is not None and isinstance(annotation_text, list):\n utils.validate_equal_length(z, annotation_text)\n for lst in range(len(z)):\n if len(z[lst]) != len(annotation_text[lst]):\n raise exceptions.PlotlyError(\"z and text should have the \"\n \"same dimensions\")\n\n if x:\n if len(x) != len(z[0]):\n raise exceptions.PlotlyError(\"oops, the x list that you \"\n \"provided does not match the \"\n \"width of your z matrix \")\n\n if y:\n if len(y) != len(z):\n raise exceptions.PlotlyError(\"oops, the y list that you \"\n \"provided does not match the \"\n \"length of your z matrix \")\n\n\ndef create_annotated_heatmap(z, x=None, y=None, annotation_text=None,\n colorscale='RdBu', font_colors=None,\n showscale=False, reversescale=False,\n **kwargs):\n \"\"\"\n BETA function that creates annotated heatmaps\n\n This function adds annotations to each cell of the heatmap.\n\n :param (list[list]|ndarray) z: z matrix to create heatmap.\n :param (list) x: x axis labels.\n :param (list) y: y axis labels.\n :param (list[list]|ndarray) annotation_text: Text strings for\n annotations. Should have the same dimensions as the z matrix. If no\n text is added, the values of the z matrix are annotated. Default =\n z matrix values.\n :param (list|str) colorscale: heatmap colorscale.\n :param (list) font_colors: List of two color strings: [min_text_color,\n max_text_color] where min_text_color is applied to annotations for\n heatmap values < (max_value - min_value)/2. If font_colors is not\n defined, the colors are defined logically as black or white\n depending on the heatmap's colorscale.\n :param (bool) showscale: Display colorscale. 
Default = False\n :param (bool) reversescale: Reverse colorscale. Default = False\n :param kwargs: kwargs passed through plotly.graph_objs.Heatmap.\n These kwargs describe other attributes about the annotated Heatmap\n trace such as the colorscale. For more information on valid kwargs\n call help(plotly.graph_objs.Heatmap)\n\n Example 1: Simple annotated heatmap with default configuration\n ```\n import plotly.plotly as py\n import plotly.figure_factory as FF\n\n z = [[0.300000, 0.00000, 0.65, 0.300000],\n [1, 0.100005, 0.45, 0.4300],\n [0.300000, 0.00000, 0.65, 0.300000],\n [1, 0.100005, 0.45, 0.00000]]\n\n figure = FF.create_annotated_heatmap(z)\n py.iplot(figure)\n ```\n \"\"\"\n\n # Avoiding mutables in the call signature\n font_colors = font_colors if font_colors is not None else []\n validate_annotated_heatmap(z, x, y, annotation_text)\n\n # validate colorscale\n colorscale_validator = ColorscaleValidator()\n colorscale = colorscale_validator.validate_coerce(colorscale)\n\n annotations = _AnnotatedHeatmap(z, x, y, annotation_text,\n colorscale, font_colors, reversescale,\n **kwargs).make_annotations()\n\n if x or y:\n trace = dict(type='heatmap', z=z, x=x, y=y, colorscale=colorscale,\n showscale=showscale, reversescale=reversescale, **kwargs)\n layout = dict(annotations=annotations,\n xaxis=dict(ticks='', dtick=1, side='top',\n gridcolor='rgb(0, 0, 0)'),\n yaxis=dict(ticks='', dtick=1, ticksuffix=' '))\n else:\n trace = dict(type='heatmap', z=z, colorscale=colorscale,\n showscale=showscale, reversescale=reversescale, **kwargs)\n layout = dict(annotations=annotations,\n xaxis=dict(ticks='', side='top',\n gridcolor='rgb(0, 0, 0)',\n showticklabels=False),\n yaxis=dict(ticks='', ticksuffix=' ',\n showticklabels=False))\n\n data = [trace]\n\n return graph_objs.Figure(data=data, layout=layout)\n\n\ndef to_rgb_color_list(color_str, default):\n if 'rgb' in color_str:\n return [int(v) for v in color_str.strip('rgb()').split(',')]\n elif '#' in color_str:\n return clrs.hex_to_rgb(color_str)\n else:\n return default\n\n\ndef should_use_black_text(background_color):\n return (background_color[0] * 0.299 +\n background_color[1] * 0.587 +\n background_color[2] * 0.114) > 186\n\n\nclass _AnnotatedHeatmap(object):\n \"\"\"\n Refer to TraceFactory.create_annotated_heatmap() for docstring\n \"\"\"\n def __init__(self, z, x, y, annotation_text, colorscale,\n font_colors, reversescale, **kwargs):\n\n self.z = z\n if x:\n self.x = x\n else:\n self.x = range(len(z[0]))\n if y:\n self.y = y\n else:\n self.y = range(len(z))\n if annotation_text is not None:\n self.annotation_text = annotation_text\n else:\n self.annotation_text = self.z\n self.colorscale = colorscale\n self.reversescale = reversescale\n self.font_colors = font_colors\n\n def get_text_color(self):\n \"\"\"\n Get font color for annotations.\n\n The annotated heatmap can feature two text colors: min_text_color and\n max_text_color. The min_text_color is applied to annotations for\n heatmap values < (max_value - min_value)/2. The user can define these\n two colors. 
Otherwise the colors are defined logically as black or\n white depending on the heatmap's colorscale.\n\n :rtype (string, string) min_text_color, max_text_color: text\n color for annotations for heatmap values <\n (max_value - min_value)/2 and text color for annotations for\n heatmap values >= (max_value - min_value)/2\n \"\"\"\n # Plotly colorscales ranging from a lighter shade to a darker shade\n colorscales = ['Greys', 'Greens', 'Blues',\n 'YIGnBu', 'YIOrRd', 'RdBu',\n 'Picnic', 'Jet', 'Hot', 'Blackbody',\n 'Earth', 'Electric', 'Viridis', 'Cividis']\n # Plotly colorscales ranging from a darker shade to a lighter shade\n colorscales_reverse = ['Reds']\n\n white = '#FFFFFF'\n black = '#000000'\n if self.font_colors:\n min_text_color = self.font_colors[0]\n max_text_color = self.font_colors[-1]\n elif self.colorscale in colorscales and self.reversescale:\n min_text_color = black\n max_text_color = white\n elif self.colorscale in colorscales:\n min_text_color = white\n max_text_color = black\n elif self.colorscale in colorscales_reverse and self.reversescale:\n min_text_color = white\n max_text_color = black\n elif self.colorscale in colorscales_reverse:\n min_text_color = black\n max_text_color = white\n elif isinstance(self.colorscale, list):\n\n min_col = to_rgb_color_list(self.colorscale[0][1],\n [255, 255, 255])\n max_col = to_rgb_color_list(self.colorscale[-1][1],\n [255, 255, 255])\n\n # swap min/max colors if reverse scale\n if self.reversescale:\n min_col, max_col = max_col, min_col\n\n if should_use_black_text(min_col):\n min_text_color = black\n else:\n min_text_color = white\n\n if should_use_black_text(max_col):\n max_text_color = black\n else:\n max_text_color = white\n else:\n min_text_color = black\n max_text_color = black\n return min_text_color, max_text_color\n\n def get_z_mid(self):\n \"\"\"\n Get the mid value of z matrix\n\n :rtype (float) z_avg: average val from z matrix\n \"\"\"\n if np and isinstance(self.z, np.ndarray):\n z_min = np.amin(self.z)\n z_max = np.amax(self.z)\n else:\n z_min = min(min(self.z))\n z_max = max(max(self.z))\n z_mid = (z_max+z_min) / 2\n return z_mid\n\n def make_annotations(self):\n \"\"\"\n Get annotations for each cell of the heatmap with graph_objs.Annotation\n\n :rtype (list[dict]) annotations: list of annotations for each cell of\n the heatmap\n \"\"\"\n min_text_color, max_text_color = _AnnotatedHeatmap.get_text_color(self)\n z_mid = _AnnotatedHeatmap.get_z_mid(self)\n annotations = []\n for n, row in enumerate(self.z):\n for m, val in enumerate(row):\n font_color = min_text_color if val < z_mid else max_text_color\n annotations.append(\n graph_objs.layout.Annotation(\n text=str(self.annotation_text[n][m]),\n x=self.x[m],\n y=self.y[n],\n xref='x1',\n yref='y1',\n font=dict(color=font_color),\n showarrow=False))\n return annotations\n", "path": "plotly/figure_factory/_annotated_heatmap.py"}]} | 3,682 | 219 |
gh_patches_debug_31065 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-184 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Feature: count async methods as methods inside classes
Here we count how many methods the class has: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/counts.py#L89
After that we can detect its complexity.
But, currently we only count regular `def`s. We need to count `async def`s as well.
What needs to be done?
1. We need to create a new node handler is the visitor: `visit_any_function`
2. We need to alias `visit_AsyncFunctionDef`, `visit_FunctionDef` to `visit_any_function`
3. We need to add new unit tests for `async` methods: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_complexity/test_counts/test_method_counts.py
</issue>
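For illustration, here is a minimal sketch of the aliasing approach the issue asks for, reusing the project's existing `alias` decorator; names and annotations are illustrative only, and `_check_method` refers to the helper already defined in the module shown below, so this is a fragment of the change rather than a standalone program:

```python
from wemake_python_styleguide.visitors.base import BaseNodeVisitor
from wemake_python_styleguide.visitors.decorators import alias


@alias('visit_any_function', (
    'visit_FunctionDef',
    'visit_AsyncFunctionDef',
))
class MethodMembersVisitor(BaseNodeVisitor):
    """Counts methods in a single class, both sync and async."""

    def visit_any_function(self, node) -> None:
        # Thanks to the alias above, this handler fires for both
        # `def` and `async def` nodes and routes them through the
        # existing counting logic.
        self._check_method(node)
        self.generic_visit(node)
```

With the alias in place, async methods go through the same counter as regular ones, which is exactly what the issue's three steps describe.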
<code>
[start of wemake_python_styleguide/visitors/ast/complexity/counts.py]
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from collections import defaultdict
5 from typing import DefaultDict, Union
6
7 from wemake_python_styleguide.logics.functions import is_method
8 from wemake_python_styleguide.types import AnyImport
9 from wemake_python_styleguide.violations.complexity import (
10 TooManyConditionsViolation,
11 TooManyImportsViolation,
12 TooManyMethodsViolation,
13 TooManyModuleMembersViolation,
14 )
15 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
16 from wemake_python_styleguide.visitors.decorators import alias
17
18 ConditionNodes = Union[ast.If, ast.While, ast.IfExp]
19 ModuleMembers = Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef]
20
21
22 @alias('visit_module_members', (
23 'visit_ClassDef',
24 'visit_AsyncFunctionDef',
25 'visit_FunctionDef',
26 ))
27 class ModuleMembersVisitor(BaseNodeVisitor):
28 """Counts classes and functions in a module."""
29
30 def __init__(self, *args, **kwargs) -> None:
31 """Creates a counter for tracked metrics."""
32 super().__init__(*args, **kwargs)
33 self._public_items_count = 0
34
35 def _check_members_count(self, node: ModuleMembers) -> None:
36 """This method increases the number of module members."""
37 parent = getattr(node, 'parent', None)
38 is_real_method = is_method(getattr(node, 'function_type', None))
39
40 if isinstance(parent, ast.Module) and not is_real_method:
41 self._public_items_count += 1
42
43 def _post_visit(self) -> None:
44 if self._public_items_count > self.options.max_module_members:
45 self.add_violation(TooManyModuleMembersViolation())
46
47 def visit_module_members(self, node: ModuleMembers) -> None:
48 """
49 Counts the number of ModuleMembers in a single module.
50
51 Raises:
52 TooManyModuleMembersViolation
53
54 """
55 self._check_members_count(node)
56 self.generic_visit(node)
57
58
59 @alias('visit_any_import', (
60 'visit_ImportFrom',
61 'visit_Import',
62 ))
63 class ImportMembersVisitor(BaseNodeVisitor):
64 """Counts imports in a module."""
65
66 def __init__(self, *args, **kwargs) -> None:
67 """Creates a counter for tracked metrics."""
68 super().__init__(*args, **kwargs)
69 self._imports_count = 0
70
71 def _post_visit(self) -> None:
72 if self._imports_count > self.options.max_imports:
73 self.add_violation(
74 TooManyImportsViolation(text=str(self._imports_count)),
75 )
76
77 def visit_any_import(self, node: AnyImport) -> None:
78 """
79 Counts the number of ``import`` and ``from ... import ...``.
80
81 Raises:
82 TooManyImportsViolation
83
84 """
85 self._imports_count += 1
86 self.generic_visit(node)
87
88
89 class MethodMembersVisitor(BaseNodeVisitor):
90 """Counts methods in a single class."""
91
92 def __init__(self, *args, **kwargs) -> None:
93 """Creates a counter for tracked methods in different classes."""
94 super().__init__(*args, **kwargs)
95 self._methods: DefaultDict[ast.ClassDef, int] = defaultdict(int)
96
97 def _check_method(self, node: ast.FunctionDef) -> None:
98 parent = getattr(node, 'parent', None)
99 if isinstance(parent, ast.ClassDef):
100 self._methods[parent] += 1
101
102 def _post_visit(self) -> None:
103 for node, count in self._methods.items():
104 if count > self.options.max_methods:
105 self.add_violation(TooManyMethodsViolation(text=node.name))
106
107 def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
108 """
109 Counts the number of methods in a single class.
110
111 Raises:
112 TooManyMethodsViolation
113
114 """
115 self._check_method(node)
116 self.generic_visit(node)
117
118
119 @alias('visit_condition', (
120 'visit_While',
121 'visit_IfExp',
122 'visit_If',
123 ))
124 class ConditionsVisitor(BaseNodeVisitor):
125 """Checks ``if`` and ``while`` statements for condition counts."""
126
127 def __init__(self, *args, **kwargs) -> None:
128 """Creates a counter for tracked conditions."""
129 super().__init__(*args, **kwargs)
130 self._conditions: DefaultDict[ast.AST, int] = defaultdict(int)
131
132 def _check_conditions(self, node: ast.AST) -> None:
133 for condition in ast.walk(node):
134 if isinstance(condition, (ast.And, ast.Or)):
135 self._conditions[node] += 1
136
137 def _post_visit(self) -> None:
138 for node, count in self._conditions.items():
139 if count > self.options.max_conditions - 1:
140 self.add_violation(
141 TooManyConditionsViolation(node, text=str(count)),
142 )
143
144 def visit_comprehension(self, node: ast.comprehension) -> None:
145 """
146 Counts the number of conditions in list comprehensions.
147
148 Raises:
149 TooManyConditionsViolation
150
151 """
152 if node.ifs:
153 # We only check the first `if`, since it is forbidden
154 # to have more than one at a time
155 # by `MultipleIfsInComprehensionViolation`
156 self._check_conditions(node.ifs[0])
157 self.generic_visit(node)
158
159 def visit_condition(self, node: ConditionNodes) -> None:
160 """
161 Counts the number of conditions.
162
163 Raises:
164 TooManyConditionsViolation
165
166 """
167 self._check_conditions(node.test)
168 self.generic_visit(node)
169
[end of wemake_python_styleguide/visitors/ast/complexity/counts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/ast/complexity/counts.py b/wemake_python_styleguide/visitors/ast/complexity/counts.py
--- a/wemake_python_styleguide/visitors/ast/complexity/counts.py
+++ b/wemake_python_styleguide/visitors/ast/complexity/counts.py
@@ -17,6 +17,7 @@
ConditionNodes = Union[ast.If, ast.While, ast.IfExp]
ModuleMembers = Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef]
+MethodMembers = Union[ast.FunctionDef, ast.AsyncFunctionDef]
@alias('visit_module_members', (
@@ -86,6 +87,10 @@
self.generic_visit(node)
+@alias('visit_any_function', (
+ 'visit_FunctionDef',
+ 'visit_AsyncFunctionDef',
+))
class MethodMembersVisitor(BaseNodeVisitor):
"""Counts methods in a single class."""
@@ -94,7 +99,7 @@
super().__init__(*args, **kwargs)
self._methods: DefaultDict[ast.ClassDef, int] = defaultdict(int)
- def _check_method(self, node: ast.FunctionDef) -> None:
+ def _check_method(self, node: MethodMembers) -> None:
parent = getattr(node, 'parent', None)
if isinstance(parent, ast.ClassDef):
self._methods[parent] += 1
@@ -104,7 +109,7 @@
if count > self.options.max_methods:
self.add_violation(TooManyMethodsViolation(text=node.name))
- def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
+ def visit_any_function(self, node: MethodMembers) -> None:
"""
Counts the number of methods in a single class.
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/complexity/counts.py b/wemake_python_styleguide/visitors/ast/complexity/counts.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/counts.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/counts.py\n@@ -17,6 +17,7 @@\n \n ConditionNodes = Union[ast.If, ast.While, ast.IfExp]\n ModuleMembers = Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef]\n+MethodMembers = Union[ast.FunctionDef, ast.AsyncFunctionDef]\n \n \n @alias('visit_module_members', (\n@@ -86,6 +87,10 @@\n self.generic_visit(node)\n \n \n+@alias('visit_any_function', (\n+ 'visit_FunctionDef',\n+ 'visit_AsyncFunctionDef',\n+))\n class MethodMembersVisitor(BaseNodeVisitor):\n \"\"\"Counts methods in a single class.\"\"\"\n \n@@ -94,7 +99,7 @@\n super().__init__(*args, **kwargs)\n self._methods: DefaultDict[ast.ClassDef, int] = defaultdict(int)\n \n- def _check_method(self, node: ast.FunctionDef) -> None:\n+ def _check_method(self, node: MethodMembers) -> None:\n parent = getattr(node, 'parent', None)\n if isinstance(parent, ast.ClassDef):\n self._methods[parent] += 1\n@@ -104,7 +109,7 @@\n if count > self.options.max_methods:\n self.add_violation(TooManyMethodsViolation(text=node.name))\n \n- def visit_FunctionDef(self, node: ast.FunctionDef) -> None:\n+ def visit_any_function(self, node: MethodMembers) -> None:\n \"\"\"\n Counts the number of methods in a single class.\n", "issue": "Feature: count async methods as methods inside classes\nHere we count how many methods the class has: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/counts.py#L89\r\n\r\nAfter that we can detect its complexity.\r\nBut, currently we only count regular `def`s. We need to count `async def`s as well.\r\n\r\nWhat need to be done?\r\n1. We need to create a new node handler is the visitor: `visit_any_function`\r\n2. We need to alias `visit_AsyncFunctionDef`, `visit_FunctionDef` to `visit_any_function`\r\n3. 
We need to add new unit tests for `async` methods: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_complexity/test_counts/test_method_counts.py\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import DefaultDict, Union\n\nfrom wemake_python_styleguide.logics.functions import is_method\nfrom wemake_python_styleguide.types import AnyImport\nfrom wemake_python_styleguide.violations.complexity import (\n TooManyConditionsViolation,\n TooManyImportsViolation,\n TooManyMethodsViolation,\n TooManyModuleMembersViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\nfrom wemake_python_styleguide.visitors.decorators import alias\n\nConditionNodes = Union[ast.If, ast.While, ast.IfExp]\nModuleMembers = Union[ast.AsyncFunctionDef, ast.FunctionDef, ast.ClassDef]\n\n\n@alias('visit_module_members', (\n 'visit_ClassDef',\n 'visit_AsyncFunctionDef',\n 'visit_FunctionDef',\n))\nclass ModuleMembersVisitor(BaseNodeVisitor):\n \"\"\"Counts classes and functions in a module.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n self._public_items_count = 0\n\n def _check_members_count(self, node: ModuleMembers) -> None:\n \"\"\"This method increases the number of module members.\"\"\"\n parent = getattr(node, 'parent', None)\n is_real_method = is_method(getattr(node, 'function_type', None))\n\n if isinstance(parent, ast.Module) and not is_real_method:\n self._public_items_count += 1\n\n def _post_visit(self) -> None:\n if self._public_items_count > self.options.max_module_members:\n self.add_violation(TooManyModuleMembersViolation())\n\n def visit_module_members(self, node: ModuleMembers) -> None:\n \"\"\"\n Counts the number of ModuleMembers in a single module.\n\n Raises:\n TooManyModuleMembersViolation\n\n \"\"\"\n self._check_members_count(node)\n self.generic_visit(node)\n\n\n@alias('visit_any_import', (\n 'visit_ImportFrom',\n 'visit_Import',\n))\nclass ImportMembersVisitor(BaseNodeVisitor):\n \"\"\"Counts imports in a module.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked metrics.\"\"\"\n super().__init__(*args, **kwargs)\n self._imports_count = 0\n\n def _post_visit(self) -> None:\n if self._imports_count > self.options.max_imports:\n self.add_violation(\n TooManyImportsViolation(text=str(self._imports_count)),\n )\n\n def visit_any_import(self, node: AnyImport) -> None:\n \"\"\"\n Counts the number of ``import`` and ``from ... 
import ...``.\n\n Raises:\n TooManyImportsViolation\n\n \"\"\"\n self._imports_count += 1\n self.generic_visit(node)\n\n\nclass MethodMembersVisitor(BaseNodeVisitor):\n \"\"\"Counts methods in a single class.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked methods in different classes.\"\"\"\n super().__init__(*args, **kwargs)\n self._methods: DefaultDict[ast.ClassDef, int] = defaultdict(int)\n\n def _check_method(self, node: ast.FunctionDef) -> None:\n parent = getattr(node, 'parent', None)\n if isinstance(parent, ast.ClassDef):\n self._methods[parent] += 1\n\n def _post_visit(self) -> None:\n for node, count in self._methods.items():\n if count > self.options.max_methods:\n self.add_violation(TooManyMethodsViolation(text=node.name))\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> None:\n \"\"\"\n Counts the number of methods in a single class.\n\n Raises:\n TooManyMethodsViolation\n\n \"\"\"\n self._check_method(node)\n self.generic_visit(node)\n\n\n@alias('visit_condition', (\n 'visit_While',\n 'visit_IfExp',\n 'visit_If',\n))\nclass ConditionsVisitor(BaseNodeVisitor):\n \"\"\"Checks ``if`` and ``while`` statements for condition counts.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Creates a counter for tracked conditions.\"\"\"\n super().__init__(*args, **kwargs)\n self._conditions: DefaultDict[ast.AST, int] = defaultdict(int)\n\n def _check_conditions(self, node: ast.AST) -> None:\n for condition in ast.walk(node):\n if isinstance(condition, (ast.And, ast.Or)):\n self._conditions[node] += 1\n\n def _post_visit(self) -> None:\n for node, count in self._conditions.items():\n if count > self.options.max_conditions - 1:\n self.add_violation(\n TooManyConditionsViolation(node, text=str(count)),\n )\n\n def visit_comprehension(self, node: ast.comprehension) -> None:\n \"\"\"\n Counts the number of conditions in list comprehensions.\n\n Raises:\n TooManyConditionsViolation\n\n \"\"\"\n if node.ifs:\n # We only check the first `if`, since it is forbidden\n # to have more than one at a time\n # by `MultipleIfsInComprehensionViolation`\n self._check_conditions(node.ifs[0])\n self.generic_visit(node)\n\n def visit_condition(self, node: ConditionNodes) -> None:\n \"\"\"\n Counts the number of conditions.\n\n Raises:\n TooManyConditionsViolation\n\n \"\"\"\n self._check_conditions(node.test)\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/counts.py"}]} | 2,352 | 404 |
gh_patches_debug_65364 | rasdani/github-patches | git_diff | urllib3__urllib3-526 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
assert_hostname=False seems to be ignored in 1.10
I have some code that is relying on assert_hostname=False to work.
I upgraded urllib3 to version 1.10 and the code now fails with SSLError: hostname 'remote-host' doesn't match 'localhost'.
I haven't looked through the code to try to determine why this happens.
Did anyone else notice this? If so, what is the fix?
Essentially I am creating a connection pool like this:
``` python
import urllib3
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
ca_certs="cert.pem",
assert_hostname=False)
try:
r = http.request('GET', 'https://remote-host:4443/')
print("Certificate verification NO HOSTNAME successful")
except urllib3.exceptions.SSLError as e:
print ("SSL Error:", e)
return -1
return 0
```
</issue>
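The behaviour can be traced to `create_urllib3_context()` in the listing below: whenever `cert_reqs` is `CERT_REQUIRED`, it also switches on the `ssl` module's own `check_hostname`, so hostname matching happens inside the `ssl` handshake before urllib3's `assert_hostname=False` is ever consulted. A minimal check of that, assuming Python 3.2+ where `SSLContext.check_hostname` exists:

```python
import ssl

from urllib3.util.ssl_ import create_urllib3_context

ctx = create_urllib3_context(cert_reqs=ssl.CERT_REQUIRED)
# Before a fix, this prints True: the ssl module itself verifies the
# hostname during the handshake, regardless of assert_hostname=False.
print(ctx.check_hostname)
</a name>
```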
<code>
[start of urllib3/util/ssl_.py]
1 from binascii import hexlify, unhexlify
2 from hashlib import md5, sha1
3
4 from ..exceptions import SSLError
5
6
7 SSLContext = None
8 HAS_SNI = False
9 create_default_context = None
10
11 import errno
12 import ssl
13
14 try: # Test for SSL features
15 from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
16 from ssl import HAS_SNI # Has SNI?
17 except ImportError:
18 pass
19
20
21 try:
22 from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
23 except ImportError:
24 OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
25 OP_NO_COMPRESSION = 0x20000
26
27 try:
28 from ssl import _DEFAULT_CIPHERS
29 except ImportError:
30 _DEFAULT_CIPHERS = (
31 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
32 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
33 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
34 )
35
36 try:
37 from ssl import SSLContext # Modern SSL?
38 except ImportError:
39 import sys
40
41 class SSLContext(object): # Platform-specific: Python 2 & 3.1
42 supports_set_ciphers = sys.version_info >= (2, 7)
43
44 def __init__(self, protocol_version):
45 self.protocol = protocol_version
46 # Use default values from a real SSLContext
47 self.check_hostname = False
48 self.verify_mode = ssl.CERT_NONE
49 self.ca_certs = None
50 self.options = 0
51 self.certfile = None
52 self.keyfile = None
53 self.ciphers = None
54
55 def load_cert_chain(self, certfile, keyfile):
56 self.certfile = certfile
57 self.keyfile = keyfile
58
59 def load_verify_locations(self, location):
60 self.ca_certs = location
61
62 def set_ciphers(self, cipher_suite):
63 if not self.supports_set_ciphers:
64 raise TypeError(
65 'Your version of Python does not support setting '
66 'a custom cipher suite. Please upgrade to Python '
67 '2.7, 3.2, or later if you need this functionality.'
68 )
69 self.ciphers = cipher_suite
70
71 def wrap_socket(self, socket, server_hostname=None):
72 kwargs = {
73 'keyfile': self.keyfile,
74 'certfile': self.certfile,
75 'ca_certs': self.ca_certs,
76 'cert_reqs': self.verify_mode,
77 'ssl_version': self.protocol,
78 }
79 if self.supports_set_ciphers: # Platform-specific: Python 2.7+
80 return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
81 else: # Platform-specific: Python 2.6
82 return wrap_socket(socket, **kwargs)
83
84
85 def assert_fingerprint(cert, fingerprint):
86 """
87 Checks if given fingerprint matches the supplied certificate.
88
89 :param cert:
90 Certificate as bytes object.
91 :param fingerprint:
92 Fingerprint as string of hexdigits, can be interspersed by colons.
93 """
94
95 # Maps the length of a digest to a possible hash function producing
96 # this digest.
97 hashfunc_map = {
98 16: md5,
99 20: sha1
100 }
101
102 fingerprint = fingerprint.replace(':', '').lower()
103 digest_length, odd = divmod(len(fingerprint), 2)
104
105 if odd or digest_length not in hashfunc_map:
106 raise SSLError('Fingerprint is of invalid length.')
107
108 # We need encode() here for py32; works on py2 and p33.
109 fingerprint_bytes = unhexlify(fingerprint.encode())
110
111 hashfunc = hashfunc_map[digest_length]
112
113 cert_digest = hashfunc(cert).digest()
114
115 if not cert_digest == fingerprint_bytes:
116 raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
117 .format(hexlify(fingerprint_bytes),
118 hexlify(cert_digest)))
119
120
121 def resolve_cert_reqs(candidate):
122 """
123 Resolves the argument to a numeric constant, which can be passed to
124 the wrap_socket function/method from the ssl module.
125 Defaults to :data:`ssl.CERT_NONE`.
126 If given a string it is assumed to be the name of the constant in the
127 :mod:`ssl` module or its abbrevation.
128 (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
129 If it's neither `None` nor a string we assume it is already the numeric
130 constant which can directly be passed to wrap_socket.
131 """
132 if candidate is None:
133 return CERT_NONE
134
135 if isinstance(candidate, str):
136 res = getattr(ssl, candidate, None)
137 if res is None:
138 res = getattr(ssl, 'CERT_' + candidate)
139 return res
140
141 return candidate
142
143
144 def resolve_ssl_version(candidate):
145 """
146 like resolve_cert_reqs
147 """
148 if candidate is None:
149 return PROTOCOL_SSLv23
150
151 if isinstance(candidate, str):
152 res = getattr(ssl, candidate, None)
153 if res is None:
154 res = getattr(ssl, 'PROTOCOL_' + candidate)
155 return res
156
157 return candidate
158
159
160 def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
161 options=None, ciphers=None):
162 """All arguments have the same meaning as ``ssl_wrap_socket``.
163
164 By default, this function does a lot of the same work that
165 ``ssl.create_default_context`` does on Python 3.4+. It:
166
167 - Disables SSLv2, SSLv3, and compression
168 - Sets a restricted set of server ciphers
169
170 If you wish to enable SSLv3, you can do::
171
172 from urllib3.util import ssl_
173 context = ssl_.create_urllib3_context()
174 context.options &= ~ssl_.OP_NO_SSLv3
175
176 You can do the same to enable compression (substituting ``COMPRESSION``
177 for ``SSLv3`` in the last line above).
178
179 :param ssl_version:
180 The desired protocol version to use. This will default to
181 PROTOCOL_SSLv23 which will negotiate the highest protocol that both
182 the server and your installation of OpenSSL support.
183 :param cert_reqs:
184 Whether to require the certificate verification. This defaults to
185 ``ssl.CERT_REQUIRED``.
186 :param options:
187 Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
188 ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
189 :param ciphers:
190 Which cipher suites to allow the server to select.
191 :returns:
192 Constructed SSLContext object with specified options
193 :rtype: SSLContext
194 """
195 context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
196
197 if options is None:
198 options = 0
199 # SSLv2 is easily broken and is considered harmful and dangerous
200 options |= OP_NO_SSLv2
201 # SSLv3 has several problems and is now dangerous
202 options |= OP_NO_SSLv3
203 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
204 # (issue #309)
205 options |= OP_NO_COMPRESSION
206
207 context.options |= options
208
209 if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
210 context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
211
212 context.verify_mode = cert_reqs
213 if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
214 context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
215 return context
216
217
218 def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
219 ca_certs=None, server_hostname=None,
220 ssl_version=None, ciphers=None, ssl_context=None):
221 """
222 All arguments except for server_hostname and ssl_context have the same
223 meaning as they do when using :func:`ssl.wrap_socket`.
224
225 :param server_hostname:
226 When SNI is supported, the expected hostname of the certificate
227 :param ssl_context:
228 A pre-made :class:`SSLContext` object. If none is provided, one will
229 be created using :func:`create_urllib3_context`.
230 :param ciphers:
231 A string of ciphers we wish the client to support. This is not
232 supported on Python 2.6 as the ssl module does not support it.
233 """
234 context = ssl_context
235 if context is None:
236 context = create_urllib3_context(ssl_version, cert_reqs,
237 ciphers=ciphers)
238
239 if ca_certs:
240 try:
241 context.load_verify_locations(ca_certs)
242 except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
243 raise SSLError(e)
244 # Py33 raises FileNotFoundError which subclasses OSError
245 # These are not equivalent unless we check the errno attribute
246 except OSError as e: # Platform-specific: Python 3.3 and beyond
247 if e.errno == errno.ENOENT:
248 raise SSLError(e)
249 raise
250 if certfile:
251 context.load_cert_chain(certfile, keyfile)
252 if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
253 return context.wrap_socket(sock, server_hostname=server_hostname)
254 return context.wrap_socket(sock)
255
[end of urllib3/util/ssl_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py
--- a/urllib3/util/ssl_.py
+++ b/urllib3/util/ssl_.py
@@ -211,7 +211,9 @@
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
- context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
return context
| {"golden_diff": "diff --git a/urllib3/util/ssl_.py b/urllib3/util/ssl_.py\n--- a/urllib3/util/ssl_.py\n+++ b/urllib3/util/ssl_.py\n@@ -211,7 +211,9 @@\n \n context.verify_mode = cert_reqs\n if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2\n- context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)\n+ # We do our own verification, including fingerprints and alternative\n+ # hostnames. So disable it here\n+ context.check_hostname = False\n return context\n", "issue": "assert_hostname=False seems to be ignored in 1.10\nI have some code that is relying on assert_hostname=False to work.\nI upgrade urllib3 to version 1.10 and the code fails with SSLError: hostname 'remote-host' doesn't match 'localhost'\n\nI haven't looked through the code to try to determine why this happens.\n\nDid anyone else notice this? If so, what is the fix.\n\nEssentially I am creating a connection pool like this:\n\n``` python\nimport urllib3\n\nhttp = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',\n ca_certs=\"cert.pem\",\n assert_hostname=False)\ntry:\n r = http.request('GET', 'https://remote-host:4443/')\n print(\"Certificate verification NO HOSTNAME successful\")\n\nexcept urllib3.exceptions.SSLError as e:\n print (\"SSL Error:\", e)\n return -1\n\nreturn 0\n```\n\n", "before_files": [{"content": "from binascii import hexlify, unhexlify\nfrom hashlib import md5, sha1\n\nfrom ..exceptions import SSLError\n\n\nSSLContext = None\nHAS_SNI = False\ncreate_default_context = None\n\nimport errno\nimport ssl\n\ntry: # Test for SSL features\n from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23\n from ssl import HAS_SNI # Has SNI?\nexcept ImportError:\n pass\n\n\ntry:\n from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION\nexcept ImportError:\n OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000\n OP_NO_COMPRESSION = 0x20000\n\ntry:\n from ssl import _DEFAULT_CIPHERS\nexcept ImportError:\n _DEFAULT_CIPHERS = (\n 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'\n 'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'\n 'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'\n )\n\ntry:\n from ssl import SSLContext # Modern SSL?\nexcept ImportError:\n import sys\n\n class SSLContext(object): # Platform-specific: Python 2 & 3.1\n supports_set_ciphers = sys.version_info >= (2, 7)\n\n def __init__(self, protocol_version):\n self.protocol = protocol_version\n # Use default values from a real SSLContext\n self.check_hostname = False\n self.verify_mode = ssl.CERT_NONE\n self.ca_certs = None\n self.options = 0\n self.certfile = None\n self.keyfile = None\n self.ciphers = None\n\n def load_cert_chain(self, certfile, keyfile):\n self.certfile = certfile\n self.keyfile = keyfile\n\n def load_verify_locations(self, location):\n self.ca_certs = location\n\n def set_ciphers(self, cipher_suite):\n if not self.supports_set_ciphers:\n raise TypeError(\n 'Your version of Python does not support setting '\n 'a custom cipher suite. 
Please upgrade to Python '\n '2.7, 3.2, or later if you need this functionality.'\n )\n self.ciphers = cipher_suite\n\n def wrap_socket(self, socket, server_hostname=None):\n kwargs = {\n 'keyfile': self.keyfile,\n 'certfile': self.certfile,\n 'ca_certs': self.ca_certs,\n 'cert_reqs': self.verify_mode,\n 'ssl_version': self.protocol,\n }\n if self.supports_set_ciphers: # Platform-specific: Python 2.7+\n return wrap_socket(socket, ciphers=self.ciphers, **kwargs)\n else: # Platform-specific: Python 2.6\n return wrap_socket(socket, **kwargs)\n\n\ndef assert_fingerprint(cert, fingerprint):\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n # Maps the length of a digest to a possible hash function producing\n # this digest.\n hashfunc_map = {\n 16: md5,\n 20: sha1\n }\n\n fingerprint = fingerprint.replace(':', '').lower()\n digest_length, odd = divmod(len(fingerprint), 2)\n\n if odd or digest_length not in hashfunc_map:\n raise SSLError('Fingerprint is of invalid length.')\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n hashfunc = hashfunc_map[digest_length]\n\n cert_digest = hashfunc(cert).digest()\n\n if not cert_digest == fingerprint_bytes:\n raise SSLError('Fingerprints did not match. Expected \"{0}\", got \"{1}\".'\n .format(hexlify(fingerprint_bytes),\n hexlify(cert_digest)))\n\n\ndef resolve_cert_reqs(candidate):\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_NONE`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbrevation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_NONE\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, 'CERT_' + candidate)\n return res\n\n return candidate\n\n\ndef resolve_ssl_version(candidate):\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_SSLv23\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, 'PROTOCOL_' + candidate)\n return res\n\n return candidate\n\n\ndef create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,\n options=None, ciphers=None):\n \"\"\"All arguments have the same meaning as ``ssl_wrap_socket``.\n\n By default, this function does a lot of the same work that\n ``ssl.create_default_context`` does on Python 3.4+. It:\n\n - Disables SSLv2, SSLv3, and compression\n - Sets a restricted set of server ciphers\n\n If you wish to enable SSLv3, you can do::\n\n from urllib3.util import ssl_\n context = ssl_.create_urllib3_context()\n context.options &= ~ssl_.OP_NO_SSLv3\n\n You can do the same to enable compression (substituting ``COMPRESSION``\n for ``SSLv3`` in the last line above).\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n :param cert_reqs:\n Whether to require the certificate verification. 
This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.\n :param ciphers:\n Which cipher suites to allow the server to select.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n\n context.options |= options\n\n if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6\n context.set_ciphers(ciphers or _DEFAULT_CIPHERS)\n\n context.verify_mode = cert_reqs\n if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2\n context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)\n return context\n\n\ndef ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,\n ca_certs=None, server_hostname=None,\n ssl_version=None, ciphers=None, ssl_context=None):\n \"\"\"\n All arguments except for server_hostname and ssl_context have the same\n meaning as they do when using :func:`ssl.wrap_socket`.\n\n :param server_hostname:\n When SNI is supported, the expected hostname of the certificate\n :param ssl_context:\n A pre-made :class:`SSLContext` object. If none is provided, one will\n be created using :func:`create_urllib3_context`.\n :param ciphers:\n A string of ciphers we wish the client to support. This is not\n supported on Python 2.6 as the ssl module does not support it.\n \"\"\"\n context = ssl_context\n if context is None:\n context = create_urllib3_context(ssl_version, cert_reqs,\n ciphers=ciphers)\n\n if ca_certs:\n try:\n context.load_verify_locations(ca_certs)\n except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2\n raise SSLError(e)\n # Py33 raises FileNotFoundError which subclasses OSError\n # These are not equivalent unless we check the errno attribute\n except OSError as e: # Platform-specific: Python 3.3 and beyond\n if e.errno == errno.ENOENT:\n raise SSLError(e)\n raise\n if certfile:\n context.load_cert_chain(certfile, keyfile)\n if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI\n return context.wrap_socket(sock, server_hostname=server_hostname)\n return context.wrap_socket(sock)\n", "path": "urllib3/util/ssl_.py"}]} | 3,534 | 151 |
gh_patches_debug_6923 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-2831 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't package pyusb, 'Unable to find "libusb-1.0.so.0"'
To reproduce
1. `virtualenv env`
`source env/bin/activate`
`pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip`
`pip install pyusb`
`pip freeze > requirements.txt`
2. Create simple python file which imports pyusb
test.py
```
import usb.core
print 'Success!'
```
3. Run the python file normally
```
$ python test.py
Success!
```
4. Package it with pyinstaller
```
pyinstaller --log-level=DEBUG test.py &> log.txt
```
log.txt
```
14 INFO: PyInstaller: 3.3.dev0+5fd013bfe
14 INFO: Python: 2.7.12+
14 INFO: Platform: Linux-4.8.0-53-generic-x86_64-with-Ubuntu-16.10-yakkety
14 INFO: wrote /home/joel/Code/random/pyinstaller/test.spec
15 DEBUG: Testing for UPX ...
17 INFO: UPX is not available.
17 DEBUG: script: /home/joel/Code/random/pyinstaller/test.py
17 INFO: Extending PYTHONPATH with paths
['/home/joel/Code/random/pyinstaller', '/home/joel/Code/random/pyinstaller']
17 INFO: checking Analysis
17 INFO: Building Analysis because out00-Analysis.toc is non existent
17 INFO: Initializing module dependency graph...
18 INFO: Initializing module graph hooks...
19 DEBUG: Hidden import: codecs
65 DEBUG: Hidden import 'codecs' already found
65 INFO: running Analysis out00-Analysis.toc
65 DEBUG: Analyzing /home/joel/Code/random/pyinstaller/env/bin/python
71 DEBUG: Skipping libdl.so.2 dependency of python
71 DEBUG: Skipping libc.so.6 dependency of python
71 DEBUG: Skipping libutil.so.1 dependency of python
71 DEBUG: Skipping libpthread.so.0 dependency of python
71 DEBUG: Skipping libm.so.6 dependency of python
72 DEBUG: Adding libz.so.1 dependency of python from /lib/x86_64-linux-gnu/libz.so.1
72 DEBUG: Analyzing /lib/x86_64-linux-gnu/libz.so.1
78 DEBUG: Skipping libc.so.6 dependency of libz.so.1
78 INFO: Caching module hooks...
81 INFO: Analyzing /home/joel/Code/random/pyinstaller/test.py
1078 INFO: Loading module hooks...
1079 INFO: Loading module hook "hook-usb.py"...
Unable to find "libusb-1.0.so.0" when adding binary and data files.
```
This causes the packaging process to halt, leaving the `dist/` and `build/test/` folders empty.
I dug around and found that `hook-usb.py` reports the binaries as `[('libusb-1.0.so.0', '')]`, and I think this causes `src_root_path_or_glob` (`utils.py`, lines 450+) to contain just 'libusb-1.0.so.0', which produces an empty `[]` after it gets run through `glob.glob()`, instead of the real path, which I think is `/lib/x86_64-linux-gnu/libusb-1.0.so.0` on my computer.
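A quick illustration of why a bare library name defeats the glob lookup (the absolute path is simply the one from this report, not something PyInstaller guarantees):

```python
import glob

# A bare soname is treated as a relative pattern, so unless a file with that
# exact name sits in the current working directory, nothing matches:
print(glob.glob('libusb-1.0.so.0'))   # -> []

# An absolute path, by contrast, resolves as expected when the file exists:
print(glob.glob('/lib/x86_64-linux-gnu/libusb-1.0.so.0'))
# -> ['/lib/x86_64-linux-gnu/libusb-1.0.so.0'] on this machine
```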
Environment:
```
Python 2.7.12+
Ubuntu 16.10
Virtualenv 15.1.0
```
requirements.txt
```
appdirs==1.4.3
packaging==16.8
PyInstaller==3.3.dev0+5fd013bfe
pyparsing==2.2.0
pyusb==1.0.0
six==1.10.0
```
Also, this seems to be somewhat related to #1682 and commit https://github.com/pyinstaller/pyinstaller/commit/23901eb1ce7e60f1aac2a73e6cb3b02f1a34b9b1
</issue>
<code>
[start of PyInstaller/hooks/hook-usb.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2017, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License with exception
5 # for distributing bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #-----------------------------------------------------------------------------
9
10 import ctypes.util
11 import os
12 import usb.core
13 import usb.backend
14
15 from PyInstaller.depend.utils import _resolveCtypesImports
16 from PyInstaller.compat import is_cygwin
17 from PyInstaller.utils.hooks import logger
18
19
20 # Include glob for library lookup in run-time hook.
21 hiddenimports = ['glob']
22
23 # https://github.com/walac/pyusb/blob/master/docs/faq.rst
24 # https://github.com/walac/pyusb/blob/master/docs/tutorial.rst
25
26 binaries=[]
27
28 # first try to use pyusb library locator
29 try:
30 # get the backend symbols before find
31 pyusb_backend_dir = set(dir(usb.backend))
32
33 # perform find, which will load a usb library if found
34 usb.core.find()
35
36 # get the backend symbols which have been added (loaded)
37 backends = set(dir(usb.backend)) - pyusb_backend_dir
38
39 # for each of the loaded backends, see if they have a library
40 binaries = []
41 for usblib in [getattr(usb.backend, be)._lib for be in backends]:
42 if usblib is not None:
43 binaries = [(usblib._name, '')]
44
45 except (ValueError, usb.core.USBError) as exc:
46 logger.warning("%s", exc)
47
48
49 # if nothing found, try to use our custom mechanism
50 if not binaries:
51 # Try to resolve your libusb libraries in the following order:
52 #
53 # libusb-1.0, libusb-0.1, openusb
54 #
55 # NOTE: Mind updating run-time hook when adding further libs.
56 libusb_candidates = (
57 # libusb10
58 'usb-1.0', 'usb', 'libusb-1.0',
59 # libusb01
60 'usb-0.1', 'libusb0',
61 # openusb
62 'openusb',
63 )
64
65 for candidate in libusb_candidates:
66 libname = ctypes.util.find_library(candidate)
67 if libname is not None:
68 break
69
70 if libname is not None:
71 # Use basename here because Python returns full library path
72 # on Mac OSX when using ctypes.util.find_library.
73 bins = [os.path.basename(libname)]
74 binaries = _resolveCtypesImports(bins)
75 elif is_cygwin:
76 bins = ['cygusb-1.0-0.dll', 'cygusb0.dll']
77 binaries = _resolveCtypesImports(bins)[:1] # use only the first one
78 else:
79 binaries = []
80
81 if binaries:
82 # `_resolveCtypesImports` returns a 3-tuple, but `binaries` are only
83 # 2-tuples, so remove the last element:
84 assert len(binaries[0]) == 3
85 binaries = [(binaries[0][1], '')]
86
[end of PyInstaller/hooks/hook-usb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/PyInstaller/hooks/hook-usb.py b/PyInstaller/hooks/hook-usb.py
--- a/PyInstaller/hooks/hook-usb.py
+++ b/PyInstaller/hooks/hook-usb.py
@@ -40,7 +40,9 @@
binaries = []
for usblib in [getattr(usb.backend, be)._lib for be in backends]:
if usblib is not None:
- binaries = [(usblib._name, '')]
+ binaries = _resolveCtypesImports([os.path.basename(usblib._name)])
+ assert len(binaries[0]) == 3
+ binaries = [(binaries[0][1], '')]
except (ValueError, usb.core.USBError) as exc:
logger.warning("%s", exc)
| {"golden_diff": "diff --git a/PyInstaller/hooks/hook-usb.py b/PyInstaller/hooks/hook-usb.py\n--- a/PyInstaller/hooks/hook-usb.py\n+++ b/PyInstaller/hooks/hook-usb.py\n@@ -40,7 +40,9 @@\n binaries = []\n for usblib in [getattr(usb.backend, be)._lib for be in backends]:\n if usblib is not None:\n- binaries = [(usblib._name, '')]\n+ binaries = _resolveCtypesImports([os.path.basename(usblib._name)])\n+ assert len(binaries[0]) == 3\n+ binaries = [(binaries[0][1], '')]\n \n except (ValueError, usb.core.USBError) as exc:\n logger.warning(\"%s\", exc)\n", "issue": "Can't package pyusb, 'Unable to find \"libusb-1.0.so.0\"'\nTo reproduce\r\n\r\n1. `virtualenv env`\r\n`source env/bin/activate`\r\n`pip install https://github.com/pyinstaller/pyinstaller/archive/develop.zip`\r\n`pip install pyusb`\r\n`pip freeze > requirements.txt`\r\n2. Create simple python file which imports pyusb\r\ntest.py\r\n```\r\nimport usb.core\r\nprint 'Success!\r\n```\r\n3. Run the python file normally\r\n```\r\n$ python test.py\r\nSuccess!\r\n```\r\n4. Package it with pyinstaller\r\n```\r\npyinstaller --log-level=DEBUG test.py &> log.txt\r\n```\r\nlog.txt\r\n```\r\n14 INFO: PyInstaller: 3.3.dev0+5fd013bfe\r\n14 INFO: Python: 2.7.12+\r\n14 INFO: Platform: Linux-4.8.0-53-generic-x86_64-with-Ubuntu-16.10-yakkety\r\n14 INFO: wrote /home/joel/Code/random/pyinstaller/test.spec\r\n15 DEBUG: Testing for UPX ...\r\n17 INFO: UPX is not available.\r\n17 DEBUG: script: /home/joel/Code/random/pyinstaller/test.py\r\n17 INFO: Extending PYTHONPATH with paths\r\n['/home/joel/Code/random/pyinstaller', '/home/joel/Code/random/pyinstaller']\r\n17 INFO: checking Analysis\r\n17 INFO: Building Analysis because out00-Analysis.toc is non existent\r\n17 INFO: Initializing module dependency graph...\r\n18 INFO: Initializing module graph hooks...\r\n19 DEBUG: Hidden import: codecs\r\n65 DEBUG: Hidden import 'codecs' already found\r\n65 INFO: running Analysis out00-Analysis.toc\r\n65 DEBUG: Analyzing /home/joel/Code/random/pyinstaller/env/bin/python\r\n71 DEBUG: Skipping libdl.so.2 dependency of python\r\n71 DEBUG: Skipping libc.so.6 dependency of python\r\n71 DEBUG: Skipping libutil.so.1 dependency of python\r\n71 DEBUG: Skipping libpthread.so.0 dependency of python\r\n71 DEBUG: Skipping libm.so.6 dependency of python\r\n72 DEBUG: Adding libz.so.1 dependency of python from /lib/x86_64-linux-gnu/libz.so.1\r\n72 DEBUG: Analyzing /lib/x86_64-linux-gnu/libz.so.1\r\n78 DEBUG: Skipping libc.so.6 dependency of libz.so.1\r\n78 INFO: Caching module hooks...\r\n81 INFO: Analyzing /home/joel/Code/random/pyinstaller/test.py\r\n1078 INFO: Loading module hooks...\r\n1079 INFO: Loading module hook \"hook-usb.py\"...\r\nUnable to find \"libusb-1.0.so.0\" when adding binary and data files.\r\n```\r\nThis causes the packaging process to halt with an empty `dist/` and `build/test/` folders.\r\n\r\nI dug around and found out, that `hook-usb.py` reports the binaries to be `[('libusb-1.0.so.0', '')]` and I think this causes that `src_root_path_or_glob`(`utils.py`, lines 450+) to just contain 'libusb1.0.so.0', which produces just `[]`after it get's run through the `glob.glob()` and not the real path, which I think is`/lib/x86_64-linux-gnu/libusb-1.0.so.0` on my computer.\r\n\r\nEnvironment:\r\n```\r\nPython 2.7.12+\r\nUbuntu 16.10\r\nVirtualenv 15.1.0\r\n```\r\nrequirements.txt\r\n```\r\nappdirs==1.4.3\r\npackaging==16.8\r\nPyInstaller==3.3.dev0+5fd013bfe\r\npyparsing==2.2.0\r\npyusb==1.0.0\r\nsix==1.10.0\r\n```\r\n\r\nAlso, this seems to be somewhat related to #1682 and 
commit https://github.com/pyinstaller/pyinstaller/commit/23901eb1ce7e60f1aac2a73e6cb3b02f1a34b9b1\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2017, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License with exception\n# for distributing bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport ctypes.util\nimport os\nimport usb.core\nimport usb.backend\n\nfrom PyInstaller.depend.utils import _resolveCtypesImports\nfrom PyInstaller.compat import is_cygwin\nfrom PyInstaller.utils.hooks import logger\n\n\n# Include glob for library lookup in run-time hook.\nhiddenimports = ['glob']\n\n# https://github.com/walac/pyusb/blob/master/docs/faq.rst\n# https://github.com/walac/pyusb/blob/master/docs/tutorial.rst\n\nbinaries=[]\n\n# first try to use pyusb library locator\ntry:\n # get the backend symbols before find\n pyusb_backend_dir = set(dir(usb.backend))\n\n # perform find, which will load a usb library if found\n usb.core.find()\n\n # get the backend symbols which have been added (loaded)\n backends = set(dir(usb.backend)) - pyusb_backend_dir\n\n # for each of the loaded backends, see if they have a library\n binaries = []\n for usblib in [getattr(usb.backend, be)._lib for be in backends]:\n if usblib is not None:\n binaries = [(usblib._name, '')]\n\nexcept (ValueError, usb.core.USBError) as exc:\n logger.warning(\"%s\", exc)\n\n\n# if nothing found, try to use our custom mechanism\nif not binaries:\n # Try to resolve your libusb libraries in the following order:\n #\n # libusb-1.0, libusb-0.1, openusb\n #\n # NOTE: Mind updating run-time hook when adding further libs.\n libusb_candidates = (\n # libusb10\n 'usb-1.0', 'usb', 'libusb-1.0',\n # libusb01\n 'usb-0.1', 'libusb0',\n # openusb\n 'openusb',\n )\n\n for candidate in libusb_candidates:\n libname = ctypes.util.find_library(candidate)\n if libname is not None:\n break\n\n if libname is not None:\n # Use basename here because Python returns full library path\n # on Mac OSX when using ctypes.util.find_library.\n bins = [os.path.basename(libname)]\n binaries = _resolveCtypesImports(bins)\n elif is_cygwin:\n bins = ['cygusb-1.0-0.dll', 'cygusb0.dll']\n binaries = _resolveCtypesImports(bins)[:1] # use only the first one\n else:\n binaries = []\n\n if binaries:\n # `_resolveCtypesImports` returns a 3-tuple, but `binaries` are only\n # 2-tuples, so remove the last element:\n assert len(binaries[0]) == 3\n binaries = [(binaries[0][1], '')]\n", "path": "PyInstaller/hooks/hook-usb.py"}]} | 2,318 | 176 |
gh_patches_debug_20311 | rasdani/github-patches | git_diff | mozilla__pontoon-2448 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Microsoft Terminology Service returning 500
Requests to the Microsoft Terminology Service API, used by Machinery and Concordance Search, currently return "500 Internal Server Error". Let's fix that.
</issue>
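For reference, a hypothetical reproduction of the failing call outside of a view, reusing the endpoint, headers, and SOAP template from `microsoft_terminology()` in the listing below. It has to run inside the Django project so the template loader is configured, and the locale code here is an arbitrary example:

```python
import requests
from django.template.loader import get_template

url = "https://api.terminology.microsoft.com/Terminology.svc"
headers = {
    "SOAPAction": (
        '"http://api.terminology.microsoft.com/terminology/Terminology/GetTranslations"'
    ),
    "Content-Type": "text/xml; charset=utf-8",
}
payload = get_template("machinery/microsoft_terminology.jinja").render(
    {"text": "file", "to": "fr-fr", "max_result": 5}
)

r = requests.post(url, data=payload, headers=headers)
print(r.status_code)  # currently 500, per this report
```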
<code>
[start of pontoon/machinery/views.py]
1 import json
2 import logging
3 import requests
4 import xml.etree.ElementTree as ET
5
6 from sacremoses import MosesDetokenizer
7 from urllib.parse import quote
8
9 from django.conf import settings
10 from django.contrib.auth.decorators import login_required
11 from django.core.paginator import EmptyPage, Paginator
12 from django.http import JsonResponse
13 from django.shortcuts import render
14 from django.template.loader import get_template
15 from django.utils.datastructures import MultiValueDictKeyError
16
17 from pontoon.base import utils
18 from pontoon.base.models import Entity, Locale, Project, Translation
19 from pontoon.machinery.utils import (
20 get_concordance_search_data,
21 get_google_translate_data,
22 get_translation_memory_data,
23 )
24
25
26 log = logging.getLogger(__name__)
27
28
29 def machinery(request):
30 locale = utils.get_project_locale_from_request(request, Locale.objects) or "en-GB"
31
32 return render(
33 request,
34 "machinery/machinery.html",
35 {
36 "locale": Locale.objects.get(code=locale),
37 "locales": Locale.objects.all(),
38 "is_google_translate_supported": bool(settings.GOOGLE_TRANSLATE_API_KEY),
39 "is_microsoft_translator_supported": bool(
40 settings.MICROSOFT_TRANSLATOR_API_KEY
41 ),
42 "is_systran_translate_supported": bool(settings.SYSTRAN_TRANSLATE_API_KEY),
43 },
44 )
45
46
47 def translation_memory(request):
48 """Get translations from internal translations memory."""
49 try:
50 text = request.GET["text"]
51 locale = Locale.objects.get(code=request.GET["locale"])
52 pk = request.GET.get("pk", None)
53
54 if pk is not None:
55 pk = int(pk)
56
57 except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
58 return JsonResponse(
59 {"status": False, "message": f"Bad Request: {e}"},
60 status=400,
61 )
62
63 data = get_translation_memory_data(text, locale, pk)
64 return JsonResponse(data, safe=False)
65
66
67 def concordance_search(request):
68 """Search for translations in the internal translations memory."""
69 try:
70 text = request.GET["text"]
71 locale = Locale.objects.get(code=request.GET["locale"])
72 page_results_limit = int(request.GET.get("limit", 100))
73 page = int(request.GET.get("page", 1))
74 except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
75 return JsonResponse(
76 {"status": False, "message": f"Bad Request: {e}"},
77 status=400,
78 )
79
80 paginator = Paginator(get_concordance_search_data(text, locale), page_results_limit)
81
82 try:
83 data = paginator.page(page)
84 except EmptyPage:
85 return JsonResponse({"results": [], "has_next": False})
86
87 # ArrayAgg (used in get_concordance_search_data()) does not support using
88 # distinct=True in combination with ordering, so we need to do one of them
89 # manually - after pagination, to reduce the number of rows processed.
90 projects = Project.objects.order_by("disabled", "-priority").values_list(
91 "name", flat=True
92 )
93 for r in data.object_list:
94 r["project_names"] = [p for p in projects if p in r["project_names"]]
95
96 return JsonResponse(
97 {"results": data.object_list, "has_next": data.has_next()}, safe=False
98 )
99
100
101 @login_required(redirect_field_name="", login_url="/403")
102 def microsoft_translator(request):
103 """Get translation from Microsoft machine translation service."""
104 try:
105 text = request.GET["text"]
106 locale_code = request.GET["locale"]
107
108 if not locale_code:
109 raise ValueError("Locale code is empty")
110
111 api_key = settings.MICROSOFT_TRANSLATOR_API_KEY
112 if not api_key:
113 raise ValueError("Missing api key")
114
115 except (MultiValueDictKeyError, ValueError) as e:
116 return JsonResponse(
117 {"status": False, "message": f"Bad Request: {e}"},
118 status=400,
119 )
120
121 url = "https://api.cognitive.microsofttranslator.com/translate"
122 headers = {"Ocp-Apim-Subscription-Key": api_key, "Content-Type": "application/json"}
123 payload = {
124 "api-version": "3.0",
125 "from": "en",
126 "to": locale_code,
127 "textType": "html",
128 }
129 body = [{"Text": text}]
130
131 try:
132 r = requests.post(url, params=payload, headers=headers, json=body)
133 r.raise_for_status()
134
135 root = json.loads(r.content)
136
137 if "error" in root:
138 log.error(f"Microsoft Translator error: {root}")
139 return JsonResponse(
140 {"status": False, "message": f"Bad Request: {root}"},
141 status=400,
142 )
143
144 return JsonResponse({"translation": root[0]["translations"][0]["text"]})
145
146 except requests.exceptions.RequestException as e:
147 return JsonResponse(
148 {"status": False, "message": f"{e}"},
149 status=r.status_code,
150 )
151
152
153 @login_required(redirect_field_name="", login_url="/403")
154 def google_translate(request):
155 """Get translation from Google machine translation service."""
156 try:
157 text = request.GET["text"]
158 locale_code = request.GET["locale"]
159
160 if not locale_code:
161 raise ValueError("Locale code is empty")
162
163 except (MultiValueDictKeyError, ValueError) as e:
164 return JsonResponse(
165 {"status": False, "message": f"Bad Request: {e}"},
166 status=400,
167 )
168
169 data = get_google_translate_data(text, locale_code)
170
171 if not data["status"]:
172 return JsonResponse(data, status=400)
173
174 return JsonResponse(data)
175
176
177 @login_required(redirect_field_name="", login_url="/403")
178 def systran_translate(request):
179 """Get translations from SYSTRAN machine translation service."""
180 try:
181 text = request.GET["text"]
182 locale_code = request.GET["locale"]
183
184 if not locale_code:
185 raise ValueError("Locale code is empty")
186
187 locale = Locale.objects.filter(systran_translate_code=locale_code).first()
188
189 api_key = settings.SYSTRAN_TRANSLATE_API_KEY
190 if not api_key:
191 raise ValueError("Missing api key")
192
193 except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
194 return JsonResponse(
195 {"status": False, "message": f"Bad Request: {e}"},
196 status=400,
197 )
198
199 url = "https://api-translate.systran.net/translation/text/translate"
200
201 payload = {
202 "key": api_key,
203 "input": text,
204 "source": "en",
205 "target": locale_code,
206 "profile": locale.systran_translate_profile,
207 "format": "text",
208 }
209
210 try:
211 r = requests.post(url, params=payload)
212 r.raise_for_status()
213
214 root = json.loads(r.content)
215
216 if "error" in root:
217 log.error(f"SYSTRAN error: {root}")
218 return JsonResponse(
219 {"status": False, "message": f"Bad Request: {root}"},
220 status=400,
221 )
222
223 return JsonResponse({"translation": root["outputs"][0]["output"]})
224
225 except requests.exceptions.RequestException as e:
226 return JsonResponse(
227 {"status": False, "message": f"{e}"},
228 status=r.status_code,
229 )
230
231
232 def caighdean(request):
233 """Get translation from Caighdean machine translation service."""
234 try:
235 entityid = int(request.GET["id"])
236 entity = Entity.objects.get(id=entityid)
237 except (Entity.DoesNotExist, MultiValueDictKeyError, ValueError) as e:
238 return JsonResponse(
239 {"status": False, "message": f"Bad Request: {e}"},
240 status=400,
241 )
242
243 try:
244 text = entity.translation_set.get(
245 locale__code="gd",
246 plural_form=None if entity.string_plural == "" else 0,
247 approved=True,
248 ).string
249 except Translation.DoesNotExist:
250 return JsonResponse({})
251
252 url = "https://cadhan.com/api/intergaelic/3.0"
253
254 data = {
255 "teacs": text,
256 "foinse": "gd",
257 }
258
259 try:
260 r = requests.post(url, data=data)
261 r.raise_for_status()
262
263 root = json.loads(r.content)
264 tokens = [x[1] for x in root]
265 translation = (
266 MosesDetokenizer().detokenize(tokens, return_str=True).replace("\\n", "\n")
267 )
268
269 return JsonResponse({"original": text, "translation": translation})
270
271 except requests.exceptions.RequestException as e:
272 return JsonResponse(
273 {"status": False, "message": f"{e}"},
274 status=r.status_code,
275 )
276
277
278 def microsoft_terminology(request):
279 """Get translations from Microsoft Terminology Service."""
280 try:
281 text = request.GET["text"]
282 locale_code = request.GET["locale"]
283
284 if not locale_code:
285 raise ValueError("Locale code is empty")
286
287 except (MultiValueDictKeyError, ValueError) as e:
288 return JsonResponse(
289 {"status": False, "message": f"Bad Request: {e}"},
290 status=400,
291 )
292
293 obj = {}
294 url = "https://api.terminology.microsoft.com/Terminology.svc"
295 headers = {
296 "SOAPAction": (
297 '"http://api.terminology.microsoft.com/terminology/Terminology/GetTranslations"'
298 ),
299 "Content-Type": "text/xml; charset=utf-8",
300 }
301 payload = {
302 "text": quote(text.encode("utf-8")),
303 "to": locale_code,
304 "max_result": 5,
305 }
306 template = get_template("machinery/microsoft_terminology.jinja")
307
308 payload = template.render(payload)
309
310 try:
311 r = requests.post(url, data=payload, headers=headers)
312 r.raise_for_status()
313
314 translations = []
315 xpath = ".//{http://api.terminology.microsoft.com/terminology}"
316 root = ET.fromstring(r.content)
317 results = root.find(xpath + "GetTranslationsResult")
318
319 if results is not None:
320 for translation in results:
321 translations.append(
322 {
323 "source": translation.find(xpath + "OriginalText").text,
324 "target": translation.find(xpath + "TranslatedText").text,
325 }
326 )
327
328 obj["translations"] = translations
329 return JsonResponse(obj)
330
331 except requests.exceptions.RequestException as e:
332 return JsonResponse(
333 {"status": False, "message": f"{e}"},
334 status=r.status_code,
335 )
336
[end of pontoon/machinery/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/machinery/views.py b/pontoon/machinery/views.py
--- a/pontoon/machinery/views.py
+++ b/pontoon/machinery/views.py
@@ -312,16 +312,20 @@
r.raise_for_status()
translations = []
- xpath = ".//{http://api.terminology.microsoft.com/terminology}"
+ namespaces = {"a": "https://api.terminology.microsoft.com/terminology"}
root = ET.fromstring(r.content)
- results = root.find(xpath + "GetTranslationsResult")
+ results = root.find(
+ ".//{http://api.terminology.microsoft.com/terminology}GetTranslationsResult"
+ )
if results is not None:
for translation in results:
translations.append(
{
- "source": translation.find(xpath + "OriginalText").text,
- "target": translation.find(xpath + "TranslatedText").text,
+ "source": translation.find("a:OriginalText", namespaces).text,
+ "target": translation.find(
+ ".//a:TranslatedText", namespaces
+ ).text,
}
)
| {"golden_diff": "diff --git a/pontoon/machinery/views.py b/pontoon/machinery/views.py\n--- a/pontoon/machinery/views.py\n+++ b/pontoon/machinery/views.py\n@@ -312,16 +312,20 @@\n r.raise_for_status()\n \n translations = []\n- xpath = \".//{http://api.terminology.microsoft.com/terminology}\"\n+ namespaces = {\"a\": \"https://api.terminology.microsoft.com/terminology\"}\n root = ET.fromstring(r.content)\n- results = root.find(xpath + \"GetTranslationsResult\")\n+ results = root.find(\n+ \".//{http://api.terminology.microsoft.com/terminology}GetTranslationsResult\"\n+ )\n \n if results is not None:\n for translation in results:\n translations.append(\n {\n- \"source\": translation.find(xpath + \"OriginalText\").text,\n- \"target\": translation.find(xpath + \"TranslatedText\").text,\n+ \"source\": translation.find(\"a:OriginalText\", namespaces).text,\n+ \"target\": translation.find(\n+ \".//a:TranslatedText\", namespaces\n+ ).text,\n }\n )\n", "issue": "Microsoft Terminology Service returning 500\nRequests to Microsoft Terminology Service API used by Machinery and Concordance Search currently return \"500 Internal Server Error\". Let's fix that.\n", "before_files": [{"content": "import json\nimport logging\nimport requests\nimport xml.etree.ElementTree as ET\n\nfrom sacremoses import MosesDetokenizer\nfrom urllib.parse import quote\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import EmptyPage, Paginator\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.utils.datastructures import MultiValueDictKeyError\n\nfrom pontoon.base import utils\nfrom pontoon.base.models import Entity, Locale, Project, Translation\nfrom pontoon.machinery.utils import (\n get_concordance_search_data,\n get_google_translate_data,\n get_translation_memory_data,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\ndef machinery(request):\n locale = utils.get_project_locale_from_request(request, Locale.objects) or \"en-GB\"\n\n return render(\n request,\n \"machinery/machinery.html\",\n {\n \"locale\": Locale.objects.get(code=locale),\n \"locales\": Locale.objects.all(),\n \"is_google_translate_supported\": bool(settings.GOOGLE_TRANSLATE_API_KEY),\n \"is_microsoft_translator_supported\": bool(\n settings.MICROSOFT_TRANSLATOR_API_KEY\n ),\n \"is_systran_translate_supported\": bool(settings.SYSTRAN_TRANSLATE_API_KEY),\n },\n )\n\n\ndef translation_memory(request):\n \"\"\"Get translations from internal translations memory.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale = Locale.objects.get(code=request.GET[\"locale\"])\n pk = request.GET.get(\"pk\", None)\n\n if pk is not None:\n pk = int(pk)\n\n except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n data = get_translation_memory_data(text, locale, pk)\n return JsonResponse(data, safe=False)\n\n\ndef concordance_search(request):\n \"\"\"Search for translations in the internal translations memory.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale = Locale.objects.get(code=request.GET[\"locale\"])\n page_results_limit = int(request.GET.get(\"limit\", 100))\n page = int(request.GET.get(\"page\", 1))\n except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n paginator = 
Paginator(get_concordance_search_data(text, locale), page_results_limit)\n\n try:\n data = paginator.page(page)\n except EmptyPage:\n return JsonResponse({\"results\": [], \"has_next\": False})\n\n # ArrayAgg (used in get_concordance_search_data()) does not support using\n # distinct=True in combination with ordering, so we need to do one of them\n # manually - after pagination, to reduce the number of rows processed.\n projects = Project.objects.order_by(\"disabled\", \"-priority\").values_list(\n \"name\", flat=True\n )\n for r in data.object_list:\n r[\"project_names\"] = [p for p in projects if p in r[\"project_names\"]]\n\n return JsonResponse(\n {\"results\": data.object_list, \"has_next\": data.has_next()}, safe=False\n )\n\n\n@login_required(redirect_field_name=\"\", login_url=\"/403\")\ndef microsoft_translator(request):\n \"\"\"Get translation from Microsoft machine translation service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n api_key = settings.MICROSOFT_TRANSLATOR_API_KEY\n if not api_key:\n raise ValueError(\"Missing api key\")\n\n except (MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n url = \"https://api.cognitive.microsofttranslator.com/translate\"\n headers = {\"Ocp-Apim-Subscription-Key\": api_key, \"Content-Type\": \"application/json\"}\n payload = {\n \"api-version\": \"3.0\",\n \"from\": \"en\",\n \"to\": locale_code,\n \"textType\": \"html\",\n }\n body = [{\"Text\": text}]\n\n try:\n r = requests.post(url, params=payload, headers=headers, json=body)\n r.raise_for_status()\n\n root = json.loads(r.content)\n\n if \"error\" in root:\n log.error(f\"Microsoft Translator error: {root}\")\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {root}\"},\n status=400,\n )\n\n return JsonResponse({\"translation\": root[0][\"translations\"][0][\"text\"]})\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n\n\n@login_required(redirect_field_name=\"\", login_url=\"/403\")\ndef google_translate(request):\n \"\"\"Get translation from Google machine translation service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n except (MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n data = get_google_translate_data(text, locale_code)\n\n if not data[\"status\"]:\n return JsonResponse(data, status=400)\n\n return JsonResponse(data)\n\n\n@login_required(redirect_field_name=\"\", login_url=\"/403\")\ndef systran_translate(request):\n \"\"\"Get translations from SYSTRAN machine translation service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n locale = Locale.objects.filter(systran_translate_code=locale_code).first()\n\n api_key = settings.SYSTRAN_TRANSLATE_API_KEY\n if not api_key:\n raise ValueError(\"Missing api key\")\n\n except (Locale.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n url = 
\"https://api-translate.systran.net/translation/text/translate\"\n\n payload = {\n \"key\": api_key,\n \"input\": text,\n \"source\": \"en\",\n \"target\": locale_code,\n \"profile\": locale.systran_translate_profile,\n \"format\": \"text\",\n }\n\n try:\n r = requests.post(url, params=payload)\n r.raise_for_status()\n\n root = json.loads(r.content)\n\n if \"error\" in root:\n log.error(f\"SYSTRAN error: {root}\")\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {root}\"},\n status=400,\n )\n\n return JsonResponse({\"translation\": root[\"outputs\"][0][\"output\"]})\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n\n\ndef caighdean(request):\n \"\"\"Get translation from Caighdean machine translation service.\"\"\"\n try:\n entityid = int(request.GET[\"id\"])\n entity = Entity.objects.get(id=entityid)\n except (Entity.DoesNotExist, MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n try:\n text = entity.translation_set.get(\n locale__code=\"gd\",\n plural_form=None if entity.string_plural == \"\" else 0,\n approved=True,\n ).string\n except Translation.DoesNotExist:\n return JsonResponse({})\n\n url = \"https://cadhan.com/api/intergaelic/3.0\"\n\n data = {\n \"teacs\": text,\n \"foinse\": \"gd\",\n }\n\n try:\n r = requests.post(url, data=data)\n r.raise_for_status()\n\n root = json.loads(r.content)\n tokens = [x[1] for x in root]\n translation = (\n MosesDetokenizer().detokenize(tokens, return_str=True).replace(\"\\\\n\", \"\\n\")\n )\n\n return JsonResponse({\"original\": text, \"translation\": translation})\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n\n\ndef microsoft_terminology(request):\n \"\"\"Get translations from Microsoft Terminology Service.\"\"\"\n try:\n text = request.GET[\"text\"]\n locale_code = request.GET[\"locale\"]\n\n if not locale_code:\n raise ValueError(\"Locale code is empty\")\n\n except (MultiValueDictKeyError, ValueError) as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"Bad Request: {e}\"},\n status=400,\n )\n\n obj = {}\n url = \"https://api.terminology.microsoft.com/Terminology.svc\"\n headers = {\n \"SOAPAction\": (\n '\"http://api.terminology.microsoft.com/terminology/Terminology/GetTranslations\"'\n ),\n \"Content-Type\": \"text/xml; charset=utf-8\",\n }\n payload = {\n \"text\": quote(text.encode(\"utf-8\")),\n \"to\": locale_code,\n \"max_result\": 5,\n }\n template = get_template(\"machinery/microsoft_terminology.jinja\")\n\n payload = template.render(payload)\n\n try:\n r = requests.post(url, data=payload, headers=headers)\n r.raise_for_status()\n\n translations = []\n xpath = \".//{http://api.terminology.microsoft.com/terminology}\"\n root = ET.fromstring(r.content)\n results = root.find(xpath + \"GetTranslationsResult\")\n\n if results is not None:\n for translation in results:\n translations.append(\n {\n \"source\": translation.find(xpath + \"OriginalText\").text,\n \"target\": translation.find(xpath + \"TranslatedText\").text,\n }\n )\n\n obj[\"translations\"] = translations\n return JsonResponse(obj)\n\n except requests.exceptions.RequestException as e:\n return JsonResponse(\n {\"status\": False, \"message\": f\"{e}\"},\n status=r.status_code,\n )\n", "path": "pontoon/machinery/views.py"}]} | 3,805 | 254 |
gh_patches_debug_29651 | rasdani/github-patches | git_diff | facebookresearch__hydra-2520 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Document hydra.utils.{get_class,get_method}
Users would benefit from documentation for the following:
[`hydra.utils.get_class`](https://github.com/facebookresearch/hydra/blob/1cbe86ebecbeb134a3f2041120d57447a7394314/hydra/utils.py#L21)
[`hydra.utils.get_method`](https://github.com/facebookresearch/hydra/blob/1cbe86ebecbeb134a3f2041120d57447a7394314/hydra/utils.py#L32)
</issue>
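Before the repository listing below, a brief illustrative sketch of how these two helpers are used in practice; the dotted paths `my_pkg.models.MyModel` and `my_pkg.train.run` are placeholders, not names from Hydra or from this issue.

```python
# Illustrative only: both helpers resolve a dotted path to a Python object.
# The target module/class/function names here are hypothetical.
from hydra.utils import get_class, get_method

model_cls = get_class("my_pkg.models.MyModel")  # fails if the path is not a class
train_fn = get_method("my_pkg.train.run")       # fails if the target is not callable

model = model_cls()
train_fn(model)
```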
<code>
[start of hydra/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 import logging.config
4 import os
5 from pathlib import Path
6 from typing import Any, Callable
7
8 import hydra._internal.instantiate._instantiate2
9 import hydra.types
10 from hydra._internal.utils import _locate
11 from hydra.core.hydra_config import HydraConfig
12
13 log = logging.getLogger(__name__)
14
15 # Instantiation related symbols
16 instantiate = hydra._internal.instantiate._instantiate2.instantiate
17 call = instantiate
18 ConvertMode = hydra.types.ConvertMode
19
20
21 def get_class(path: str) -> type:
22 try:
23 cls = _locate(path)
24 if not isinstance(cls, type):
25 raise ValueError(
26 f"Located non-class of type '{type(cls).__name__}'"
27 + f" while loading '{path}'"
28 )
29 return cls
30 except Exception as e:
31 log.error(f"Error initializing class at {path}: {e}")
32 raise e
33
34
35 def get_method(path: str) -> Callable[..., Any]:
36 try:
37 obj = _locate(path)
38 if not callable(obj):
39 raise ValueError(
40 f"Located non-callable of type '{type(obj).__name__}'"
41 + f" while loading '{path}'"
42 )
43 cl: Callable[..., Any] = obj
44 return cl
45 except Exception as e:
46 log.error(f"Error getting callable at {path} : {e}")
47 raise e
48
49
50 # Alias for get_method
51 get_static_method = get_method
52
53
54 def get_original_cwd() -> str:
55 """
56 :return: the original working directory the Hydra application was launched from
57 """
58 if not HydraConfig.initialized():
59 raise ValueError(
60 "get_original_cwd() must only be used after HydraConfig is initialized"
61 )
62 ret = HydraConfig.get().runtime.cwd
63 assert ret is not None and isinstance(ret, str)
64 return ret
65
66
67 def to_absolute_path(path: str) -> str:
68 """
69 converts the specified path to be absolute path.
70 if the input path is relative, it's interpreted as relative to the original working directory
71 if it's absolute, it's returned as is
72 :param path: path to convert
73 :return:
74 """
75 p = Path(path)
76 if not HydraConfig.initialized():
77 base = Path(os.getcwd())
78 else:
79 base = Path(get_original_cwd())
80 if p.is_absolute():
81 ret = p
82 else:
83 ret = base / p
84 return str(ret)
85
[end of hydra/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/utils.py b/hydra/utils.py
--- a/hydra/utils.py
+++ b/hydra/utils.py
@@ -19,6 +19,14 @@
def get_class(path: str) -> type:
+ """
+ Look up a class based on a dotpath.
+ Fails if the path does not point to a class.
+
+ >>> import my_module
+ >>> from hydra.utils import get_class
+ >>> assert get_class("my_module.MyClass") is my_module.MyClass
+ """
try:
cls = _locate(path)
if not isinstance(cls, type):
@@ -28,11 +36,19 @@
)
return cls
except Exception as e:
- log.error(f"Error initializing class at {path}: {e}")
+ log.error(f"Error getting class at {path}: {e}")
raise e
def get_method(path: str) -> Callable[..., Any]:
+ """
+ Look up a callable based on a dotpath.
+ Fails if the path does not point to a callable object.
+
+ >>> import my_module
+ >>> from hydra.utils import get_method
+ >>> assert get_method("my_module.my_function") is my_module.my_function
+ """
try:
obj = _locate(path)
if not callable(obj):
@@ -51,6 +67,22 @@
get_static_method = get_method
+def get_object(path: str) -> Any:
+ """
+ Look up a callable based on a dotpath.
+
+ >>> import my_module
+ >>> from hydra.utils import get_object
+ >>> assert get_object("my_module.my_object") is my_module.my_object
+ """
+ try:
+ obj = _locate(path)
+ return obj
+ except Exception as e:
+ log.error(f"Error getting object at {path} : {e}")
+ raise e
+
+
def get_original_cwd() -> str:
"""
:return: the original working directory the Hydra application was launched from
| {"golden_diff": "diff --git a/hydra/utils.py b/hydra/utils.py\n--- a/hydra/utils.py\n+++ b/hydra/utils.py\n@@ -19,6 +19,14 @@\n \n \n def get_class(path: str) -> type:\n+ \"\"\"\n+ Look up a class based on a dotpath.\n+ Fails if the path does not point to a class.\n+\n+ >>> import my_module\n+ >>> from hydra.utils import get_class\n+ >>> assert get_class(\"my_module.MyClass\") is my_module.MyClass\n+ \"\"\"\n try:\n cls = _locate(path)\n if not isinstance(cls, type):\n@@ -28,11 +36,19 @@\n )\n return cls\n except Exception as e:\n- log.error(f\"Error initializing class at {path}: {e}\")\n+ log.error(f\"Error getting class at {path}: {e}\")\n raise e\n \n \n def get_method(path: str) -> Callable[..., Any]:\n+ \"\"\"\n+ Look up a callable based on a dotpath.\n+ Fails if the path does not point to a callable object.\n+\n+ >>> import my_module\n+ >>> from hydra.utils import get_method\n+ >>> assert get_method(\"my_module.my_function\") is my_module.my_function\n+ \"\"\"\n try:\n obj = _locate(path)\n if not callable(obj):\n@@ -51,6 +67,22 @@\n get_static_method = get_method\n \n \n+def get_object(path: str) -> Any:\n+ \"\"\"\n+ Look up a callable based on a dotpath.\n+\n+ >>> import my_module\n+ >>> from hydra.utils import get_object\n+ >>> assert get_object(\"my_module.my_object\") is my_module.my_object\n+ \"\"\"\n+ try:\n+ obj = _locate(path)\n+ return obj\n+ except Exception as e:\n+ log.error(f\"Error getting object at {path} : {e}\")\n+ raise e\n+\n+\n def get_original_cwd() -> str:\n \"\"\"\n :return: the original working directory the Hydra application was launched from\n", "issue": "Document hydra.utils.{get_class,get_method}\nUsers would benefit from documentation for the following:\r\n[`hydra.utils.get_class`](https://github.com/facebookresearch/hydra/blob/1cbe86ebecbeb134a3f2041120d57447a7394314/hydra/utils.py#L21)\r\n[`hydra.utils.get_method`](https://github.com/facebookresearch/hydra/blob/1cbe86ebecbeb134a3f2041120d57447a7394314/hydra/utils.py#L32)\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n\nimport logging.config\nimport os\nfrom pathlib import Path\nfrom typing import Any, Callable\n\nimport hydra._internal.instantiate._instantiate2\nimport hydra.types\nfrom hydra._internal.utils import _locate\nfrom hydra.core.hydra_config import HydraConfig\n\nlog = logging.getLogger(__name__)\n\n# Instantiation related symbols\ninstantiate = hydra._internal.instantiate._instantiate2.instantiate\ncall = instantiate\nConvertMode = hydra.types.ConvertMode\n\n\ndef get_class(path: str) -> type:\n try:\n cls = _locate(path)\n if not isinstance(cls, type):\n raise ValueError(\n f\"Located non-class of type '{type(cls).__name__}'\"\n + f\" while loading '{path}'\"\n )\n return cls\n except Exception as e:\n log.error(f\"Error initializing class at {path}: {e}\")\n raise e\n\n\ndef get_method(path: str) -> Callable[..., Any]:\n try:\n obj = _locate(path)\n if not callable(obj):\n raise ValueError(\n f\"Located non-callable of type '{type(obj).__name__}'\"\n + f\" while loading '{path}'\"\n )\n cl: Callable[..., Any] = obj\n return cl\n except Exception as e:\n log.error(f\"Error getting callable at {path} : {e}\")\n raise e\n\n\n# Alias for get_method\nget_static_method = get_method\n\n\ndef get_original_cwd() -> str:\n \"\"\"\n :return: the original working directory the Hydra application was launched from\n \"\"\"\n if not HydraConfig.initialized():\n raise ValueError(\n \"get_original_cwd() must only be used after HydraConfig is initialized\"\n )\n ret = HydraConfig.get().runtime.cwd\n assert ret is not None and isinstance(ret, str)\n return ret\n\n\ndef to_absolute_path(path: str) -> str:\n \"\"\"\n converts the specified path to be absolute path.\n if the input path is relative, it's interpreted as relative to the original working directory\n if it's absolute, it's returned as is\n :param path: path to convert\n :return:\n \"\"\"\n p = Path(path)\n if not HydraConfig.initialized():\n base = Path(os.getcwd())\n else:\n base = Path(get_original_cwd())\n if p.is_absolute():\n ret = p\n else:\n ret = base / p\n return str(ret)\n", "path": "hydra/utils.py"}]} | 1,384 | 465 |
gh_patches_debug_17981 | rasdani/github-patches | git_diff | mdn__kuma-6760 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
T - Update robots.tx (Warnings have spiked on Search Console)
**Summary**
We are seeing a large spike in warnings on Google Search Console. The spike started on December 22, 2019.
<img width="952" alt="Coverage" src="https://user-images.githubusercontent.com/557852/72343521-5fb44000-36cf-11ea-8f1f-12955b98e34d.png">
**Steps To Reproduce (STR)**
1. Go to Search Console
2. Open Coverage Report
3. open "Indexed, though blocked by robots.txt"
**Actual behavior**
It looks like a bunch of URLs that should not be blocked are being blocked from indexing by Google. Examples are:
* https://developer.mozilla.org/it/docs/Web/API/GlobalEventHandlers/onresize$translate
* https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/end%24edit
* https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidateInit/sdpMid$translate?tolocale=de
When I test these URLs in Search Console's robots.txt tester, it tells me that those URLs are allowed, and they should be: $translate and $edit are not in robots.txt.
**Expected behavior**
Those URLs should not be blocked, so that Google can crawl the pages, discover the no-index meta tag and remove them from its index.
**Additional context**
I assume that something is broken when it comes to handling the $ character in the URL. Maybe we should just remove every rule containing $ from our robots.txt. As far as I can tell, almost all of those URLs now live under wiki.developer.mozilla.org anyway.
</issue>
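As an editorial aside (not part of the original report), the deployed rules can be probed with Python's stock `urllib.robotparser`; this is CPython's matcher rather than Google's, so the result only shows how one implementation reads the `$`-style patterns. The rule lines are copied from the `ROBOTS_ALLOWED_TXT` constant in the listing below, and the probe URL is one of the pages reported in Search Console.

```python
# Probe how a robots.txt parser treats the "$"-style Disallow rules.
# Indicative only: Google's crawler may interpret these patterns differently.
from urllib.robotparser import RobotFileParser

rules = [
    "User-agent: *",
    "Disallow: /*$api",
    "Disallow: /*$revision",
    "Disallow: /*$history",
]
probe = "https://developer.mozilla.org/it/docs/Web/API/GlobalEventHandlers/onresize$translate"

parser = RobotFileParser()
parser.parse(rules)
print(parser.can_fetch("Googlebot", probe))
```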
<code>
[start of kuma/landing/views.py]
1 from django.conf import settings
2 from django.http import HttpResponse
3 from django.shortcuts import redirect, render
4 from django.views import static
5 from django.views.decorators.cache import never_cache
6 from django.views.generic import RedirectView
7
8 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
9 from kuma.core.utils import is_wiki
10 from kuma.feeder.models import Bundle
11 from kuma.feeder.sections import SECTION_HACKS
12 from kuma.search.models import Filter
13
14 from .utils import favicon_url
15
16
17 @shared_cache_control
18 def contribute_json(request):
19 return static.serve(request, "contribute.json", document_root=settings.ROOT)
20
21
22 @shared_cache_control
23 def home(request):
24 """Home page."""
25 context = {}
26 # Need for both wiki and react homepage
27 context["updates"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
28
29 # The default template name
30 template_name = "landing/react_homepage.html"
31 if is_wiki(request):
32 template_name = "landing/homepage.html"
33 context["default_filters"] = Filter.objects.default_filters()
34 return render(request, template_name, context)
35
36
37 @ensure_wiki_domain
38 @never_cache
39 def maintenance_mode(request):
40 if settings.MAINTENANCE_MODE:
41 return render(request, "landing/maintenance-mode.html")
42 else:
43 return redirect("home")
44
45
46 @ensure_wiki_domain
47 @shared_cache_control
48 def promote_buttons(request):
49 """Bug 646192: MDN affiliate buttons"""
50 return render(request, "landing/promote_buttons.html")
51
52
53 ROBOTS_ALLOWED_TXT = """\
54 User-agent: *
55 Sitemap: https://developer.mozilla.org/sitemap.xml
56
57 Disallow: /admin/
58 Disallow: /api/
59 Disallow: /*/dashboards/*
60 Disallow: /*docs/feeds
61 Disallow: /*docs/templates
62 Disallow: /*docs*Template:
63 Disallow: /*docs/all
64 Disallow: /*docs/tag*
65 Disallow: /*docs/needs-review*
66 Disallow: /*docs/localization-tag*
67 Disallow: /*docs/with-errors
68 Disallow: /*docs/without-parent
69 Disallow: /*docs/top-level
70 Disallow: /*docs/new
71 Disallow: /*docs/get-documents
72 Disallow: /*docs/submit_akismet_spam
73 Disallow: /*docs/load*
74 Disallow: /*docs/Experiment:*
75 Disallow: /*$api
76 Disallow: /*$compare
77 Disallow: /*$revision
78 Disallow: /*$history
79 Disallow: /*$children
80 Disallow: /*$flag
81 Disallow: /*$locales
82 Disallow: /*$toc
83 Disallow: /*$move
84 Disallow: /*$quick-review
85 Disallow: /*$samples
86 Disallow: /*$revert
87 Disallow: /*$repair_breadcrumbs
88 Disallow: /*$delete
89 Disallow: /*$restore
90 Disallow: /*$purge
91 Disallow: /*$subscribe
92 Disallow: /*$subscribe_to_tree
93 Disallow: /*$vote
94 Disallow: /*docs.json
95 Disallow: /*docs/ckeditor_config.js
96 Disallow: /*/files/
97 Disallow: /media
98 Disallow: /*move-requested
99 Disallow: /*preview-wiki-content
100 Disallow: /*profiles*/edit
101 Disallow: /skins
102 Disallow: /*type=feed
103 Disallow: /*users/
104 """ + "\n".join(
105 "Disallow: /{locale}/search".format(locale=locale)
106 for locale in settings.ENABLED_LOCALES
107 )
108
109 ROBOTS_GO_AWAY_TXT = """\
110 User-Agent: *
111 Disallow: /
112 """
113
114
115 @shared_cache_control
116 def robots_txt(request):
117 """Serve robots.txt that allows or forbids robots."""
118 host = request.get_host()
119 if host in settings.ALLOW_ROBOTS_DOMAINS:
120 robots = ""
121 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
122 robots = ROBOTS_ALLOWED_TXT
123 else:
124 robots = ROBOTS_GO_AWAY_TXT
125 return HttpResponse(robots, content_type="text/plain")
126
127
128 class FaviconRedirect(RedirectView):
129 """Redirect to the favicon in the static img folder (bug 1402497)"""
130
131 def get_redirect_url(self, *args, **kwargs):
132 return favicon_url()
133
[end of kuma/landing/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -50,6 +50,13 @@
return render(request, "landing/promote_buttons.html")
+ROBOTS_ALL_ALLOWED_TXT = """\
+User-agent: *
+Sitemap: https://wiki.developer.mozilla.org/sitemap.xml
+
+Disallow:
+"""
+
ROBOTS_ALLOWED_TXT = """\
User-agent: *
Sitemap: https://developer.mozilla.org/sitemap.xml
@@ -119,7 +126,10 @@
if host in settings.ALLOW_ROBOTS_DOMAINS:
robots = ""
elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
- robots = ROBOTS_ALLOWED_TXT
+ if host == settings.WIKI_HOST:
+ robots = ROBOTS_ALL_ALLOWED_TXT
+ else:
+ robots = ROBOTS_ALLOWED_TXT
else:
robots = ROBOTS_GO_AWAY_TXT
return HttpResponse(robots, content_type="text/plain")
| {"golden_diff": "diff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -50,6 +50,13 @@\n return render(request, \"landing/promote_buttons.html\")\n \n \n+ROBOTS_ALL_ALLOWED_TXT = \"\"\"\\\n+User-agent: *\n+Sitemap: https://wiki.developer.mozilla.org/sitemap.xml\n+\n+Disallow:\n+\"\"\"\n+\n ROBOTS_ALLOWED_TXT = \"\"\"\\\n User-agent: *\n Sitemap: https://developer.mozilla.org/sitemap.xml\n@@ -119,7 +126,10 @@\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n- robots = ROBOTS_ALLOWED_TXT\n+ if host == settings.WIKI_HOST:\n+ robots = ROBOTS_ALL_ALLOWED_TXT\n+ else:\n+ robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type=\"text/plain\")\n", "issue": "T - Update robots.tx (Warnings have spiked on Search Console)\n**Summary**\r\nWe are seeing a large spike in warnings on Google Search Console. The spike started on December 22, 2019.\r\n\r\n<img width=\"952\" alt=\"Coverage\" src=\"https://user-images.githubusercontent.com/557852/72343521-5fb44000-36cf-11ea-8f1f-12955b98e34d.png\">\r\n\r\n**Steps To Reproduce (STR)**\r\n1. Go to Search Console\r\n2. Open Coverage Report\r\n3. open \"Indexed, though blocked by robots.txt\"\r\n\r\n\r\n**Actual behavior**\r\nIt looks like a bunch of URLs are blocked from indexing by Google that should not be blocked. Examples are:\r\n* https://developer.mozilla.org/it/docs/Web/API/GlobalEventHandlers/onresize$translate\r\n* https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/end%24edit\r\n* https://developer.mozilla.org/en-US/docs/Web/API/RTCIceCandidateInit/sdpMid$translate?tolocale=de\r\n\r\nWhen I test these URLs in Search Console's robots.txt tester it tells me that those URLs are allowed, and they should be. $translate, or $edit are not in robots.txt\r\n\r\n\r\n**Expected behavior**\r\nThose URLs should not be blocked, so that Google can crawl the pages, discover the no-index meta tag and remove them from its index.\r\n\r\n\r\n**Additional context**\r\nI assume that something is broken when it comes to handling the $ chracter in the URL. Maybe we just remove everything with the $url from our robots.txt. 
As far as I can tell, almost all of those URLs now live under wiki.developer.mozilla.org anyway.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, \"contribute.json\", document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n context = {}\n # Need for both wiki and react homepage\n context[\"updates\"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n\n # The default template name\n template_name = \"landing/react_homepage.html\"\n if is_wiki(request):\n template_name = \"landing/homepage.html\"\n context[\"default_filters\"] = Filter.objects.default_filters()\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, \"landing/maintenance-mode.html\")\n else:\n return redirect(\"home\")\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, \"landing/promote_buttons.html\")\n\n\nROBOTS_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /admin/\nDisallow: /api/\nDisallow: /*/dashboards/*\nDisallow: /*docs/feeds\nDisallow: /*docs/templates\nDisallow: /*docs*Template:\nDisallow: /*docs/all\nDisallow: /*docs/tag*\nDisallow: /*docs/needs-review*\nDisallow: /*docs/localization-tag*\nDisallow: /*docs/with-errors\nDisallow: /*docs/without-parent\nDisallow: /*docs/top-level\nDisallow: /*docs/new\nDisallow: /*docs/get-documents\nDisallow: /*docs/submit_akismet_spam\nDisallow: /*docs/load*\nDisallow: /*docs/Experiment:*\nDisallow: /*$api\nDisallow: /*$compare\nDisallow: /*$revision\nDisallow: /*$history\nDisallow: /*$children\nDisallow: /*$flag\nDisallow: /*$locales\nDisallow: /*$toc\nDisallow: /*$move\nDisallow: /*$quick-review\nDisallow: /*$samples\nDisallow: /*$revert\nDisallow: /*$repair_breadcrumbs\nDisallow: /*$delete\nDisallow: /*$restore\nDisallow: /*$purge\nDisallow: /*$subscribe\nDisallow: /*$subscribe_to_tree\nDisallow: /*$vote\nDisallow: /*docs.json\nDisallow: /*docs/ckeditor_config.js\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*move-requested\nDisallow: /*preview-wiki-content\nDisallow: /*profiles*/edit\nDisallow: /skins\nDisallow: /*type=feed\nDisallow: /*users/\n\"\"\" + \"\\n\".join(\n \"Disallow: /{locale}/search\".format(locale=locale)\n for locale in settings.ENABLED_LOCALES\n)\n\nROBOTS_GO_AWAY_TXT = \"\"\"\\\nUser-Agent: *\nDisallow: /\n\"\"\"\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type=\"text/plain\")\n\n\nclass FaviconRedirect(RedirectView):\n 
\"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}]} | 2,133 | 240 |
gh_patches_debug_28659 | rasdani/github-patches | git_diff | searxng__searxng-2303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: seznam(CZ) ignored in search results
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Versions of SearX(NG):**
2022.11.25-1314c1c5, vanilla
2022.11.27-90b429bb, vanilla
2022.11.28, vanilla
2.3.7+20221122, fork
1.1.0-37-4d9586e2, vanilla
**How did you install SearX(NG)?**
I didn't. I tried several public instances and all suffer from the same issue.
**What happened?**
When searching for some Czech terms, like "seznam.cz", there is never the "seznam(CZ)" tag under individual search results. When trying to limit the search to the seznam(CZ) engine:
`!szn seznam.cz`
I'm getting an error:
`Sorry!we didn't find any results. Please use another query or search in more categories.`
When trying to limit the search to bing using the "!bi" keyword, it works as expected (all search results have the bing tag under them).
**How to reproduce:**
`!szn <search term>`
**Expected behavior:**
When limiting searching to the seznam(CZ) engine, all search results should have the "seznam(CZ)" tag under them.
When searching w/o limiting, "seznam(CZ)" tag should be mixed in with other tags among search results.
**Screenshots:**

**Additional context:**
Besides public instances with the latest docker image (2022.11.25-1314c1c5), the problem also shows up with forks, upstream searX instances, and NG versions newer than those using the dockerized images from Docker Hub.
https://searx.be/
https://searx.tiekoetter.com/
https://searx.mistli.net/
https://search.privacyguides.net/
https://searx.webheberg.info/
https://spot.murena.io/
https://darmarit.org/searx/
</issue>
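As an editorial debugging sketch (not from the original report): save one results page from search.seznam.cz locally, with `seznam_results.html` below being a placeholder name, and check whether the XPath selector used by `searx/engines/seznam.py` still matches anything in the live markup.

```python
# Does the engine's result selector still match the current seznam.cz markup?
# "seznam_results.html" is a locally saved copy of a results page (placeholder).
from lxml import html

with open("seznam_results.html", encoding="utf-8") as f:
    dom = html.fromstring(f.read())

result_divs = dom.xpath('//div[@data-dot="results"]/div')
print(f"matched result containers: {len(result_divs)}")
# Zero matches would suggest the site's markup changed and the selector is stale.
```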
<code>
[start of searx/engines/seznam.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """
3 Seznam
4 """
5
6 from urllib.parse import urlencode
7 from lxml import html
8 from searx.network import get
9 from searx.exceptions import SearxEngineAccessDeniedException
10 from searx.utils import (
11 extract_text,
12 eval_xpath_list,
13 eval_xpath_getindex,
14 eval_xpath,
15 )
16
17 # about
18 about = {
19 "website": "https://www.seznam.cz/",
20 "wikidata_id": "Q3490485",
21 "official_api_documentation": "https://api.sklik.cz/",
22 "use_official_api": False,
23 "require_api_key": False,
24 "results": "HTML",
25 "language": "cz",
26 }
27
28 categories = ['general', 'web']
29 base_url = 'https://search.seznam.cz/'
30
31
32 def request(query, params):
33 response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
34 dom = html.fromstring(response_index.text)
35
36 url_params = {
37 'q': query,
38 'oq': query,
39 }
40 for e in eval_xpath_list(dom, '//input[@type="hidden"]'):
41 name = e.get('name')
42 value = e.get('value')
43 url_params[name] = value
44
45 params['url'] = base_url + '?' + urlencode(url_params)
46 params['cookies'] = response_index.cookies
47 return params
48
49
50 def response(resp):
51 if resp.url.path.startswith('/verify'):
52 raise SearxEngineAccessDeniedException()
53
54 results = []
55
56 dom = html.fromstring(resp.content.decode())
57 for result_element in eval_xpath_list(dom, '//div[@data-dot="results"]/div'):
58 result_data = eval_xpath_getindex(result_element, './/div[contains(@class, "bec586")]', 0, default=None)
59 if result_data is None:
60 continue
61 title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
62 results.append(
63 {
64 'url': title_element.get('href'),
65 'title': extract_text(title_element),
66 'content': extract_text(eval_xpath(result_data, './/div[@class="_3eded7"]')),
67 }
68 )
69
70 return results
71
[end of searx/engines/seznam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py
--- a/searx/engines/seznam.py
+++ b/searx/engines/seznam.py
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- Seznam
+# lint: pylint
+"""Seznam
+
"""
from urllib.parse import urlencode
@@ -11,7 +12,6 @@
extract_text,
eval_xpath_list,
eval_xpath_getindex,
- eval_xpath,
)
# about
@@ -54,8 +54,12 @@
results = []
dom = html.fromstring(resp.content.decode())
- for result_element in eval_xpath_list(dom, '//div[@data-dot="results"]/div'):
- result_data = eval_xpath_getindex(result_element, './/div[contains(@class, "bec586")]', 0, default=None)
+ for result_element in eval_xpath_list(
+ dom, '//div[@id="searchpage-root"]//div[@class="Layout--left"]/div[@class="f2c528"]'
+ ):
+ result_data = eval_xpath_getindex(
+ result_element, './/div[@class="c8774a" or @class="e69e8d a11657"]', 0, default=None
+ )
if result_data is None:
continue
title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
@@ -63,7 +67,7 @@
{
'url': title_element.get('href'),
'title': extract_text(title_element),
- 'content': extract_text(eval_xpath(result_data, './/div[@class="_3eded7"]')),
+ 'content': extract_text(result_data),
}
)
| {"golden_diff": "diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py\n--- a/searx/engines/seznam.py\n+++ b/searx/engines/seznam.py\n@@ -1,6 +1,7 @@\n # SPDX-License-Identifier: AGPL-3.0-or-later\n-\"\"\"\n- Seznam\n+# lint: pylint\n+\"\"\"Seznam\n+\n \"\"\"\n \n from urllib.parse import urlencode\n@@ -11,7 +12,6 @@\n extract_text,\n eval_xpath_list,\n eval_xpath_getindex,\n- eval_xpath,\n )\n \n # about\n@@ -54,8 +54,12 @@\n results = []\n \n dom = html.fromstring(resp.content.decode())\n- for result_element in eval_xpath_list(dom, '//div[@data-dot=\"results\"]/div'):\n- result_data = eval_xpath_getindex(result_element, './/div[contains(@class, \"bec586\")]', 0, default=None)\n+ for result_element in eval_xpath_list(\n+ dom, '//div[@id=\"searchpage-root\"]//div[@class=\"Layout--left\"]/div[@class=\"f2c528\"]'\n+ ):\n+ result_data = eval_xpath_getindex(\n+ result_element, './/div[@class=\"c8774a\" or @class=\"e69e8d a11657\"]', 0, default=None\n+ )\n if result_data is None:\n continue\n title_element = eval_xpath_getindex(result_element, './/h3/a', 0)\n@@ -63,7 +67,7 @@\n {\n 'url': title_element.get('href'),\n 'title': extract_text(title_element),\n- 'content': extract_text(eval_xpath(result_data, './/div[@class=\"_3eded7\"]')),\n+ 'content': extract_text(result_data),\n }\n )\n", "issue": "Bug: seznam(CZ) ignored in search results\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Versions of SearX(NG):**\r\n2022.11.25-1314c1c5, vanilla\r\n2022.11.27-90b429bb, vanilla\r\n2022.11.28, vanilla\r\n2.3.7+20221122, fork\r\n1.1.0-37-4d9586e2, vanilla\r\n\r\n**How did you install SearX(NG)?**\r\nI didn't. I tried several public instances and all suffer from the same issue.\r\n\r\n**What happened?**\r\nWhen searching for some Czech terms, like \"seznam.cz\", there is never the \"seznam(CZ)\" tag under individual search results. When trying to limit the search to the seznam(CZ) engine:\r\n`!szn seznam.cz`\r\nI'm getting an error:\r\n`Sorry!we didn't find any results. Please use another query or search in more categories.`\r\nWhen trying to limit the search to bing using \"!bi\" keyword, it works as expected (all search results have the bing tag under them).\r\n\r\n**How to reproduce:**\r\n`!szn <search term>`\r\n\r\n**Expected behavior:**\r\nWhen limiting searching to the seznam(CZ) engine, all search results should have the \"seznam(CZ)\" tag under them.\r\nWhen searching w/o limiting, \"seznam(CZ)\" tag should be mixed in with other tags among search results.\r\n\r\n**Screenshots:**\r\n\r\n\r\n**Additional context:**\r\nBesides public instances w/ the latest docker image (2022.11.25-1314c1c5). 
It neither works w/ forks, upstream searX instances nor higher NG versions than those utilizing dockerized images from docker hub.\r\nhttps://searx.be/\r\nhttps://searx.tiekoetter.com/\r\nhttps://searx.mistli.net/\r\nhttps://search.privacyguides.net/\r\nhttps://searx.webheberg.info/\r\nhttps://spot.murena.io/\r\nhttps://darmarit.org/searx/\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Seznam\n\"\"\"\n\nfrom urllib.parse import urlencode\nfrom lxml import html\nfrom searx.network import get\nfrom searx.exceptions import SearxEngineAccessDeniedException\nfrom searx.utils import (\n extract_text,\n eval_xpath_list,\n eval_xpath_getindex,\n eval_xpath,\n)\n\n# about\nabout = {\n \"website\": \"https://www.seznam.cz/\",\n \"wikidata_id\": \"Q3490485\",\n \"official_api_documentation\": \"https://api.sklik.cz/\",\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": \"HTML\",\n \"language\": \"cz\",\n}\n\ncategories = ['general', 'web']\nbase_url = 'https://search.seznam.cz/'\n\n\ndef request(query, params):\n response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)\n dom = html.fromstring(response_index.text)\n\n url_params = {\n 'q': query,\n 'oq': query,\n }\n for e in eval_xpath_list(dom, '//input[@type=\"hidden\"]'):\n name = e.get('name')\n value = e.get('value')\n url_params[name] = value\n\n params['url'] = base_url + '?' + urlencode(url_params)\n params['cookies'] = response_index.cookies\n return params\n\n\ndef response(resp):\n if resp.url.path.startswith('/verify'):\n raise SearxEngineAccessDeniedException()\n\n results = []\n\n dom = html.fromstring(resp.content.decode())\n for result_element in eval_xpath_list(dom, '//div[@data-dot=\"results\"]/div'):\n result_data = eval_xpath_getindex(result_element, './/div[contains(@class, \"bec586\")]', 0, default=None)\n if result_data is None:\n continue\n title_element = eval_xpath_getindex(result_element, './/h3/a', 0)\n results.append(\n {\n 'url': title_element.get('href'),\n 'title': extract_text(title_element),\n 'content': extract_text(eval_xpath(result_data, './/div[@class=\"_3eded7\"]')),\n }\n )\n\n return results\n", "path": "searx/engines/seznam.py"}]} | 1,722 | 424 |
gh_patches_debug_17634 | rasdani/github-patches | git_diff | liqd__a4-opin-726 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
use small images in user avatar in moderators list
</issue>
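One possible direction (an editorial sketch, not the project's confirmed fix): have the serializer return a thumbnail URL instead of the raw upload. This assumes `easy_thumbnails` is available and that an `avatar_small` thumbnail alias is configured; both are assumptions here.

```python
# Sketch: expose a small thumbnail URL instead of the full-size avatar.
# Assumes easy_thumbnails is installed and an "avatar_small" alias exists.
from easy_thumbnails.files import get_thumbnailer
from rest_framework import serializers

from .models import User


class UserSerializer(serializers.ModelSerializer):
    avatar = serializers.SerializerMethodField()

    class Meta:
        model = User
        fields = ("id", "username", "avatar", "default_avatar")
        read_only_fields = ("id", "username", "avatar", "default_avatar")

    def get_avatar(self, obj):
        # Return a small rendition's URL rather than the original image.
        if obj.avatar:
            return get_thumbnailer(obj.avatar)["avatar_small"].url
        return None
```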
<code>
[start of euth/users/serializers.py]
1 from rest_framework import serializers
2
3 from .models import User
4
5
6 class UserSerializer(serializers.ModelSerializer):
7 avatar = serializers.ImageField()
8
9 class Meta:
10 model = User
11 fields = ('id', 'username', 'avatar', 'default_avatar')
12 read_only_fields = ('id', 'username', 'avatar', 'default_avatar')
13
14
15 # mails should not be exposed in API, so there is a separate one for this
16 class UserWithMailSerializer(UserSerializer):
17 class Meta(UserSerializer.Meta):
18 fields = ('id', 'username', 'avatar', 'default_avatar', 'email')
19 read_only_fields = ('id', 'username', 'avatar', 'default_avatar',
20 'email')
21
[end of euth/users/serializers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/users/serializers.py b/euth/users/serializers.py
--- a/euth/users/serializers.py
+++ b/euth/users/serializers.py
@@ -1,16 +1,22 @@
+from easy_thumbnails.files import get_thumbnailer
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
- avatar = serializers.ImageField()
+ avatar = serializers.SerializerMethodField()
class Meta:
model = User
fields = ('id', 'username', 'avatar', 'default_avatar')
read_only_fields = ('id', 'username', 'avatar', 'default_avatar')
+ def get_avatar(self, obj):
+ if obj.avatar:
+ image = get_thumbnailer(obj.avatar)['avatar_small']
+ return image.url
+
# mails should not be exposed in API, so there is a separate one for this
class UserWithMailSerializer(UserSerializer):
| {"golden_diff": "diff --git a/euth/users/serializers.py b/euth/users/serializers.py\n--- a/euth/users/serializers.py\n+++ b/euth/users/serializers.py\n@@ -1,16 +1,22 @@\n+from easy_thumbnails.files import get_thumbnailer\n from rest_framework import serializers\n \n from .models import User\n \n \n class UserSerializer(serializers.ModelSerializer):\n- avatar = serializers.ImageField()\n+ avatar = serializers.SerializerMethodField()\n \n class Meta:\n model = User\n fields = ('id', 'username', 'avatar', 'default_avatar')\n read_only_fields = ('id', 'username', 'avatar', 'default_avatar')\n \n+ def get_avatar(self, obj):\n+ if obj.avatar:\n+ image = get_thumbnailer(obj.avatar)['avatar_small']\n+ return image.url\n+\n \n # mails should not be exposed in API, so there is a separate one for this\n class UserWithMailSerializer(UserSerializer):\n", "issue": "use small images in user avatar in moderators list\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import User\n\n\nclass UserSerializer(serializers.ModelSerializer):\n avatar = serializers.ImageField()\n\n class Meta:\n model = User\n fields = ('id', 'username', 'avatar', 'default_avatar')\n read_only_fields = ('id', 'username', 'avatar', 'default_avatar')\n\n\n# mails should not be exposed in API, so there is a separate one for this\nclass UserWithMailSerializer(UserSerializer):\n class Meta(UserSerializer.Meta):\n fields = ('id', 'username', 'avatar', 'default_avatar', 'email')\n read_only_fields = ('id', 'username', 'avatar', 'default_avatar',\n 'email')\n", "path": "euth/users/serializers.py"}]} | 727 | 205 |
gh_patches_debug_647 | rasdani/github-patches | git_diff | pex-tool__pex-2095 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.129
On the docket:
+ [x] Pex resolves VCS and local project requirements from locks incorrectly. #2092
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.128"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.128"
+__version__ = "2.1.129"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.128\"\n+__version__ = \"2.1.129\"\n", "issue": "Release 2.1.129\nOn the docket:\r\n+ [x] Pex resolves VCS and local project requirements from locks incorrectly. #2092\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.128\"\n", "path": "pex/version.py"}]} | 621 | 98 |
gh_patches_debug_19888 | rasdani/github-patches | git_diff | deepset-ai__haystack-6177 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve the `WhisperTranscriber` API docs
The `run` function in the API reference seems to be missing the return type. This should probably be fixed in the codebase.
</issue>
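For orientation, a short usage sketch based on the `transcribe()` signature shown in the listing below; the file name, API key, and the "text" response key are placeholders and assumptions rather than details taken from this issue.

```python
# Usage sketch for WhisperTranscriber.transcribe(); values are placeholders.
# The "text" key assumes the usual Whisper response shape.
from haystack.nodes.audio.whisper_transcriber import WhisperTranscriber

whisper = WhisperTranscriber(api_key="OPENAI_API_KEY")
result = whisper.transcribe("meeting.mp3", language="en")
print(result.get("text"))
```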
<code>
[start of haystack/nodes/audio/whisper_transcriber.py]
1 import json
2 from typing import List, Optional, Dict, Any, Union, BinaryIO, Literal
3
4 import requests
5 from requests import PreparedRequest
6
7 from haystack import MultiLabel, Document
8 from haystack.errors import OpenAIError, OpenAIRateLimitError
9 from haystack.nodes.base import BaseComponent
10 from haystack.utils.import_utils import is_whisper_available
11 from haystack.lazy_imports import LazyImport
12
13
14 with LazyImport(message="Run 'pip install farm-haystack[inference]'") as torch_import:
15 import torch
16
17
18 WhisperModel = Literal["tiny", "small", "medium", "large", "large-v2"]
19
20
21 class WhisperTranscriber(BaseComponent):
22 """
23 Transcribes audio files using OpenAI's Whisper. This class supports two underlying implementations:
24
25 - API (default): Uses the OpenAI API and requires an API key. See the [OpenAI blog post](https://beta.openai.com/docs/api-reference/whisper for more details.
26 - Local (requires installing Whisper): Uses the local installation
27 of [Whisper](https://github.com/openai/whisper).
28
29 To use Whisper locally, install it following the instructions on
30 the Whisper [GitHub repo](https://github.com/openai/whisper) and omit the `api_key` parameter.
31 You can work around a dependency conflict caused by openai-whisper pinning an older tiktoken version than required
32 by Haystack if you install via `pip install --no-deps numba llvmlite 'openai-whisper>=20230918'`.
33
34 To use the API implementation, provide an api_key. You can get one by signing up
35 for an [OpenAI account](https://beta.openai.com/).
36
37 For the supported audio formats, languages, and other parameters, see the
38 [Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper
39 [github repo](https://github.com/openai/whisper).
40 """
41
42 # If it's not a decision component, there is only one outgoing edge
43 outgoing_edges = 1
44
45 def __init__(
46 self,
47 api_key: Optional[str] = None,
48 model_name_or_path: WhisperModel = "medium",
49 device: Optional[Union[str, "torch.device"]] = None,
50 api_base: str = "https://api.openai.com/v1",
51 ) -> None:
52 """
53 Creates a WhisperTranscriber instance.
54
55 :param api_key: OpenAI API key. If None, a local installation of Whisper is used.
56 :param model_name_or_path: Name of the model to use. If using a local installation of Whisper, set this to one of the following values: "tiny", "small", "medium", "large", "large-v2". If using
57 the API, set this value to: "whisper-1" (default).
58 :param device: Device to use for inference. Only used if you're using a local
59 installation of Whisper. If None, the device is automatically selected.
60 :param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.
61 """
62 super().__init__()
63 self.api_key = api_key
64 self.api_base = api_base
65 self.use_local_whisper = is_whisper_available() and self.api_key is None
66
67 if self.use_local_whisper:
68 import whisper
69
70 self._model = whisper.load_model(model_name_or_path, device=device)
71 else:
72 if api_key is None:
73 raise ValueError(
74 "Provide a valid api_key for OpenAI API. Alternatively, "
75 "install OpenAI Whisper (see [Whisper](https://github.com/openai/whisper) for more details)."
76 )
77
78 def transcribe(
79 self,
80 audio_file: Union[str, BinaryIO],
81 language: Optional[str] = None,
82 return_segments: bool = False,
83 translate: bool = False,
84 **kwargs,
85 ) -> Dict[str, Any]:
86 """
87 Transcribe an audio file.
88
89 :param audio_file: Path to the audio file or a binary file-like object.
90 :param language: Language of the audio file. If None, the language is automatically detected.
91 :param return_segments: If True, returns the transcription for each segment of the audio file. Supported with
92 local installation of whisper only.
93 :param translate: If True, translates the transcription to English.
94
95 """
96 transcript: Dict[str, Any] = {}
97
98 new_kwargs = {k: v for k, v in kwargs.items() if v is not None}
99 if language is not None:
100 new_kwargs["language"] = language
101
102 if self.use_local_whisper:
103 new_kwargs["return_segments"] = return_segments
104 transcript = self._invoke_local(audio_file, translate, **new_kwargs)
105 elif self.api_key:
106 transcript = self._invoke_api(audio_file, translate, **new_kwargs)
107 return transcript
108
109 def _invoke_api(
110 self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs
111 ) -> Dict[str, Any]:
112 if isinstance(audio_file, str):
113 with open(audio_file, "rb") as f:
114 return self._invoke_api(f, translate, **kwargs)
115 else:
116 headers = {"Authorization": f"Bearer {self.api_key}"}
117 request = PreparedRequest()
118 url: str = (
119 f"{self.api_base}/audio/transcriptions" if not translate else f"{self.api_base}/audio/translations"
120 )
121
122 request.prepare(
123 method="POST",
124 url=url,
125 headers=headers,
126 data={"model": "whisper-1", **kwargs},
127 files=[("file", (audio_file.name, audio_file, "application/octet-stream"))],
128 )
129 response = requests.post(url, data=request.body, headers=request.headers, timeout=600)
130
131 if response.status_code != 200:
132 openai_error: OpenAIError
133 if response.status_code == 429:
134 openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
135 else:
136 openai_error = OpenAIError(
137 f"OpenAI returned an error.\n"
138 f"Status code: {response.status_code}\n"
139 f"Response body: {response.text}",
140 status_code=response.status_code,
141 )
142 raise openai_error
143
144 return json.loads(response.content)
145
146 def _invoke_local(
147 self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs
148 ) -> Dict[str, Any]:
149 torch_import.check()
150
151 if isinstance(audio_file, str):
152 with open(audio_file, "rb") as f:
153 return self._invoke_local(f, translate, **kwargs)
154 else:
155 return_segments = kwargs.pop("return_segments", None)
156 kwargs["task"] = "translate" if translate else "transcribe"
157 transcription = self._model.transcribe(audio_file.name, **kwargs)
158 if not return_segments:
159 transcription.pop("segments", None)
160
161 return transcription
162
163 def run(
164 self,
165 query: Optional[str] = None,
166 file_paths: Optional[List[str]] = None,
167 labels: Optional[MultiLabel] = None,
168 documents: Optional[List[Document]] = None,
169 meta: Optional[dict] = None,
170 ): # type: ignore
171 """
172 Transcribe audio files.
173
174 :param query: Ignored
175 :param file_paths: List of paths to audio files.
176 :param labels: Ignored
177 :param documents: Ignored
178 :param meta: Ignored
179 """
180 transcribed_documents: List[Document] = []
181 if file_paths:
182 for file_path in file_paths:
183 transcription = self.transcribe(file_path)
184 d = Document.from_dict(transcription, field_map={"text": "content"})
185 transcribed_documents.append(d)
186
187 output = {"documents": transcribed_documents}
188 return output, "output_1"
189
190 def run_batch(
191 self,
192 queries: Optional[Union[str, List[str]]] = None,
193 file_paths: Optional[List[str]] = None,
194 labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,
195 documents: Optional[Union[List[Document], List[List[Document]]]] = None,
196 meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
197 params: Optional[dict] = None,
198 debug: Optional[bool] = None,
199 ): # type: ignore
200 """
201 Transcribe audio files.
202
203 :param queries: Ignored
204 :param file_paths: List of paths to audio files.
205 :param labels: Ignored
206 :param documents: Ignored
207 :param meta: Ignored
208 :param params: Ignored
209 :param debug: Ignored
210 """
211 if file_paths and isinstance(file_paths[0], list):
212 all_files = []
213 for files_list in file_paths:
214 all_files += files_list
215 return self.run(file_paths=all_files)
216 return self.run(file_paths=file_paths)
217
[end of haystack/nodes/audio/whisper_transcriber.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/nodes/audio/whisper_transcriber.py b/haystack/nodes/audio/whisper_transcriber.py
--- a/haystack/nodes/audio/whisper_transcriber.py
+++ b/haystack/nodes/audio/whisper_transcriber.py
@@ -91,6 +91,7 @@
:param return_segments: If True, returns the transcription for each segment of the audio file. Supported with
local installation of whisper only.
:param translate: If True, translates the transcription to English.
+ :return: A dictionary containing the transcription text and metadata like timings, segments etc.
"""
transcript: Dict[str, Any] = {}
@@ -176,6 +177,8 @@
:param labels: Ignored
:param documents: Ignored
:param meta: Ignored
+ :return: A dictionary containing a list of Document objects, one for each input file.
+
"""
transcribed_documents: List[Document] = []
if file_paths:
| {"golden_diff": "diff --git a/haystack/nodes/audio/whisper_transcriber.py b/haystack/nodes/audio/whisper_transcriber.py\n--- a/haystack/nodes/audio/whisper_transcriber.py\n+++ b/haystack/nodes/audio/whisper_transcriber.py\n@@ -91,6 +91,7 @@\n :param return_segments: If True, returns the transcription for each segment of the audio file. Supported with\n local installation of whisper only.\n :param translate: If True, translates the transcription to English.\n+ :return: A dictionary containing the transcription text and metadata like timings, segments etc.\n \n \"\"\"\n transcript: Dict[str, Any] = {}\n@@ -176,6 +177,8 @@\n :param labels: Ignored\n :param documents: Ignored\n :param meta: Ignored\n+ :return: A dictionary containing a list of Document objects, one for each input file.\n+\n \"\"\"\n transcribed_documents: List[Document] = []\n if file_paths:\n", "issue": "Improve the `WhisperTranscriber` API docs\nThe `run` function in the API reference seems to be missing the return type. This should probably be fixed in the codebase. \r\n\n", "before_files": [{"content": "import json\nfrom typing import List, Optional, Dict, Any, Union, BinaryIO, Literal\n\nimport requests\nfrom requests import PreparedRequest\n\nfrom haystack import MultiLabel, Document\nfrom haystack.errors import OpenAIError, OpenAIRateLimitError\nfrom haystack.nodes.base import BaseComponent\nfrom haystack.utils.import_utils import is_whisper_available\nfrom haystack.lazy_imports import LazyImport\n\n\nwith LazyImport(message=\"Run 'pip install farm-haystack[inference]'\") as torch_import:\n import torch\n\n\nWhisperModel = Literal[\"tiny\", \"small\", \"medium\", \"large\", \"large-v2\"]\n\n\nclass WhisperTranscriber(BaseComponent):\n \"\"\"\n Transcribes audio files using OpenAI's Whisper. This class supports two underlying implementations:\n\n - API (default): Uses the OpenAI API and requires an API key. See the [OpenAI blog post](https://beta.openai.com/docs/api-reference/whisper for more details.\n - Local (requires installing Whisper): Uses the local installation\n of [Whisper](https://github.com/openai/whisper).\n\n To use Whisper locally, install it following the instructions on\n the Whisper [GitHub repo](https://github.com/openai/whisper) and omit the `api_key` parameter.\n You can work around a dependency conflict caused by openai-whisper pinning an older tiktoken version than required\n by Haystack if you install via `pip install --no-deps numba llvmlite 'openai-whisper>=20230918'`.\n\n To use the API implementation, provide an api_key. You can get one by signing up\n for an [OpenAI account](https://beta.openai.com/).\n\n For the supported audio formats, languages, and other parameters, see the\n [Whisper API documentation](https://platform.openai.com/docs/guides/speech-to-text) and the official Whisper\n [github repo](https://github.com/openai/whisper).\n \"\"\"\n\n # If it's not a decision component, there is only one outgoing edge\n outgoing_edges = 1\n\n def __init__(\n self,\n api_key: Optional[str] = None,\n model_name_or_path: WhisperModel = \"medium\",\n device: Optional[Union[str, \"torch.device\"]] = None,\n api_base: str = \"https://api.openai.com/v1\",\n ) -> None:\n \"\"\"\n Creates a WhisperTranscriber instance.\n\n :param api_key: OpenAI API key. If None, a local installation of Whisper is used.\n :param model_name_or_path: Name of the model to use. If using a local installation of Whisper, set this to one of the following values: \"tiny\", \"small\", \"medium\", \"large\", \"large-v2\". 
If using\n the API, set this value to: \"whisper-1\" (default).\n :param device: Device to use for inference. Only used if you're using a local\n installation of Whisper. If None, the device is automatically selected.\n :param api_base: The OpenAI API Base url, defaults to `https://api.openai.com/v1`.\n \"\"\"\n super().__init__()\n self.api_key = api_key\n self.api_base = api_base\n self.use_local_whisper = is_whisper_available() and self.api_key is None\n\n if self.use_local_whisper:\n import whisper\n\n self._model = whisper.load_model(model_name_or_path, device=device)\n else:\n if api_key is None:\n raise ValueError(\n \"Provide a valid api_key for OpenAI API. Alternatively, \"\n \"install OpenAI Whisper (see [Whisper](https://github.com/openai/whisper) for more details).\"\n )\n\n def transcribe(\n self,\n audio_file: Union[str, BinaryIO],\n language: Optional[str] = None,\n return_segments: bool = False,\n translate: bool = False,\n **kwargs,\n ) -> Dict[str, Any]:\n \"\"\"\n Transcribe an audio file.\n\n :param audio_file: Path to the audio file or a binary file-like object.\n :param language: Language of the audio file. If None, the language is automatically detected.\n :param return_segments: If True, returns the transcription for each segment of the audio file. Supported with\n local installation of whisper only.\n :param translate: If True, translates the transcription to English.\n\n \"\"\"\n transcript: Dict[str, Any] = {}\n\n new_kwargs = {k: v for k, v in kwargs.items() if v is not None}\n if language is not None:\n new_kwargs[\"language\"] = language\n\n if self.use_local_whisper:\n new_kwargs[\"return_segments\"] = return_segments\n transcript = self._invoke_local(audio_file, translate, **new_kwargs)\n elif self.api_key:\n transcript = self._invoke_api(audio_file, translate, **new_kwargs)\n return transcript\n\n def _invoke_api(\n self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs\n ) -> Dict[str, Any]:\n if isinstance(audio_file, str):\n with open(audio_file, \"rb\") as f:\n return self._invoke_api(f, translate, **kwargs)\n else:\n headers = {\"Authorization\": f\"Bearer {self.api_key}\"}\n request = PreparedRequest()\n url: str = (\n f\"{self.api_base}/audio/transcriptions\" if not translate else f\"{self.api_base}/audio/translations\"\n )\n\n request.prepare(\n method=\"POST\",\n url=url,\n headers=headers,\n data={\"model\": \"whisper-1\", **kwargs},\n files=[(\"file\", (audio_file.name, audio_file, \"application/octet-stream\"))],\n )\n response = requests.post(url, data=request.body, headers=request.headers, timeout=600)\n\n if response.status_code != 200:\n openai_error: OpenAIError\n if response.status_code == 429:\n openai_error = OpenAIRateLimitError(f\"API rate limit exceeded: {response.text}\")\n else:\n openai_error = OpenAIError(\n f\"OpenAI returned an error.\\n\"\n f\"Status code: {response.status_code}\\n\"\n f\"Response body: {response.text}\",\n status_code=response.status_code,\n )\n raise openai_error\n\n return json.loads(response.content)\n\n def _invoke_local(\n self, audio_file: Union[str, BinaryIO], translate: Optional[bool] = False, **kwargs\n ) -> Dict[str, Any]:\n torch_import.check()\n\n if isinstance(audio_file, str):\n with open(audio_file, \"rb\") as f:\n return self._invoke_local(f, translate, **kwargs)\n else:\n return_segments = kwargs.pop(\"return_segments\", None)\n kwargs[\"task\"] = \"translate\" if translate else \"transcribe\"\n transcription = self._model.transcribe(audio_file.name, **kwargs)\n if 
not return_segments:\n transcription.pop(\"segments\", None)\n\n return transcription\n\n def run(\n self,\n query: Optional[str] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[MultiLabel] = None,\n documents: Optional[List[Document]] = None,\n meta: Optional[dict] = None,\n ): # type: ignore\n \"\"\"\n Transcribe audio files.\n\n :param query: Ignored\n :param file_paths: List of paths to audio files.\n :param labels: Ignored\n :param documents: Ignored\n :param meta: Ignored\n \"\"\"\n transcribed_documents: List[Document] = []\n if file_paths:\n for file_path in file_paths:\n transcription = self.transcribe(file_path)\n d = Document.from_dict(transcription, field_map={\"text\": \"content\"})\n transcribed_documents.append(d)\n\n output = {\"documents\": transcribed_documents}\n return output, \"output_1\"\n\n def run_batch(\n self,\n queries: Optional[Union[str, List[str]]] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,\n documents: Optional[Union[List[Document], List[List[Document]]]] = None,\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n params: Optional[dict] = None,\n debug: Optional[bool] = None,\n ): # type: ignore\n \"\"\"\n Transcribe audio files.\n\n :param queries: Ignored\n :param file_paths: List of paths to audio files.\n :param labels: Ignored\n :param documents: Ignored\n :param meta: Ignored\n :param params: Ignored\n :param debug: Ignored\n \"\"\"\n if file_paths and isinstance(file_paths[0], list):\n all_files = []\n for files_list in file_paths:\n all_files += files_list\n return self.run(file_paths=all_files)\n return self.run(file_paths=file_paths)\n", "path": "haystack/nodes/audio/whisper_transcriber.py"}]} | 3,128 | 229 |
gh_patches_debug_6785 | rasdani/github-patches | git_diff | ibis-project__ibis-8445 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bug: regression: slow to compute name of deeply nested expressions
### What happened?
```python
import ibis
x = ibis.literal(1)
for i in range(100):
x = x + 1
# if I hardcode the name, this is instantaneous
# x = x.name(f"step_{i}")
x
```
If I ctrl-c this after a few seconds, I see, e.g.:
```
File ~/code/ibis/ibis/expr/operations/core.py:89, in Value.name(self)
86 @property
87 def name(self) -> str:
88 names = (arg.name for arg in self.__args__ if hasattr(arg, "name"))
---> 89 return f"{self.__class__.__name__}({', '.join(names)})"
File ~/code/ibis/ibis/expr/operations/core.py:88, in <genexpr>(.0)
86 @property
87 def name(self) -> str:
---> 88 names = (arg.name for arg in self.__args__ if hasattr(arg, "name"))
89 return f"{self.__class__.__name__}({', '.join(names)})"
[... skipping similar frames: <genexpr> at line 88 (97 times), Value.name at line 89 (97 times)]
File ~/code/ibis/ibis/expr/operations/core.py:89, in Value.name(self)
86 @property
87 def name(self) -> str:
88 names = (arg.name for arg in self.__args__ if hasattr(arg, "name"))
---> 89 return f"{self.__class__.__name__}({', '.join(names)})"
File ~/code/ibis/ibis/expr/operations/core.py:88, in <genexpr>(.0)
86 @property
87 def name(self) -> str:
---> 88 names = (arg.name for arg in self.__args__ if hasattr(arg, "name"))
89 return f"{self.__class__.__name__}({', '.join(names)})"
```
### What version of ibis are you using?
main 09b6adaeecf1b6388866856c795022cfca4b2679
### What backend(s) are you using, if any?
NA
### Relevant log output
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
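The frames in that traceback alternate between `hasattr(arg, "name")` and the f-string's `arg.name`: each node's `name` evaluates its child's `name` twice, once inside `hasattr` and once to build the string, so the cost doubles with every nesting level. A toy stand-in (not ibis's real classes) that reproduces the blow-up next to the single-lookup variant the patch below switches to:

```python
import timeit


class Slow:
    """Toy chain node whose name embeds its child's name."""

    def __init__(self, child=None):
        self.child = child

    @property
    def name(self) -> str:
        # hasattr() evaluates the child's .name, and the f-string evaluates it
        # again, so the work doubles at every nesting level: O(2**depth).
        if hasattr(self.child, "name"):
            return f"Add({self.child.name})"
        return "Literal(1)"


class Fast(Slow):
    @property
    def name(self) -> str:
        # Fetch the child's name exactly once; the recursion is linear in depth.
        child_name = getattr(self.child, "name", None)
        return f"Add({child_name})" if child_name is not None else "Literal(1)"


def build(cls, depth):
    node = cls()
    for _ in range(depth):
        node = cls(node)
    return node


print("double lookup:", timeit.timeit(lambda: build(Slow, 20).name, number=1))
print("single lookup:", timeit.timeit(lambda: build(Fast, 20).name, number=1))
```

With `number=1`, the double-lookup timing roughly doubles for each extra level of nesting, while the single-lookup version stays flat.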
<code>
[start of ibis/expr/operations/core.py]
1 from __future__ import annotations
2
3 from abc import abstractmethod
4 from typing import Generic, Optional
5
6 from public import public
7 from typing_extensions import Any, Self, TypeVar
8
9 import ibis.expr.datashape as ds
10 import ibis.expr.datatypes as dt
11 import ibis.expr.rules as rlz
12 from ibis import util
13 from ibis.common.annotations import attribute
14 from ibis.common.graph import Node as Traversable
15 from ibis.common.grounds import Concrete
16 from ibis.common.patterns import Coercible, CoercionError
17 from ibis.common.typing import DefaultTypeVars
18
19
20 @public
21 class Node(Concrete, Traversable):
22 def equals(self, other) -> bool:
23 if not isinstance(other, Node):
24 raise TypeError(
25 f"invalid equality comparison between Node and {type(other)}"
26 )
27 return self.__cached_equals__(other)
28
29 @util.deprecated(as_of="4.0", instead="remove intermediate .op() calls")
30 def op(self) -> Self:
31 """Make `Node` backwards compatible with code that uses `Expr.op()`."""
32 return self
33
34 # Avoid custom repr for performance reasons
35 __repr__ = object.__repr__
36
37 # TODO(kszucs): hidrate the __children__ traversable attribute
38 # @attribute
39 # def __children__(self):
40 # return super().__children__
41
42
43 T = TypeVar("T", bound=dt.DataType, covariant=True)
44 S = TypeVar("S", bound=ds.DataShape, default=ds.Any, covariant=True)
45
46
47 @public
48 class Value(Node, Coercible, DefaultTypeVars, Generic[T, S]):
49 @classmethod
50 def __coerce__(
51 cls, value: Any, T: Optional[type] = None, S: Optional[type] = None
52 ) -> Self:
53 # note that S=Shape is unused here since the pattern will check the
54 # shape of the value expression after executing Value.__coerce__()
55 from ibis.expr.operations.generic import NULL, Literal
56 from ibis.expr.types import Expr
57
58 if isinstance(value, Expr):
59 value = value.op()
60
61 if isinstance(value, Value):
62 if value == NULL:
63 # treat the NULL literal the same as None to implicitly cast to
64 # the requested datatype if any
65 value = None
66 else:
67 return value
68
69 if T is dt.Integer:
70 dtype = dt.infer(int(value))
71 elif T is dt.Floating:
72 dtype = dt.infer(float(value))
73 else:
74 try:
75 dtype = dt.DataType.from_typehint(T)
76 except TypeError:
77 dtype = dt.infer(value)
78
79 try:
80 return Literal(value, dtype=dtype)
81 except TypeError:
82 raise CoercionError(f"Unable to coerce {value!r} to Value[{T!r}]")
83
84 # TODO(kszucs): cover it with tests
85 # TODO(kszucs): figure out how to represent not named arguments
86 @property
87 def name(self) -> str:
88 names = (arg.name for arg in self.__args__ if hasattr(arg, "name"))
89 return f"{self.__class__.__name__}({', '.join(names)})"
90
91 @property
92 @abstractmethod
93 def dtype(self) -> T:
94 """Ibis datatype of the produced value expression.
95
96 Returns
97 -------
98 dt.DataType
99
100 """
101
102 @property
103 @abstractmethod
104 def shape(self) -> S:
105 """Shape of the produced value expression.
106
107 Possible values are: "scalar" and "columnar"
108
109 Returns
110 -------
111 ds.Shape
112
113 """
114
115 @attribute
116 def relations(self):
117 """Set of relations the value node depends on."""
118 children = (n.relations for n in self.__children__ if isinstance(n, Value))
119 return frozenset().union(*children)
120
121 @property
122 @util.deprecated(as_of="7.0", instead="use .dtype property instead")
123 def output_dtype(self):
124 return self.dtype
125
126 @property
127 @util.deprecated(as_of="7.0", instead="use .shape property instead")
128 def output_shape(self):
129 return self.shape
130
131 def to_expr(self):
132 import ibis.expr.types as ir
133
134 if self.shape.is_columnar():
135 typename = self.dtype.column
136 else:
137 typename = self.dtype.scalar
138
139 return getattr(ir, typename)(self)
140
141
142 # convenience aliases
143 Scalar = Value[T, ds.Scalar]
144 Column = Value[T, ds.Columnar]
145
146
147 @public
148 class Alias(Value):
149 arg: Value
150 name: str
151
152 shape = rlz.shape_like("arg")
153 dtype = rlz.dtype_like("arg")
154
155
156 @public
157 class Unary(Value):
158 """A unary operation."""
159
160 arg: Value
161
162 @attribute
163 def shape(self) -> ds.DataShape:
164 return self.arg.shape
165
166 @attribute
167 def relations(self):
168 return self.arg.relations
169
170
171 @public
172 class Binary(Value):
173 """A binary operation."""
174
175 left: Value
176 right: Value
177
178 @attribute
179 def shape(self) -> ds.DataShape:
180 return max(self.left.shape, self.right.shape)
181
182 @attribute
183 def relations(self):
184 return self.left.relations | self.right.relations
185
186
187 @public
188 class Argument(Value):
189 name: str
190 shape: ds.DataShape
191 dtype: dt.DataType
192
193 @attribute
194 def param(self) -> str:
195 return f"__ibis_param_{self.name}__"
196
197
198 public(ValueOp=Value, UnaryOp=Unary, BinaryOp=Binary, Scalar=Scalar, Column=Column)
199
[end of ibis/expr/operations/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ibis/expr/operations/core.py b/ibis/expr/operations/core.py
--- a/ibis/expr/operations/core.py
+++ b/ibis/expr/operations/core.py
@@ -85,7 +85,11 @@
# TODO(kszucs): figure out how to represent not named arguments
@property
def name(self) -> str:
- names = (arg.name for arg in self.__args__ if hasattr(arg, "name"))
+ names = (
+ name
+ for arg in self.__args__
+ if (name := getattr(arg, "name", None)) is not None
+ )
return f"{self.__class__.__name__}({', '.join(names)})"
@property
| {"golden_diff": "diff --git a/ibis/expr/operations/core.py b/ibis/expr/operations/core.py\n--- a/ibis/expr/operations/core.py\n+++ b/ibis/expr/operations/core.py\n@@ -85,7 +85,11 @@\n # TODO(kszucs): figure out how to represent not named arguments\n @property\n def name(self) -> str:\n- names = (arg.name for arg in self.__args__ if hasattr(arg, \"name\"))\n+ names = (\n+ name\n+ for arg in self.__args__\n+ if (name := getattr(arg, \"name\", None)) is not None\n+ )\n return f\"{self.__class__.__name__}({', '.join(names)})\"\n \n @property\n", "issue": "bug: regression: slow to compute name of deeply nested expressions\n### What happened?\n\n```python\r\nimport ibis\r\n\r\nx = ibis.literal(1)\r\nfor i in range(100):\r\n x = x + 1\r\n # if I hardcode the name, this is instantaneous\r\n # x = x.name(f\"step_{i}\")\r\nx\r\n```\r\n\r\nIf I ctrl-c this after a few seconds, I see see eg\r\n\r\n```\r\nFile ~/code/ibis/ibis/expr/operations/core.py:89, in Value.name(self)\r\n 86 @property\r\n 87 def name(self) -> str:\r\n 88 names = (arg.name for arg in self.__args__ if hasattr(arg, \"name\"))\r\n---> 89 return f\"{self.__class__.__name__}({', '.join(names)})\"\r\n\r\nFile ~/code/ibis/ibis/expr/operations/core.py:88, in <genexpr>(.0)\r\n 86 @property\r\n 87 def name(self) -> str:\r\n---> 88 names = (arg.name for arg in self.__args__ if hasattr(arg, \"name\"))\r\n 89 return f\"{self.__class__.__name__}({', '.join(names)})\"\r\n\r\n [... skipping similar frames: <genexpr> at line 88 (97 times), Value.name at line 89 (97 times)]\r\n\r\nFile ~/code/ibis/ibis/expr/operations/core.py:89, in Value.name(self)\r\n 86 @property\r\n 87 def name(self) -> str:\r\n 88 names = (arg.name for arg in self.__args__ if hasattr(arg, \"name\"))\r\n---> 89 return f\"{self.__class__.__name__}({', '.join(names)})\"\r\n\r\nFile ~/code/ibis/ibis/expr/operations/core.py:88, in <genexpr>(.0)\r\n 86 @property\r\n 87 def name(self) -> str:\r\n---> 88 names = (arg.name for arg in self.__args__ if hasattr(arg, \"name\"))\r\n 89 return f\"{self.__class__.__name__}({', '.join(names)})\"\r\n```\n\n### What version of ibis are you using?\n\nmain 09b6adaeecf1b6388866856c795022cfca4b2679\n\n### What backend(s) are you using, if any?\n\nNA\n\n### Relevant log output\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom abc import abstractmethod\nfrom typing import Generic, Optional\n\nfrom public import public\nfrom typing_extensions import Any, Self, TypeVar\n\nimport ibis.expr.datashape as ds\nimport ibis.expr.datatypes as dt\nimport ibis.expr.rules as rlz\nfrom ibis import util\nfrom ibis.common.annotations import attribute\nfrom ibis.common.graph import Node as Traversable\nfrom ibis.common.grounds import Concrete\nfrom ibis.common.patterns import Coercible, CoercionError\nfrom ibis.common.typing import DefaultTypeVars\n\n\n@public\nclass Node(Concrete, Traversable):\n def equals(self, other) -> bool:\n if not isinstance(other, Node):\n raise TypeError(\n f\"invalid equality comparison between Node and {type(other)}\"\n )\n return self.__cached_equals__(other)\n\n @util.deprecated(as_of=\"4.0\", instead=\"remove intermediate .op() calls\")\n def op(self) -> Self:\n \"\"\"Make `Node` backwards compatible with code that uses `Expr.op()`.\"\"\"\n return self\n\n # Avoid custom repr for performance reasons\n __repr__ = object.__repr__\n\n # TODO(kszucs): hidrate the __children__ traversable attribute\n # @attribute\n # 
def __children__(self):\n # return super().__children__\n\n\nT = TypeVar(\"T\", bound=dt.DataType, covariant=True)\nS = TypeVar(\"S\", bound=ds.DataShape, default=ds.Any, covariant=True)\n\n\n@public\nclass Value(Node, Coercible, DefaultTypeVars, Generic[T, S]):\n @classmethod\n def __coerce__(\n cls, value: Any, T: Optional[type] = None, S: Optional[type] = None\n ) -> Self:\n # note that S=Shape is unused here since the pattern will check the\n # shape of the value expression after executing Value.__coerce__()\n from ibis.expr.operations.generic import NULL, Literal\n from ibis.expr.types import Expr\n\n if isinstance(value, Expr):\n value = value.op()\n\n if isinstance(value, Value):\n if value == NULL:\n # treat the NULL literal the same as None to implicitly cast to\n # the requested datatype if any\n value = None\n else:\n return value\n\n if T is dt.Integer:\n dtype = dt.infer(int(value))\n elif T is dt.Floating:\n dtype = dt.infer(float(value))\n else:\n try:\n dtype = dt.DataType.from_typehint(T)\n except TypeError:\n dtype = dt.infer(value)\n\n try:\n return Literal(value, dtype=dtype)\n except TypeError:\n raise CoercionError(f\"Unable to coerce {value!r} to Value[{T!r}]\")\n\n # TODO(kszucs): cover it with tests\n # TODO(kszucs): figure out how to represent not named arguments\n @property\n def name(self) -> str:\n names = (arg.name for arg in self.__args__ if hasattr(arg, \"name\"))\n return f\"{self.__class__.__name__}({', '.join(names)})\"\n\n @property\n @abstractmethod\n def dtype(self) -> T:\n \"\"\"Ibis datatype of the produced value expression.\n\n Returns\n -------\n dt.DataType\n\n \"\"\"\n\n @property\n @abstractmethod\n def shape(self) -> S:\n \"\"\"Shape of the produced value expression.\n\n Possible values are: \"scalar\" and \"columnar\"\n\n Returns\n -------\n ds.Shape\n\n \"\"\"\n\n @attribute\n def relations(self):\n \"\"\"Set of relations the value node depends on.\"\"\"\n children = (n.relations for n in self.__children__ if isinstance(n, Value))\n return frozenset().union(*children)\n\n @property\n @util.deprecated(as_of=\"7.0\", instead=\"use .dtype property instead\")\n def output_dtype(self):\n return self.dtype\n\n @property\n @util.deprecated(as_of=\"7.0\", instead=\"use .shape property instead\")\n def output_shape(self):\n return self.shape\n\n def to_expr(self):\n import ibis.expr.types as ir\n\n if self.shape.is_columnar():\n typename = self.dtype.column\n else:\n typename = self.dtype.scalar\n\n return getattr(ir, typename)(self)\n\n\n# convenience aliases\nScalar = Value[T, ds.Scalar]\nColumn = Value[T, ds.Columnar]\n\n\n@public\nclass Alias(Value):\n arg: Value\n name: str\n\n shape = rlz.shape_like(\"arg\")\n dtype = rlz.dtype_like(\"arg\")\n\n\n@public\nclass Unary(Value):\n \"\"\"A unary operation.\"\"\"\n\n arg: Value\n\n @attribute\n def shape(self) -> ds.DataShape:\n return self.arg.shape\n\n @attribute\n def relations(self):\n return self.arg.relations\n\n\n@public\nclass Binary(Value):\n \"\"\"A binary operation.\"\"\"\n\n left: Value\n right: Value\n\n @attribute\n def shape(self) -> ds.DataShape:\n return max(self.left.shape, self.right.shape)\n\n @attribute\n def relations(self):\n return self.left.relations | self.right.relations\n\n\n@public\nclass Argument(Value):\n name: str\n shape: ds.DataShape\n dtype: dt.DataType\n\n @attribute\n def param(self) -> str:\n return f\"__ibis_param_{self.name}__\"\n\n\npublic(ValueOp=Value, UnaryOp=Unary, BinaryOp=Binary, Scalar=Scalar, Column=Column)\n", "path": "ibis/expr/operations/core.py"}]} 
| 2,850 | 172 |
gh_patches_debug_2057 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-3295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
size parameter should not be required when state is absent in os_volume
##### Issue Type:
- Documentation Report
##### Plugin Name:
os_volume
##### Ansible Version:
```
2.4
```
##### Ansible Configuration:
##### Environment:
centos 6
##### Summary:
def _absent_volume(module, cloud):
try:
cloud.delete_volume(
name_or_id=module.params['display_name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
There is no need to pass the size parameter when calling os_volume with state: absent, because delete_volume does not take a size argument. The module documentation at http://docs.ansible.com/ansible/os_volume_module.html needs to be updated so that size is marked as required only when state is 'present', not when state is 'absent'.
##### Steps To Reproduce:
```yaml
- name: "Delete Volumes attached"
  os_volume:
    state: "absent"
    display_name: "{{ item.id }}"
    timeout: "360"
    auth:
      auth_url: "{{ openstack_auth_url }}"
      username: "{{ openstack_username }}"
      password: "{{ openstack_password }}"
      project_name: "{{ openstack_tenant }}"
  environment:
    OS_VOLUME_API_VERSION: "1"
    OS_IMAGE_API_VERSION: "1"
  security_groups: default
```

##### Expected Results:

##### Actual Results:
</issue>
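For what the module itself enforces, the hand-rolled `if state == 'present' and not module.params['size']` check in `main()` below already treats size as optional for deletes; it is the documentation block that still reads as if size were always required. A sketch of stating the same constraint declaratively, assuming `AnsibleModule`'s `required_if` option and meant to run under Ansible rather than standalone (an alternative illustration, not what the module currently does):

```python
from ansible.module_utils.basic import AnsibleModule


def main():
    argument_spec = dict(
        size=dict(type='int', default=None),
        display_name=dict(required=True, aliases=['name']),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        # size is demanded only when state is 'present'; absent needs just the name.
        required_if=[('state', 'present', ['size'])],
    )
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()
```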
<code>
[start of cloud/openstack/os_volume.py]
1 #!/usr/bin/python
2
3 # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
4 #
5 # This module is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # This software is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with this software. If not, see <http://www.gnu.org/licenses/>.
17
18
19 try:
20 import shade
21 HAS_SHADE = True
22 except ImportError:
23 HAS_SHADE = False
24
25
26 DOCUMENTATION = '''
27 ---
28 module: os_volume
29 short_description: Create/Delete Cinder Volumes
30 extends_documentation_fragment: openstack
31 version_added: "2.0"
32 author: "Monty Taylor (@emonty)"
33 description:
34 - Create or Remove cinder block storage volumes
35 options:
36 size:
37 description:
38 - Size of volume in GB
39 required: only when state is 'present'
40 default: None
41 display_name:
42 description:
43 - Name of volume
44 required: true
45 display_description:
46 description:
47 - String describing the volume
48 required: false
49 default: None
50 volume_type:
51 description:
52 - Volume type for volume
53 required: false
54 default: None
55 image:
56 description:
57 - Image name or id for boot from volume
58 required: false
59 default: None
60 snapshot_id:
61 description:
62 - Volume snapshot id to create from
63 required: false
64 default: None
65 state:
66 description:
67 - Should the resource be present or absent.
68 choices: [present, absent]
69 default: present
70 requirements:
71 - "python >= 2.6"
72 - "shade"
73 '''
74
75 EXAMPLES = '''
76 # Creates a new volume
77 - name: create a volume
78 hosts: localhost
79 tasks:
80 - name: create 40g test volume
81 os_volume:
82 state: present
83 cloud: mordred
84 availability_zone: az2
85 size: 40
86 display_name: test_volume
87 '''
88
89
90 def _present_volume(module, cloud):
91 if cloud.volume_exists(module.params['display_name']):
92 v = cloud.get_volume(module.params['display_name'])
93 module.exit_json(changed=False, id=v['id'], volume=v)
94
95 volume_args = dict(
96 size=module.params['size'],
97 volume_type=module.params['volume_type'],
98 display_name=module.params['display_name'],
99 display_description=module.params['display_description'],
100 snapshot_id=module.params['snapshot_id'],
101 availability_zone=module.params['availability_zone'],
102 )
103 if module.params['image']:
104 image_id = cloud.get_image_id(module.params['image'])
105 volume_args['imageRef'] = image_id
106
107 volume = cloud.create_volume(
108 wait=module.params['wait'], timeout=module.params['timeout'],
109 **volume_args)
110 module.exit_json(changed=True, id=volume['id'], volume=volume)
111
112
113 def _absent_volume(module, cloud):
114 try:
115 cloud.delete_volume(
116 name_or_id=module.params['display_name'],
117 wait=module.params['wait'],
118 timeout=module.params['timeout'])
119 except shade.OpenStackCloudTimeout:
120 module.exit_json(changed=False)
121 module.exit_json(changed=True)
122
123
124 def main():
125 argument_spec = openstack_full_argument_spec(
126 size=dict(default=None),
127 volume_type=dict(default=None),
128 display_name=dict(required=True, aliases=['name']),
129 display_description=dict(default=None, aliases=['description']),
130 image=dict(default=None),
131 snapshot_id=dict(default=None),
132 state=dict(default='present', choices=['absent', 'present']),
133 )
134 module_kwargs = openstack_module_kwargs(
135 mutually_exclusive=[
136 ['image', 'snapshot_id'],
137 ],
138 )
139 module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)
140
141 if not HAS_SHADE:
142 module.fail_json(msg='shade is required for this module')
143
144 state = module.params['state']
145
146 if state == 'present' and not module.params['size']:
147 module.fail_json(msg="Size is required when state is 'present'")
148
149 try:
150 cloud = shade.openstack_cloud(**module.params)
151 if state == 'present':
152 _present_volume(module, cloud)
153 if state == 'absent':
154 _absent_volume(module, cloud)
155 except shade.OpenStackCloudException as e:
156 module.fail_json(msg=str(e))
157
158 # this is magic, see lib/ansible/module_common.py
159 from ansible.module_utils.basic import *
160 from ansible.module_utils.openstack import *
161 if __name__ == '__main__':
162 main()
163
[end of cloud/openstack/os_volume.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py
--- a/cloud/openstack/os_volume.py
+++ b/cloud/openstack/os_volume.py
@@ -35,8 +35,9 @@
options:
size:
description:
- - Size of volume in GB
- required: only when state is 'present'
+ - Size of volume in GB. This parameter is required when the
+ I(state) parameter is 'present'.
+ required: false
default: None
display_name:
description:
| {"golden_diff": "diff --git a/cloud/openstack/os_volume.py b/cloud/openstack/os_volume.py\n--- a/cloud/openstack/os_volume.py\n+++ b/cloud/openstack/os_volume.py\n@@ -35,8 +35,9 @@\n options:\n size:\n description:\n- - Size of volume in GB\n- required: only when state is 'present'\n+ - Size of volume in GB. This parameter is required when the\n+ I(state) parameter is 'present'.\n+ required: false\n default: None\n display_name:\n description:\n", "issue": "size parameter required set to be no when state is absent in os_volume\n##### Issue Type:\n- Documentation Report\n##### Plugin Name:\n\nos_volume \n##### Ansible Version:\n\n```\n2.4\n```\n##### Ansible Configuration:\n\n<!-- Please mention any settings you've changed/added/removed in ansible.cfg\n(or using the ANSIBLE_* environment variables). -->\n##### Environment:\n\ncentos 6\n##### Summary:\n\ndef _absent_volume(module, cloud):\n try:\n cloud.delete_volume(\n name_or_id=module.params['display_name'],\n wait=module.params['wait'],\n timeout=module.params['timeout'])\n\nNo need to add size parameter while calling os_volume as delete_volume function does not need size parameter . http://docs.ansible.com/ansible/os_volume_module.html this document needs to be modified. \nsize parameter required set to be 'NO' when state is absent in os_volume \n##### Steps To Reproduce:\n\n<!-- For bugs, please show exactly how to reproduce the problem.\nFor new features, show how the feature would be used. -->\n\n``````\n<!-- (Paste example playbooks or commands here) -->\n``` - name: \"Delete Volumes attached\"\n os_volume:\n state: \"absent\"\n display_name: \"{{ item.id }}\"\n timeout: \"360\"\n auth:\n auth_url: \"{{ openstack_auth_url }}\"\n username: \"{{ openstack_username }}\"\n password: \"{{ openstack_password }}\"\n project_name: \"{{ openstack_tenant }}\"\n environment:\n OS_VOLUME_API_VERSION: \"1\"\n OS_IMAGE_API_VERSION: \"1\"\n security_groups: default\n\n<!-- You can also paste gist.github.com links for larger files. -->\n\n##### Expected Results:\n\n<!-- What did you expect to happen when running the steps above? -->\n\n##### Actual Results:\n\n<!-- What actually happened? If possible run with high verbosity (-vvvv) -->\n\n``````\n\n<!-- (Paste verbatim command output here) -->\n\n```\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. 
If not, see <http://www.gnu.org/licenses/>.\n\n\ntry:\n import shade\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nDOCUMENTATION = '''\n---\nmodule: os_volume\nshort_description: Create/Delete Cinder Volumes\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\nauthor: \"Monty Taylor (@emonty)\"\ndescription:\n - Create or Remove cinder block storage volumes\noptions:\n size:\n description:\n - Size of volume in GB\n required: only when state is 'present'\n default: None\n display_name:\n description:\n - Name of volume\n required: true\n display_description:\n description:\n - String describing the volume\n required: false\n default: None\n volume_type:\n description:\n - Volume type for volume\n required: false\n default: None\n image:\n description:\n - Image name or id for boot from volume\n required: false\n default: None\n snapshot_id:\n description:\n - Volume snapshot id to create from\n required: false\n default: None\n state:\n description:\n - Should the resource be present or absent.\n choices: [present, absent]\n default: present\nrequirements:\n - \"python >= 2.6\"\n - \"shade\"\n'''\n\nEXAMPLES = '''\n# Creates a new volume\n- name: create a volume\n hosts: localhost\n tasks:\n - name: create 40g test volume\n os_volume:\n state: present\n cloud: mordred\n availability_zone: az2\n size: 40\n display_name: test_volume\n'''\n\n\ndef _present_volume(module, cloud):\n if cloud.volume_exists(module.params['display_name']):\n v = cloud.get_volume(module.params['display_name'])\n module.exit_json(changed=False, id=v['id'], volume=v)\n\n volume_args = dict(\n size=module.params['size'],\n volume_type=module.params['volume_type'],\n display_name=module.params['display_name'],\n display_description=module.params['display_description'],\n snapshot_id=module.params['snapshot_id'],\n availability_zone=module.params['availability_zone'],\n )\n if module.params['image']:\n image_id = cloud.get_image_id(module.params['image'])\n volume_args['imageRef'] = image_id\n\n volume = cloud.create_volume(\n wait=module.params['wait'], timeout=module.params['timeout'],\n **volume_args)\n module.exit_json(changed=True, id=volume['id'], volume=volume)\n\n\ndef _absent_volume(module, cloud):\n try:\n cloud.delete_volume(\n name_or_id=module.params['display_name'],\n wait=module.params['wait'],\n timeout=module.params['timeout'])\n except shade.OpenStackCloudTimeout:\n module.exit_json(changed=False)\n module.exit_json(changed=True)\n\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n size=dict(default=None),\n volume_type=dict(default=None),\n display_name=dict(required=True, aliases=['name']),\n display_description=dict(default=None, aliases=['description']),\n image=dict(default=None),\n snapshot_id=dict(default=None),\n state=dict(default='present', choices=['absent', 'present']),\n )\n module_kwargs = openstack_module_kwargs(\n mutually_exclusive=[\n ['image', 'snapshot_id'],\n ],\n )\n module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n state = module.params['state']\n\n if state == 'present' and not module.params['size']:\n module.fail_json(msg=\"Size is required when state is 'present'\")\n\n try:\n cloud = shade.openstack_cloud(**module.params)\n if state == 'present':\n _present_volume(module, cloud)\n if state == 'absent':\n _absent_volume(module, cloud)\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e))\n\n# this is 
magic, see lib/ansible/module_common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_volume.py"}]} | 2,408 | 120 |
gh_patches_debug_34060 | rasdani/github-patches | git_diff | python-discord__bot-848 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pep command cannot fetch PEP 0.
[PEP 0](https://www.python.org/dev/peps/) is the index of all the other PEPs; it is not stored in the GitHub repo directly but generated automatically by a script there, which is why the command's GitHub lookup fails for it.
~~If implemented, scraping the site for it seems to be the better option, as generating it would require the whole up-to-date repo.~~
The header is a constant in the `pep0` package and can be taken from there in case of future changes,
or used as suggested in the comment below: https://github.com/python/peps/blob/master/pep0/constants.py#L10-L20
</issue>
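Because no `pep-0000.txt` or `pep-0000.rst` exists in the repo, a request for PEP 0 always falls through to the "PEP not found" branch of the command below. A sketch of building the PEP 0 embed from a hard-coded header instead of fetching it; the field values are written out here for illustration and would in practice mirror (or be imported from) the linked `pep0/constants.py`:

```python
from discord import Embed

PEP_0_URL = "https://www.python.org/dev/peps/"
# Illustrative stand-ins for the index header; keep in sync with pep0/constants.py.
PEP_0_HEADER = {
    "Title": "Index of Python Enhancement Proposals (PEPs)",
    "Status": "Active",
    "Type": "Informational",
}


def build_pep_zero_embed() -> Embed:
    """Build the PEP 0 embed without hitting the peps repo, which has no pep-0000 file."""
    embed = Embed(
        title=f"**PEP 0 - {PEP_0_HEADER['Title']}**",
        description=f"[Link]({PEP_0_URL})",
    )
    embed.set_thumbnail(url="https://www.python.org/static/opengraph-icon-200x200.png")
    for field in ("Status", "Type"):
        embed.add_field(name=field, value=PEP_0_HEADER[field])
    return embed
```

`pep_command` could then short-circuit on `pep_number == 0` with this embed before trying the GitHub URLs.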
<code>
[start of bot/cogs/utils.py]
1 import difflib
2 import logging
3 import re
4 import unicodedata
5 from asyncio import TimeoutError, sleep
6 from email.parser import HeaderParser
7 from io import StringIO
8 from typing import Tuple, Union
9
10 from dateutil import relativedelta
11 from discord import Colour, Embed, Message, Role
12 from discord.ext.commands import BadArgument, Cog, Context, command
13
14 from bot.bot import Bot
15 from bot.constants import Channels, MODERATION_ROLES, Mention, STAFF_ROLES
16 from bot.decorators import in_channel, with_role
17 from bot.utils.time import humanize_delta
18
19 log = logging.getLogger(__name__)
20
21 ZEN_OF_PYTHON = """\
22 Beautiful is better than ugly.
23 Explicit is better than implicit.
24 Simple is better than complex.
25 Complex is better than complicated.
26 Flat is better than nested.
27 Sparse is better than dense.
28 Readability counts.
29 Special cases aren't special enough to break the rules.
30 Although practicality beats purity.
31 Errors should never pass silently.
32 Unless explicitly silenced.
33 In the face of ambiguity, refuse the temptation to guess.
34 There should be one-- and preferably only one --obvious way to do it.
35 Although that way may not be obvious at first unless you're Dutch.
36 Now is better than never.
37 Although never is often better than *right* now.
38 If the implementation is hard to explain, it's a bad idea.
39 If the implementation is easy to explain, it may be a good idea.
40 Namespaces are one honking great idea -- let's do more of those!
41 """
42
43
44 class Utils(Cog):
45 """A selection of utilities which don't have a clear category."""
46
47 def __init__(self, bot: Bot):
48 self.bot = bot
49
50 self.base_pep_url = "http://www.python.org/dev/peps/pep-"
51 self.base_github_pep_url = "https://raw.githubusercontent.com/python/peps/master/pep-"
52
53 @command(name='pep', aliases=('get_pep', 'p'))
54 async def pep_command(self, ctx: Context, pep_number: str) -> None:
55 """Fetches information about a PEP and sends it to the channel."""
56 if pep_number.isdigit():
57 pep_number = int(pep_number)
58 else:
59 await ctx.invoke(self.bot.get_command("help"), "pep")
60 return
61
62 possible_extensions = ['.txt', '.rst']
63 found_pep = False
64 for extension in possible_extensions:
65 # Attempt to fetch the PEP
66 pep_url = f"{self.base_github_pep_url}{pep_number:04}{extension}"
67 log.trace(f"Requesting PEP {pep_number} with {pep_url}")
68 response = await self.bot.http_session.get(pep_url)
69
70 if response.status == 200:
71 log.trace("PEP found")
72 found_pep = True
73
74 pep_content = await response.text()
75
76 # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179
77 pep_header = HeaderParser().parse(StringIO(pep_content))
78
79 # Assemble the embed
80 pep_embed = Embed(
81 title=f"**PEP {pep_number} - {pep_header['Title']}**",
82 description=f"[Link]({self.base_pep_url}{pep_number:04})",
83 )
84
85 pep_embed.set_thumbnail(url="https://www.python.org/static/opengraph-icon-200x200.png")
86
87 # Add the interesting information
88 fields_to_check = ("Status", "Python-Version", "Created", "Type")
89 for field in fields_to_check:
90 # Check for a PEP metadata field that is present but has an empty value
91 # embed field values can't contain an empty string
92 if pep_header.get(field, ""):
93 pep_embed.add_field(name=field, value=pep_header[field])
94
95 elif response.status != 404:
96 # any response except 200 and 404 is expected
97 found_pep = True # actually not, but it's easier to display this way
98 log.trace(f"The user requested PEP {pep_number}, but the response had an unexpected status code: "
99 f"{response.status}.\n{response.text}")
100
101 error_message = "Unexpected HTTP error during PEP search. Please let us know."
102 pep_embed = Embed(title="Unexpected error", description=error_message)
103 pep_embed.colour = Colour.red()
104 break
105
106 if not found_pep:
107 log.trace("PEP was not found")
108 not_found = f"PEP {pep_number} does not exist."
109 pep_embed = Embed(title="PEP not found", description=not_found)
110 pep_embed.colour = Colour.red()
111
112 await ctx.message.channel.send(embed=pep_embed)
113
114 @command()
115 @in_channel(Channels.bot_commands, bypass_roles=STAFF_ROLES)
116 async def charinfo(self, ctx: Context, *, characters: str) -> None:
117 """Shows you information on up to 25 unicode characters."""
118 match = re.match(r"<(a?):(\w+):(\d+)>", characters)
119 if match:
120 embed = Embed(
121 title="Non-Character Detected",
122 description=(
123 "Only unicode characters can be processed, but a custom Discord emoji "
124 "was found. Please remove it and try again."
125 )
126 )
127 embed.colour = Colour.red()
128 await ctx.send(embed=embed)
129 return
130
131 if len(characters) > 25:
132 embed = Embed(title=f"Too many characters ({len(characters)}/25)")
133 embed.colour = Colour.red()
134 await ctx.send(embed=embed)
135 return
136
137 def get_info(char: str) -> Tuple[str, str]:
138 digit = f"{ord(char):x}"
139 if len(digit) <= 4:
140 u_code = f"\\u{digit:>04}"
141 else:
142 u_code = f"\\U{digit:>08}"
143 url = f"https://www.compart.com/en/unicode/U+{digit:>04}"
144 name = f"[{unicodedata.name(char, '')}]({url})"
145 info = f"`{u_code.ljust(10)}`: {name} - {char}"
146 return info, u_code
147
148 charlist, rawlist = zip(*(get_info(c) for c in characters))
149
150 embed = Embed(description="\n".join(charlist))
151 embed.set_author(name="Character Info")
152
153 if len(characters) > 1:
154 embed.add_field(name='Raw', value=f"`{''.join(rawlist)}`", inline=False)
155
156 await ctx.send(embed=embed)
157
158 @command()
159 @with_role(*MODERATION_ROLES)
160 async def mention(self, ctx: Context, *, role: Role) -> None:
161 """Set a role to be mentionable for a limited time."""
162 if role.mentionable:
163 await ctx.send(f"{role} is already mentionable!")
164 return
165
166 await role.edit(reason=f"Role unlocked by {ctx.author}", mentionable=True)
167
168 human_time = humanize_delta(relativedelta.relativedelta(seconds=Mention.message_timeout))
169 await ctx.send(
170 f"{role} has been made mentionable. I will reset it in {human_time}, or when someone mentions this role."
171 )
172
173 def check(m: Message) -> bool:
174 """Checks that the message contains the role mention."""
175 return role in m.role_mentions
176
177 try:
178 msg = await self.bot.wait_for("message", check=check, timeout=Mention.message_timeout)
179 except TimeoutError:
180 await role.edit(mentionable=False, reason="Automatic role lock - timeout.")
181 await ctx.send(f"{ctx.author.mention}, you took too long. I have reset {role} to be unmentionable.")
182 return
183
184 if any(r.id in MODERATION_ROLES for r in msg.author.roles):
185 await sleep(Mention.reset_delay)
186 await role.edit(mentionable=False, reason=f"Automatic role lock by {msg.author}")
187 await ctx.send(
188 f"{ctx.author.mention}, I have reset {role} to be unmentionable as "
189 f"{msg.author if msg.author != ctx.author else 'you'} sent a message mentioning it."
190 )
191 return
192
193 await role.edit(mentionable=False, reason=f"Automatic role lock - unauthorised use by {msg.author}")
194 await ctx.send(
195 f"{ctx.author.mention}, I have reset {role} to be unmentionable "
196 f"as I detected unauthorised use by {msg.author} (ID: {msg.author.id})."
197 )
198
199 @command()
200 async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:
201 """
202 Show the Zen of Python.
203
204 Without any arguments, the full Zen will be produced.
205 If an integer is provided, the line with that index will be produced.
206 If a string is provided, the line which matches best will be produced.
207 """
208 embed = Embed(
209 colour=Colour.blurple(),
210 title="The Zen of Python",
211 description=ZEN_OF_PYTHON
212 )
213
214 if search_value is None:
215 embed.title += ", by Tim Peters"
216 await ctx.send(embed=embed)
217 return
218
219 zen_lines = ZEN_OF_PYTHON.splitlines()
220
221 # handle if it's an index int
222 if isinstance(search_value, int):
223 upper_bound = len(zen_lines) - 1
224 lower_bound = -1 * upper_bound
225 if not (lower_bound <= search_value <= upper_bound):
226 raise BadArgument(f"Please provide an index between {lower_bound} and {upper_bound}.")
227
228 embed.title += f" (line {search_value % len(zen_lines)}):"
229 embed.description = zen_lines[search_value]
230 await ctx.send(embed=embed)
231 return
232
233 # handle if it's a search string
234 matcher = difflib.SequenceMatcher(None, search_value.lower())
235
236 best_match = ""
237 match_index = 0
238 best_ratio = 0
239
240 for index, line in enumerate(zen_lines):
241 matcher.set_seq2(line.lower())
242
243 # the match ratio needs to be adjusted because, naturally,
244 # longer lines will have worse ratios than shorter lines when
245 # fuzzy searching for keywords. this seems to work okay.
246 adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()
247
248 if adjusted_ratio > best_ratio:
249 best_ratio = adjusted_ratio
250 best_match = line
251 match_index = index
252
253 if not best_match:
254 raise BadArgument("I didn't get a match! Please try again with a different search term.")
255
256 embed.title += f" (line {match_index}):"
257 embed.description = best_match
258 await ctx.send(embed=embed)
259
260 @command(aliases=("poll",))
261 @with_role(*MODERATION_ROLES)
262 async def vote(self, ctx: Context, title: str, *options: str) -> None:
263 """
264 Build a quick voting poll with matching reactions with the provided options.
265
266 A maximum of 20 options can be provided, as Discord supports a max of 20
267 reactions on a single message.
268 """
269 if len(options) < 2:
270 raise BadArgument("Please provide at least 2 options.")
271 if len(options) > 20:
272 raise BadArgument("I can only handle 20 options!")
273
274 codepoint_start = 127462 # represents "regional_indicator_a" unicode value
275 options = {chr(i): f"{chr(i)} - {v}" for i, v in enumerate(options, start=codepoint_start)}
276 embed = Embed(title=title, description="\n".join(options.values()))
277 message = await ctx.send(embed=embed)
278 for reaction in options:
279 await message.add_reaction(reaction)
280
281
282 def setup(bot: Bot) -> None:
283 """Load the Utils cog."""
284 bot.add_cog(Utils(bot))
285
[end of bot/cogs/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/utils.py b/bot/cogs/utils.py
--- a/bot/cogs/utils.py
+++ b/bot/cogs/utils.py
@@ -40,6 +40,8 @@
Namespaces are one honking great idea -- let's do more of those!
"""
+ICON_URL = "https://www.python.org/static/opengraph-icon-200x200.png"
+
class Utils(Cog):
"""A selection of utilities which don't have a clear category."""
@@ -59,6 +61,10 @@
await ctx.invoke(self.bot.get_command("help"), "pep")
return
+ # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.
+ if pep_number == 0:
+ return await self.send_pep_zero(ctx)
+
possible_extensions = ['.txt', '.rst']
found_pep = False
for extension in possible_extensions:
@@ -82,7 +88,7 @@
description=f"[Link]({self.base_pep_url}{pep_number:04})",
)
- pep_embed.set_thumbnail(url="https://www.python.org/static/opengraph-icon-200x200.png")
+ pep_embed.set_thumbnail(url=ICON_URL)
# Add the interesting information
fields_to_check = ("Status", "Python-Version", "Created", "Type")
@@ -278,6 +284,19 @@
for reaction in options:
await message.add_reaction(reaction)
+ async def send_pep_zero(self, ctx: Context) -> None:
+ """Send information about PEP 0."""
+ pep_embed = Embed(
+ title=f"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**",
+ description=f"[Link](https://www.python.org/dev/peps/)"
+ )
+ pep_embed.set_thumbnail(url=ICON_URL)
+ pep_embed.add_field(name="Status", value="Active")
+ pep_embed.add_field(name="Created", value="13-Jul-2000")
+ pep_embed.add_field(name="Type", value="Informational")
+
+ await ctx.send(embed=pep_embed)
+
def setup(bot: Bot) -> None:
"""Load the Utils cog."""
| {"golden_diff": "diff --git a/bot/cogs/utils.py b/bot/cogs/utils.py\n--- a/bot/cogs/utils.py\n+++ b/bot/cogs/utils.py\n@@ -40,6 +40,8 @@\n Namespaces are one honking great idea -- let's do more of those!\n \"\"\"\n \n+ICON_URL = \"https://www.python.org/static/opengraph-icon-200x200.png\"\n+\n \n class Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n@@ -59,6 +61,10 @@\n await ctx.invoke(self.bot.get_command(\"help\"), \"pep\")\n return\n \n+ # Handle PEP 0 directly because it's not in .rst or .txt so it can't be accessed like other PEPs.\n+ if pep_number == 0:\n+ return await self.send_pep_zero(ctx)\n+\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n@@ -82,7 +88,7 @@\n description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n )\n \n- pep_embed.set_thumbnail(url=\"https://www.python.org/static/opengraph-icon-200x200.png\")\n+ pep_embed.set_thumbnail(url=ICON_URL)\n \n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n@@ -278,6 +284,19 @@\n for reaction in options:\n await message.add_reaction(reaction)\n \n+ async def send_pep_zero(self, ctx: Context) -> None:\n+ \"\"\"Send information about PEP 0.\"\"\"\n+ pep_embed = Embed(\n+ title=f\"**PEP 0 - Index of Python Enhancement Proposals (PEPs)**\",\n+ description=f\"[Link](https://www.python.org/dev/peps/)\"\n+ )\n+ pep_embed.set_thumbnail(url=ICON_URL)\n+ pep_embed.add_field(name=\"Status\", value=\"Active\")\n+ pep_embed.add_field(name=\"Created\", value=\"13-Jul-2000\")\n+ pep_embed.add_field(name=\"Type\", value=\"Informational\")\n+\n+ await ctx.send(embed=pep_embed)\n+\n \n def setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n", "issue": "Pep command cannot fetch PEP 0.\nThe [PEP 0](https://www.python.org/dev/peps/) is an index of all the other PEPs;\r\ninstead of being in the github repo directly, it's generated automatically through a script present there.\r\n~~If implemented, scraping the site for it seems to be the better option as generating it would require the whole up to date repo.~~\r\nThe header is a constant in the pep0 package and can be gotten from there in case of future changes \r\n or as suggested in the comment below https://github.com/python/peps/blob/master/pep0/constants.py#L10-L20\n", "before_files": [{"content": "import difflib\nimport logging\nimport re\nimport unicodedata\nfrom asyncio import TimeoutError, sleep\nfrom email.parser import HeaderParser\nfrom io import StringIO\nfrom typing import Tuple, Union\n\nfrom dateutil import relativedelta\nfrom discord import Colour, Embed, Message, Role\nfrom discord.ext.commands import BadArgument, Cog, Context, command\n\nfrom bot.bot import Bot\nfrom bot.constants import Channels, MODERATION_ROLES, Mention, STAFF_ROLES\nfrom bot.decorators import in_channel, with_role\nfrom bot.utils.time import humanize_delta\n\nlog = logging.getLogger(__name__)\n\nZEN_OF_PYTHON = \"\"\"\\\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be 
obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than *right* now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\"\"\"\n\n\nclass Utils(Cog):\n \"\"\"A selection of utilities which don't have a clear category.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n self.base_pep_url = \"http://www.python.org/dev/peps/pep-\"\n self.base_github_pep_url = \"https://raw.githubusercontent.com/python/peps/master/pep-\"\n\n @command(name='pep', aliases=('get_pep', 'p'))\n async def pep_command(self, ctx: Context, pep_number: str) -> None:\n \"\"\"Fetches information about a PEP and sends it to the channel.\"\"\"\n if pep_number.isdigit():\n pep_number = int(pep_number)\n else:\n await ctx.invoke(self.bot.get_command(\"help\"), \"pep\")\n return\n\n possible_extensions = ['.txt', '.rst']\n found_pep = False\n for extension in possible_extensions:\n # Attempt to fetch the PEP\n pep_url = f\"{self.base_github_pep_url}{pep_number:04}{extension}\"\n log.trace(f\"Requesting PEP {pep_number} with {pep_url}\")\n response = await self.bot.http_session.get(pep_url)\n\n if response.status == 200:\n log.trace(\"PEP found\")\n found_pep = True\n\n pep_content = await response.text()\n\n # Taken from https://github.com/python/peps/blob/master/pep0/pep.py#L179\n pep_header = HeaderParser().parse(StringIO(pep_content))\n\n # Assemble the embed\n pep_embed = Embed(\n title=f\"**PEP {pep_number} - {pep_header['Title']}**\",\n description=f\"[Link]({self.base_pep_url}{pep_number:04})\",\n )\n\n pep_embed.set_thumbnail(url=\"https://www.python.org/static/opengraph-icon-200x200.png\")\n\n # Add the interesting information\n fields_to_check = (\"Status\", \"Python-Version\", \"Created\", \"Type\")\n for field in fields_to_check:\n # Check for a PEP metadata field that is present but has an empty value\n # embed field values can't contain an empty string\n if pep_header.get(field, \"\"):\n pep_embed.add_field(name=field, value=pep_header[field])\n\n elif response.status != 404:\n # any response except 200 and 404 is expected\n found_pep = True # actually not, but it's easier to display this way\n log.trace(f\"The user requested PEP {pep_number}, but the response had an unexpected status code: \"\n f\"{response.status}.\\n{response.text}\")\n\n error_message = \"Unexpected HTTP error during PEP search. Please let us know.\"\n pep_embed = Embed(title=\"Unexpected error\", description=error_message)\n pep_embed.colour = Colour.red()\n break\n\n if not found_pep:\n log.trace(\"PEP was not found\")\n not_found = f\"PEP {pep_number} does not exist.\"\n pep_embed = Embed(title=\"PEP not found\", description=not_found)\n pep_embed.colour = Colour.red()\n\n await ctx.message.channel.send(embed=pep_embed)\n\n @command()\n @in_channel(Channels.bot_commands, bypass_roles=STAFF_ROLES)\n async def charinfo(self, ctx: Context, *, characters: str) -> None:\n \"\"\"Shows you information on up to 25 unicode characters.\"\"\"\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n embed = Embed(\n title=\"Non-Character Detected\",\n description=(\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n )\n embed.colour = Colour.red()\n await ctx.send(embed=embed)\n return\n\n if len(characters) > 25:\n embed = Embed(title=f\"Too many characters ({len(characters)}/25)\")\n embed.colour = Colour.red()\n await ctx.send(embed=embed)\n return\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {char}\"\n return info, u_code\n\n charlist, rawlist = zip(*(get_info(c) for c in characters))\n\n embed = Embed(description=\"\\n\".join(charlist))\n embed.set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n embed.add_field(name='Raw', value=f\"`{''.join(rawlist)}`\", inline=False)\n\n await ctx.send(embed=embed)\n\n @command()\n @with_role(*MODERATION_ROLES)\n async def mention(self, ctx: Context, *, role: Role) -> None:\n \"\"\"Set a role to be mentionable for a limited time.\"\"\"\n if role.mentionable:\n await ctx.send(f\"{role} is already mentionable!\")\n return\n\n await role.edit(reason=f\"Role unlocked by {ctx.author}\", mentionable=True)\n\n human_time = humanize_delta(relativedelta.relativedelta(seconds=Mention.message_timeout))\n await ctx.send(\n f\"{role} has been made mentionable. I will reset it in {human_time}, or when someone mentions this role.\"\n )\n\n def check(m: Message) -> bool:\n \"\"\"Checks that the message contains the role mention.\"\"\"\n return role in m.role_mentions\n\n try:\n msg = await self.bot.wait_for(\"message\", check=check, timeout=Mention.message_timeout)\n except TimeoutError:\n await role.edit(mentionable=False, reason=\"Automatic role lock - timeout.\")\n await ctx.send(f\"{ctx.author.mention}, you took too long. 
I have reset {role} to be unmentionable.\")\n return\n\n if any(r.id in MODERATION_ROLES for r in msg.author.roles):\n await sleep(Mention.reset_delay)\n await role.edit(mentionable=False, reason=f\"Automatic role lock by {msg.author}\")\n await ctx.send(\n f\"{ctx.author.mention}, I have reset {role} to be unmentionable as \"\n f\"{msg.author if msg.author != ctx.author else 'you'} sent a message mentioning it.\"\n )\n return\n\n await role.edit(mentionable=False, reason=f\"Automatic role lock - unauthorised use by {msg.author}\")\n await ctx.send(\n f\"{ctx.author.mention}, I have reset {role} to be unmentionable \"\n f\"as I detected unauthorised use by {msg.author} (ID: {msg.author.id}).\"\n )\n\n @command()\n async def zen(self, ctx: Context, *, search_value: Union[int, str, None] = None) -> None:\n \"\"\"\n Show the Zen of Python.\n\n Without any arguments, the full Zen will be produced.\n If an integer is provided, the line with that index will be produced.\n If a string is provided, the line which matches best will be produced.\n \"\"\"\n embed = Embed(\n colour=Colour.blurple(),\n title=\"The Zen of Python\",\n description=ZEN_OF_PYTHON\n )\n\n if search_value is None:\n embed.title += \", by Tim Peters\"\n await ctx.send(embed=embed)\n return\n\n zen_lines = ZEN_OF_PYTHON.splitlines()\n\n # handle if it's an index int\n if isinstance(search_value, int):\n upper_bound = len(zen_lines) - 1\n lower_bound = -1 * upper_bound\n if not (lower_bound <= search_value <= upper_bound):\n raise BadArgument(f\"Please provide an index between {lower_bound} and {upper_bound}.\")\n\n embed.title += f\" (line {search_value % len(zen_lines)}):\"\n embed.description = zen_lines[search_value]\n await ctx.send(embed=embed)\n return\n\n # handle if it's a search string\n matcher = difflib.SequenceMatcher(None, search_value.lower())\n\n best_match = \"\"\n match_index = 0\n best_ratio = 0\n\n for index, line in enumerate(zen_lines):\n matcher.set_seq2(line.lower())\n\n # the match ratio needs to be adjusted because, naturally,\n # longer lines will have worse ratios than shorter lines when\n # fuzzy searching for keywords. this seems to work okay.\n adjusted_ratio = (len(line) - 5) ** 0.5 * matcher.ratio()\n\n if adjusted_ratio > best_ratio:\n best_ratio = adjusted_ratio\n best_match = line\n match_index = index\n\n if not best_match:\n raise BadArgument(\"I didn't get a match! 
Please try again with a different search term.\")\n\n embed.title += f\" (line {match_index}):\"\n embed.description = best_match\n await ctx.send(embed=embed)\n\n @command(aliases=(\"poll\",))\n @with_role(*MODERATION_ROLES)\n async def vote(self, ctx: Context, title: str, *options: str) -> None:\n \"\"\"\n Build a quick voting poll with matching reactions with the provided options.\n\n A maximum of 20 options can be provided, as Discord supports a max of 20\n reactions on a single message.\n \"\"\"\n if len(options) < 2:\n raise BadArgument(\"Please provide at least 2 options.\")\n if len(options) > 20:\n raise BadArgument(\"I can only handle 20 options!\")\n\n codepoint_start = 127462 # represents \"regional_indicator_a\" unicode value\n options = {chr(i): f\"{chr(i)} - {v}\" for i, v in enumerate(options, start=codepoint_start)}\n embed = Embed(title=title, description=\"\\n\".join(options.values()))\n message = await ctx.send(embed=embed)\n for reaction in options:\n await message.add_reaction(reaction)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the Utils cog.\"\"\"\n bot.add_cog(Utils(bot))\n", "path": "bot/cogs/utils.py"}]} | 4,047 | 519 |
gh_patches_debug_14535 | rasdani/github-patches | git_diff | getsentry__sentry-15470 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch license to Apache 2
Let's do it.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 """
3 Sentry
4 ======
5
6 Sentry is a realtime event logging and aggregation platform. It specializes
7 in monitoring errors and extracting all the information needed to do a proper
8 post-mortem without any of the hassle of the standard user feedback loop.
9
10 Sentry is a Server
11 ------------------
12
13 The Sentry package, at its core, is just a simple server and web UI. It will
14 handle authentication clients (such as `the Python one
15 <https://github.com/getsentry/sentry-python>`_)
16 and all of the logic behind storage and aggregation.
17
18 That said, Sentry is not limited to Python. The primary implementation is in
19 Python, but it contains a full API for sending events from any language, in
20 any application.
21
22 :copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.
23 :license: BSD, see LICENSE for more details.
24 """
25 from __future__ import absolute_import
26
27 # if sys.version_info[:2] != (2, 7):
28 # print 'Error: Sentry requires Python 2.7'
29 # sys.exit(1)
30
31 import os
32 import os.path
33 import sys
34
35 from distutils.command.build import build as BuildCommand
36 from setuptools import setup, find_packages
37 from setuptools.command.sdist import sdist as SDistCommand
38 from setuptools.command.develop import develop as DevelopCommand
39
40 ROOT = os.path.realpath(os.path.join(os.path.dirname(sys.modules["__main__"].__file__)))
41
42 # Add Sentry to path so we can import distutils
43 sys.path.insert(0, os.path.join(ROOT, "src"))
44
45 from sentry.utils.distutils import (
46 BuildAssetsCommand,
47 BuildIntegrationDocsCommand,
48 BuildJsSdkRegistryCommand,
49 )
50
51 # The version of sentry
52 VERSION = "10.0.0.dev0"
53
54 # Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
55 # in multiprocessing/util.py _exit_function when running `python
56 # setup.py test` (see
57 # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
58 for m in ("multiprocessing", "billiard"):
59 try:
60 __import__(m)
61 except ImportError:
62 pass
63
64 IS_LIGHT_BUILD = os.environ.get("SENTRY_LIGHT_BUILD") == "1"
65
66 # we use pip requirements files to improve Docker layer caching
67
68
69 def get_requirements(env):
70 with open(u"requirements-{}.txt".format(env)) as fp:
71 return [x.strip() for x in fp.read().split("\n") if not x.startswith("#")]
72
73
74 install_requires = get_requirements("base")
75 dev_requires = get_requirements("dev")
76
77 # override django version in requirements file if DJANGO_VERSION is set
78 DJANGO_VERSION = os.environ.get("DJANGO_VERSION")
79 if DJANGO_VERSION:
80 install_requires = [
81 u"Django{}".format(DJANGO_VERSION) if r.startswith("Django>=") else r
82 for r in install_requires
83 ]
84
85
86 class SentrySDistCommand(SDistCommand):
87 # If we are not a light build we want to also execute build_assets as
88 # part of our source build pipeline.
89 if not IS_LIGHT_BUILD:
90 sub_commands = SDistCommand.sub_commands + [
91 ("build_integration_docs", None),
92 ("build_assets", None),
93 ("build_js_sdk_registry", None),
94 ]
95
96
97 class SentryBuildCommand(BuildCommand):
98 def run(self):
99 if not IS_LIGHT_BUILD:
100 self.run_command("build_integration_docs")
101 self.run_command("build_assets")
102 self.run_command("build_js_sdk_registry")
103 BuildCommand.run(self)
104
105
106 class SentryDevelopCommand(DevelopCommand):
107 def run(self):
108 DevelopCommand.run(self)
109 if not IS_LIGHT_BUILD:
110 self.run_command("build_integration_docs")
111 self.run_command("build_assets")
112 self.run_command("build_js_sdk_registry")
113
114
115 cmdclass = {
116 "sdist": SentrySDistCommand,
117 "develop": SentryDevelopCommand,
118 "build": SentryBuildCommand,
119 "build_assets": BuildAssetsCommand,
120 "build_integration_docs": BuildIntegrationDocsCommand,
121 "build_js_sdk_registry": BuildJsSdkRegistryCommand,
122 }
123
124
125 setup(
126 name="sentry",
127 version=VERSION,
128 author="Sentry",
129 author_email="[email protected]",
130 url="https://sentry.io",
131 description="A realtime logging and aggregation server.",
132 long_description=open(os.path.join(ROOT, "README.rst")).read(),
133 package_dir={"": "src"},
134 packages=find_packages("src"),
135 zip_safe=False,
136 install_requires=install_requires,
137 extras_require={"dev": dev_requires, "postgres": []},
138 cmdclass=cmdclass,
139 license="BSD",
140 include_package_data=True,
141 entry_points={
142 "console_scripts": ["sentry = sentry.runner:main"],
143 "sentry.new_apps": [
144 "sessionstack = new_sentry_plugins.sessionstack",
145 ],
146 "sentry.test_only_apps": [
147 "jira_ac = test_only_plugins.jira_ac",
148 "jira = test_only_plugins.jira",
149 ],
150 "sentry.new_plugins": [
151 "amazon_sqs = new_sentry_plugins.amazon_sqs.plugin:AmazonSQSPlugin",
152 "sessionstack = new_sentry_plugins.sessionstack.plugin:SessionStackPlugin",
153 ],
154 "sentry.test_only_plugins": [
155 "asana = test_only_plugins.asana.plugin:AsanaPlugin",
156 "bitbucket = test_only_plugins.bitbucket.plugin:BitbucketPlugin",
157 "clubhouse = test_only_plugins.clubhouse.plugin:ClubhousePlugin",
158 "github = test_only_plugins.github.plugin:GitHubPlugin",
159 "gitlab = test_only_plugins.gitlab.plugin:GitLabPlugin",
160 "heroku = test_only_plugins.heroku.plugin:HerokuPlugin",
161 "jira = test_only_plugins.jira.plugin:JiraPlugin",
162 "jira_ac = test_only_plugins.jira_ac.plugin:JiraACPlugin",
163 "pagerduty = test_only_plugins.pagerduty.plugin:PagerDutyPlugin",
164 "phabricator = test_only_plugins.phabricator.plugin:PhabricatorPlugin",
165 "pivotal = test_only_plugins.pivotal.plugin:PivotalPlugin",
166 "pushover = test_only_plugins.pushover.plugin:PushoverPlugin",
167 "segment = test_only_plugins.segment.plugin:SegmentPlugin",
168 "slack = test_only_plugins.slack.plugin:SlackPlugin",
169 "splunk = test_only_plugins.splunk.plugin:SplunkPlugin",
170 "victorops = test_only_plugins.victorops.plugin:VictorOpsPlugin",
171 "vsts = test_only_plugins.vsts.plugin:VstsPlugin",
172 ],
173 },
174 classifiers=[
175 "Framework :: Django",
176 "Intended Audience :: Developers",
177 "Intended Audience :: System Administrators",
178 "Operating System :: POSIX :: Linux",
179 "Programming Language :: Python :: 2",
180 "Programming Language :: Python :: 2.7",
181 "Programming Language :: Python :: 2 :: Only",
182 "Topic :: Software Development",
183 ],
184 )
185
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -136,7 +136,7 @@
install_requires=install_requires,
extras_require={"dev": dev_requires, "postgres": []},
cmdclass=cmdclass,
- license="BSD",
+ license="BSL-1.1",
include_package_data=True,
entry_points={
"console_scripts": ["sentry = sentry.runner:main"],
@@ -180,5 +180,6 @@
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"Topic :: Software Development",
+ "License :: Other/Proprietary License",
],
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -136,7 +136,7 @@\n install_requires=install_requires,\n extras_require={\"dev\": dev_requires, \"postgres\": []},\n cmdclass=cmdclass,\n- license=\"BSD\",\n+ license=\"BSL-1.1\",\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\"sentry = sentry.runner:main\"],\n@@ -180,5 +180,6 @@\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 2 :: Only\",\n \"Topic :: Software Development\",\n+ \"License :: Other/Proprietary License\",\n ],\n )\n", "issue": "Switch license to Apache 2\nLet's do it.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nSentry\n======\n\nSentry is a realtime event logging and aggregation platform. It specializes\nin monitoring errors and extracting all the information needed to do a proper\npost-mortem without any of the hassle of the standard user feedback loop.\n\nSentry is a Server\n------------------\n\nThe Sentry package, at its core, is just a simple server and web UI. It will\nhandle authentication clients (such as `the Python one\n<https://github.com/getsentry/sentry-python>`_)\nand all of the logic behind storage and aggregation.\n\nThat said, Sentry is not limited to Python. The primary implementation is in\nPython, but it contains a full API for sending events from any language, in\nany application.\n\n:copyright: (c) 2011-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import\n\n# if sys.version_info[:2] != (2, 7):\n# print 'Error: Sentry requires Python 2.7'\n# sys.exit(1)\n\nimport os\nimport os.path\nimport sys\n\nfrom distutils.command.build import build as BuildCommand\nfrom setuptools import setup, find_packages\nfrom setuptools.command.sdist import sdist as SDistCommand\nfrom setuptools.command.develop import develop as DevelopCommand\n\nROOT = os.path.realpath(os.path.join(os.path.dirname(sys.modules[\"__main__\"].__file__)))\n\n# Add Sentry to path so we can import distutils\nsys.path.insert(0, os.path.join(ROOT, \"src\"))\n\nfrom sentry.utils.distutils import (\n BuildAssetsCommand,\n BuildIntegrationDocsCommand,\n BuildJsSdkRegistryCommand,\n)\n\n# The version of sentry\nVERSION = \"10.0.0.dev0\"\n\n# Hack to prevent stupid \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when running `python\n# setup.py test` (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\nfor m in (\"multiprocessing\", \"billiard\"):\n try:\n __import__(m)\n except ImportError:\n pass\n\nIS_LIGHT_BUILD = os.environ.get(\"SENTRY_LIGHT_BUILD\") == \"1\"\n\n# we use pip requirements files to improve Docker layer caching\n\n\ndef get_requirements(env):\n with open(u\"requirements-{}.txt\".format(env)) as fp:\n return [x.strip() for x in fp.read().split(\"\\n\") if not x.startswith(\"#\")]\n\n\ninstall_requires = get_requirements(\"base\")\ndev_requires = get_requirements(\"dev\")\n\n# override django version in requirements file if DJANGO_VERSION is set\nDJANGO_VERSION = os.environ.get(\"DJANGO_VERSION\")\nif DJANGO_VERSION:\n install_requires = [\n u\"Django{}\".format(DJANGO_VERSION) if r.startswith(\"Django>=\") else r\n for r in install_requires\n ]\n\n\nclass SentrySDistCommand(SDistCommand):\n # If we are not a light build we want to also execute build_assets as\n # part of our source build pipeline.\n if not IS_LIGHT_BUILD:\n sub_commands = 
SDistCommand.sub_commands + [\n (\"build_integration_docs\", None),\n (\"build_assets\", None),\n (\"build_js_sdk_registry\", None),\n ]\n\n\nclass SentryBuildCommand(BuildCommand):\n def run(self):\n if not IS_LIGHT_BUILD:\n self.run_command(\"build_integration_docs\")\n self.run_command(\"build_assets\")\n self.run_command(\"build_js_sdk_registry\")\n BuildCommand.run(self)\n\n\nclass SentryDevelopCommand(DevelopCommand):\n def run(self):\n DevelopCommand.run(self)\n if not IS_LIGHT_BUILD:\n self.run_command(\"build_integration_docs\")\n self.run_command(\"build_assets\")\n self.run_command(\"build_js_sdk_registry\")\n\n\ncmdclass = {\n \"sdist\": SentrySDistCommand,\n \"develop\": SentryDevelopCommand,\n \"build\": SentryBuildCommand,\n \"build_assets\": BuildAssetsCommand,\n \"build_integration_docs\": BuildIntegrationDocsCommand,\n \"build_js_sdk_registry\": BuildJsSdkRegistryCommand,\n}\n\n\nsetup(\n name=\"sentry\",\n version=VERSION,\n author=\"Sentry\",\n author_email=\"[email protected]\",\n url=\"https://sentry.io\",\n description=\"A realtime logging and aggregation server.\",\n long_description=open(os.path.join(ROOT, \"README.rst\")).read(),\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n zip_safe=False,\n install_requires=install_requires,\n extras_require={\"dev\": dev_requires, \"postgres\": []},\n cmdclass=cmdclass,\n license=\"BSD\",\n include_package_data=True,\n entry_points={\n \"console_scripts\": [\"sentry = sentry.runner:main\"],\n \"sentry.new_apps\": [\n \"sessionstack = new_sentry_plugins.sessionstack\",\n ],\n \"sentry.test_only_apps\": [\n \"jira_ac = test_only_plugins.jira_ac\",\n \"jira = test_only_plugins.jira\",\n ],\n \"sentry.new_plugins\": [\n \"amazon_sqs = new_sentry_plugins.amazon_sqs.plugin:AmazonSQSPlugin\",\n \"sessionstack = new_sentry_plugins.sessionstack.plugin:SessionStackPlugin\",\n ],\n \"sentry.test_only_plugins\": [\n \"asana = test_only_plugins.asana.plugin:AsanaPlugin\",\n \"bitbucket = test_only_plugins.bitbucket.plugin:BitbucketPlugin\",\n \"clubhouse = test_only_plugins.clubhouse.plugin:ClubhousePlugin\",\n \"github = test_only_plugins.github.plugin:GitHubPlugin\",\n \"gitlab = test_only_plugins.gitlab.plugin:GitLabPlugin\",\n \"heroku = test_only_plugins.heroku.plugin:HerokuPlugin\",\n \"jira = test_only_plugins.jira.plugin:JiraPlugin\",\n \"jira_ac = test_only_plugins.jira_ac.plugin:JiraACPlugin\",\n \"pagerduty = test_only_plugins.pagerduty.plugin:PagerDutyPlugin\",\n \"phabricator = test_only_plugins.phabricator.plugin:PhabricatorPlugin\",\n \"pivotal = test_only_plugins.pivotal.plugin:PivotalPlugin\",\n \"pushover = test_only_plugins.pushover.plugin:PushoverPlugin\",\n \"segment = test_only_plugins.segment.plugin:SegmentPlugin\",\n \"slack = test_only_plugins.slack.plugin:SlackPlugin\",\n \"splunk = test_only_plugins.splunk.plugin:SplunkPlugin\",\n \"victorops = test_only_plugins.victorops.plugin:VictorOpsPlugin\",\n \"vsts = test_only_plugins.vsts.plugin:VstsPlugin\",\n ],\n },\n classifiers=[\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 2 :: Only\",\n \"Topic :: Software Development\",\n ],\n)\n", "path": "setup.py"}]} | 2,520 | 164 |
gh_patches_debug_38276 | rasdani/github-patches | git_diff | Flexget__Flexget-2787 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TorrentDay search result empty or malformed
### Expected behaviour:
Search result

### Actual behaviour:
Search result empty or malformed.
### Steps to reproduce:
- Step 1: run task
#### Config:
```yaml
download-childrenshow-discover:
template:
- cartoon-metainfo
- reject
- children_show
- notifyViaEmail
discover:
what:
- next_series_seasons:
from_start: yes
- next_series_episodes:
backfill: true
from_start: yes
from:
- torrentday:
uid: "'{? torrentday.uid ?}'"
passkey: "{? torrentday.passkey ?}"
cfduid: "{? torrentday.cfduid ?}"
rss_key: "{? torrentday.rss_key ?}"
```
#### Log:
<details>
<summary>(click to expand)</summary>
2020-09-30 12:39:49 VERBOSE discover download-childrenshow-discover Searching for `Vampirina S02` with plugin `torrentday` (1 of 7)
2020-09-30 12:39:49 DEBUG utils.requests download-childrenshow-discover GETing URL https://www.torrentday.com/t with args () and kwargs {'params': {'cata': 'yes', 'c7': 1, 'clear-new': 1, 'q': 'Vampirina S02'}, 'cookies': {'uid': '...', 'pass': '...', '__cfduid': '...'}, 'allow_redirects': True, 'timeout': 30}
2020-09-30 12:39:50 ERROR discover download-childrenshow-discover Error searching with torrentday: Search returned by torrentday appears to be empty or malformed.
2020-09-30 12:39:50 VERBOSE discover download-childrenshow-discover No search results for `Vampirina S02`
</details>
### Additional information:
- FlexGet version: 3.1.71
- Python version: 3.5.1
- Installation method: pip install
- Using daemon (yes/no): yes
- OS and version: Synology DSM 6
</issue>
<code>
[start of flexget/components/sites/sites/torrentday.py]
1 import re
2
3 from loguru import logger
4 from requests.exceptions import RequestException
5
6 from flexget import plugin
7 from flexget.components.sites.urlrewriting import UrlRewritingError
8 from flexget.components.sites.utils import normalize_unicode, torrent_availability
9 from flexget.entry import Entry
10 from flexget.event import event
11 from flexget.plugin import PluginError
12 from flexget.utils import requests
13 from flexget.utils.soup import get_soup
14 from flexget.utils.tools import parse_filesize
15
16 logger = logger.bind(name='torrentday')
17
18 CATEGORIES = {
19 'all': 0,
20 # Movies
21 'mov480p': 25,
22 'movHD': 11,
23 'movBD': 5,
24 'movDVD': 3,
25 'movMP4': 21,
26 'movNonEnglish': 22,
27 'movPACKS': 13,
28 'movSDx264': 44,
29 'movX265': 48,
30 'movXVID': 1,
31 # TV
32 'tv480p': 24,
33 'tvBRD': 32,
34 'tvDVD': 31,
35 'tvDVDrip': 33,
36 'tvMOBILE': 46,
37 'tvPACKS': 14,
38 'tvSDx264': 26,
39 'tvHDx264': 7,
40 'tvX265': 34,
41 'tvXVID': 2,
42 }
43
44
45 class UrlRewriteTorrentday:
46 """
47 Torrentday urlrewriter and search plugin.
48
49 torrentday:
50 uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies
51 passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. see previous
52 cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES
53 rss_key: xxxxxxxxx (required) get this from your profile page
54 category: xxxxxxxx
55
56 Category can be one of
57 ID from browsing site OR 'name'
58 movies:
59 mov480p, movHD, movBD, movDVD,
60 movMP4, movNonEnglish, movPACKS,
61 movSDx264, movX265, movXVID
62 tv:
63 tv480p, tvBRD, tvDVD, tvDVDrip,
64 tvMOBILE, tvPACKS, tvSDx264,
65 tvHDx264, tvX265, tvXVID
66 """
67
68 schema = {
69 'type': 'object',
70 'properties': {
71 'rss_key': {'type': 'string'},
72 'uid': {'type': 'string'},
73 'passkey': {'type': 'string'},
74 'cfduid': {'type': 'string'},
75 'category': {
76 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]
77 },
78 },
79 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],
80 'additionalProperties': False,
81 }
82
83 # urlrewriter API
84 def url_rewritable(self, task, entry):
85 url = entry['url']
86 if url.find('.torrent'):
87 return False
88 if url.startswith('https://www.torrentday.com'):
89 return True
90 return False
91
92 # urlrewriter API
93 def url_rewrite(self, task, entry):
94 if 'url' not in entry:
95 logger.error('Didn\'t actually get a URL...')
96 else:
97 logger.debug('Got the URL: {}', entry['url'])
98 if entry['url'].startswith('https://www.torrentday.com/browse'):
99 # use search
100 results = self.search(task, entry)
101 if not results:
102 raise UrlRewritingError('No search results found')
103 entry['url'] = results[0]['url']
104
105 @plugin.internet(logger)
106 def search(self, task, entry, config=None):
107 """
108 Search for name from torrentday.
109 """
110
111 categories = config.get('category', 'all')
112 # Make sure categories is a list
113 if not isinstance(categories, list):
114 categories = [categories]
115 # If there are any text categories, turn them into their id number
116 categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
117 params = {
118 'cata': 'yes',
119 'c{}'.format(','.join(str(c) for c in categories)): 1,
120 'clear-new': 1,
121 }
122 entries = set()
123 for search_string in entry.get('search_strings', [entry['title']]):
124
125 url = 'https://www.torrentday.com/t'
126 params['q'] = normalize_unicode(search_string).replace(':', '')
127 cookies = {
128 'uid': config['uid'],
129 'pass': config['passkey'],
130 '__cfduid': config['cfduid'],
131 }
132
133 try:
134 page = requests.get(url, params=params, cookies=cookies).content
135 except RequestException as e:
136 raise PluginError('Could not connect to torrentday: {}'.format(e))
137
138 # the following should avoid table being None due to a malformed
139 # html in td search results
140 soup = get_soup(page).contents[1].contents[1].next.next.nextSibling
141 table = soup.find('table', {'id': 'torrentTable'})
142 if table is None:
143 raise PluginError(
144 'Search returned by torrentday appears to be empty or malformed.'
145 )
146
147 # the first row is the header so skip it
148 for tr in table.find_all('tr')[1:]:
149 entry = Entry()
150 # find the torrent names
151 td = tr.find('td', {'class': 'torrentNameInfo'})
152 if not td:
153 logger.warning('Could not find entry torrentNameInfo for {}.', search_string)
154 continue
155 title = td.find('a')
156 if not title:
157 logger.warning('Could not determine title for {}.', search_string)
158 continue
159 entry['title'] = title.contents[0]
160 logger.debug('title: {}', title.contents[0])
161
162 # find download link
163 torrent_url = tr.find('td', {'class': 'ac'})
164 if not torrent_url:
165 logger.warning('Could not determine download link for {}.', search_string)
166 continue
167 torrent_url = torrent_url.find('a').get('href')
168
169 # construct download URL
170 torrent_url = (
171 'https://www.torrentday.com/'
172 + torrent_url
173 + '?torrent_pass='
174 + config['rss_key']
175 )
176 logger.debug('RSS-ified download link: {}', torrent_url)
177 entry['url'] = torrent_url
178
179 # us tr object for seeders/leechers
180 seeders = tr.find('td', {'class': 'ac seedersInfo'})
181 leechers = tr.find('td', {'class': 'ac leechersInfo'})
182 entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))
183 entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))
184 entry['torrent_availability'] = torrent_availability(
185 entry['torrent_seeds'], entry['torrent_leeches']
186 )
187
188 # use tr object for size
189 size = tr.find('td', text=re.compile(r'([\.\d]+) ([TGMKk]?)B')).contents[0]
190 size = re.search(r'([\.\d]+) ([TGMKk]?)B', str(size))
191
192 entry['content_size'] = parse_filesize(size.group(0))
193
194 entries.add(entry)
195
196 return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))
197
198
199 @event('plugin.register')
200 def register_plugin():
201 plugin.register(
202 UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2
203 )
204
[end of flexget/components/sites/sites/torrentday.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py
--- a/flexget/components/sites/sites/torrentday.py
+++ b/flexget/components/sites/sites/torrentday.py
@@ -18,6 +18,7 @@
CATEGORIES = {
'all': 0,
# Movies
+ 'mov4k': 96,
'mov480p': 25,
'movHD': 11,
'movBD': 5,
@@ -34,6 +35,7 @@
'tvDVD': 31,
'tvDVDrip': 33,
'tvMOBILE': 46,
+ 'tvNonEnglish': 82,
'tvPACKS': 14,
'tvSDx264': 26,
'tvHDx264': 7,
@@ -53,16 +55,16 @@
rss_key: xxxxxxxxx (required) get this from your profile page
category: xxxxxxxx
- Category can be one of
+ Category can be one of
ID from browsing site OR 'name'
movies:
- mov480p, movHD, movBD, movDVD,
+ mov4k, mov480p, movHD, movBD, movDVD,
movMP4, movNonEnglish, movPACKS,
movSDx264, movX265, movXVID
tv:
tv480p, tvBRD, tvDVD, tvDVDrip,
- tvMOBILE, tvPACKS, tvSDx264,
- tvHDx264, tvX265, tvXVID
+ tvMOBILE, tvNonEnglish, tvPACKS,
+ tvSDx264, tvHDx264, tvX265, tvXVID
"""
schema = {
@@ -114,11 +116,9 @@
categories = [categories]
# If there are any text categories, turn them into their id number
categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
- params = {
- 'cata': 'yes',
- 'c{}'.format(','.join(str(c) for c in categories)): 1,
- 'clear-new': 1,
- }
+ params = {'cata': 'yes', 'clear-new': 1}
+ params.update({str(c): 1 for c in categories})
+
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
@@ -137,7 +137,7 @@
# the following should avoid table being None due to a malformed
# html in td search results
- soup = get_soup(page).contents[1].contents[1].next.next.nextSibling
+ soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling
table = soup.find('table', {'id': 'torrentTable'})
if table is None:
raise PluginError(
| {"golden_diff": "diff --git a/flexget/components/sites/sites/torrentday.py b/flexget/components/sites/sites/torrentday.py\n--- a/flexget/components/sites/sites/torrentday.py\n+++ b/flexget/components/sites/sites/torrentday.py\n@@ -18,6 +18,7 @@\n CATEGORIES = {\n 'all': 0,\n # Movies\n+ 'mov4k': 96,\n 'mov480p': 25,\n 'movHD': 11,\n 'movBD': 5,\n@@ -34,6 +35,7 @@\n 'tvDVD': 31,\n 'tvDVDrip': 33,\n 'tvMOBILE': 46,\n+ 'tvNonEnglish': 82,\n 'tvPACKS': 14,\n 'tvSDx264': 26,\n 'tvHDx264': 7,\n@@ -53,16 +55,16 @@\n rss_key: xxxxxxxxx (required) get this from your profile page\n category: xxxxxxxx\n \n- Category can be one of \n+ Category can be one of\n ID from browsing site OR 'name'\n movies:\n- mov480p, movHD, movBD, movDVD,\n+ mov4k, mov480p, movHD, movBD, movDVD,\n movMP4, movNonEnglish, movPACKS,\n movSDx264, movX265, movXVID\n tv:\n tv480p, tvBRD, tvDVD, tvDVDrip,\n- tvMOBILE, tvPACKS, tvSDx264, \n- tvHDx264, tvX265, tvXVID\n+ tvMOBILE, tvNonEnglish, tvPACKS,\n+ tvSDx264, tvHDx264, tvX265, tvXVID\n \"\"\"\n \n schema = {\n@@ -114,11 +116,9 @@\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n- params = {\n- 'cata': 'yes',\n- 'c{}'.format(','.join(str(c) for c in categories)): 1,\n- 'clear-new': 1,\n- }\n+ params = {'cata': 'yes', 'clear-new': 1}\n+ params.update({str(c): 1 for c in categories})\n+\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n \n@@ -137,7 +137,7 @@\n \n # the following should avoid table being None due to a malformed\n # html in td search results\n- soup = get_soup(page).contents[1].contents[1].next.next.nextSibling\n+ soup = get_soup(page).contents[1].contents[1].contents[1].next.nextSibling\n table = soup.find('table', {'id': 'torrentTable'})\n if table is None:\n raise PluginError(\n", "issue": "TorrentDay search result empty or malformed\n### Expected behaviour:\r\n\r\nSearch result\r\n\r\n\r\n### Actual behaviour:\r\n\r\nSearch result empty or malformed.\r\n\r\n### Steps to reproduce:\r\n- Step 1: run task\r\n\r\n#### Config:\r\n```yaml\r\n download-childrenshow-discover:\r\n template:\r\n - cartoon-metainfo\r\n - reject\r\n - children_show\r\n - notifyViaEmail\r\n discover:\r\n what:\r\n - next_series_seasons:\r\n from_start: yes\r\n - next_series_episodes:\r\n backfill: true\r\n from_start: yes\r\n from:\r\n - torrentday:\r\n uid: \"'{? torrentday.uid ?}'\"\r\n passkey: \"{? torrentday.passkey ?}\"\r\n cfduid: \"{? torrentday.cfduid ?}\"\r\n rss_key: \"{? 
torrentday.rss_key ?}\"\r\n```\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n2020-09-30 12:39:49 VERBOSE discover download-childrenshow-discover Searching for `Vampirina S02` with plugin `torrentday` (1 of 7)\r\n2020-09-30 12:39:49 DEBUG utils.requests download-childrenshow-discover GETing URL https://www.torrentday.com/t with args () and kwargs {'params': {'cata': 'yes', 'c7': 1, 'clear-new': 1, 'q': 'Vampirina S02'}, 'cookies': {'uid': '...', 'pass': '...', '__cfduid': '...'}, 'allow_redirects': True, 'timeout': 30}\r\n2020-09-30 12:39:50 ERROR discover download-childrenshow-discover Error searching with torrentday: Search returned by torrentday appears to be empty or malformed.\r\n2020-09-30 12:39:50 VERBOSE discover download-childrenshow-discover No search results for `Vampirina S02`\r\n\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.1.71\r\n- Python version: 3.5.1\r\n- Installation method: pip install\r\n- Using daemon (yes/no): yes\r\n- OS and version: Synology DSM 6\n", "before_files": [{"content": "import re\n\nfrom loguru import logger\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.components.sites.urlrewriting import UrlRewritingError\nfrom flexget.components.sites.utils import normalize_unicode, torrent_availability\nfrom flexget.entry import Entry\nfrom flexget.event import event\nfrom flexget.plugin import PluginError\nfrom flexget.utils import requests\nfrom flexget.utils.soup import get_soup\nfrom flexget.utils.tools import parse_filesize\n\nlogger = logger.bind(name='torrentday')\n\nCATEGORIES = {\n 'all': 0,\n # Movies\n 'mov480p': 25,\n 'movHD': 11,\n 'movBD': 5,\n 'movDVD': 3,\n 'movMP4': 21,\n 'movNonEnglish': 22,\n 'movPACKS': 13,\n 'movSDx264': 44,\n 'movX265': 48,\n 'movXVID': 1,\n # TV\n 'tv480p': 24,\n 'tvBRD': 32,\n 'tvDVD': 31,\n 'tvDVDrip': 33,\n 'tvMOBILE': 46,\n 'tvPACKS': 14,\n 'tvSDx264': 26,\n 'tvHDx264': 7,\n 'tvX265': 34,\n 'tvXVID': 2,\n}\n\n\nclass UrlRewriteTorrentday:\n \"\"\"\n Torrentday urlrewriter and search plugin.\n\n torrentday:\n uid: xxxxxxxxxxxxx (required) NOT YOUR LOGIN. find this in your browser's cookies\n passkey: xxxxxxxxx (required) NOT YOUR PASSWORD. 
see previous\n cfduid: xxxxxxxxxx (required) AGAIN IN THE COOKIES\n rss_key: xxxxxxxxx (required) get this from your profile page\n category: xxxxxxxx\n\n Category can be one of \n ID from browsing site OR 'name'\n movies:\n mov480p, movHD, movBD, movDVD,\n movMP4, movNonEnglish, movPACKS,\n movSDx264, movX265, movXVID\n tv:\n tv480p, tvBRD, tvDVD, tvDVDrip,\n tvMOBILE, tvPACKS, tvSDx264, \n tvHDx264, tvX265, tvXVID\n \"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'rss_key': {'type': 'string'},\n 'uid': {'type': 'string'},\n 'passkey': {'type': 'string'},\n 'cfduid': {'type': 'string'},\n 'category': {\n 'oneOf': [{'type': 'integer'}, {'type': 'string', 'enum': list(CATEGORIES)}]\n },\n },\n 'required': ['rss_key', 'uid', 'passkey', 'cfduid'],\n 'additionalProperties': False,\n }\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.find('.torrent'):\n return False\n if url.startswith('https://www.torrentday.com'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n if 'url' not in entry:\n logger.error('Didn\\'t actually get a URL...')\n else:\n logger.debug('Got the URL: {}', entry['url'])\n if entry['url'].startswith('https://www.torrentday.com/browse'):\n # use search\n results = self.search(task, entry)\n if not results:\n raise UrlRewritingError('No search results found')\n entry['url'] = results[0]['url']\n\n @plugin.internet(logger)\n def search(self, task, entry, config=None):\n \"\"\"\n Search for name from torrentday.\n \"\"\"\n\n categories = config.get('category', 'all')\n # Make sure categories is a list\n if not isinstance(categories, list):\n categories = [categories]\n # If there are any text categories, turn them into their id number\n categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]\n params = {\n 'cata': 'yes',\n 'c{}'.format(','.join(str(c) for c in categories)): 1,\n 'clear-new': 1,\n }\n entries = set()\n for search_string in entry.get('search_strings', [entry['title']]):\n\n url = 'https://www.torrentday.com/t'\n params['q'] = normalize_unicode(search_string).replace(':', '')\n cookies = {\n 'uid': config['uid'],\n 'pass': config['passkey'],\n '__cfduid': config['cfduid'],\n }\n\n try:\n page = requests.get(url, params=params, cookies=cookies).content\n except RequestException as e:\n raise PluginError('Could not connect to torrentday: {}'.format(e))\n\n # the following should avoid table being None due to a malformed\n # html in td search results\n soup = get_soup(page).contents[1].contents[1].next.next.nextSibling\n table = soup.find('table', {'id': 'torrentTable'})\n if table is None:\n raise PluginError(\n 'Search returned by torrentday appears to be empty or malformed.'\n )\n\n # the first row is the header so skip it\n for tr in table.find_all('tr')[1:]:\n entry = Entry()\n # find the torrent names\n td = tr.find('td', {'class': 'torrentNameInfo'})\n if not td:\n logger.warning('Could not find entry torrentNameInfo for {}.', search_string)\n continue\n title = td.find('a')\n if not title:\n logger.warning('Could not determine title for {}.', search_string)\n continue\n entry['title'] = title.contents[0]\n logger.debug('title: {}', title.contents[0])\n\n # find download link\n torrent_url = tr.find('td', {'class': 'ac'})\n if not torrent_url:\n logger.warning('Could not determine download link for {}.', search_string)\n continue\n torrent_url = torrent_url.find('a').get('href')\n\n # construct download URL\n torrent_url = (\n 
'https://www.torrentday.com/'\n + torrent_url\n + '?torrent_pass='\n + config['rss_key']\n )\n logger.debug('RSS-ified download link: {}', torrent_url)\n entry['url'] = torrent_url\n\n # us tr object for seeders/leechers\n seeders = tr.find('td', {'class': 'ac seedersInfo'})\n leechers = tr.find('td', {'class': 'ac leechersInfo'})\n entry['torrent_seeds'] = int(seeders.contents[0].replace(',', ''))\n entry['torrent_leeches'] = int(leechers.contents[0].replace(',', ''))\n entry['torrent_availability'] = torrent_availability(\n entry['torrent_seeds'], entry['torrent_leeches']\n )\n\n # use tr object for size\n size = tr.find('td', text=re.compile(r'([\\.\\d]+) ([TGMKk]?)B')).contents[0]\n size = re.search(r'([\\.\\d]+) ([TGMKk]?)B', str(size))\n\n entry['content_size'] = parse_filesize(size.group(0))\n\n entries.add(entry)\n\n return sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(\n UrlRewriteTorrentday, 'torrentday', interfaces=['urlrewriter', 'search'], api_ver=2\n )\n", "path": "flexget/components/sites/sites/torrentday.py"}]} | 3,382 | 715 |
gh_patches_debug_13708 | rasdani/github-patches | git_diff | bokeh__bokeh-8492 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo in range_tool example
There is a small typo "range_rool" in the range_tool.py example. I would like to use this issue to create my first pull request and see how the process works.
</issue>
<code>
[start of examples/plotting/file/range_tool.py]
1 import numpy as np
2
3 from bokeh.io import show
4 from bokeh.layouts import column
5 from bokeh.models import ColumnDataSource, RangeTool
6 from bokeh.plotting import figure
7 from bokeh.sampledata.stocks import AAPL
8
9 dates = np.array(AAPL['date'], dtype=np.datetime64)
10 source = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))
11
12 p = figure(plot_height=300, plot_width=800, tools="", toolbar_location=None,
13 x_axis_type="datetime", x_axis_location="above",
14 background_fill_color="#efefef", x_range=(dates[1500], dates[2500]))
15
16 p.line('date', 'close', source=source)
17 p.yaxis.axis_label = 'Price'
18
19 select = figure(title="Drag the middle and edges of the selection box to change the range above",
20 plot_height=130, plot_width=800, y_range=p.y_range,
21 x_axis_type="datetime", y_axis_type=None,
22 tools="", toolbar_location=None, background_fill_color="#efefef")
23
24 range_rool = RangeTool(x_range=p.x_range)
25 range_rool.overlay.fill_color = "navy"
26 range_rool.overlay.fill_alpha = 0.2
27
28 select.line('date', 'close', source=source)
29 select.ygrid.grid_line_color = None
30 select.add_tools(range_rool)
31 select.toolbar.active_multi = range_rool
32
33 show(column(p, select))
34
[end of examples/plotting/file/range_tool.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/plotting/file/range_tool.py b/examples/plotting/file/range_tool.py
--- a/examples/plotting/file/range_tool.py
+++ b/examples/plotting/file/range_tool.py
@@ -21,13 +21,13 @@
x_axis_type="datetime", y_axis_type=None,
tools="", toolbar_location=None, background_fill_color="#efefef")
-range_rool = RangeTool(x_range=p.x_range)
-range_rool.overlay.fill_color = "navy"
-range_rool.overlay.fill_alpha = 0.2
+range_tool = RangeTool(x_range=p.x_range)
+range_tool.overlay.fill_color = "navy"
+range_tool.overlay.fill_alpha = 0.2
select.line('date', 'close', source=source)
select.ygrid.grid_line_color = None
-select.add_tools(range_rool)
-select.toolbar.active_multi = range_rool
+select.add_tools(range_tool)
+select.toolbar.active_multi = range_tool
show(column(p, select))
| {"golden_diff": "diff --git a/examples/plotting/file/range_tool.py b/examples/plotting/file/range_tool.py\n--- a/examples/plotting/file/range_tool.py\n+++ b/examples/plotting/file/range_tool.py\n@@ -21,13 +21,13 @@\n x_axis_type=\"datetime\", y_axis_type=None,\n tools=\"\", toolbar_location=None, background_fill_color=\"#efefef\")\n \n-range_rool = RangeTool(x_range=p.x_range)\n-range_rool.overlay.fill_color = \"navy\"\n-range_rool.overlay.fill_alpha = 0.2\n+range_tool = RangeTool(x_range=p.x_range)\n+range_tool.overlay.fill_color = \"navy\"\n+range_tool.overlay.fill_alpha = 0.2\n \n select.line('date', 'close', source=source)\n select.ygrid.grid_line_color = None\n-select.add_tools(range_rool)\n-select.toolbar.active_multi = range_rool\n+select.add_tools(range_tool)\n+select.toolbar.active_multi = range_tool\n \n show(column(p, select))\n", "issue": "Typo in range_tool example\nThere is a small typo \"range_rool\" in the range_tool.py example. I would like to use this issue to create my first pull request and see how the process works.\n", "before_files": [{"content": "import numpy as np\n\nfrom bokeh.io import show\nfrom bokeh.layouts import column\nfrom bokeh.models import ColumnDataSource, RangeTool\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.stocks import AAPL\n\ndates = np.array(AAPL['date'], dtype=np.datetime64)\nsource = ColumnDataSource(data=dict(date=dates, close=AAPL['adj_close']))\n\np = figure(plot_height=300, plot_width=800, tools=\"\", toolbar_location=None,\n x_axis_type=\"datetime\", x_axis_location=\"above\",\n background_fill_color=\"#efefef\", x_range=(dates[1500], dates[2500]))\n\np.line('date', 'close', source=source)\np.yaxis.axis_label = 'Price'\n\nselect = figure(title=\"Drag the middle and edges of the selection box to change the range above\",\n plot_height=130, plot_width=800, y_range=p.y_range,\n x_axis_type=\"datetime\", y_axis_type=None,\n tools=\"\", toolbar_location=None, background_fill_color=\"#efefef\")\n\nrange_rool = RangeTool(x_range=p.x_range)\nrange_rool.overlay.fill_color = \"navy\"\nrange_rool.overlay.fill_alpha = 0.2\n\nselect.line('date', 'close', source=source)\nselect.ygrid.grid_line_color = None\nselect.add_tools(range_rool)\nselect.toolbar.active_multi = range_rool\n\nshow(column(p, select))\n", "path": "examples/plotting/file/range_tool.py"}]} | 966 | 219 |
gh_patches_debug_28872 | rasdani/github-patches | git_diff | facebookresearch__hydra-170 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
completion bug when completion unresolved values
`python demos/6_sweep/experiment.py hydra/launcher=fairtask hydra.launcher.params.queue=local hydra.launcher.params.queues.slurm.params.<TAB>`
->
KeyError: "str interpolation key 'hydra.job.num_jobs' not found"
completion bug when completion unresolved values
`python demos/6_sweep/experiment.py hydra/launcher=fairtask hydra.launcher.params.queue=local hydra.launcher.params.queues.slurm.params.<TAB>`
->
KeyError: "str interpolation key 'hydra.job.num_jobs' not found"
</issue>
<code>
[start of hydra/plugins/completion_plugin.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import os
3 import sys
4 from abc import ABCMeta
5 from abc import abstractmethod
6 import six
7 from omegaconf import DictConfig, ListConfig, Config
8
9 from hydra.plugins import Plugin
10
11
12 @six.add_metaclass(ABCMeta)
13 class CompletionPlugin(Plugin):
14 def __init__(self, config_loader):
15 self.config_loader = config_loader
16
17 def install(self):
18 raise NotImplementedError()
19
20 def uninstall(self):
21 raise NotImplementedError()
22
23 @abstractmethod
24 def provides(self):
25 """
26 :return: the name of the shell this plugin provides completion for
27 """
28 return None
29
30 def query(self):
31 raise NotImplementedError()
32
33 @staticmethod
34 def _get_filename(fname):
35 last = fname.rfind("=")
36 if last != -1:
37 key_eq = fname[0 : last + 1]
38 fname = fname[last + 1 :]
39 prefixes = [".", "/", "\\", "./", ".\\"]
40 if sys.platform.startswith("win"):
41 for drive in range(ord("a"), ord("z")):
42 prefixes.append("{}:".format(chr(drive)))
43
44 if not fname:
45 return None, None
46 lowerfilename = fname.lower()
47 for prefix in prefixes:
48 if lowerfilename.startswith(prefix):
49 return key_eq, fname
50 return None, None
51
52 @staticmethod
53 def complete_files(word):
54 if os.path.isdir(word):
55 dirname = word
56 files = os.listdir(word)
57 file_prefix = ""
58 else:
59 dirname = os.path.dirname(word)
60 if os.path.isdir(dirname):
61 files = os.listdir(dirname)
62 else:
63 files = []
64 file_prefix = os.path.basename(word)
65 ret = []
66 for file in files:
67 if file.startswith(file_prefix):
68 ret.append(os.path.join(dirname, file))
69 return ret
70
71 @staticmethod
72 def _get_matches(config, word):
73 def str_rep(in_key, in_value):
74 if isinstance(in_value, Config):
75 return "{}.".format(in_key)
76 else:
77 return "{}=".format(in_key)
78
79 if config is None:
80 return []
81 elif isinstance(config, Config):
82 matches = []
83 if word.endswith(".") or word.endswith("="):
84 exact_key = word[0:-1]
85 conf_node = config.select(exact_key)
86 if conf_node is not None:
87 if isinstance(conf_node, Config):
88 key_matches = CompletionPlugin._get_matches(conf_node, "")
89 else:
90 # primitive
91 if isinstance(conf_node, bool):
92 conf_node = str(conf_node).lower()
93 key_matches = [conf_node]
94 else:
95 key_matches = []
96
97 matches.extend(["{}{}".format(word, match) for match in key_matches])
98 else:
99 last_dot = word.rfind(".")
100 if last_dot != -1:
101 base_key = word[0:last_dot]
102 partial_key = word[last_dot + 1 :]
103 conf_node = config.select(base_key)
104 key_matches = CompletionPlugin._get_matches(conf_node, partial_key)
105 matches.extend(
106 ["{}.{}".format(base_key, match) for match in key_matches]
107 )
108 else:
109 if isinstance(config, DictConfig):
110 for key, value in config.items():
111 if key.startswith(word):
112 matches.append(str_rep(key, value))
113 elif isinstance(config, ListConfig):
114 for idx, value in enumerate(config):
115 if str(idx).startswith(word):
116 matches.append(str_rep(idx, value))
117 else:
118 assert False, "Object is not an instance of config : {}".format(
119 type(config)
120 )
121
122 return matches
123
124 def _query_config_groups(self, word):
125 last_eq_index = word.rfind("=")
126 last_slash_index = word.rfind("/")
127 exact_match = False
128 if last_eq_index != -1:
129 parent_group = word[0:last_eq_index]
130 file_type = "file"
131 else:
132 file_type = "dir"
133 if last_slash_index == -1:
134 parent_group = ""
135 else:
136 parent_group = word[0:last_slash_index]
137
138 all_matched_groups = self.config_loader.get_group_options(
139 parent_group, file_type=file_type
140 )
141 matched_groups = []
142 if file_type == "file":
143 for match in all_matched_groups:
144 name = (
145 "{}={}".format(parent_group, match) if parent_group != "" else match
146 )
147 if name.startswith(word):
148 matched_groups.append(name)
149 exact_match = True
150 elif file_type == "dir":
151 for match in all_matched_groups:
152 name = (
153 "{}/{}".format(parent_group, match) if parent_group != "" else match
154 )
155 if name.startswith(word):
156 files = self.config_loader.get_group_options(name, file_type="file")
157 dirs = self.config_loader.get_group_options(name, file_type="dir")
158 if len(dirs) == 0 and len(files) > 0:
159 name = name + "="
160 elif len(dirs) > 0 and len(files) == 0:
161 name = name + "/"
162 matched_groups.append(name)
163
164 return matched_groups, exact_match
165
166 def _query(self, line):
167 from .._internal.utils import get_args
168
169 split = line.split(" ")
170 parsed_args = get_args(split)
171 args = parsed_args.overrides
172 if len(args) > 0:
173 word = args[-1]
174 words = args[0:-1]
175 else:
176 word = ""
177 words = []
178
179 config = self.config_loader.load_configuration(words)
180
181 fname_prefix, filename = CompletionPlugin._get_filename(word)
182 if filename is not None:
183 result = CompletionPlugin.complete_files(filename)
184 result = [fname_prefix + file for file in result]
185 else:
186 matched_groups, exact_match = self._query_config_groups(word)
187 config_matches = []
188 if not exact_match:
189 config_matches = CompletionPlugin._get_matches(config, word)
190 result = list(set(matched_groups + config_matches))
191
192 return sorted(result)
193
194
195 @six.add_metaclass(ABCMeta)
196 class DefaultCompletionPlugin(CompletionPlugin):
197 """
198 A concrete instance of CompletionPlugin that is used for testing.
199 """
200
201 def provides(self):
202 return None
203
[end of hydra/plugins/completion_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/plugins/completion_plugin.py b/hydra/plugins/completion_plugin.py
--- a/hydra/plugins/completion_plugin.py
+++ b/hydra/plugins/completion_plugin.py
@@ -4,7 +4,7 @@
from abc import ABCMeta
from abc import abstractmethod
import six
-from omegaconf import DictConfig, ListConfig, Config
+from omegaconf import DictConfig, ListConfig, Config, MissingMandatoryValue
from hydra.plugins import Plugin
@@ -82,7 +82,10 @@
matches = []
if word.endswith(".") or word.endswith("="):
exact_key = word[0:-1]
- conf_node = config.select(exact_key)
+ try:
+ conf_node = config.select(exact_key)
+ except MissingMandatoryValue:
+ conf_node = ""
if conf_node is not None:
if isinstance(conf_node, Config):
key_matches = CompletionPlugin._get_matches(conf_node, "")
@@ -107,7 +110,7 @@
)
else:
if isinstance(config, DictConfig):
- for key, value in config.items():
+ for key, value in config.items(resolve=False):
if key.startswith(word):
matches.append(str_rep(key, value))
elif isinstance(config, ListConfig):
| {"golden_diff": "diff --git a/hydra/plugins/completion_plugin.py b/hydra/plugins/completion_plugin.py\n--- a/hydra/plugins/completion_plugin.py\n+++ b/hydra/plugins/completion_plugin.py\n@@ -4,7 +4,7 @@\n from abc import ABCMeta\n from abc import abstractmethod\n import six\n-from omegaconf import DictConfig, ListConfig, Config\n+from omegaconf import DictConfig, ListConfig, Config, MissingMandatoryValue\n \n from hydra.plugins import Plugin\n \n@@ -82,7 +82,10 @@\n matches = []\n if word.endswith(\".\") or word.endswith(\"=\"):\n exact_key = word[0:-1]\n- conf_node = config.select(exact_key)\n+ try:\n+ conf_node = config.select(exact_key)\n+ except MissingMandatoryValue:\n+ conf_node = \"\"\n if conf_node is not None:\n if isinstance(conf_node, Config):\n key_matches = CompletionPlugin._get_matches(conf_node, \"\")\n@@ -107,7 +110,7 @@\n )\n else:\n if isinstance(config, DictConfig):\n- for key, value in config.items():\n+ for key, value in config.items(resolve=False):\n if key.startswith(word):\n matches.append(str_rep(key, value))\n elif isinstance(config, ListConfig):\n", "issue": "completion bug when completion unresolved values\n`python demos/6_sweep/experiment.py hydra/launcher=fairtask hydra.launcher.params.queue=local hydra.launcher.params.queues.slurm.params.<TAB>`\r\n\r\n->\r\nKeyError: \"str interpolation key 'hydra.job.num_jobs' not found\"\ncompletion bug when completion unresolved values\n`python demos/6_sweep/experiment.py hydra/launcher=fairtask hydra.launcher.params.queue=local hydra.launcher.params.queues.slurm.params.<TAB>`\r\n\r\n->\r\nKeyError: \"str interpolation key 'hydra.job.num_jobs' not found\"\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\nimport sys\nfrom abc import ABCMeta\nfrom abc import abstractmethod\nimport six\nfrom omegaconf import DictConfig, ListConfig, Config\n\nfrom hydra.plugins import Plugin\n\n\[email protected]_metaclass(ABCMeta)\nclass CompletionPlugin(Plugin):\n def __init__(self, config_loader):\n self.config_loader = config_loader\n\n def install(self):\n raise NotImplementedError()\n\n def uninstall(self):\n raise NotImplementedError()\n\n @abstractmethod\n def provides(self):\n \"\"\"\n :return: the name of the shell this plugin provides completion for\n \"\"\"\n return None\n\n def query(self):\n raise NotImplementedError()\n\n @staticmethod\n def _get_filename(fname):\n last = fname.rfind(\"=\")\n if last != -1:\n key_eq = fname[0 : last + 1]\n fname = fname[last + 1 :]\n prefixes = [\".\", \"/\", \"\\\\\", \"./\", \".\\\\\"]\n if sys.platform.startswith(\"win\"):\n for drive in range(ord(\"a\"), ord(\"z\")):\n prefixes.append(\"{}:\".format(chr(drive)))\n\n if not fname:\n return None, None\n lowerfilename = fname.lower()\n for prefix in prefixes:\n if lowerfilename.startswith(prefix):\n return key_eq, fname\n return None, None\n\n @staticmethod\n def complete_files(word):\n if os.path.isdir(word):\n dirname = word\n files = os.listdir(word)\n file_prefix = \"\"\n else:\n dirname = os.path.dirname(word)\n if os.path.isdir(dirname):\n files = os.listdir(dirname)\n else:\n files = []\n file_prefix = os.path.basename(word)\n ret = []\n for file in files:\n if file.startswith(file_prefix):\n ret.append(os.path.join(dirname, file))\n return ret\n\n @staticmethod\n def _get_matches(config, word):\n def str_rep(in_key, in_value):\n if isinstance(in_value, Config):\n return \"{}.\".format(in_key)\n else:\n return \"{}=\".format(in_key)\n\n if config is None:\n return []\n 
elif isinstance(config, Config):\n matches = []\n if word.endswith(\".\") or word.endswith(\"=\"):\n exact_key = word[0:-1]\n conf_node = config.select(exact_key)\n if conf_node is not None:\n if isinstance(conf_node, Config):\n key_matches = CompletionPlugin._get_matches(conf_node, \"\")\n else:\n # primitive\n if isinstance(conf_node, bool):\n conf_node = str(conf_node).lower()\n key_matches = [conf_node]\n else:\n key_matches = []\n\n matches.extend([\"{}{}\".format(word, match) for match in key_matches])\n else:\n last_dot = word.rfind(\".\")\n if last_dot != -1:\n base_key = word[0:last_dot]\n partial_key = word[last_dot + 1 :]\n conf_node = config.select(base_key)\n key_matches = CompletionPlugin._get_matches(conf_node, partial_key)\n matches.extend(\n [\"{}.{}\".format(base_key, match) for match in key_matches]\n )\n else:\n if isinstance(config, DictConfig):\n for key, value in config.items():\n if key.startswith(word):\n matches.append(str_rep(key, value))\n elif isinstance(config, ListConfig):\n for idx, value in enumerate(config):\n if str(idx).startswith(word):\n matches.append(str_rep(idx, value))\n else:\n assert False, \"Object is not an instance of config : {}\".format(\n type(config)\n )\n\n return matches\n\n def _query_config_groups(self, word):\n last_eq_index = word.rfind(\"=\")\n last_slash_index = word.rfind(\"/\")\n exact_match = False\n if last_eq_index != -1:\n parent_group = word[0:last_eq_index]\n file_type = \"file\"\n else:\n file_type = \"dir\"\n if last_slash_index == -1:\n parent_group = \"\"\n else:\n parent_group = word[0:last_slash_index]\n\n all_matched_groups = self.config_loader.get_group_options(\n parent_group, file_type=file_type\n )\n matched_groups = []\n if file_type == \"file\":\n for match in all_matched_groups:\n name = (\n \"{}={}\".format(parent_group, match) if parent_group != \"\" else match\n )\n if name.startswith(word):\n matched_groups.append(name)\n exact_match = True\n elif file_type == \"dir\":\n for match in all_matched_groups:\n name = (\n \"{}/{}\".format(parent_group, match) if parent_group != \"\" else match\n )\n if name.startswith(word):\n files = self.config_loader.get_group_options(name, file_type=\"file\")\n dirs = self.config_loader.get_group_options(name, file_type=\"dir\")\n if len(dirs) == 0 and len(files) > 0:\n name = name + \"=\"\n elif len(dirs) > 0 and len(files) == 0:\n name = name + \"/\"\n matched_groups.append(name)\n\n return matched_groups, exact_match\n\n def _query(self, line):\n from .._internal.utils import get_args\n\n split = line.split(\" \")\n parsed_args = get_args(split)\n args = parsed_args.overrides\n if len(args) > 0:\n word = args[-1]\n words = args[0:-1]\n else:\n word = \"\"\n words = []\n\n config = self.config_loader.load_configuration(words)\n\n fname_prefix, filename = CompletionPlugin._get_filename(word)\n if filename is not None:\n result = CompletionPlugin.complete_files(filename)\n result = [fname_prefix + file for file in result]\n else:\n matched_groups, exact_match = self._query_config_groups(word)\n config_matches = []\n if not exact_match:\n config_matches = CompletionPlugin._get_matches(config, word)\n result = list(set(matched_groups + config_matches))\n\n return sorted(result)\n\n\[email protected]_metaclass(ABCMeta)\nclass DefaultCompletionPlugin(CompletionPlugin):\n \"\"\"\n A concrete instance of CompletionPlugin that is used for testing.\n \"\"\"\n\n def provides(self):\n return None\n", "path": "hydra/plugins/completion_plugin.py"}]} | 2,522 | 284 |
gh_patches_debug_39238 | rasdani/github-patches | git_diff | Qiskit__qiskit-2431 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using multiple SamplePulses with the same name overwritten in Qobj pulse library
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Information
- **Qiskit Terra version**:
- **Python version**:
- **Operating system**:
### What is the current behavior?
In the Qobj specification, pulse are identified by their name. In the pulse module pulses are identified by their object instance and when assembled are stored in the Qobj pulse library by name. If two `SamplePulse`s of the same name exist, the second will overwrite the first, causing undesired behaviour.
### Steps to reproduce the problem
Create a pulse schedule with two different pulses of the same name and assemble into a Qobj.
### What is the expected behavior?
Unique pulses should be stored uniquely in the Qobj pulse library.
### Suggested solutions
If a name collision occurs when building the pulse library. Modify the name of the sample pulse in some consistent manner, and adjust the commands that refer to this pulse.
</issue>
<code>
[start of qiskit/assembler/assemble_schedules.py]
1 # -*- coding: utf-8 -*-
2
3 # This code is part of Qiskit.
4 #
5 # (C) Copyright IBM 2017, 2019.
6 #
7 # This code is licensed under the Apache License, Version 2.0. You may
8 # obtain a copy of this license in the LICENSE.txt file in the root directory
9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
10 #
11 # Any modifications or derivative works of this code must retain this
12 # copyright notice, and modified files need to carry a notice indicating
13 # that they have been altered from the originals.
14
15 """Assemble function for converting a list of circuits into a qobj"""
16 from qiskit.exceptions import QiskitError
17 from qiskit.pulse.commands import PulseInstruction, AcquireInstruction
18 from qiskit.qobj import (PulseQobj, QobjExperimentHeader,
19 PulseQobjInstruction, PulseQobjExperimentConfig,
20 PulseQobjExperiment, PulseQobjConfig, PulseLibraryItem)
21 from qiskit.qobj.converters import InstructionToQobjConverter, LoConfigConverter
22
23
24 def assemble_schedules(schedules, qobj_id, qobj_header, run_config):
25 """Assembles a list of schedules into a qobj which can be run on the backend.
26 Args:
27 schedules (list[Schedule]): schedules to assemble
28 qobj_id (int): identifier for the generated qobj
29 qobj_header (QobjHeader): header to pass to the results
30 run_config (RunConfig): configuration of the runtime environment
31 Returns:
32 PulseQobj: the Qobj to be run on the backends
33 Raises:
34 QiskitError: when invalid schedules or configs are provided
35 """
36 if hasattr(run_config, 'instruction_converter'):
37 instruction_converter = run_config.instruction_converter
38 else:
39 instruction_converter = InstructionToQobjConverter
40
41 qobj_config = run_config.to_dict()
42 qubit_lo_range = qobj_config.pop('qubit_lo_range')
43 meas_lo_range = qobj_config.pop('meas_lo_range')
44 meas_map = qobj_config.pop('meas_map', None)
45 instruction_converter = instruction_converter(PulseQobjInstruction, **qobj_config)
46
47 lo_converter = LoConfigConverter(PulseQobjExperimentConfig, qubit_lo_range=qubit_lo_range,
48 meas_lo_range=meas_lo_range, **qobj_config)
49
50 # Pack everything into the Qobj
51 qobj_schedules = []
52 user_pulselib = set()
53 for idx, schedule in enumerate(schedules):
54 # instructions
55 qobj_instructions = []
56 # Instructions are returned as tuple of shifted time and instruction
57 for shift, instruction in schedule.instructions:
58 # TODO: support conditional gate
59 qobj_instructions.append(instruction_converter(shift, instruction))
60 if isinstance(instruction, PulseInstruction):
61 # add samples to pulse library
62 user_pulselib.add(instruction.command)
63 if isinstance(instruction, AcquireInstruction):
64 if meas_map:
65 # verify all acquires satisfy meas_map
66 _validate_meas_map(instruction, meas_map)
67
68 # experiment header
69 qobj_experiment_header = QobjExperimentHeader(
70 name=schedule.name or 'Experiment-%d' % idx
71 )
72
73 qobj_schedules.append({
74 'header': qobj_experiment_header,
75 'instructions': qobj_instructions
76 })
77
78 # setup pulse_library
79 qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)
80 for pulse in user_pulselib]
81
82 # create qobj experiment field
83 experiments = []
84 schedule_los = qobj_config.pop('schedule_los', [])
85
86 if len(schedule_los) == 1:
87 lo_dict = schedule_los[0]
88 # update global config
89 q_los = lo_converter.get_qubit_los(lo_dict)
90 if q_los:
91 qobj_config['qubit_lo_freq'] = q_los
92 m_los = lo_converter.get_meas_los(lo_dict)
93 if m_los:
94 qobj_config['meas_lo_freq'] = m_los
95
96 if schedule_los:
97 # multiple frequency setups
98 if len(qobj_schedules) == 1:
99 # frequency sweep
100 for lo_dict in schedule_los:
101 experiments.append(PulseQobjExperiment(
102 instructions=qobj_schedules[0]['instructions'],
103 header=qobj_schedules[0]['header'],
104 config=lo_converter(lo_dict)
105 ))
106 elif len(qobj_schedules) == len(schedule_los):
107 # n:n setup
108 for lo_dict, schedule in zip(schedule_los, qobj_schedules):
109 experiments.append(PulseQobjExperiment(
110 instructions=schedule['instructions'],
111 header=schedule['header'],
112 config=lo_converter(lo_dict)
113 ))
114 else:
115 raise QiskitError('Invalid LO setting is specified. '
116 'The LO should be configured for each schedule, or '
117 'single setup for all schedules (unique), or '
118 'multiple setups for a single schedule (frequency sweep),'
119 'or no LO configured at all.')
120 else:
121 # unique frequency setup
122 for schedule in qobj_schedules:
123 experiments.append(PulseQobjExperiment(
124 instructions=schedule['instructions'],
125 header=schedule['header'],
126 ))
127
128 qobj_config = PulseQobjConfig(**qobj_config)
129
130 return PulseQobj(qobj_id=qobj_id,
131 config=qobj_config,
132 experiments=experiments,
133 header=qobj_header)
134
135
136 def _validate_meas_map(acquire, meas_map):
137 """Validate all qubits tied in meas_map are to be acquired."""
138 meas_map_set = [set(m) for m in meas_map]
139 # Verify that each qubit is listed once in measurement map
140 measured_qubits = {acq_ch.index for acq_ch in acquire.acquires}
141 tied_qubits = set()
142 for meas_qubit in measured_qubits:
143 for map_inst in meas_map_set:
144 if meas_qubit in map_inst:
145 tied_qubits |= map_inst
146
147 if measured_qubits != tied_qubits:
148 raise QiskitError('Qubits to be acquired: {0} do not satisfy required qubits '
149 'in measurement map: {1}'.format(measured_qubits, tied_qubits))
150 return True
151
[end of qiskit/assembler/assemble_schedules.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/assembler/assemble_schedules.py b/qiskit/assembler/assemble_schedules.py
--- a/qiskit/assembler/assemble_schedules.py
+++ b/qiskit/assembler/assemble_schedules.py
@@ -14,7 +14,7 @@
"""Assemble function for converting a list of circuits into a qobj"""
from qiskit.exceptions import QiskitError
-from qiskit.pulse.commands import PulseInstruction, AcquireInstruction
+from qiskit.pulse.commands import PulseInstruction, AcquireInstruction, SamplePulse
from qiskit.qobj import (PulseQobj, QobjExperimentHeader,
PulseQobjInstruction, PulseQobjExperimentConfig,
PulseQobjExperiment, PulseQobjConfig, PulseLibraryItem)
@@ -49,21 +49,28 @@
# Pack everything into the Qobj
qobj_schedules = []
- user_pulselib = set()
+ user_pulselib = {}
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
- qobj_instructions.append(instruction_converter(shift, instruction))
if isinstance(instruction, PulseInstruction):
+ name = instruction.command.name
+ if name in user_pulselib and instruction.command != user_pulselib[name]:
+ name = "{0}-{1:x}".format(name, hash(instruction.command.samples.tostring()))
+ instruction = PulseInstruction(
+ command=SamplePulse(name=name, samples=instruction.command.samples),
+ name=instruction.name,
+ channel=instruction.timeslots.channels[0])
# add samples to pulse library
- user_pulselib.add(instruction.command)
+ user_pulselib[name] = instruction.command
if isinstance(instruction, AcquireInstruction):
if meas_map:
# verify all acquires satisfy meas_map
_validate_meas_map(instruction, meas_map)
+ qobj_instructions.append(instruction_converter(shift, instruction))
# experiment header
qobj_experiment_header = QobjExperimentHeader(
@@ -77,7 +84,7 @@
# setup pulse_library
qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)
- for pulse in user_pulselib]
+ for pulse in user_pulselib.values()]
# create qobj experiment field
experiments = []
| {"golden_diff": "diff --git a/qiskit/assembler/assemble_schedules.py b/qiskit/assembler/assemble_schedules.py\n--- a/qiskit/assembler/assemble_schedules.py\n+++ b/qiskit/assembler/assemble_schedules.py\n@@ -14,7 +14,7 @@\n \n \"\"\"Assemble function for converting a list of circuits into a qobj\"\"\"\n from qiskit.exceptions import QiskitError\n-from qiskit.pulse.commands import PulseInstruction, AcquireInstruction\n+from qiskit.pulse.commands import PulseInstruction, AcquireInstruction, SamplePulse\n from qiskit.qobj import (PulseQobj, QobjExperimentHeader,\n PulseQobjInstruction, PulseQobjExperimentConfig,\n PulseQobjExperiment, PulseQobjConfig, PulseLibraryItem)\n@@ -49,21 +49,28 @@\n \n # Pack everything into the Qobj\n qobj_schedules = []\n- user_pulselib = set()\n+ user_pulselib = {}\n for idx, schedule in enumerate(schedules):\n # instructions\n qobj_instructions = []\n # Instructions are returned as tuple of shifted time and instruction\n for shift, instruction in schedule.instructions:\n # TODO: support conditional gate\n- qobj_instructions.append(instruction_converter(shift, instruction))\n if isinstance(instruction, PulseInstruction):\n+ name = instruction.command.name\n+ if name in user_pulselib and instruction.command != user_pulselib[name]:\n+ name = \"{0}-{1:x}\".format(name, hash(instruction.command.samples.tostring()))\n+ instruction = PulseInstruction(\n+ command=SamplePulse(name=name, samples=instruction.command.samples),\n+ name=instruction.name,\n+ channel=instruction.timeslots.channels[0])\n # add samples to pulse library\n- user_pulselib.add(instruction.command)\n+ user_pulselib[name] = instruction.command\n if isinstance(instruction, AcquireInstruction):\n if meas_map:\n # verify all acquires satisfy meas_map\n _validate_meas_map(instruction, meas_map)\n+ qobj_instructions.append(instruction_converter(shift, instruction))\n \n # experiment header\n qobj_experiment_header = QobjExperimentHeader(\n@@ -77,7 +84,7 @@\n \n # setup pulse_library\n qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)\n- for pulse in user_pulselib]\n+ for pulse in user_pulselib.values()]\n \n # create qobj experiment field\n experiments = []\n", "issue": "Using multiple SamplePulses with the same name overwritten in Qobj pulse library\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**:\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\nIn the Qobj specification, pulse are identified by their name. In the pulse module pulses are identified by their object instance and when assembled are stored in the Qobj pulse library by name. If two `SamplePulse`s of the same name exist, the second will overwrite the first, causing undesired behaviour. \r\n\r\n\r\n### Steps to reproduce the problem\r\nCreate a pulse schedule with two different pulses of the same name and assemble into a Qobj.\r\n\r\n\r\n### What is the expected behavior?\r\nUnique pulses should be stored uniquely in the Qobj pulse library.\r\n\r\n\r\n### Suggested solutions\r\nIf a name collision occurs when building the pulse library. Modify the name of the sample pulse in some consistent manner, and adjust the commands that refer to this pulse. 
\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Assemble function for converting a list of circuits into a qobj\"\"\"\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.pulse.commands import PulseInstruction, AcquireInstruction\nfrom qiskit.qobj import (PulseQobj, QobjExperimentHeader,\n PulseQobjInstruction, PulseQobjExperimentConfig,\n PulseQobjExperiment, PulseQobjConfig, PulseLibraryItem)\nfrom qiskit.qobj.converters import InstructionToQobjConverter, LoConfigConverter\n\n\ndef assemble_schedules(schedules, qobj_id, qobj_header, run_config):\n \"\"\"Assembles a list of schedules into a qobj which can be run on the backend.\n Args:\n schedules (list[Schedule]): schedules to assemble\n qobj_id (int): identifier for the generated qobj\n qobj_header (QobjHeader): header to pass to the results\n run_config (RunConfig): configuration of the runtime environment\n Returns:\n PulseQobj: the Qobj to be run on the backends\n Raises:\n QiskitError: when invalid schedules or configs are provided\n \"\"\"\n if hasattr(run_config, 'instruction_converter'):\n instruction_converter = run_config.instruction_converter\n else:\n instruction_converter = InstructionToQobjConverter\n\n qobj_config = run_config.to_dict()\n qubit_lo_range = qobj_config.pop('qubit_lo_range')\n meas_lo_range = qobj_config.pop('meas_lo_range')\n meas_map = qobj_config.pop('meas_map', None)\n instruction_converter = instruction_converter(PulseQobjInstruction, **qobj_config)\n\n lo_converter = LoConfigConverter(PulseQobjExperimentConfig, qubit_lo_range=qubit_lo_range,\n meas_lo_range=meas_lo_range, **qobj_config)\n\n # Pack everything into the Qobj\n qobj_schedules = []\n user_pulselib = set()\n for idx, schedule in enumerate(schedules):\n # instructions\n qobj_instructions = []\n # Instructions are returned as tuple of shifted time and instruction\n for shift, instruction in schedule.instructions:\n # TODO: support conditional gate\n qobj_instructions.append(instruction_converter(shift, instruction))\n if isinstance(instruction, PulseInstruction):\n # add samples to pulse library\n user_pulselib.add(instruction.command)\n if isinstance(instruction, AcquireInstruction):\n if meas_map:\n # verify all acquires satisfy meas_map\n _validate_meas_map(instruction, meas_map)\n\n # experiment header\n qobj_experiment_header = QobjExperimentHeader(\n name=schedule.name or 'Experiment-%d' % idx\n )\n\n qobj_schedules.append({\n 'header': qobj_experiment_header,\n 'instructions': qobj_instructions\n })\n\n # setup pulse_library\n qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)\n for pulse in user_pulselib]\n\n # create qobj experiment field\n experiments = []\n schedule_los = qobj_config.pop('schedule_los', [])\n\n if len(schedule_los) == 1:\n lo_dict = schedule_los[0]\n # update global config\n q_los = lo_converter.get_qubit_los(lo_dict)\n if q_los:\n qobj_config['qubit_lo_freq'] = q_los\n m_los = lo_converter.get_meas_los(lo_dict)\n if m_los:\n qobj_config['meas_lo_freq'] = 
m_los\n\n if schedule_los:\n # multiple frequency setups\n if len(qobj_schedules) == 1:\n # frequency sweep\n for lo_dict in schedule_los:\n experiments.append(PulseQobjExperiment(\n instructions=qobj_schedules[0]['instructions'],\n header=qobj_schedules[0]['header'],\n config=lo_converter(lo_dict)\n ))\n elif len(qobj_schedules) == len(schedule_los):\n # n:n setup\n for lo_dict, schedule in zip(schedule_los, qobj_schedules):\n experiments.append(PulseQobjExperiment(\n instructions=schedule['instructions'],\n header=schedule['header'],\n config=lo_converter(lo_dict)\n ))\n else:\n raise QiskitError('Invalid LO setting is specified. '\n 'The LO should be configured for each schedule, or '\n 'single setup for all schedules (unique), or '\n 'multiple setups for a single schedule (frequency sweep),'\n 'or no LO configured at all.')\n else:\n # unique frequency setup\n for schedule in qobj_schedules:\n experiments.append(PulseQobjExperiment(\n instructions=schedule['instructions'],\n header=schedule['header'],\n ))\n\n qobj_config = PulseQobjConfig(**qobj_config)\n\n return PulseQobj(qobj_id=qobj_id,\n config=qobj_config,\n experiments=experiments,\n header=qobj_header)\n\n\ndef _validate_meas_map(acquire, meas_map):\n \"\"\"Validate all qubits tied in meas_map are to be acquired.\"\"\"\n meas_map_set = [set(m) for m in meas_map]\n # Verify that each qubit is listed once in measurement map\n measured_qubits = {acq_ch.index for acq_ch in acquire.acquires}\n tied_qubits = set()\n for meas_qubit in measured_qubits:\n for map_inst in meas_map_set:\n if meas_qubit in map_inst:\n tied_qubits |= map_inst\n\n if measured_qubits != tied_qubits:\n raise QiskitError('Qubits to be acquired: {0} do not satisfy required qubits '\n 'in measurement map: {1}'.format(measured_qubits, tied_qubits))\n return True\n", "path": "qiskit/assembler/assemble_schedules.py"}]} | 2,498 | 555 |
gh_patches_debug_62436 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3379 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Server error page won't render because of missing context
If a view throws a 500 error, the 500.html should get rendered. We recently updated the template to inherit from base.html, and now it will not render anymore because it is missing context variables (the 500 view is by default passed an empty context).
I'm unsure if we should update the 500 view and add the missing context or if we should go back to not inheriting from base.html for the error views?
</issue>
<code>
[start of app/config/urls/challenge_subdomain.py]
1 from django.conf import settings
2 from django.urls import include, path
3 from django.views.generic import TemplateView
4
5 from grandchallenge.challenges.views import ChallengeUpdate
6
7 urlpatterns = [
8 path(
9 "robots.txt",
10 TemplateView.as_view(
11 template_name="robots.txt", content_type="text/plain"
12 ),
13 name="subdomain_robots_txt",
14 ),
15 path(
16 "evaluation/",
17 include("grandchallenge.evaluation.urls", namespace="evaluation"),
18 ),
19 path("teams/", include("grandchallenge.teams.urls", namespace="teams")),
20 path(
21 "participants/",
22 include("grandchallenge.participants.urls", namespace="participants"),
23 ),
24 path("admins/", include("grandchallenge.admins.urls", namespace="admins")),
25 path("update/", ChallengeUpdate.as_view(), name="challenge-update"),
26 path("summernote/", include("django_summernote.urls")),
27 path("", include("grandchallenge.pages.urls", namespace="pages")),
28 ]
29
30 if settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:
31 import debug_toolbar
32
33 urlpatterns = [
34 path("__debug__/", include(debug_toolbar.urls))
35 ] + urlpatterns
36
[end of app/config/urls/challenge_subdomain.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/config/urls/challenge_subdomain.py b/app/config/urls/challenge_subdomain.py
--- a/app/config/urls/challenge_subdomain.py
+++ b/app/config/urls/challenge_subdomain.py
@@ -4,6 +4,9 @@
from grandchallenge.challenges.views import ChallengeUpdate
+handler500 = "grandchallenge.core.views.handler500"
+
+
urlpatterns = [
path(
"robots.txt",
| {"golden_diff": "diff --git a/app/config/urls/challenge_subdomain.py b/app/config/urls/challenge_subdomain.py\n--- a/app/config/urls/challenge_subdomain.py\n+++ b/app/config/urls/challenge_subdomain.py\n@@ -4,6 +4,9 @@\n \n from grandchallenge.challenges.views import ChallengeUpdate\n \n+handler500 = \"grandchallenge.core.views.handler500\"\n+\n+\n urlpatterns = [\n path(\n \"robots.txt\",\n", "issue": "Server error page won't render because of missing context\nIf a view throws a 500 error, the 500.html should get rendered. We recently updated the template to inherit from base.html, and now it will not render anymore because it is missing context variables (the 500 view is by default passed an empty context). \r\n\r\nI'm unsure if we should update the 500 view and add the missing context or if we should go back to not inheriting from base.html for the error views? \r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.urls import include, path\nfrom django.views.generic import TemplateView\n\nfrom grandchallenge.challenges.views import ChallengeUpdate\n\nurlpatterns = [\n path(\n \"robots.txt\",\n TemplateView.as_view(\n template_name=\"robots.txt\", content_type=\"text/plain\"\n ),\n name=\"subdomain_robots_txt\",\n ),\n path(\n \"evaluation/\",\n include(\"grandchallenge.evaluation.urls\", namespace=\"evaluation\"),\n ),\n path(\"teams/\", include(\"grandchallenge.teams.urls\", namespace=\"teams\")),\n path(\n \"participants/\",\n include(\"grandchallenge.participants.urls\", namespace=\"participants\"),\n ),\n path(\"admins/\", include(\"grandchallenge.admins.urls\", namespace=\"admins\")),\n path(\"update/\", ChallengeUpdate.as_view(), name=\"challenge-update\"),\n path(\"summernote/\", include(\"django_summernote.urls\")),\n path(\"\", include(\"grandchallenge.pages.urls\", namespace=\"pages\")),\n]\n\nif settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR:\n import debug_toolbar\n\n urlpatterns = [\n path(\"__debug__/\", include(debug_toolbar.urls))\n ] + urlpatterns\n", "path": "app/config/urls/challenge_subdomain.py"}]} | 954 | 98 |
gh_patches_debug_35282 | rasdani/github-patches | git_diff | davanstrien__flyswot-547 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Convert documentation to myst
</issue>
<code>
[start of noxfile.py]
1 """Nox sessions."""
2 import shutil
3 import sys
4 from pathlib import Path
5 from textwrap import dedent
6
7 import nox
8 from nox_poetry import Session
9 from nox_poetry import session
10
11
12 package = "flyswot"
13 python_versions = ["3.9", "3.8"]
14 nox.options.sessions = (
15 "pre-commit",
16 "safety",
17 "mypy",
18 "tests",
19 "xdoctest",
20 "docs-build",
21 )
22
23
24 def activate_virtualenv_in_precommit_hooks(session: Session) -> None:
25 """Activate virtualenv in hooks installed by pre-commit.
26
27 This function patches git hooks installed by pre-commit to activate the
28 session's virtual environment. This allows pre-commit to locate hooks in
29 that environment when invoked from git.
30
31 Args:
32 session: The Session object.
33 """
34 if session.bin is None:
35 return
36
37 virtualenv = session.env.get("VIRTUAL_ENV")
38 if virtualenv is None:
39 return
40
41 hookdir = Path(".git") / "hooks"
42 if not hookdir.is_dir():
43 return
44
45 for hook in hookdir.iterdir():
46 if hook.name.endswith(".sample") or not hook.is_file():
47 continue
48
49 text = hook.read_text()
50 bindir = repr(session.bin)[1:-1] # strip quotes
51 if not (
52 Path("A") == Path("a") and bindir.lower() in text.lower() or bindir in text
53 ):
54 continue
55
56 lines = text.splitlines()
57 if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
58 continue
59
60 header = dedent(
61 f"""\
62 import os
63 os.environ["VIRTUAL_ENV"] = {virtualenv!r}
64 os.environ["PATH"] = os.pathsep.join((
65 {session.bin!r},
66 os.environ.get("PATH", ""),
67 ))
68 """
69 )
70
71 lines.insert(1, header)
72 hook.write_text("\n".join(lines))
73
74
75 @session(name="pre-commit", python="3.9")
76 def precommit(session: Session) -> None:
77 """Lint using pre-commit."""
78 args = session.posargs or ["run", "--all-files", "--show-diff-on-failure"]
79 session.install(
80 "black",
81 "darglint",
82 "flake8",
83 "flake8-bandit",
84 "flake8-bugbear",
85 "flake8-docstrings",
86 "flake8-rst-docstrings",
87 "pep8-naming",
88 "pre-commit",
89 "pre-commit-hooks",
90 "reorder-python-imports",
91 )
92 session.run("pre-commit", *args)
93 if args and args[0] == "install":
94 activate_virtualenv_in_precommit_hooks(session)
95
96
97 @session(python="3.9")
98 def safety(session: Session) -> None:
99 """Scan dependencies for insecure packages."""
100 requirements = session.poetry.export_requirements()
101 session.install("safety")
102 session.run(
103 "safety",
104 "check",
105 "--full-report",
106 "-i",
107 "44716",
108 "-i",
109 "44717",
110 "-i",
111 "44715",
112 f"--file={requirements}",
113 )
114
115
116 @session(python=python_versions)
117 def mypy(session: Session) -> None:
118 """Type-check using mypy."""
119 args = session.posargs or ["src", "docs/conf.py"]
120 session.install(".")
121 session.install("mypy", "pytest")
122 session.run("mypy", *args)
123 if not session.posargs:
124 session.run("mypy", f"--python-executable={sys.executable}", "noxfile.py")
125
126
127 @session(python=python_versions)
128 def tests(session: Session) -> None:
129 """Run the test suite."""
130 session.install(".")
131 session.install(
132 "coverage[toml]",
133 "pytest",
134 "pygments",
135 "hypothesis",
136 "pytest-datafiles",
137 "onnxruntime",
138 )
139 try:
140 session.run("coverage", "run", "--parallel", "-m", "pytest", *session.posargs)
141 finally:
142 if session.interactive:
143 session.notify("coverage")
144
145
146 @session
147 def coverage(session: Session) -> None:
148 """Produce the coverage report."""
149 # Do not use session.posargs unless this is the only session.
150 nsessions = len(session._runner.manifest) # type: ignore[attr-defined]
151 has_args = session.posargs and nsessions == 1
152 args = session.posargs if has_args else ["report"]
153
154 session.install("coverage[toml]")
155
156 if not has_args and any(Path().glob(".coverage.*")):
157 session.run("coverage", "combine")
158
159 session.run("coverage", *args)
160
161
162 @session(python=python_versions)
163 def xdoctest(session: Session) -> None:
164 """Run examples with xdoctest."""
165 args = session.posargs or ["all"]
166 session.install(".")
167 session.install("xdoctest[colors]")
168 session.run("python", "-m", "xdoctest", package, *args)
169
170
171 @session(name="docs-build", python="3.8")
172 def docs_build(session: Session) -> None:
173 """Build the documentation."""
174 args = session.posargs or ["docs", "docs/_build"]
175 session.install(".")
176 session.install("sphinx", "sphinx-click", "sphinx-rtd-theme")
177
178 build_dir = Path("docs", "_build")
179 if build_dir.exists():
180 shutil.rmtree(build_dir)
181
182 session.run("sphinx-build", *args)
183
184
185 @session(python="3.8")
186 def docs(session: Session) -> None:
187 """Build and serve the documentation with live reloading on file changes."""
188 args = session.posargs or ["--open-browser", "docs", "docs/_build"]
189 session.install(".")
190 session.install("sphinx", "sphinx-autobuild", "sphinx-click", "sphinx-rtd-theme")
191
192 build_dir = Path("docs", "_build")
193 if build_dir.exists():
194 shutil.rmtree(build_dir)
195
196 session.run("sphinx-autobuild", *args)
197
[end of noxfile.py]
[start of docs/conf.py]
1 """Sphinx configuration."""
2 from datetime import datetime
3
4
5 project = "flyswot"
6 author = "Daniel van Strien"
7 copyright = f"{datetime.now().year}, {author}"
8 extensions = [
9 "sphinx.ext.autodoc",
10 "sphinx.ext.napoleon",
11 "sphinx_click",
12 "sphinx_rtd_theme",
13 ]
14 autodoc_typehints = "description"
15 html_theme = "sphinx_rtd_theme"
16
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,15 +1,12 @@
"""Sphinx configuration."""
-from datetime import datetime
-
-
project = "flyswot"
author = "Daniel van Strien"
-copyright = f"{datetime.now().year}, {author}"
+copyright = "2021, Daniel van Strien"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx_click",
- "sphinx_rtd_theme",
+ "myst_parser",
]
autodoc_typehints = "description"
-html_theme = "sphinx_rtd_theme"
+html_theme = "furo"
diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -1,4 +1,5 @@
"""Nox sessions."""
+import os
import shutil
import sys
from pathlib import Path
@@ -168,12 +169,17 @@
session.run("python", "-m", "xdoctest", package, *args)
-@session(name="docs-build", python="3.8")
+@session(name="docs-build", python=python_versions[1])
def docs_build(session: Session) -> None:
"""Build the documentation."""
- args = session.posargs or ["docs", "docs/_build"]
session.install(".")
- session.install("sphinx", "sphinx-click", "sphinx-rtd-theme")
+ session.install("cogapp")
+ args = ["-r", "README.md"]
+ session.run("cog", *args)
+ args = session.posargs or ["docs", "docs/_build"]
+ if not session.posargs and "FORCE_COLOR" in os.environ:
+ args.insert(0, "--color")
+ session.install("sphinx", "sphinx-click", "furo", "myst-parser")
build_dir = Path("docs", "_build")
if build_dir.exists():
@@ -182,12 +188,12 @@
session.run("sphinx-build", *args)
-@session(python="3.8")
+@session(python=python_versions[0])
def docs(session: Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = session.posargs or ["--open-browser", "docs", "docs/_build"]
session.install(".")
- session.install("sphinx", "sphinx-autobuild", "sphinx-click", "sphinx-rtd-theme")
+ session.install("sphinx", "sphinx-autobuild", "sphinx-click", "furo", "myst-parser")
build_dir = Path("docs", "_build")
if build_dir.exists():
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -1,15 +1,12 @@\n \"\"\"Sphinx configuration.\"\"\"\n-from datetime import datetime\n-\n-\n project = \"flyswot\"\n author = \"Daniel van Strien\"\n-copyright = f\"{datetime.now().year}, {author}\"\n+copyright = \"2021, Daniel van Strien\"\n extensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_click\",\n- \"sphinx_rtd_theme\",\n+ \"myst_parser\",\n ]\n autodoc_typehints = \"description\"\n-html_theme = \"sphinx_rtd_theme\"\n+html_theme = \"furo\"\ndiff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -1,4 +1,5 @@\n \"\"\"Nox sessions.\"\"\"\n+import os\n import shutil\n import sys\n from pathlib import Path\n@@ -168,12 +169,17 @@\n session.run(\"python\", \"-m\", \"xdoctest\", package, *args)\n \n \n-@session(name=\"docs-build\", python=\"3.8\")\n+@session(name=\"docs-build\", python=python_versions[1])\n def docs_build(session: Session) -> None:\n \"\"\"Build the documentation.\"\"\"\n- args = session.posargs or [\"docs\", \"docs/_build\"]\n session.install(\".\")\n- session.install(\"sphinx\", \"sphinx-click\", \"sphinx-rtd-theme\")\n+ session.install(\"cogapp\")\n+ args = [\"-r\", \"README.md\"]\n+ session.run(\"cog\", *args)\n+ args = session.posargs or [\"docs\", \"docs/_build\"]\n+ if not session.posargs and \"FORCE_COLOR\" in os.environ:\n+ args.insert(0, \"--color\")\n+ session.install(\"sphinx\", \"sphinx-click\", \"furo\", \"myst-parser\")\n \n build_dir = Path(\"docs\", \"_build\")\n if build_dir.exists():\n@@ -182,12 +188,12 @@\n session.run(\"sphinx-build\", *args)\n \n \n-@session(python=\"3.8\")\n+@session(python=python_versions[0])\n def docs(session: Session) -> None:\n \"\"\"Build and serve the documentation with live reloading on file changes.\"\"\"\n args = session.posargs or [\"--open-browser\", \"docs\", \"docs/_build\"]\n session.install(\".\")\n- session.install(\"sphinx\", \"sphinx-autobuild\", \"sphinx-click\", \"sphinx-rtd-theme\")\n+ session.install(\"sphinx\", \"sphinx-autobuild\", \"sphinx-click\", \"furo\", \"myst-parser\")\n \n build_dir = Path(\"docs\", \"_build\")\n if build_dir.exists():\n", "issue": "Convert documentation to myst\n\n", "before_files": [{"content": "\"\"\"Nox sessions.\"\"\"\nimport shutil\nimport sys\nfrom pathlib import Path\nfrom textwrap import dedent\n\nimport nox\nfrom nox_poetry import Session\nfrom nox_poetry import session\n\n\npackage = \"flyswot\"\npython_versions = [\"3.9\", \"3.8\"]\nnox.options.sessions = (\n \"pre-commit\",\n \"safety\",\n \"mypy\",\n \"tests\",\n \"xdoctest\",\n \"docs-build\",\n)\n\n\ndef activate_virtualenv_in_precommit_hooks(session: Session) -> None:\n \"\"\"Activate virtualenv in hooks installed by pre-commit.\n\n This function patches git hooks installed by pre-commit to activate the\n session's virtual environment. 
This allows pre-commit to locate hooks in\n that environment when invoked from git.\n\n Args:\n session: The Session object.\n \"\"\"\n if session.bin is None:\n return\n\n virtualenv = session.env.get(\"VIRTUAL_ENV\")\n if virtualenv is None:\n return\n\n hookdir = Path(\".git\") / \"hooks\"\n if not hookdir.is_dir():\n return\n\n for hook in hookdir.iterdir():\n if hook.name.endswith(\".sample\") or not hook.is_file():\n continue\n\n text = hook.read_text()\n bindir = repr(session.bin)[1:-1] # strip quotes\n if not (\n Path(\"A\") == Path(\"a\") and bindir.lower() in text.lower() or bindir in text\n ):\n continue\n\n lines = text.splitlines()\n if not (lines[0].startswith(\"#!\") and \"python\" in lines[0].lower()):\n continue\n\n header = dedent(\n f\"\"\"\\\n import os\n os.environ[\"VIRTUAL_ENV\"] = {virtualenv!r}\n os.environ[\"PATH\"] = os.pathsep.join((\n {session.bin!r},\n os.environ.get(\"PATH\", \"\"),\n ))\n \"\"\"\n )\n\n lines.insert(1, header)\n hook.write_text(\"\\n\".join(lines))\n\n\n@session(name=\"pre-commit\", python=\"3.9\")\ndef precommit(session: Session) -> None:\n \"\"\"Lint using pre-commit.\"\"\"\n args = session.posargs or [\"run\", \"--all-files\", \"--show-diff-on-failure\"]\n session.install(\n \"black\",\n \"darglint\",\n \"flake8\",\n \"flake8-bandit\",\n \"flake8-bugbear\",\n \"flake8-docstrings\",\n \"flake8-rst-docstrings\",\n \"pep8-naming\",\n \"pre-commit\",\n \"pre-commit-hooks\",\n \"reorder-python-imports\",\n )\n session.run(\"pre-commit\", *args)\n if args and args[0] == \"install\":\n activate_virtualenv_in_precommit_hooks(session)\n\n\n@session(python=\"3.9\")\ndef safety(session: Session) -> None:\n \"\"\"Scan dependencies for insecure packages.\"\"\"\n requirements = session.poetry.export_requirements()\n session.install(\"safety\")\n session.run(\n \"safety\",\n \"check\",\n \"--full-report\",\n \"-i\",\n \"44716\",\n \"-i\",\n \"44717\",\n \"-i\",\n \"44715\",\n f\"--file={requirements}\",\n )\n\n\n@session(python=python_versions)\ndef mypy(session: Session) -> None:\n \"\"\"Type-check using mypy.\"\"\"\n args = session.posargs or [\"src\", \"docs/conf.py\"]\n session.install(\".\")\n session.install(\"mypy\", \"pytest\")\n session.run(\"mypy\", *args)\n if not session.posargs:\n session.run(\"mypy\", f\"--python-executable={sys.executable}\", \"noxfile.py\")\n\n\n@session(python=python_versions)\ndef tests(session: Session) -> None:\n \"\"\"Run the test suite.\"\"\"\n session.install(\".\")\n session.install(\n \"coverage[toml]\",\n \"pytest\",\n \"pygments\",\n \"hypothesis\",\n \"pytest-datafiles\",\n \"onnxruntime\",\n )\n try:\n session.run(\"coverage\", \"run\", \"--parallel\", \"-m\", \"pytest\", *session.posargs)\n finally:\n if session.interactive:\n session.notify(\"coverage\")\n\n\n@session\ndef coverage(session: Session) -> None:\n \"\"\"Produce the coverage report.\"\"\"\n # Do not use session.posargs unless this is the only session.\n nsessions = len(session._runner.manifest) # type: ignore[attr-defined]\n has_args = session.posargs and nsessions == 1\n args = session.posargs if has_args else [\"report\"]\n\n session.install(\"coverage[toml]\")\n\n if not has_args and any(Path().glob(\".coverage.*\")):\n session.run(\"coverage\", \"combine\")\n\n session.run(\"coverage\", *args)\n\n\n@session(python=python_versions)\ndef xdoctest(session: Session) -> None:\n \"\"\"Run examples with xdoctest.\"\"\"\n args = session.posargs or [\"all\"]\n session.install(\".\")\n session.install(\"xdoctest[colors]\")\n session.run(\"python\", 
\"-m\", \"xdoctest\", package, *args)\n\n\n@session(name=\"docs-build\", python=\"3.8\")\ndef docs_build(session: Session) -> None:\n \"\"\"Build the documentation.\"\"\"\n args = session.posargs or [\"docs\", \"docs/_build\"]\n session.install(\".\")\n session.install(\"sphinx\", \"sphinx-click\", \"sphinx-rtd-theme\")\n\n build_dir = Path(\"docs\", \"_build\")\n if build_dir.exists():\n shutil.rmtree(build_dir)\n\n session.run(\"sphinx-build\", *args)\n\n\n@session(python=\"3.8\")\ndef docs(session: Session) -> None:\n \"\"\"Build and serve the documentation with live reloading on file changes.\"\"\"\n args = session.posargs or [\"--open-browser\", \"docs\", \"docs/_build\"]\n session.install(\".\")\n session.install(\"sphinx\", \"sphinx-autobuild\", \"sphinx-click\", \"sphinx-rtd-theme\")\n\n build_dir = Path(\"docs\", \"_build\")\n if build_dir.exists():\n shutil.rmtree(build_dir)\n\n session.run(\"sphinx-autobuild\", *args)\n", "path": "noxfile.py"}, {"content": "\"\"\"Sphinx configuration.\"\"\"\nfrom datetime import datetime\n\n\nproject = \"flyswot\"\nauthor = \"Daniel van Strien\"\ncopyright = f\"{datetime.now().year}, {author}\"\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx_click\",\n \"sphinx_rtd_theme\",\n]\nautodoc_typehints = \"description\"\nhtml_theme = \"sphinx_rtd_theme\"\n", "path": "docs/conf.py"}]} | 2,502 | 626 |
gh_patches_debug_22478 | rasdani/github-patches | git_diff | python-pillow__Pillow-3588 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No n_frames, or bad value for n_frames
When I feed the test file flower2.jpg into this code (from #1630)
```python
im = Image.open( fn )
imgcnt = im.n_frames
colors = im.getcolors( im.width * im.height )
if args.hist:
for cnt, col in colors:
allcolors[ col ] += cnt
for iz in range( 1, imgcnt ):
im = Image.open( fn ) # does getcolors implicitly close????
# without the open, get "seek of closed
# file" error on line below.
im.seek( iz )
colors = im.getcolors( im.width * im.height )
for cnt, col in colors:
allcolors[ col ] += cnt
```
I get "AttributeError: n_frames"
But other .jpg files do not get that error. This one, http://nevcal.com/temporary/20151110-105826gl.jpg, has no problem with the attribute error on that line: it reports an n_frames value of 2 and apparently handles the seek OK, but it dies in the second call to getcolors with "OSError: image file is truncated (0 bytes not processed)".
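For reference, the same per-frame tally can be written so the file is opened only once, using `ImageSequence` — a sketch only, assuming `allcolors` is a `collections.Counter` (it is only ever used via `+=` above). Whether this avoids the truncation error is a separate question; it only removes the repeated `Image.open(fn)` calls.
```python
from collections import Counter

from PIL import Image, ImageSequence

allcolors = Counter()
with Image.open(fn) as im:  # `fn` as in the snippet above
    for frame in ImageSequence.Iterator(im):
        # getcolors() with maxcolors = width*height cannot overflow, so it never returns None
        for cnt, col in frame.getcolors(frame.width * frame.height):
            allcolors[col] += cnt
```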
</issue>
<code>
[start of src/PIL/MpoImagePlugin.py]
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # MPO file handling
6 #
7 # See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the
8 # Camera & Imaging Products Association)
9 #
10 # The multi-picture object combines multiple JPEG images (with a modified EXIF
11 # data format) into a single file. While it can theoretically be used much like
12 # a GIF animation, it is commonly used to represent 3D photographs and is (as
13 # of this writing) the most commonly used format by 3D cameras.
14 #
15 # History:
16 # 2014-03-13 Feneric Created
17 #
18 # See the README file for information on usage and redistribution.
19 #
20
21 from . import Image, JpegImagePlugin
22
23 # __version__ is deprecated and will be removed in a future version. Use
24 # PIL.__version__ instead.
25 __version__ = "0.1"
26
27
28 def _accept(prefix):
29 return JpegImagePlugin._accept(prefix)
30
31
32 def _save(im, fp, filename):
33 # Note that we can only save the current frame at present
34 return JpegImagePlugin._save(im, fp, filename)
35
36
37 ##
38 # Image plugin for MPO images.
39
40 class MpoImageFile(JpegImagePlugin.JpegImageFile):
41
42 format = "MPO"
43 format_description = "MPO (CIPA DC-007)"
44 _close_exclusive_fp_after_loading = False
45
46 def _open(self):
47 self.fp.seek(0) # prep the fp in order to pass the JPEG test
48 JpegImagePlugin.JpegImageFile._open(self)
49 self.mpinfo = self._getmp()
50 self.__framecount = self.mpinfo[0xB001]
51 self.__mpoffsets = [mpent['DataOffset'] + self.info['mpoffset']
52 for mpent in self.mpinfo[0xB002]]
53 self.__mpoffsets[0] = 0
54 # Note that the following assertion will only be invalid if something
55 # gets broken within JpegImagePlugin.
56 assert self.__framecount == len(self.__mpoffsets)
57 del self.info['mpoffset'] # no longer needed
58 self.__fp = self.fp # FIXME: hack
59 self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame
60 self.__frame = 0
61 self.offset = 0
62 # for now we can only handle reading and individual frame extraction
63 self.readonly = 1
64
65 def load_seek(self, pos):
66 self.__fp.seek(pos)
67
68 @property
69 def n_frames(self):
70 return self.__framecount
71
72 @property
73 def is_animated(self):
74 return self.__framecount > 1
75
76 def seek(self, frame):
77 if not self._seek_check(frame):
78 return
79 self.fp = self.__fp
80 self.offset = self.__mpoffsets[frame]
81 self.tile = [
82 ("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))
83 ]
84 self.__frame = frame
85
86 def tell(self):
87 return self.__frame
88
89 def _close__fp(self):
90 try:
91 if self.__fp != self.fp:
92 self.__fp.close()
93 except AttributeError:
94 pass
95 finally:
96 self.__fp = None
97
98
99 # ---------------------------------------------------------------------
100 # Registry stuff
101
102 # Note that since MPO shares a factory with JPEG, we do not need to do a
103 # separate registration for it here.
104 # Image.register_open(MpoImageFile.format,
105 # JpegImagePlugin.jpeg_factory, _accept)
106 Image.register_save(MpoImageFile.format, _save)
107
108 Image.register_extension(MpoImageFile.format, ".mpo")
109
110 Image.register_mime(MpoImageFile.format, "image/mpo")
111
[end of src/PIL/MpoImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/PIL/MpoImagePlugin.py b/src/PIL/MpoImagePlugin.py
--- a/src/PIL/MpoImagePlugin.py
+++ b/src/PIL/MpoImagePlugin.py
@@ -18,7 +18,8 @@
# See the README file for information on usage and redistribution.
#
-from . import Image, JpegImagePlugin
+from . import Image, ImageFile, JpegImagePlugin
+from ._binary import i16be as i16
# __version__ is deprecated and will be removed in a future version. Use
# PIL.__version__ instead.
@@ -78,6 +79,20 @@
return
self.fp = self.__fp
self.offset = self.__mpoffsets[frame]
+
+ self.fp.seek(self.offset + 2) # skip SOI marker
+ if "parsed_exif" in self.info:
+ del self.info["parsed_exif"]
+ if i16(self.fp.read(2)) == 0xFFE1: # APP1
+ n = i16(self.fp.read(2))-2
+ self.info["exif"] = ImageFile._safe_read(self.fp, n)
+
+ exif = self._getexif()
+ if 40962 in exif and 40963 in exif:
+ self._size = (exif[40962], exif[40963])
+ elif "exif" in self.info:
+ del self.info["exif"]
+
self.tile = [
("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))
]
| {"golden_diff": "diff --git a/src/PIL/MpoImagePlugin.py b/src/PIL/MpoImagePlugin.py\n--- a/src/PIL/MpoImagePlugin.py\n+++ b/src/PIL/MpoImagePlugin.py\n@@ -18,7 +18,8 @@\n # See the README file for information on usage and redistribution.\n #\n \n-from . import Image, JpegImagePlugin\n+from . import Image, ImageFile, JpegImagePlugin\n+from ._binary import i16be as i16\n \n # __version__ is deprecated and will be removed in a future version. Use\n # PIL.__version__ instead.\n@@ -78,6 +79,20 @@\n return\n self.fp = self.__fp\n self.offset = self.__mpoffsets[frame]\n+\n+ self.fp.seek(self.offset + 2) # skip SOI marker\n+ if \"parsed_exif\" in self.info:\n+ del self.info[\"parsed_exif\"]\n+ if i16(self.fp.read(2)) == 0xFFE1: # APP1\n+ n = i16(self.fp.read(2))-2\n+ self.info[\"exif\"] = ImageFile._safe_read(self.fp, n)\n+\n+ exif = self._getexif()\n+ if 40962 in exif and 40963 in exif:\n+ self._size = (exif[40962], exif[40963])\n+ elif \"exif\" in self.info:\n+ del self.info[\"exif\"]\n+\n self.tile = [\n (\"jpeg\", (0, 0) + self.size, self.offset, (self.mode, \"\"))\n ]\n", "issue": "No n_frames, or bad value for n_frames\nWhen I feed the test file flower2.jpg into this code (from #1630)\r\n\r\n```python\r\nim = Image.open( fn )\r\nimgcnt = im.n_frames\r\ncolors = im.getcolors( im.width * im.height )\r\nif args.hist:\r\n for cnt, col in colors:\r\n allcolors[ col ] += cnt\r\n for iz in range( 1, imgcnt ):\r\n im = Image.open( fn ) # does getcolors implicitly close????\r\n # without the open, get \"seek of closed\r\n # file\" error on line below.\r\n im.seek( iz )\r\n colors = im.getcolors( im.width * im.height )\r\n for cnt, col in colors:\r\n allcolors[ col ] += cnt\r\n```\r\n\r\nI get \"AttributeError: n_frames\"\r\n\r\nBut other .jpg files do not get that error... this one: http://nevcal.com/temporary/20151110-105826gl.jpg has no problem with the attribute error on that line, but it gets a value of 2, apparently handles the seek OK, but dies in the second call to getcolors, with \"OSError: image file is truncated (0 bytes not processed)\".\r\n\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# MPO file handling\n#\n# See \"Multi-Picture Format\" (CIPA DC-007-Translation 2009, Standard of the\n# Camera & Imaging Products Association)\n#\n# The multi-picture object combines multiple JPEG images (with a modified EXIF\n# data format) into a single file. While it can theoretically be used much like\n# a GIF animation, it is commonly used to represent 3D photographs and is (as\n# of this writing) the most commonly used format by 3D cameras.\n#\n# History:\n# 2014-03-13 Feneric Created\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom . import Image, JpegImagePlugin\n\n# __version__ is deprecated and will be removed in a future version. 
Use\n# PIL.__version__ instead.\n__version__ = \"0.1\"\n\n\ndef _accept(prefix):\n return JpegImagePlugin._accept(prefix)\n\n\ndef _save(im, fp, filename):\n # Note that we can only save the current frame at present\n return JpegImagePlugin._save(im, fp, filename)\n\n\n##\n# Image plugin for MPO images.\n\nclass MpoImageFile(JpegImagePlugin.JpegImageFile):\n\n format = \"MPO\"\n format_description = \"MPO (CIPA DC-007)\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n self.fp.seek(0) # prep the fp in order to pass the JPEG test\n JpegImagePlugin.JpegImageFile._open(self)\n self.mpinfo = self._getmp()\n self.__framecount = self.mpinfo[0xB001]\n self.__mpoffsets = [mpent['DataOffset'] + self.info['mpoffset']\n for mpent in self.mpinfo[0xB002]]\n self.__mpoffsets[0] = 0\n # Note that the following assertion will only be invalid if something\n # gets broken within JpegImagePlugin.\n assert self.__framecount == len(self.__mpoffsets)\n del self.info['mpoffset'] # no longer needed\n self.__fp = self.fp # FIXME: hack\n self.__fp.seek(self.__mpoffsets[0]) # get ready to read first frame\n self.__frame = 0\n self.offset = 0\n # for now we can only handle reading and individual frame extraction\n self.readonly = 1\n\n def load_seek(self, pos):\n self.__fp.seek(pos)\n\n @property\n def n_frames(self):\n return self.__framecount\n\n @property\n def is_animated(self):\n return self.__framecount > 1\n\n def seek(self, frame):\n if not self._seek_check(frame):\n return\n self.fp = self.__fp\n self.offset = self.__mpoffsets[frame]\n self.tile = [\n (\"jpeg\", (0, 0) + self.size, self.offset, (self.mode, \"\"))\n ]\n self.__frame = frame\n\n def tell(self):\n return self.__frame\n\n def _close__fp(self):\n try:\n if self.__fp != self.fp:\n self.__fp.close()\n except AttributeError:\n pass\n finally:\n self.__fp = None\n\n\n# ---------------------------------------------------------------------\n# Registry stuff\n\n# Note that since MPO shares a factory with JPEG, we do not need to do a\n# separate registration for it here.\n# Image.register_open(MpoImageFile.format,\n# JpegImagePlugin.jpeg_factory, _accept)\nImage.register_save(MpoImageFile.format, _save)\n\nImage.register_extension(MpoImageFile.format, \".mpo\")\n\nImage.register_mime(MpoImageFile.format, \"image/mpo\")\n", "path": "src/PIL/MpoImagePlugin.py"}]} | 1,886 | 376 |
gh_patches_debug_14630 | rasdani/github-patches | git_diff | vacanza__python-holidays-469 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2 April 2021 in Spain
Hi,
`dt.date(2021, 4, 2) in holidays.ES()` doesn't work: 2 April 2021 (Good Friday, "Viernes Santo") is not reported as a Spanish holiday.
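A minimal reproduction sketch (assuming the released `holidays` package):
```python
import datetime as dt

import holidays

es = holidays.ES(years=[2021])
# Expected: True ("Viernes Santo"); observed with the bug: False
print(dt.date(2021, 4, 2) in es)
```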
</issue>
<code>
[start of holidays/countries/spain.py]
1 # -*- coding: utf-8 -*-
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Author: ryanss <[email protected]> (c) 2014-2017
10 # dr-prodigy <[email protected]> (c) 2017-2021
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 from datetime import date
15
16 from dateutil.easter import easter
17 from dateutil.relativedelta import relativedelta as rd, TH, FR, MO
18 from holidays.constants import (
19 JAN,
20 FEB,
21 MAR,
22 APR,
23 MAY,
24 JUN,
25 JUL,
26 AUG,
27 SEP,
28 OCT,
29 NOV,
30 DEC,
31 )
32 from holidays.constants import SUN
33 from holidays.holiday_base import HolidayBase
34
35
36 class Spain(HolidayBase):
37 PROVINCES = [
38 "AN",
39 "AR",
40 "AS",
41 "CB",
42 "CM",
43 "CL",
44 "CT",
45 "VC",
46 "EX",
47 "GA",
48 "IB",
49 "CN",
50 "MD",
51 "MC",
52 "ML",
53 "NC",
54 "PV",
55 "RI",
56 ]
57
58 def __init__(self, **kwargs):
59 self.country = "ES"
60 self.prov = kwargs.pop("prov", kwargs.pop("state", ""))
61 HolidayBase.__init__(self, **kwargs)
62
63 def _is_observed(self, date_holiday, name_holiday):
64 if self.observed and date_holiday.weekday() == SUN:
65 self[date_holiday + rd(days=+1)] = name_holiday + " (Trasladado)"
66 else:
67 self[date_holiday] = name_holiday
68
69 def _populate(self, year):
70 self._is_observed(date(year, JAN, 1), "AΓ±o nuevo")
71 self._is_observed(date(year, JAN, 6), "EpifanΓa del SeΓ±or")
72
73 if (
74 year < 2015
75 and self.prov
76 and self.prov
77 in [
78 "AR",
79 "CL",
80 "CM",
81 "EX",
82 "GA",
83 "MD",
84 "ML",
85 "MC",
86 "NC",
87 "PV",
88 "VC",
89 ]
90 ):
91 self._is_observed(date(year, MAR, 19), "San JosΓ©")
92 elif (
93 year == 2015
94 and self.prov
95 and self.prov in ["CM", "MD", "ML", "MC", "NC", "PV", "VC"]
96 ):
97 self._is_observed(date(year, MAR, 19), "San JosΓ©")
98 elif (
99 year == 2016
100 and self.prov
101 and self.prov in ["ML", "MC", "PV", "VC"]
102 ):
103 self._is_observed(date(year, MAR, 19), "San JosΓ©")
104 elif year == 2017 and self.prov and self.prov in ["PV"]:
105 self._is_observed(date(year, MAR, 19), "San JosΓ©")
106 elif (
107 2018 <= year <= 2019
108 and self.prov
109 and self.prov in ["GA", "MC", "NC", "PV", "VC"]
110 ):
111 self._is_observed(date(year, MAR, 19), "San JosΓ©")
112 elif (
113 2020 <= year <= 2025
114 and self.prov
115 and self.prov in ["CM", "GA", "MC", "NC", "PV", "VC"]
116 ):
117 self._is_observed(date(year, MAR, 19), "San JosΓ©")
118 if self.prov and self.prov not in ["CT", "VC"]:
119 self[easter(year) + rd(weeks=-1, weekday=TH)] = "Jueves Santo"
120 self[easter(year) + rd(weeks=-1, weekday=FR)] = "Viernes Santo"
121 if self.prov and self.prov in ["CT", "PV", "NC", "VC", "IB", "CM"]:
122 self[easter(year) + rd(weekday=MO)] = "Lunes de Pascua"
123 self._is_observed(date(year, MAY, 1), "DΓa del Trabajador")
124 if self.prov and self.prov in ["CT", "GA", "VC"]:
125 self._is_observed(date(year, JUN, 24), "San Juan")
126 self._is_observed(date(year, AUG, 15), "AsunciΓ³n de la Virgen")
127 self._is_observed(date(year, OCT, 12), "DΓa de la Hispanidad")
128 self._is_observed(date(year, NOV, 1), "Todos los Santos")
129 self._is_observed(
130 date(year, DEC, 6), "DΓa de la ConstituciΓ³n " "EspaΓ±ola"
131 )
132 self._is_observed(date(year, DEC, 8), "La Inmaculada ConcepciΓ³n")
133 self._is_observed(date(year, DEC, 25), "Navidad")
134 if self.prov and self.prov in ["CT", "IB"]:
135 self._is_observed(date(year, DEC, 26), "San Esteban")
136 # Provinces festive day
137 if self.prov:
138 if self.prov == "AN":
139 self._is_observed(date(year, FEB, 28), "DΓa de Andalucia")
140 elif self.prov == "AR":
141 self._is_observed(date(year, APR, 23), "DΓa de San Jorge")
142 elif self.prov == "AS":
143 self._is_observed(date(year, SEP, 8), "DΓa de Asturias")
144 elif self.prov == "CB":
145 self._is_observed(
146 date(year, JUL, 28),
147 "DΓa de las Instituci" "ones de Cantabria",
148 )
149 elif self.prov == "CM":
150 self._is_observed(
151 date(year, MAY, 31), "DΓa de Castilla " "La Mancha"
152 )
153 elif self.prov == "CL":
154 self._is_observed(
155 date(year, APR, 23), "DΓa de Castilla y " "Leon"
156 )
157 elif self.prov == "CT":
158 self._is_observed(
159 date(year, SEP, 11), "DΓa Nacional de " "Catalunya"
160 )
161 elif self.prov == "VC":
162 self._is_observed(
163 date(year, OCT, 9), "DΓa de la Comunidad " "Valenciana"
164 )
165 elif self.prov == "EX":
166 self._is_observed(date(year, SEP, 8), "DΓa de Extremadura")
167 elif self.prov == "GA":
168 self._is_observed(
169 date(year, JUL, 25), "DΓa Nacional de " "Galicia"
170 )
171 elif self.prov == "IB":
172 self._is_observed(
173 date(year, MAR, 1), "DΓa de las Islas " "Baleares"
174 )
175 elif self.prov == "CN":
176 self._is_observed(date(year, MAY, 30), "DΓa de Canarias")
177 elif self.prov == "MD":
178 self._is_observed(
179 date(year, MAY, 2), "DΓa de Comunidad de " "Madrid"
180 )
181 elif self.prov == "MC":
182 self._is_observed(
183 date(year, JUN, 9), "DΓa de la RegiΓ³n de " "Murcia"
184 )
185 elif self.prov == "NC":
186 self._is_observed(date(year, SEP, 27), "DΓa de Navarra")
187 elif self.prov == "PV":
188 self._is_observed(date(year, OCT, 25), "DΓa del PΓ‘is Vasco")
189 elif self.prov == "RI":
190 self._is_observed(date(year, JUN, 9), "DΓa de La Rioja")
191
192
193 class ES(Spain):
194 pass
195
196
197 class ESP(Spain):
198 pass
199
[end of holidays/countries/spain.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/holidays/countries/spain.py b/holidays/countries/spain.py
--- a/holidays/countries/spain.py
+++ b/holidays/countries/spain.py
@@ -117,7 +117,7 @@
self._is_observed(date(year, MAR, 19), "San JosΓ©")
if self.prov and self.prov not in ["CT", "VC"]:
self[easter(year) + rd(weeks=-1, weekday=TH)] = "Jueves Santo"
- self[easter(year) + rd(weeks=-1, weekday=FR)] = "Viernes Santo"
+ self[easter(year) + rd(weeks=-1, weekday=FR)] = "Viernes Santo"
if self.prov and self.prov in ["CT", "PV", "NC", "VC", "IB", "CM"]:
self[easter(year) + rd(weekday=MO)] = "Lunes de Pascua"
self._is_observed(date(year, MAY, 1), "DΓa del Trabajador")
| {"golden_diff": "diff --git a/holidays/countries/spain.py b/holidays/countries/spain.py\n--- a/holidays/countries/spain.py\n+++ b/holidays/countries/spain.py\n@@ -117,7 +117,7 @@\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n if self.prov and self.prov not in [\"CT\", \"VC\"]:\n self[easter(year) + rd(weeks=-1, weekday=TH)] = \"Jueves Santo\"\n- self[easter(year) + rd(weeks=-1, weekday=FR)] = \"Viernes Santo\"\n+ self[easter(year) + rd(weeks=-1, weekday=FR)] = \"Viernes Santo\"\n if self.prov and self.prov in [\"CT\", \"PV\", \"NC\", \"VC\", \"IB\", \"CM\"]:\n self[easter(year) + rd(weekday=MO)] = \"Lunes de Pascua\"\n self._is_observed(date(year, MAY, 1), \"D\u00eda del Trabajador\")\n", "issue": "2 april 2021 in spain\nhi, \r\ndt.date(2021, 4, 2) in holidays.ES() don't work\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2021\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd, TH, FR, MO\nfrom holidays.constants import (\n JAN,\n FEB,\n MAR,\n APR,\n MAY,\n JUN,\n JUL,\n AUG,\n SEP,\n OCT,\n NOV,\n DEC,\n)\nfrom holidays.constants import SUN\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Spain(HolidayBase):\n PROVINCES = [\n \"AN\",\n \"AR\",\n \"AS\",\n \"CB\",\n \"CM\",\n \"CL\",\n \"CT\",\n \"VC\",\n \"EX\",\n \"GA\",\n \"IB\",\n \"CN\",\n \"MD\",\n \"MC\",\n \"ML\",\n \"NC\",\n \"PV\",\n \"RI\",\n ]\n\n def __init__(self, **kwargs):\n self.country = \"ES\"\n self.prov = kwargs.pop(\"prov\", kwargs.pop(\"state\", \"\"))\n HolidayBase.__init__(self, **kwargs)\n\n def _is_observed(self, date_holiday, name_holiday):\n if self.observed and date_holiday.weekday() == SUN:\n self[date_holiday + rd(days=+1)] = name_holiday + \" (Trasladado)\"\n else:\n self[date_holiday] = name_holiday\n\n def _populate(self, year):\n self._is_observed(date(year, JAN, 1), \"A\u00f1o nuevo\")\n self._is_observed(date(year, JAN, 6), \"Epifan\u00eda del Se\u00f1or\")\n\n if (\n year < 2015\n and self.prov\n and self.prov\n in [\n \"AR\",\n \"CL\",\n \"CM\",\n \"EX\",\n \"GA\",\n \"MD\",\n \"ML\",\n \"MC\",\n \"NC\",\n \"PV\",\n \"VC\",\n ]\n ):\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n elif (\n year == 2015\n and self.prov\n and self.prov in [\"CM\", \"MD\", \"ML\", \"MC\", \"NC\", \"PV\", \"VC\"]\n ):\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n elif (\n year == 2016\n and self.prov\n and self.prov in [\"ML\", \"MC\", \"PV\", \"VC\"]\n ):\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n elif year == 2017 and self.prov and self.prov in [\"PV\"]:\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n elif (\n 2018 <= year <= 2019\n and self.prov\n and self.prov in [\"GA\", \"MC\", \"NC\", \"PV\", \"VC\"]\n ):\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n elif (\n 2020 <= year <= 2025\n and self.prov\n and self.prov in [\"CM\", \"GA\", \"MC\", \"NC\", \"PV\", \"VC\"]\n ):\n self._is_observed(date(year, MAR, 19), \"San Jos\u00e9\")\n if self.prov and self.prov not in [\"CT\", \"VC\"]:\n 
self[easter(year) + rd(weeks=-1, weekday=TH)] = \"Jueves Santo\"\n self[easter(year) + rd(weeks=-1, weekday=FR)] = \"Viernes Santo\"\n if self.prov and self.prov in [\"CT\", \"PV\", \"NC\", \"VC\", \"IB\", \"CM\"]:\n self[easter(year) + rd(weekday=MO)] = \"Lunes de Pascua\"\n self._is_observed(date(year, MAY, 1), \"D\u00eda del Trabajador\")\n if self.prov and self.prov in [\"CT\", \"GA\", \"VC\"]:\n self._is_observed(date(year, JUN, 24), \"San Juan\")\n self._is_observed(date(year, AUG, 15), \"Asunci\u00f3n de la Virgen\")\n self._is_observed(date(year, OCT, 12), \"D\u00eda de la Hispanidad\")\n self._is_observed(date(year, NOV, 1), \"Todos los Santos\")\n self._is_observed(\n date(year, DEC, 6), \"D\u00eda de la Constituci\u00f3n \" \"Espa\u00f1ola\"\n )\n self._is_observed(date(year, DEC, 8), \"La Inmaculada Concepci\u00f3n\")\n self._is_observed(date(year, DEC, 25), \"Navidad\")\n if self.prov and self.prov in [\"CT\", \"IB\"]:\n self._is_observed(date(year, DEC, 26), \"San Esteban\")\n # Provinces festive day\n if self.prov:\n if self.prov == \"AN\":\n self._is_observed(date(year, FEB, 28), \"D\u00eda de Andalucia\")\n elif self.prov == \"AR\":\n self._is_observed(date(year, APR, 23), \"D\u00eda de San Jorge\")\n elif self.prov == \"AS\":\n self._is_observed(date(year, SEP, 8), \"D\u00eda de Asturias\")\n elif self.prov == \"CB\":\n self._is_observed(\n date(year, JUL, 28),\n \"D\u00eda de las Instituci\" \"ones de Cantabria\",\n )\n elif self.prov == \"CM\":\n self._is_observed(\n date(year, MAY, 31), \"D\u00eda de Castilla \" \"La Mancha\"\n )\n elif self.prov == \"CL\":\n self._is_observed(\n date(year, APR, 23), \"D\u00eda de Castilla y \" \"Leon\"\n )\n elif self.prov == \"CT\":\n self._is_observed(\n date(year, SEP, 11), \"D\u00eda Nacional de \" \"Catalunya\"\n )\n elif self.prov == \"VC\":\n self._is_observed(\n date(year, OCT, 9), \"D\u00eda de la Comunidad \" \"Valenciana\"\n )\n elif self.prov == \"EX\":\n self._is_observed(date(year, SEP, 8), \"D\u00eda de Extremadura\")\n elif self.prov == \"GA\":\n self._is_observed(\n date(year, JUL, 25), \"D\u00eda Nacional de \" \"Galicia\"\n )\n elif self.prov == \"IB\":\n self._is_observed(\n date(year, MAR, 1), \"D\u00eda de las Islas \" \"Baleares\"\n )\n elif self.prov == \"CN\":\n self._is_observed(date(year, MAY, 30), \"D\u00eda de Canarias\")\n elif self.prov == \"MD\":\n self._is_observed(\n date(year, MAY, 2), \"D\u00eda de Comunidad de \" \"Madrid\"\n )\n elif self.prov == \"MC\":\n self._is_observed(\n date(year, JUN, 9), \"D\u00eda de la Regi\u00f3n de \" \"Murcia\"\n )\n elif self.prov == \"NC\":\n self._is_observed(date(year, SEP, 27), \"D\u00eda de Navarra\")\n elif self.prov == \"PV\":\n self._is_observed(date(year, OCT, 25), \"D\u00eda del P\u00e1is Vasco\")\n elif self.prov == \"RI\":\n self._is_observed(date(year, JUN, 9), \"D\u00eda de La Rioja\")\n\n\nclass ES(Spain):\n pass\n\n\nclass ESP(Spain):\n pass\n", "path": "holidays/countries/spain.py"}]} | 2,964 | 234 |
gh_patches_debug_19004 | rasdani/github-patches | git_diff | ray-project__ray-4043 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tune] PBT (Memory Checkpointing) using TF-Saver is broken
Another issue on the same topic is that the [TensorFlow example](https://github.com/ray-project/ray/blob/master/python/ray/tune/examples/tune_mnist_ray_hyperband.py) does not work correctly with any training scheduler that requires checkpointing. The issue is related (as far as my understanding goes) to the fact that after running trainable.restore(...), Ray deletes the temporary directory containing the TensorFlow checkpoint. The TensorFlow call self.saver.restore(self.sess, path) only declares the restore; it does not restore the model at the point where it is called. The model variables are actually restored when session.run(...) is performed, and that is the moment when variables are loaded from the saved checkpoint into the tf.Graph(). Since the checkpoint directory has already been deleted (in trainable.restore(...)), the actual restore fails.
Why does ray.tune delete the checkpoint directory in trainable.restore(...)?
_Originally posted by @agniszczotka in https://github.com/ray-project/ray/issues/2856#issuecomment-459011993_
[tune] Checkpointing with tensorflow no longer works
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.6.2
- **Python version**: 3.6.7
- **Exact command to reproduce**:
https://github.com/ray-project/ray/blob/master/python/ray/tune/examples/tune_mnist_ray_hyperband.py
### Describe the problem
Saving and restoring only works with single files in the newest version of Ray. TensorFlow stores multiple files during checkpointing, so the TensorFlow example for saving and restoring is broken.
```
def _save(self, checkpoint_dir):
return self.saver.save(
self.sess, checkpoint_dir + "/save", global_step=self.iterations)
def _restore(self, path):
return self.saver.restore(self.sess, path)
```
https://github.com/ray-project/ray/blob/master/python/ray/tune/examples/tune_mnist_ray_hyperband.py
### Source code / logs
ValueError: The returned checkpoint path does not exist: ray_results/2019-02-06_15-56-40yekao5r5/checkpoint_0/save
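One way to adapt the example — a sketch, on the assumption that `_save` may return a picklable dict instead of a single file path, which is what the `ValueError` above is complaining about — is to hand Tune the saver prefix as data, so the two `Trainable` methods from the snippet above would become something like:
```python
def _save(self, checkpoint_dir):
    prefix = self.saver.save(
        self.sess, checkpoint_dir + "/save", global_step=self.iterations)
    # Returning a dict lets Tune store the checkpoint metadata itself
    # instead of validating a single returned file path.
    return {"prefix": prefix}

def _restore(self, ckpt_data):
    return self.saver.restore(self.sess, ckpt_data["prefix"])
```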
</issue>
<code>
[start of python/ray/tune/examples/tune_mnist_ray_hyperband.py]
1 #!/usr/bin/env python
2 #
3 # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 # ==============================================================================
17 """A deep MNIST classifier using convolutional layers.
18 See extensive documentation at
19 https://www.tensorflow.org/get_started/mnist/pros
20 """
21 # Disable linter warnings to maintain consistency with tutorial.
22 # pylint: disable=invalid-name
23 # pylint: disable=g-bad-import-order
24
25 from __future__ import absolute_import
26 from __future__ import division
27 from __future__ import print_function
28
29 import argparse
30 import time
31
32 import ray
33 from ray.tune import grid_search, run_experiments, register_trainable, \
34 Trainable, sample_from
35 from ray.tune.schedulers import HyperBandScheduler
36 from tensorflow.examples.tutorials.mnist import input_data
37
38 import tensorflow as tf
39 import numpy as np
40
41 activation_fn = None # e.g. tf.nn.relu
42
43
44 def setupCNN(x):
45 """setupCNN builds the graph for a deep net for classifying digits.
46 Args:
47 x: an input tensor with the dimensions (N_examples, 784), where 784 is
48 the number of pixels in a standard MNIST image.
49 Returns:
50 A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with
51 values equal to the logits of classifying the digit into one of 10
52 classes (the digits 0-9). keep_prob is a scalar placeholder for the
53 probability of dropout.
54 """
55 # Reshape to use within a convolutional neural net.
56 # Last dimension is for "features" - there is only one here, since images
57 # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.
58 with tf.name_scope('reshape'):
59 x_image = tf.reshape(x, [-1, 28, 28, 1])
60
61 # First convolutional layer - maps one grayscale image to 32 feature maps.
62 with tf.name_scope('conv1'):
63 W_conv1 = weight_variable([5, 5, 1, 32])
64 b_conv1 = bias_variable([32])
65 h_conv1 = activation_fn(conv2d(x_image, W_conv1) + b_conv1)
66
67 # Pooling layer - downsamples by 2X.
68 with tf.name_scope('pool1'):
69 h_pool1 = max_pool_2x2(h_conv1)
70
71 # Second convolutional layer -- maps 32 feature maps to 64.
72 with tf.name_scope('conv2'):
73 W_conv2 = weight_variable([5, 5, 32, 64])
74 b_conv2 = bias_variable([64])
75 h_conv2 = activation_fn(conv2d(h_pool1, W_conv2) + b_conv2)
76
77 # Second pooling layer.
78 with tf.name_scope('pool2'):
79 h_pool2 = max_pool_2x2(h_conv2)
80
81 # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image
82 # is down to 7x7x64 feature maps -- maps this to 1024 features.
83 with tf.name_scope('fc1'):
84 W_fc1 = weight_variable([7 * 7 * 64, 1024])
85 b_fc1 = bias_variable([1024])
86
87 h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
88 h_fc1 = activation_fn(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
89
90 # Dropout - controls the complexity of the model, prevents co-adaptation of
91 # features.
92 with tf.name_scope('dropout'):
93 keep_prob = tf.placeholder(tf.float32)
94 h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
95
96 # Map the 1024 features to 10 classes, one for each digit
97 with tf.name_scope('fc2'):
98 W_fc2 = weight_variable([1024, 10])
99 b_fc2 = bias_variable([10])
100
101 y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
102 return y_conv, keep_prob
103
104
105 def conv2d(x, W):
106 """conv2d returns a 2d convolution layer with full stride."""
107 return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
108
109
110 def max_pool_2x2(x):
111 """max_pool_2x2 downsamples a feature map by 2X."""
112 return tf.nn.max_pool(
113 x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
114
115
116 def weight_variable(shape):
117 """weight_variable generates a weight variable of a given shape."""
118 initial = tf.truncated_normal(shape, stddev=0.1)
119 return tf.Variable(initial)
120
121
122 def bias_variable(shape):
123 """bias_variable generates a bias variable of a given shape."""
124 initial = tf.constant(0.1, shape=shape)
125 return tf.Variable(initial)
126
127
128 class TrainMNIST(Trainable):
129 """Example MNIST trainable."""
130
131 def _setup(self, config):
132 global activation_fn
133
134 self.timestep = 0
135
136 # Import data
137 for _ in range(10):
138 try:
139 self.mnist = input_data.read_data_sets(
140 "/tmp/mnist_ray_demo", one_hot=True)
141 break
142 except Exception as e:
143 print("Error loading data, retrying", e)
144 time.sleep(5)
145
146 assert self.mnist
147
148 self.x = tf.placeholder(tf.float32, [None, 784])
149 self.y_ = tf.placeholder(tf.float32, [None, 10])
150
151 activation_fn = getattr(tf.nn, config['activation'])
152
153 # Build the graph for the deep net
154 y_conv, self.keep_prob = setupCNN(self.x)
155
156 with tf.name_scope('loss'):
157 cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
158 labels=self.y_, logits=y_conv)
159 cross_entropy = tf.reduce_mean(cross_entropy)
160
161 with tf.name_scope('adam_optimizer'):
162 train_step = tf.train.AdamOptimizer(
163 config['learning_rate']).minimize(cross_entropy)
164
165 self.train_step = train_step
166
167 with tf.name_scope('accuracy'):
168 correct_prediction = tf.equal(
169 tf.argmax(y_conv, 1), tf.argmax(self.y_, 1))
170 correct_prediction = tf.cast(correct_prediction, tf.float32)
171 self.accuracy = tf.reduce_mean(correct_prediction)
172
173 self.sess = tf.Session()
174 self.sess.run(tf.global_variables_initializer())
175 self.iterations = 0
176 self.saver = tf.train.Saver()
177
178 def _train(self):
179 for i in range(10):
180 batch = self.mnist.train.next_batch(50)
181 self.sess.run(
182 self.train_step,
183 feed_dict={
184 self.x: batch[0],
185 self.y_: batch[1],
186 self.keep_prob: 0.5
187 })
188
189 batch = self.mnist.train.next_batch(50)
190 train_accuracy = self.sess.run(
191 self.accuracy,
192 feed_dict={
193 self.x: batch[0],
194 self.y_: batch[1],
195 self.keep_prob: 1.0
196 })
197
198 self.iterations += 1
199 return {"mean_accuracy": train_accuracy}
200
201 def _save(self, checkpoint_dir):
202 return self.saver.save(
203 self.sess, checkpoint_dir + "/save", global_step=self.iterations)
204
205 def _restore(self, path):
206 return self.saver.restore(self.sess, path)
207
208
209 # !!! Example of using the ray.tune Python API !!!
210 if __name__ == '__main__':
211 parser = argparse.ArgumentParser()
212 parser.add_argument(
213 '--smoke-test', action='store_true', help='Finish quickly for testing')
214 args, _ = parser.parse_known_args()
215
216 register_trainable("my_class", TrainMNIST)
217 mnist_spec = {
218 'run': 'my_class',
219 'stop': {
220 'mean_accuracy': 0.99,
221 'time_total_s': 600,
222 },
223 'config': {
224 'learning_rate': sample_from(
225 lambda spec: 10**np.random.uniform(-5, -3)),
226 'activation': grid_search(['relu', 'elu', 'tanh']),
227 },
228 "num_samples": 10,
229 }
230
231 if args.smoke_test:
232 mnist_spec['stop']['training_iteration'] = 2
233 mnist_spec['num_samples'] = 2
234
235 ray.init()
236 hyperband = HyperBandScheduler(
237 time_attr="training_iteration", reward_attr="mean_accuracy", max_t=10)
238
239 run_experiments({'mnist_hyperband_test': mnist_spec}, scheduler=hyperband)
240
[end of python/ray/tune/examples/tune_mnist_ray_hyperband.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/tune/examples/tune_mnist_ray_hyperband.py b/python/ray/tune/examples/tune_mnist_ray_hyperband.py
--- a/python/ray/tune/examples/tune_mnist_ray_hyperband.py
+++ b/python/ray/tune/examples/tune_mnist_ray_hyperband.py
@@ -199,11 +199,13 @@
return {"mean_accuracy": train_accuracy}
def _save(self, checkpoint_dir):
- return self.saver.save(
+ prefix = self.saver.save(
self.sess, checkpoint_dir + "/save", global_step=self.iterations)
+ return {"prefix": prefix}
- def _restore(self, path):
- return self.saver.restore(self.sess, path)
+ def _restore(self, ckpt_data):
+ prefix = ckpt_data["prefix"]
+ return self.saver.restore(self.sess, prefix)
# !!! Example of using the ray.tune Python API !!!
@@ -229,7 +231,7 @@
}
if args.smoke_test:
- mnist_spec['stop']['training_iteration'] = 2
+ mnist_spec['stop']['training_iteration'] = 20
mnist_spec['num_samples'] = 2
ray.init()
| {"golden_diff": "diff --git a/python/ray/tune/examples/tune_mnist_ray_hyperband.py b/python/ray/tune/examples/tune_mnist_ray_hyperband.py\n--- a/python/ray/tune/examples/tune_mnist_ray_hyperband.py\n+++ b/python/ray/tune/examples/tune_mnist_ray_hyperband.py\n@@ -199,11 +199,13 @@\n return {\"mean_accuracy\": train_accuracy}\n \n def _save(self, checkpoint_dir):\n- return self.saver.save(\n+ prefix = self.saver.save(\n self.sess, checkpoint_dir + \"/save\", global_step=self.iterations)\n+ return {\"prefix\": prefix}\n \n- def _restore(self, path):\n- return self.saver.restore(self.sess, path)\n+ def _restore(self, ckpt_data):\n+ prefix = ckpt_data[\"prefix\"]\n+ return self.saver.restore(self.sess, prefix)\n \n \n # !!! Example of using the ray.tune Python API !!!\n@@ -229,7 +231,7 @@\n }\n \n if args.smoke_test:\n- mnist_spec['stop']['training_iteration'] = 2\n+ mnist_spec['stop']['training_iteration'] = 20\n mnist_spec['num_samples'] = 2\n \n ray.init()\n", "issue": "[tune] PBT (Memory Checkpointing) using TF-Saver is broken\nAnother issue on the same topic is that [TensorFlow examples](https://github.com/ray-project/ray/blob/master/python/ray/tune/examples/tune_mnist_ray_hyperband.py) does not work correctly for any training scheduler which requires checkpointing. The issue is related (as far as my understanding goes) to the fact that after running trainable.restore(...) ray deletes the temporary directory with the TensorFlow checkpoint. The TensorFlow method self.saver.restore(self.sess, path) is just a declaration of restoring process, and it does not restore model in the place where declared. The restoration of the model variables happens when session.run(...) is performed, and this is the moment when variables are loaded from saved checkpoint to the tf.Graph(). Since checkpoint directory is deleted (in trainable.restore(...) ), the actual process fails.\r\n\r\nWhy ray.tune delete checkpoint directory in trainable.restore(...)?\r\n\r\n_Originally posted by @agniszczotka in https://github.com/ray-project/ray/issues/2856#issuecomment-459011993_\n[tune] Checkpointing with tensorflow no longer works\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\nQuestions about how to use Ray should be asked on\r\n[StackOverflow](https://stackoverflow.com/questions/tagged/ray).\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Ray installed from (source or binary)**: binary\r\n- **Ray version**: 0.6.2\r\n- **Python version**: 3.6.7\r\n- **Exact command to reproduce**:\r\nhttps://github.com/ray-project/ray/blob/master/python/ray/tune/examples/tune_mnist_ray_hyperband.py\r\n\r\n\r\n<!--\r\nYou can obtain the Ray version with\r\n\r\npython -c \"import ray; print(ray.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n\r\nSaving and restoring only works with single files in the newest version for ray. Tensorflow stores multiple files during checkpointing. 
Thus, the tensorflow example for storing and saving is broken.\r\n\r\n```\r\n def _save(self, checkpoint_dir):\r\n return self.saver.save(\r\n self.sess, checkpoint_dir + \"/save\", global_step=self.iterations)\r\n\r\n def _restore(self, path):\r\n return self.saver.restore(self.sess, path)\r\n```\r\nhttps://github.com/ray-project/ray/blob/master/python/ray/tune/examples/tune_mnist_ray_hyperband.py\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\nValueError: The returned checkpoint path does not exist: ray_results/2019-02-06_15-56-40yekao5r5/checkpoint_0/save\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A deep MNIST classifier using convolutional layers.\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/pros\n\"\"\"\n# Disable linter warnings to maintain consistency with tutorial.\n# pylint: disable=invalid-name\n# pylint: disable=g-bad-import-order\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\n\nimport ray\nfrom ray.tune import grid_search, run_experiments, register_trainable, \\\n Trainable, sample_from\nfrom ray.tune.schedulers import HyperBandScheduler\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport tensorflow as tf\nimport numpy as np\n\nactivation_fn = None # e.g. tf.nn.relu\n\n\ndef setupCNN(x):\n \"\"\"setupCNN builds the graph for a deep net for classifying digits.\n Args:\n x: an input tensor with the dimensions (N_examples, 784), where 784 is\n the number of pixels in a standard MNIST image.\n Returns:\n A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with\n values equal to the logits of classifying the digit into one of 10\n classes (the digits 0-9). 
keep_prob is a scalar placeholder for the\n probability of dropout.\n \"\"\"\n # Reshape to use within a convolutional neural net.\n # Last dimension is for \"features\" - there is only one here, since images\n # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc.\n with tf.name_scope('reshape'):\n x_image = tf.reshape(x, [-1, 28, 28, 1])\n\n # First convolutional layer - maps one grayscale image to 32 feature maps.\n with tf.name_scope('conv1'):\n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n h_conv1 = activation_fn(conv2d(x_image, W_conv1) + b_conv1)\n\n # Pooling layer - downsamples by 2X.\n with tf.name_scope('pool1'):\n h_pool1 = max_pool_2x2(h_conv1)\n\n # Second convolutional layer -- maps 32 feature maps to 64.\n with tf.name_scope('conv2'):\n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n h_conv2 = activation_fn(conv2d(h_pool1, W_conv2) + b_conv2)\n\n # Second pooling layer.\n with tf.name_scope('pool2'):\n h_pool2 = max_pool_2x2(h_conv2)\n\n # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image\n # is down to 7x7x64 feature maps -- maps this to 1024 features.\n with tf.name_scope('fc1'):\n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])\n h_fc1 = activation_fn(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n # Dropout - controls the complexity of the model, prevents co-adaptation of\n # features.\n with tf.name_scope('dropout'):\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n # Map the 1024 features to 10 classes, one for each digit\n with tf.name_scope('fc2'):\n W_fc2 = weight_variable([1024, 10])\n b_fc2 = bias_variable([10])\n\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2\n return y_conv, keep_prob\n\n\ndef conv2d(x, W):\n \"\"\"conv2d returns a 2d convolution layer with full stride.\"\"\"\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\n\ndef max_pool_2x2(x):\n \"\"\"max_pool_2x2 downsamples a feature map by 2X.\"\"\"\n return tf.nn.max_pool(\n x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n\ndef weight_variable(shape):\n \"\"\"weight_variable generates a weight variable of a given shape.\"\"\"\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n\ndef bias_variable(shape):\n \"\"\"bias_variable generates a bias variable of a given shape.\"\"\"\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n\nclass TrainMNIST(Trainable):\n \"\"\"Example MNIST trainable.\"\"\"\n\n def _setup(self, config):\n global activation_fn\n\n self.timestep = 0\n\n # Import data\n for _ in range(10):\n try:\n self.mnist = input_data.read_data_sets(\n \"/tmp/mnist_ray_demo\", one_hot=True)\n break\n except Exception as e:\n print(\"Error loading data, retrying\", e)\n time.sleep(5)\n\n assert self.mnist\n\n self.x = tf.placeholder(tf.float32, [None, 784])\n self.y_ = tf.placeholder(tf.float32, [None, 10])\n\n activation_fn = getattr(tf.nn, config['activation'])\n\n # Build the graph for the deep net\n y_conv, self.keep_prob = setupCNN(self.x)\n\n with tf.name_scope('loss'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n labels=self.y_, logits=y_conv)\n cross_entropy = tf.reduce_mean(cross_entropy)\n\n with tf.name_scope('adam_optimizer'):\n train_step = tf.train.AdamOptimizer(\n config['learning_rate']).minimize(cross_entropy)\n\n self.train_step = train_step\n\n with 
tf.name_scope('accuracy'):\n correct_prediction = tf.equal(\n tf.argmax(y_conv, 1), tf.argmax(self.y_, 1))\n correct_prediction = tf.cast(correct_prediction, tf.float32)\n self.accuracy = tf.reduce_mean(correct_prediction)\n\n self.sess = tf.Session()\n self.sess.run(tf.global_variables_initializer())\n self.iterations = 0\n self.saver = tf.train.Saver()\n\n def _train(self):\n for i in range(10):\n batch = self.mnist.train.next_batch(50)\n self.sess.run(\n self.train_step,\n feed_dict={\n self.x: batch[0],\n self.y_: batch[1],\n self.keep_prob: 0.5\n })\n\n batch = self.mnist.train.next_batch(50)\n train_accuracy = self.sess.run(\n self.accuracy,\n feed_dict={\n self.x: batch[0],\n self.y_: batch[1],\n self.keep_prob: 1.0\n })\n\n self.iterations += 1\n return {\"mean_accuracy\": train_accuracy}\n\n def _save(self, checkpoint_dir):\n return self.saver.save(\n self.sess, checkpoint_dir + \"/save\", global_step=self.iterations)\n\n def _restore(self, path):\n return self.saver.restore(self.sess, path)\n\n\n# !!! Example of using the ray.tune Python API !!!\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--smoke-test', action='store_true', help='Finish quickly for testing')\n args, _ = parser.parse_known_args()\n\n register_trainable(\"my_class\", TrainMNIST)\n mnist_spec = {\n 'run': 'my_class',\n 'stop': {\n 'mean_accuracy': 0.99,\n 'time_total_s': 600,\n },\n 'config': {\n 'learning_rate': sample_from(\n lambda spec: 10**np.random.uniform(-5, -3)),\n 'activation': grid_search(['relu', 'elu', 'tanh']),\n },\n \"num_samples\": 10,\n }\n\n if args.smoke_test:\n mnist_spec['stop']['training_iteration'] = 2\n mnist_spec['num_samples'] = 2\n\n ray.init()\n hyperband = HyperBandScheduler(\n time_attr=\"training_iteration\", reward_attr=\"mean_accuracy\", max_t=10)\n\n run_experiments({'mnist_hyperband_test': mnist_spec}, scheduler=hyperband)\n", "path": "python/ray/tune/examples/tune_mnist_ray_hyperband.py"}]} | 3,901 | 281 |
gh_patches_debug_58134 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-382 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Order of poll answer choices mixed up after saving
The order of poll answer choices is mixed up after saving, and restoring the original order is not possible:
*(screenshot: "wahlen" — image link broken)*
</issue>
<code>
[start of apps/polls/models.py]
1 from django.contrib.contenttypes.fields import GenericRelation
2 from django.db import models
3
4 from adhocracy4.comments import models as comment_models
5 from adhocracy4.models.base import UserGeneratedContentModel
6 from adhocracy4.modules import models as module_models
7
8 from . import validators
9
10
11 class Poll(module_models.Item):
12 comments = GenericRelation(comment_models.Comment,
13 related_query_name='poll',
14 object_id_field='object_pk')
15
16
17 class Question(models.Model):
18 label = models.CharField(max_length=255)
19 weight = models.SmallIntegerField()
20
21 poll = models.ForeignKey(
22 'Poll',
23 on_delete=models.CASCADE,
24 related_name='questions'
25 )
26
27 def user_choices_list(self, user):
28 if not user.is_authenticated():
29 return []
30
31 return self.choices\
32 .filter(votes__creator=user)\
33 .values_list('id', flat=True)
34
35 def __str__(self):
36 return self.label
37
38 class Meta:
39 ordering = ['weight']
40
41
42 class ChoiceQuerySet(models.QuerySet):
43
44 def annotate_vote_count(self):
45 return self.annotate(
46 vote_count=models.Count(
47 'votes'
48 )
49 )
50
51
52 class Choice(models.Model):
53 label = models.CharField(max_length=255)
54
55 question = models.ForeignKey(
56 'Question',
57 on_delete=models.CASCADE,
58 related_name='choices',
59 )
60
61 objects = ChoiceQuerySet.as_manager()
62
63 def __str__(self):
64 return '%s @%s' % (self.label, self.question)
65
66
67 class Vote(UserGeneratedContentModel):
68 choice = models.ForeignKey(
69 'Choice',
70 on_delete=models.CASCADE,
71 related_name='votes'
72 )
73
74 def validate_unique(self, exclude=None):
75 super(Vote, self).validate_unique(exclude)
76 validators.single_vote_per_user(self.creator,
77 self.choice.question,
78 self.pk)
79
80 # Make Vote instances behave like items for rule checking
81 @property
82 def module(self):
83 self.choice.question.poll.module
84
85 @property
86 def project(self):
87 return self.module.project
88
89 def __str__(self):
90 return '%s: %s' % (self.creator, self.choice)
91
[end of apps/polls/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/polls/models.py b/apps/polls/models.py
--- a/apps/polls/models.py
+++ b/apps/polls/models.py
@@ -60,6 +60,9 @@
objects = ChoiceQuerySet.as_manager()
+ class Meta:
+ ordering = ['id']
+
def __str__(self):
return '%s @%s' % (self.label, self.question)
| {"golden_diff": "diff --git a/apps/polls/models.py b/apps/polls/models.py\n--- a/apps/polls/models.py\n+++ b/apps/polls/models.py\n@@ -60,6 +60,9 @@\n \n objects = ChoiceQuerySet.as_manager()\n \n+ class Meta:\n+ ordering = ['id']\n+\n def __str__(self):\n return '%s @%s' % (self.label, self.question)\n", "issue": "Order of poll answer choices mixed up after saving\nThe order of poll answer choices is mixed up after saving. Restoring original order is not possible:\r\n\r\n\n", "before_files": [{"content": "from django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\n\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.modules import models as module_models\n\nfrom . import validators\n\n\nclass Poll(module_models.Item):\n comments = GenericRelation(comment_models.Comment,\n related_query_name='poll',\n object_id_field='object_pk')\n\n\nclass Question(models.Model):\n label = models.CharField(max_length=255)\n weight = models.SmallIntegerField()\n\n poll = models.ForeignKey(\n 'Poll',\n on_delete=models.CASCADE,\n related_name='questions'\n )\n\n def user_choices_list(self, user):\n if not user.is_authenticated():\n return []\n\n return self.choices\\\n .filter(votes__creator=user)\\\n .values_list('id', flat=True)\n\n def __str__(self):\n return self.label\n\n class Meta:\n ordering = ['weight']\n\n\nclass ChoiceQuerySet(models.QuerySet):\n\n def annotate_vote_count(self):\n return self.annotate(\n vote_count=models.Count(\n 'votes'\n )\n )\n\n\nclass Choice(models.Model):\n label = models.CharField(max_length=255)\n\n question = models.ForeignKey(\n 'Question',\n on_delete=models.CASCADE,\n related_name='choices',\n )\n\n objects = ChoiceQuerySet.as_manager()\n\n def __str__(self):\n return '%s @%s' % (self.label, self.question)\n\n\nclass Vote(UserGeneratedContentModel):\n choice = models.ForeignKey(\n 'Choice',\n on_delete=models.CASCADE,\n related_name='votes'\n )\n\n def validate_unique(self, exclude=None):\n super(Vote, self).validate_unique(exclude)\n validators.single_vote_per_user(self.creator,\n self.choice.question,\n self.pk)\n\n # Make Vote instances behave like items for rule checking\n @property\n def module(self):\n self.choice.question.poll.module\n\n @property\n def project(self):\n return self.module.project\n\n def __str__(self):\n return '%s: %s' % (self.creator, self.choice)\n", "path": "apps/polls/models.py"}]} | 1,297 | 91 |
gh_patches_debug_11332 | rasdani/github-patches | git_diff | certbot__certbot-5383 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Apache SSL cipher settings are old, no ChaCha20
The Nginx plugin's `options-ssl-nginx.conf` file uses Mozilla's current intermediate SSL cipher configuration.
The Apache plugin probably did too... except it hasn't been updated since 2014: 2faacc1b43786edd5386305f9cffec376b5a5d26
Should Certbot's Apache settings be updated?
The main difference is that the new configuration adds ChaCha20 cipher suites. (It also removes a few things.)
Should this wait until after further documentation/feature improvements in #4830?
If so, how about a stopgap patch to add ChaCha20 without removing anything?
Differences with OpenSSL 1.0.2 or 1.1.0:
* Adds ChaCha20. (Yay!)
* Adds newer 3DES cipher suites with key exchange/signature algorithms other than RSA/RSA. (Probably doesn't matter. Anything that supports ECDHE-ECDSA ought to support a better cipher than 3DES. ) (Edit: Mozilla suggests that EDH-RSA 3DES is useful, though,.)
* Removes AES-CCM. (OpenSSL 1.1. Probably only included inadvertently.)
* Removes Camellia. (Perfectly nice cipher, but everybody uses AES.)
* Removes some static DH and SRP key exchange cipher suites. (Probably only included by accident, mostly or entirely ignored by servers and clients.)
* Changes the order of some things.
</issue>
<code>
[start of certbot-apache/certbot_apache/constants.py]
1 """Apache plugin constants."""
2 import pkg_resources
3
4
5 MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
6 """Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
7
8
9 UPDATED_MOD_SSL_CONF_DIGEST = ".updated-options-ssl-apache-conf-digest.txt"
10 """Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`."""
11
12 ALL_SSL_OPTIONS_HASHES = [
13 '2086bca02db48daf93468332543c60ac6acdb6f0b58c7bfdf578a5d47092f82a',
14 '4844d36c9a0f587172d9fa10f4f1c9518e3bcfa1947379f155e16a70a728c21a',
15 '5a922826719981c0a234b1fbcd495f3213e49d2519e845ea0748ba513044b65b',
16 '4066b90268c03c9ba0201068eaa39abbc02acf9558bb45a788b630eb85dadf27',
17 'f175e2e7c673bd88d0aff8220735f385f916142c44aa83b09f1df88dd4767a88',
18 'cfdd7c18d2025836ea3307399f509cfb1ebf2612c87dd600a65da2a8e2f2797b',
19 ]
20 """SHA256 hashes of the contents of previous versions of all versions of MOD_SSL_CONF_SRC"""
21
22 AUGEAS_LENS_DIR = pkg_resources.resource_filename(
23 "certbot_apache", "augeas_lens")
24 """Path to the Augeas lens directory"""
25
26 REWRITE_HTTPS_ARGS = [
27 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,NE,R=permanent]"]
28 """Apache version<2.3.9 rewrite rule arguments used for redirections to
29 https vhost"""
30
31 REWRITE_HTTPS_ARGS_WITH_END = [
32 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,NE,R=permanent]"]
33 """Apache version >= 2.3.9 rewrite rule arguments used for redirections to
34 https vhost"""
35
36 OLD_REWRITE_HTTPS_ARGS = [
37 ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"],
38 ["^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]]
39
40 HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
41 "\"max-age=31536000\""]
42 """Apache header arguments for HSTS"""
43
44 UIR_ARGS = ["always", "set", "Content-Security-Policy",
45 "upgrade-insecure-requests"]
46
47 HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
48 "Upgrade-Insecure-Requests": UIR_ARGS}
49
[end of certbot-apache/certbot_apache/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/certbot-apache/certbot_apache/constants.py b/certbot-apache/certbot_apache/constants.py
--- a/certbot-apache/certbot_apache/constants.py
+++ b/certbot-apache/certbot_apache/constants.py
@@ -16,6 +16,8 @@
'4066b90268c03c9ba0201068eaa39abbc02acf9558bb45a788b630eb85dadf27',
'f175e2e7c673bd88d0aff8220735f385f916142c44aa83b09f1df88dd4767a88',
'cfdd7c18d2025836ea3307399f509cfb1ebf2612c87dd600a65da2a8e2f2797b',
+ '80720bd171ccdc2e6b917ded340defae66919e4624962396b992b7218a561791',
+ 'c0c022ea6b8a51ecc8f1003d0a04af6c3f2bc1c3ce506b3c2dfc1f11ef931082',
]
"""SHA256 hashes of the contents of previous versions of all versions of MOD_SSL_CONF_SRC"""
| {"golden_diff": "diff --git a/certbot-apache/certbot_apache/constants.py b/certbot-apache/certbot_apache/constants.py\n--- a/certbot-apache/certbot_apache/constants.py\n+++ b/certbot-apache/certbot_apache/constants.py\n@@ -16,6 +16,8 @@\n '4066b90268c03c9ba0201068eaa39abbc02acf9558bb45a788b630eb85dadf27',\n 'f175e2e7c673bd88d0aff8220735f385f916142c44aa83b09f1df88dd4767a88',\n 'cfdd7c18d2025836ea3307399f509cfb1ebf2612c87dd600a65da2a8e2f2797b',\n+ '80720bd171ccdc2e6b917ded340defae66919e4624962396b992b7218a561791',\n+ 'c0c022ea6b8a51ecc8f1003d0a04af6c3f2bc1c3ce506b3c2dfc1f11ef931082',\n ]\n \"\"\"SHA256 hashes of the contents of previous versions of all versions of MOD_SSL_CONF_SRC\"\"\"\n", "issue": "Apache SSL cipher settings are old, no ChaCha20\nThe Nginx plugin's `options-ssl-nginx.conf` file uses Mozilla's current intermediate SSL cipher configuration.\r\n\r\nThe Apache plugin probably did too... except it hasn't been updated since 2014: 2faacc1b43786edd5386305f9cffec376b5a5d26\r\n\r\nShould Certbot's Apache settings be updated?\r\n\r\nThe main difference is that the new configuration adds ChaCha20 cipher suites. (It also removes a few things.)\r\n\r\nShould this wait until after further documentation/feature improvements in #4830?\r\n\r\nIf so, how about a stopgap patch to add ChaCha20 without removing anything?\r\n\r\nDifferences with OpenSSL 1.0.2 or 1.1.0:\r\n\r\n* Adds ChaCha20. (Yay!)\r\n* Adds newer 3DES cipher suites with key exchange/signature algorithms other than RSA/RSA. (Probably doesn't matter. Anything that supports ECDHE-ECDSA ought to support a better cipher than 3DES. ) (Edit: Mozilla suggests that EDH-RSA 3DES is useful, though,.)\r\n* Removes AES-CCM. (OpenSSL 1.1. Probably only included inadvertently.)\r\n* Removes Camellia. (Perfectly nice cipher, but everybody uses AES.)\r\n* Removes some static DH and SRP key exchange cipher suites. 
(Probably only included by accident, mostly or entirely ignored by servers and clients.)\r\n* Changes the order of some things.\n", "before_files": [{"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\n\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\n\nUPDATED_MOD_SSL_CONF_DIGEST = \".updated-options-ssl-apache-conf-digest.txt\"\n\"\"\"Name of the hash of the updated or informed mod_ssl_conf as saved in `IConfig.config_dir`.\"\"\"\n\nALL_SSL_OPTIONS_HASHES = [\n '2086bca02db48daf93468332543c60ac6acdb6f0b58c7bfdf578a5d47092f82a',\n '4844d36c9a0f587172d9fa10f4f1c9518e3bcfa1947379f155e16a70a728c21a',\n '5a922826719981c0a234b1fbcd495f3213e49d2519e845ea0748ba513044b65b',\n '4066b90268c03c9ba0201068eaa39abbc02acf9558bb45a788b630eb85dadf27',\n 'f175e2e7c673bd88d0aff8220735f385f916142c44aa83b09f1df88dd4767a88',\n 'cfdd7c18d2025836ea3307399f509cfb1ebf2612c87dd600a65da2a8e2f2797b',\n]\n\"\"\"SHA256 hashes of the contents of previous versions of all versions of MOD_SSL_CONF_SRC\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"certbot_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,NE,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to\nhttps vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,NE,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nOLD_REWRITE_HTTPS_ARGS = [\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"],\n [\"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]]\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n", "path": "certbot-apache/certbot_apache/constants.py"}]} | 1,747 | 392 |
gh_patches_debug_24509 | rasdani/github-patches | git_diff | searxng__searxng-156 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: google news engine
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
7ef33c91e057defde55cc90d4123f284910723d9
**How did you install SearXNG?**
docker
**How To Reproduce**
`!google_news time` crashes whatever the search language is
**Technical report**
Error
* Error: KeyError
* Percentage: 100
* Parameters: `()`
* File name: `searx/engines/google_news.py:97`
* Function: `request`
* Code: `if lang_info['hl'] == 'en':`
Checker
* simple: `KeyError `,
* lang_fr: `KeyError `,
* lang_en: `KeyError `,
</issue>
<code>
[start of searx/engines/google_news.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Google (News)
4
5 For detailed description of the *REST-full* API see: `Query Parameter
6 Definitions`_. Not all parameters can be appied:
7
8 - num_ : the number of search results is ignored
9 - save_ : is ignored / Google-News results are always *SafeSearch*
10
11 .. _Query Parameter Definitions:
12 https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
13
14 .. _num: https://developers.google.com/custom-search/docs/xml_results#numsp
15 .. _save: https://developers.google.com/custom-search/docs/xml_results#safesp
16
17 """
18
19 # pylint: disable=invalid-name, missing-function-docstring
20
21 import binascii
22 import re
23 from urllib.parse import urlencode
24 from base64 import b64decode
25 from lxml import html
26
27 from searx import logger
28 from searx.utils import (
29 eval_xpath,
30 eval_xpath_list,
31 eval_xpath_getindex,
32 extract_text,
33 )
34
35 # pylint: disable=unused-import
36 from searx.engines.google import (
37 supported_languages_url,
38 _fetch_supported_languages,
39 )
40 # pylint: enable=unused-import
41
42 from searx.engines.google import (
43 get_lang_info,
44 detect_google_sorry,
45 )
46
47 # about
48 about = {
49 "website": 'https://news.google.com',
50 "wikidata_id": 'Q12020',
51 "official_api_documentation": 'https://developers.google.com/custom-search',
52 "use_official_api": False,
53 "require_api_key": False,
54 "results": 'HTML',
55 }
56
57 logger = logger.getChild('google news')
58
59 # compared to other google engines google-news has a different time range
60 # support. The time range is included in the search term.
61 time_range_dict = {
62 'day': 'when:1d',
63 'week': 'when:7d',
64 'month': 'when:1m',
65 'year': 'when:1y',
66 }
67
68 # engine dependent config
69
70 categories = ['news']
71 paging = False
72 use_locale_domain = True
73 time_range_support = True
74
75 # Google-News results are always *SafeSearch*. Option 'safesearch' is set to
76 # False here, otherwise checker will report safesearch-errors::
77 #
78 # safesearch : results are identitical for safesearch=0 and safesearch=2
79 safesearch = False
80
81 def request(query, params):
82 """Google-News search request"""
83
84 lang_info = get_lang_info(
85 # pylint: disable=undefined-variable
86 params, supported_languages, language_aliases, False
87 )
88 logger.debug(
89 "HTTP header Accept-Language --> %s", lang_info['headers']['Accept-Language'])
90
91 # google news has only one domain
92 lang_info['subdomain'] = 'news.google.com'
93
94 ceid = "%s:%s" % (lang_info['country'], lang_info['language'])
95
96 # google news redirects en to en-US
97 if lang_info['hl'] == 'en':
98 lang_info['hl'] = 'en-US'
99
100 # Very special to google-news compared to other google engines, the time
101 # range is included in the search term.
102 if params['time_range']:
103 query += ' ' + time_range_dict[params['time_range']]
104
105 query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
106 'q': query,
107 **lang_info['params'],
108 'ie': "utf8",
109 'oe': "utf8",
110 'gl': lang_info['country'],
111 }) + ('&ceid=%s' % ceid) # ceid includes a ':' character which must not be urlencoded
112 params['url'] = query_url
113
114 params['headers'].update(lang_info['headers'])
115 params['headers']['Accept'] = (
116 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
117 )
118
119 return params
120
121
122 def response(resp):
123 """Get response from google's search request"""
124 results = []
125
126 detect_google_sorry(resp)
127
128 # convert the text to dom
129 dom = html.fromstring(resp.text)
130
131 for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'):
132
133 # The first <a> tag in the <article> contains the link to the
134 # article The href attribute of the <a> is a google internal link,
135 # we can't use. The real link is hidden in the jslog attribute:
136 #
137 # <a ...
138 # jslog="95014; 4:https://www.cnn.com/.../index.html; track:click"
139 # href="./articles/CAIiENu3nGS...?hl=en-US&gl=US&ceid=US%3Aen"
140 # ... />
141
142 jslog = eval_xpath_getindex(result, './article/a/@jslog', 0)
143 url = re.findall('http[^;]*', jslog)
144 if url:
145 url = url[0]
146 else:
147 # The real URL is base64 encoded in the json attribute:
148 # jslog="95014; 5:W251bGwsbnVsbCxudW...giXQ==; track:click"
149 jslog = jslog.split(";")[1].split(':')[1].strip()
150 try:
151 padding = (4 -(len(jslog) % 4)) * "="
152 jslog = b64decode(jslog + padding)
153 except binascii.Error:
154 # URL cant be read, skip this result
155 continue
156
157 # now we have : b'[null, ... null,"https://www.cnn.com/.../index.html"]'
158 url = re.findall('http[^;"]*', str(jslog))[0]
159
160 # the first <h3> tag in the <article> contains the title of the link
161 title = extract_text(eval_xpath(result, './article/h3[1]'))
162
163 # the first <div> tag in the <article> contains the content of the link
164 content = extract_text(eval_xpath(result, './article/div[1]'))
165
166 # the second <div> tag contains origin publisher and the publishing date
167
168 pub_date = extract_text(eval_xpath(result, './article/div[2]//time'))
169 pub_origin = extract_text(eval_xpath(result, './article/div[2]//a'))
170
171 pub_info = []
172 if pub_origin:
173 pub_info.append(pub_origin)
174 if pub_date:
175 # The pub_date is mostly a string like 'yesertday', not a real
176 # timezone date or time. Therefore we can't use publishedDate.
177 pub_info.append(pub_date)
178 pub_info = ', '.join(pub_info)
179 if pub_info:
180 content = pub_info + ': ' + content
181
182 # The image URL is located in a preceding sibling <img> tag, e.g.:
183 # "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100"
184 # These URL are long but not personalized (double checked via tor).
185
186 img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))
187
188 results.append({
189 'url': url,
190 'title': title,
191 'content': content,
192 'img_src': img_src,
193 })
194
195 # return results
196 return results
197
[end of searx/engines/google_news.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -19,6 +19,7 @@
# pylint: disable=invalid-name, missing-function-docstring
import binascii
+from datetime import datetime
import re
from urllib.parse import urlencode
from base64 import b64decode
@@ -94,8 +95,8 @@
ceid = "%s:%s" % (lang_info['country'], lang_info['language'])
# google news redirects en to en-US
- if lang_info['hl'] == 'en':
- lang_info['hl'] = 'en-US'
+ if lang_info['params']['hl'] == 'en':
+ lang_info['params']['hl'] = 'en-US'
# Very special to google-news compared to other google engines, the time
# range is included in the search term.
@@ -115,6 +116,7 @@
params['headers']['Accept'] = (
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
)
+ params['headers']['Cookie'] = "CONSENT=YES+cb.%s-14-p0.en+F+941;" % datetime.now().strftime("%Y%m%d")
return params
| {"golden_diff": "diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py\n--- a/searx/engines/google_news.py\n+++ b/searx/engines/google_news.py\n@@ -19,6 +19,7 @@\n # pylint: disable=invalid-name, missing-function-docstring\n \n import binascii\n+from datetime import datetime\n import re\n from urllib.parse import urlencode\n from base64 import b64decode\n@@ -94,8 +95,8 @@\n ceid = \"%s:%s\" % (lang_info['country'], lang_info['language'])\n \n # google news redirects en to en-US\n- if lang_info['hl'] == 'en':\n- lang_info['hl'] = 'en-US'\n+ if lang_info['params']['hl'] == 'en':\n+ lang_info['params']['hl'] = 'en-US'\n \n # Very special to google-news compared to other google engines, the time\n # range is included in the search term.\n@@ -115,6 +116,7 @@\n params['headers']['Accept'] = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n )\n+ params['headers']['Cookie'] = \"CONSENT=YES+cb.%s-14-p0.en+F+941;\" % datetime.now().strftime(\"%Y%m%d\")\n \n return params\n", "issue": "Bug: google news engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n\r\n7ef33c91e057defde55cc90d4123f284910723d9\r\n\r\n**How did you install SearXNG?**\r\ndocker\r\n\r\n**How To Reproduce**\r\n`!google_news time` crashes whatever the search language is\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: KeyError\r\n * Percentage: 100\r\n * Parameters: `()`\r\n * File name: `searx/engines/google_news.py:97`\r\n * Function: `request`\r\n * Code: `if lang_info['hl'] == 'en':`\r\n\r\nChecker\r\n * simple: `KeyError `,\r\n * lang_fr: `KeyError `,\r\n * lang_en: `KeyError `,\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Google (News)\n\nFor detailed description of the *REST-full* API see: `Query Parameter\nDefinitions`_. Not all parameters can be appied:\n\n- num_ : the number of search results is ignored\n- save_ : is ignored / Google-News results are always *SafeSearch*\n\n.. _Query Parameter Definitions:\n https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions\n\n.. _num: https://developers.google.com/custom-search/docs/xml_results#numsp\n.. _save: https://developers.google.com/custom-search/docs/xml_results#safesp\n\n\"\"\"\n\n# pylint: disable=invalid-name, missing-function-docstring\n\nimport binascii\nimport re\nfrom urllib.parse import urlencode\nfrom base64 import b64decode\nfrom lxml import html\n\nfrom searx import logger\nfrom searx.utils import (\n eval_xpath,\n eval_xpath_list,\n eval_xpath_getindex,\n extract_text,\n)\n\n# pylint: disable=unused-import\nfrom searx.engines.google import (\n supported_languages_url,\n _fetch_supported_languages,\n)\n# pylint: enable=unused-import\n\nfrom searx.engines.google import (\n get_lang_info,\n detect_google_sorry,\n)\n\n# about\nabout = {\n \"website\": 'https://news.google.com',\n \"wikidata_id\": 'Q12020',\n \"official_api_documentation\": 'https://developers.google.com/custom-search',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\nlogger = logger.getChild('google news')\n\n# compared to other google engines google-news has a different time range\n# support. 
The time range is included in the search term.\ntime_range_dict = {\n 'day': 'when:1d',\n 'week': 'when:7d',\n 'month': 'when:1m',\n 'year': 'when:1y',\n}\n\n# engine dependent config\n\ncategories = ['news']\npaging = False\nuse_locale_domain = True\ntime_range_support = True\n\n# Google-News results are always *SafeSearch*. Option 'safesearch' is set to\n# False here, otherwise checker will report safesearch-errors::\n#\n# safesearch : results are identitical for safesearch=0 and safesearch=2\nsafesearch = False\n\ndef request(query, params):\n \"\"\"Google-News search request\"\"\"\n\n lang_info = get_lang_info(\n # pylint: disable=undefined-variable\n params, supported_languages, language_aliases, False\n )\n logger.debug(\n \"HTTP header Accept-Language --> %s\", lang_info['headers']['Accept-Language'])\n\n # google news has only one domain\n lang_info['subdomain'] = 'news.google.com'\n\n ceid = \"%s:%s\" % (lang_info['country'], lang_info['language'])\n\n # google news redirects en to en-US\n if lang_info['hl'] == 'en':\n lang_info['hl'] = 'en-US'\n\n # Very special to google-news compared to other google engines, the time\n # range is included in the search term.\n if params['time_range']:\n query += ' ' + time_range_dict[params['time_range']]\n\n query_url = 'https://' + lang_info['subdomain'] + '/search' + \"?\" + urlencode({\n 'q': query,\n **lang_info['params'],\n 'ie': \"utf8\",\n 'oe': \"utf8\",\n 'gl': lang_info['country'],\n }) + ('&ceid=%s' % ceid) # ceid includes a ':' character which must not be urlencoded\n params['url'] = query_url\n\n params['headers'].update(lang_info['headers'])\n params['headers']['Accept'] = (\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n )\n\n return params\n\n\ndef response(resp):\n \"\"\"Get response from google's search request\"\"\"\n results = []\n\n detect_google_sorry(resp)\n\n # convert the text to dom\n dom = html.fromstring(resp.text)\n\n for result in eval_xpath_list(dom, '//div[@class=\"xrnccd\"]'):\n\n # The first <a> tag in the <article> contains the link to the\n # article The href attribute of the <a> is a google internal link,\n # we can't use. The real link is hidden in the jslog attribute:\n #\n # <a ...\n # jslog=\"95014; 4:https://www.cnn.com/.../index.html; track:click\"\n # href=\"./articles/CAIiENu3nGS...?hl=en-US&gl=US&ceid=US%3Aen\"\n # ... />\n\n jslog = eval_xpath_getindex(result, './article/a/@jslog', 0)\n url = re.findall('http[^;]*', jslog)\n if url:\n url = url[0]\n else:\n # The real URL is base64 encoded in the json attribute:\n # jslog=\"95014; 5:W251bGwsbnVsbCxudW...giXQ==; track:click\"\n jslog = jslog.split(\";\")[1].split(':')[1].strip()\n try:\n padding = (4 -(len(jslog) % 4)) * \"=\"\n jslog = b64decode(jslog + padding)\n except binascii.Error:\n # URL cant be read, skip this result\n continue\n\n # now we have : b'[null, ... 
null,\"https://www.cnn.com/.../index.html\"]'\n url = re.findall('http[^;\"]*', str(jslog))[0]\n\n # the first <h3> tag in the <article> contains the title of the link\n title = extract_text(eval_xpath(result, './article/h3[1]'))\n\n # the first <div> tag in the <article> contains the content of the link\n content = extract_text(eval_xpath(result, './article/div[1]'))\n\n # the second <div> tag contains origin publisher and the publishing date\n\n pub_date = extract_text(eval_xpath(result, './article/div[2]//time'))\n pub_origin = extract_text(eval_xpath(result, './article/div[2]//a'))\n\n pub_info = []\n if pub_origin:\n pub_info.append(pub_origin)\n if pub_date:\n # The pub_date is mostly a string like 'yesertday', not a real\n # timezone date or time. Therefore we can't use publishedDate.\n pub_info.append(pub_date)\n pub_info = ', '.join(pub_info)\n if pub_info:\n content = pub_info + ': ' + content\n\n # The image URL is located in a preceding sibling <img> tag, e.g.:\n # \"https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100\"\n # These URL are long but not personalized (double checked via tor).\n\n img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))\n\n results.append({\n 'url': url,\n 'title': title,\n 'content': content,\n 'img_src': img_src,\n })\n\n # return results\n return results\n", "path": "searx/engines/google_news.py"}]} | 2,898 | 320 |
gh_patches_debug_12635 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'set_logger_provider' no longer importable from 'opentelemetry.sdk._logs'
A week ago I was able to run the example for logging in https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples/logs.
**Steps to reproduce**
I install either version via:
```bash
pip install opentelemetry-exporter-otlp==1.15.0
```
or
```bash
pip install opentelemetry-exporter-otlp==1.14.0
```
Then run the following code:
```python
from opentelemetry.sdk._logs import set_logger_provider
```
**What is the expected behavior?**
Should import without any issues.
**What is the actual behavior?**
```python
ImportError: cannot import name 'set_logger_provider' from 'opentelemetry.sdk._logs' (/usr/local/lib/python3.10/dist-packages/opentelemetry/sdk/_logs/__init__.py)
```
**Additional context**
I found that when I installed `opentelemetry-exporter-otlp==1.14.0`, `opentelemetry-sdk==1.15.0` was installed as well. When I installed `opentelemetry-sdk==1.14.0`, everything works fine again.
</issue>
<code>
[start of docs/examples/logs/example.py]
1 import logging
2
3 from opentelemetry import trace
4 from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
5 OTLPLogExporter,
6 )
7 from opentelemetry.sdk._logs import (
8 LoggerProvider,
9 LoggingHandler,
10 set_logger_provider,
11 )
12 from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
13 from opentelemetry.sdk.resources import Resource
14 from opentelemetry.sdk.trace import TracerProvider
15 from opentelemetry.sdk.trace.export import (
16 BatchSpanProcessor,
17 ConsoleSpanExporter,
18 )
19
20 trace.set_tracer_provider(TracerProvider())
21 trace.get_tracer_provider().add_span_processor(
22 BatchSpanProcessor(ConsoleSpanExporter())
23 )
24
25 logger_provider = LoggerProvider(
26 resource=Resource.create(
27 {
28 "service.name": "shoppingcart",
29 "service.instance.id": "instance-12",
30 }
31 ),
32 )
33 set_logger_provider(logger_provider)
34
35 exporter = OTLPLogExporter(insecure=True)
36 logger_provider.add_log_record_processor(BatchLogRecordProcessor(exporter))
37 handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider)
38
39 # Attach OTLP handler to root logger
40 logging.getLogger().addHandler(handler)
41
42 # Log directly
43 logging.info("Jackdaws love my big sphinx of quartz.")
44
45 # Create different namespaced loggers
46 logger1 = logging.getLogger("myapp.area1")
47 logger2 = logging.getLogger("myapp.area2")
48
49 logger1.debug("Quick zephyrs blow, vexing daft Jim.")
50 logger1.info("How quickly daft jumping zebras vex.")
51 logger2.warning("Jail zesty vixen who grabbed pay from quack.")
52 logger2.error("The five boxing wizards jump quickly.")
53
54
55 # Trace context correlation
56 tracer = trace.get_tracer(__name__)
57 with tracer.start_as_current_span("foo"):
58 # Do something
59 logger2.error("Hyderabad, we have a major problem.")
60
61 logger_provider.shutdown()
62
[end of docs/examples/logs/example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/logs/example.py b/docs/examples/logs/example.py
--- a/docs/examples/logs/example.py
+++ b/docs/examples/logs/example.py
@@ -1,14 +1,11 @@
import logging
from opentelemetry import trace
+from opentelemetry._logs import set_logger_provider
from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
OTLPLogExporter,
)
-from opentelemetry.sdk._logs import (
- LoggerProvider,
- LoggingHandler,
- set_logger_provider,
-)
+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
| {"golden_diff": "diff --git a/docs/examples/logs/example.py b/docs/examples/logs/example.py\n--- a/docs/examples/logs/example.py\n+++ b/docs/examples/logs/example.py\n@@ -1,14 +1,11 @@\n import logging\n \n from opentelemetry import trace\n+from opentelemetry._logs import set_logger_provider\n from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (\n OTLPLogExporter,\n )\n-from opentelemetry.sdk._logs import (\n- LoggerProvider,\n- LoggingHandler,\n- set_logger_provider,\n-)\n+from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler\n from opentelemetry.sdk._logs.export import BatchLogRecordProcessor\n from opentelemetry.sdk.resources import Resource\n from opentelemetry.sdk.trace import TracerProvider\n", "issue": "'set_logger_provider' no longer importable from 'opentelemetry.sdk._logs'\nA week ago I was able to run the example for logging in https://github.com/open-telemetry/opentelemetry-python/tree/main/docs/examples/logs.\r\n\r\n**Steps to reproduce**\r\nI install either version via:\r\n```bash\r\npip install opentelemetry-exporter-otlp==1.15.0\r\n```\r\nor\r\n```bash\r\npip install opentelemetry-exporter-otlp==1.14.0\r\n```\r\nThen run the following code:\r\n```python\r\nfrom opentelemetry.sdk._logs import set_logger_provider\r\n```\r\n\r\n**What is the expected behavior?**\r\nShould import without any issues.\r\n\r\n**What is the actual behavior?**\r\n```python\r\nImportError: cannot import name 'set_logger_provider' from 'opentelemetry.sdk._logs' (/usr/local/lib/python3.10/dist-packages/opentelemetry/sdk/_logs/__init__.py)\r\n```\r\n\r\n\r\n**Additional context**\r\nI found that when I installed `opentelemetry-exporter-otlp==1.14.0`, `opentelemetry-sdk==1.15.0` was installed as well. When I installed `opentelemetry-sdk==1.14.0`, everything works fine again.\r\n\n", "before_files": [{"content": "import logging\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc._log_exporter import (\n OTLPLogExporter,\n)\nfrom opentelemetry.sdk._logs import (\n LoggerProvider,\n LoggingHandler,\n set_logger_provider,\n)\nfrom opentelemetry.sdk._logs.export import BatchLogRecordProcessor\nfrom opentelemetry.sdk.resources import Resource\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n BatchSpanProcessor,\n ConsoleSpanExporter,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntrace.get_tracer_provider().add_span_processor(\n BatchSpanProcessor(ConsoleSpanExporter())\n)\n\nlogger_provider = LoggerProvider(\n resource=Resource.create(\n {\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }\n ),\n)\nset_logger_provider(logger_provider)\n\nexporter = OTLPLogExporter(insecure=True)\nlogger_provider.add_log_record_processor(BatchLogRecordProcessor(exporter))\nhandler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider)\n\n# Attach OTLP handler to root logger\nlogging.getLogger().addHandler(handler)\n\n# Log directly\nlogging.info(\"Jackdaws love my big sphinx of quartz.\")\n\n# Create different namespaced loggers\nlogger1 = logging.getLogger(\"myapp.area1\")\nlogger2 = logging.getLogger(\"myapp.area2\")\n\nlogger1.debug(\"Quick zephyrs blow, vexing daft Jim.\")\nlogger1.info(\"How quickly daft jumping zebras vex.\")\nlogger2.warning(\"Jail zesty vixen who grabbed pay from quack.\")\nlogger2.error(\"The five boxing wizards jump quickly.\")\n\n\n# Trace context correlation\ntracer = trace.get_tracer(__name__)\nwith 
tracer.start_as_current_span(\"foo\"):\n # Do something\n logger2.error(\"Hyderabad, we have a major problem.\")\n\nlogger_provider.shutdown()\n", "path": "docs/examples/logs/example.py"}]} | 1,322 | 165 |
gh_patches_debug_31186 | rasdani/github-patches | git_diff | facebookresearch__habitat-lab-272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix contains method in habitat space classes
## π Bug
The `contains` method for the `EmptySpace`, `ActionSpace`, and `ListSpace` classes in `habitat.core.spaces` all contain errors. Specifically:
1. `EmptySpace.contains(x)` should return `True` if `x is None`.
2. `ActionSpace.contains(x)` improperly handles invalid keys and invalid `x["action"]` values.
3. `ListSpace.contains(x)` improperly checks the length of `x`.
## Command
`EmptySpace.contains()`
`ActionSpace.contains()`
`ListSpace.contains()`
## To Reproduce
Code to reproduce the behavior:
```python
>>> import gym
>>> from habitat.core.spaces import EmptySpace, ActionSpace, ListSpace
>>>
>>> space = EmptySpace()
>>> print(space.contains(None)) # should be True
False
>>>
>>> space = ActionSpace({
... "move": gym.spaces.Dict({
... "position": gym.spaces.Discrete(2),
... "velocity": gym.spaces.Discrete(3)
... }),
... "move_forward": EmptySpace(),
... })
>>> space.contains({'action': 'move'}) # should be False
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/mnt/habitat/habitat-api/habitat/core/spaces.py", line 64, in contains
if not self.spaces[x["action"]].contains(x["action_args"]):
KeyError: 'action_args'
>>>
>>> space.contains({'action': None}) # should be false
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/mnt/habitat/habitat-api/habitat/core/spaces.py", line 64, in contains
if not self.spaces[x["action"]].contains(x["action_args"]):
KeyError: None
>>>
>>> space = ListSpace(gym.spaces.Discrete(2), 5, 10)
>>> print(space.contains([0, 1, 0, 1])) # should be True
False
```
## Expected behavior
The `contains` methods should properly return `True`, `False`, `False`, and `True` in the four cases above, respectively.
## Additional context
There is also an error in example code in the docs for the `ActionSpace` class.
</issue>
<code>
[start of habitat/core/spaces.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 from collections import OrderedDict
8 from typing import Sized
9
10 import gym
11 from gym import Space
12
13
14 class EmptySpace(Space):
15 """
16 A ``gym.Space`` that reflects arguments space for action that doesn't have
17 arguments. Needed for consistency ang always samples `None` value.
18 """
19
20 def sample(self):
21 return None
22
23 def contains(self, x):
24 return False
25
26
27 class ActionSpace(gym.spaces.Dict):
28 """
29 A dictionary of ``EmbodiedTask`` actions and their argument spaces.
30
31 .. code:: py
32
33 self.observation_space = spaces.ActionSpace(
34 "move": spaces.Dict({
35 "position": spaces.Discrete(2),
36 "velocity": spaces.Discrete(3)
37 },
38 "move_forward": EmptySpace,
39 )
40 )
41 """
42
43 def __init__(self, spaces):
44 if isinstance(spaces, dict):
45 self.spaces = OrderedDict(sorted(list(spaces.items())))
46 if isinstance(spaces, list):
47 self.spaces = OrderedDict(spaces)
48 self.actions_select = gym.spaces.Discrete(len(self.spaces))
49
50 @property
51 def n(self):
52 return len(self.spaces)
53
54 def sample(self):
55 action_index = self.actions_select.sample()
56 return {
57 "action": list(self.spaces.keys())[action_index],
58 "action_args": list(self.spaces.values())[action_index].sample(),
59 }
60
61 def contains(self, x):
62 if not isinstance(x, dict) and {"action", "action_args"} not in x:
63 return False
64 if not self.spaces[x["action"]].contains(x["action_args"]):
65 return False
66 return True
67
68 def __repr__(self):
69 return (
70 "ActionSpace("
71 + ", ".join([k + ":" + str(s) for k, s in self.spaces.items()])
72 + ")"
73 )
74
75
76 class ListSpace(Space):
77 """
78 A ``gym.Space`` that describes a list of other Space. Used to describe
79 list of tokens ids, vectors and etc.
80
81 .. code:: py
82
83 observation_space = ListSpace(spaces.Discrete(
84 dataset.question_vocab.get_size()))
85 """
86
87 def __init__(self, space, min_seq_length=0, max_seq_length=1 << 15):
88 self.min_seq_length = min_seq_length
89 self.max_seq_length = max_seq_length
90 self.space = space
91 self.length_select = gym.spaces.Discrete(
92 max_seq_length - min_seq_length
93 )
94
95 def sample(self):
96 seq_length = self.length_select.sample() + self.min_seq_length
97 return [self.space.sample() for _ in range(seq_length)]
98
99 def contains(self, x):
100 if not isinstance(x, Sized):
101 return False
102
103 if self.min_seq_length <= len(x) <= self.max_seq_length:
104 return False
105
106 return all([self.space.contains(el) for el in x])
107
108 def __repr__(self):
109 return (
110 f"ListSpace({self.space}, min_seq_length="
111 f"{self.min_seq_length}, max_seq_length={self.max_seq_length})"
112 )
113
[end of habitat/core/spaces.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/habitat/core/spaces.py b/habitat/core/spaces.py
--- a/habitat/core/spaces.py
+++ b/habitat/core/spaces.py
@@ -21,8 +21,13 @@
return None
def contains(self, x):
+ if x is None:
+ return True
return False
+ def __repr__(self):
+ return "EmptySpace()"
+
class ActionSpace(gym.spaces.Dict):
"""
@@ -30,14 +35,13 @@
.. code:: py
- self.observation_space = spaces.ActionSpace(
+ self.observation_space = spaces.ActionSpace({
"move": spaces.Dict({
"position": spaces.Discrete(2),
"velocity": spaces.Discrete(3)
- },
- "move_forward": EmptySpace,
- )
- )
+ }),
+ "move_forward": EmptySpace(),
+ })
"""
def __init__(self, spaces):
@@ -59,9 +63,11 @@
}
def contains(self, x):
- if not isinstance(x, dict) and {"action", "action_args"} not in x:
+ if not isinstance(x, dict) or "action" not in x:
+ return False
+ if x["action"] not in self.spaces:
return False
- if not self.spaces[x["action"]].contains(x["action_args"]):
+ if not self.spaces[x["action"]].contains(x.get("action_args", None)):
return False
return True
@@ -100,7 +106,7 @@
if not isinstance(x, Sized):
return False
- if self.min_seq_length <= len(x) <= self.max_seq_length:
+ if not (self.min_seq_length <= len(x) <= self.max_seq_length):
return False
return all([self.space.contains(el) for el in x])
| {"golden_diff": "diff --git a/habitat/core/spaces.py b/habitat/core/spaces.py\n--- a/habitat/core/spaces.py\n+++ b/habitat/core/spaces.py\n@@ -21,8 +21,13 @@\n return None\n \n def contains(self, x):\n+ if x is None:\n+ return True\n return False\n \n+ def __repr__(self):\n+ return \"EmptySpace()\"\n+\n \n class ActionSpace(gym.spaces.Dict):\n \"\"\"\n@@ -30,14 +35,13 @@\n \n .. code:: py\n \n- self.observation_space = spaces.ActionSpace(\n+ self.observation_space = spaces.ActionSpace({\n \"move\": spaces.Dict({\n \"position\": spaces.Discrete(2),\n \"velocity\": spaces.Discrete(3)\n- },\n- \"move_forward\": EmptySpace,\n- )\n- )\n+ }),\n+ \"move_forward\": EmptySpace(),\n+ })\n \"\"\"\n \n def __init__(self, spaces):\n@@ -59,9 +63,11 @@\n }\n \n def contains(self, x):\n- if not isinstance(x, dict) and {\"action\", \"action_args\"} not in x:\n+ if not isinstance(x, dict) or \"action\" not in x:\n+ return False\n+ if x[\"action\"] not in self.spaces:\n return False\n- if not self.spaces[x[\"action\"]].contains(x[\"action_args\"]):\n+ if not self.spaces[x[\"action\"]].contains(x.get(\"action_args\", None)):\n return False\n return True\n \n@@ -100,7 +106,7 @@\n if not isinstance(x, Sized):\n return False\n \n- if self.min_seq_length <= len(x) <= self.max_seq_length:\n+ if not (self.min_seq_length <= len(x) <= self.max_seq_length):\n return False\n \n return all([self.space.contains(el) for el in x])\n", "issue": "Fix contains method in habitat space classes\n## \ud83d\udc1b Bug\r\n\r\nThe `contains` method for the `EmptySpace`, `ActionSpace`, and `ListSpace` classes in `habitat.core.spaces` all contain errors. Specifically:\r\n1. `EmptySpace.contains(x)` should return `True` if `x is None`.\r\n2. `ActionSpace.contains(x)` improperly handles invalid keys and invalid `x[\"action\"]` values.\r\n3. `ListSpace.contains(x)` improperly checks the length of `x`.\r\n\r\n## Command\r\n\r\n`EmptySpace.contains()`\r\n`ActionSpace.contains()`\r\n`ListSpace.contains()`\r\n\r\n## To Reproduce\r\n\r\nCode to reproduce the behavior:\r\n\r\n```python\r\n>>> import gym\r\n>>> from habitat.core.spaces import EmptySpace, ActionSpace, ListSpace\r\n>>>\r\n>>> space = EmptySpace()\r\n>>> print(space.contains(None)) # should be True\r\nFalse\r\n>>>\r\n>>> space = ActionSpace({\r\n... \"move\": gym.spaces.Dict({\r\n... \"position\": gym.spaces.Discrete(2),\r\n... \"velocity\": gym.spaces.Discrete(3)\r\n... }),\r\n... \"move_forward\": EmptySpace(),\r\n... 
})\r\n>>> space.contains({'action': 'move'}) # should be False\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/mnt/habitat/habitat-api/habitat/core/spaces.py\", line 64, in contains\r\n if not self.spaces[x[\"action\"]].contains(x[\"action_args\"]):\r\nKeyError: 'action_args'\r\n>>>\r\n>>> space.contains({'action': None}) # should be false\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/mnt/habitat/habitat-api/habitat/core/spaces.py\", line 64, in contains\r\n if not self.spaces[x[\"action\"]].contains(x[\"action_args\"]):\r\nKeyError: None\r\n>>>\r\n>>> space = ListSpace(gym.spaces.Discrete(2), 5, 10)\r\n>>> print(space.contains([0, 1, 0, 1])) # should be True\r\nFalse\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe `contains` methods should properly return `True`, `False`, `False`, and `True` in the four cases above, respectively.\r\n\r\n## Additional context\r\n\r\nThere is also an error in example code in the docs for the `ActionSpace` class.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom collections import OrderedDict\nfrom typing import Sized\n\nimport gym\nfrom gym import Space\n\n\nclass EmptySpace(Space):\n \"\"\"\n A ``gym.Space`` that reflects arguments space for action that doesn't have\n arguments. Needed for consistency ang always samples `None` value.\n \"\"\"\n\n def sample(self):\n return None\n\n def contains(self, x):\n return False\n\n\nclass ActionSpace(gym.spaces.Dict):\n \"\"\"\n A dictionary of ``EmbodiedTask`` actions and their argument spaces.\n\n .. code:: py\n\n self.observation_space = spaces.ActionSpace(\n \"move\": spaces.Dict({\n \"position\": spaces.Discrete(2),\n \"velocity\": spaces.Discrete(3)\n },\n \"move_forward\": EmptySpace,\n )\n )\n \"\"\"\n\n def __init__(self, spaces):\n if isinstance(spaces, dict):\n self.spaces = OrderedDict(sorted(list(spaces.items())))\n if isinstance(spaces, list):\n self.spaces = OrderedDict(spaces)\n self.actions_select = gym.spaces.Discrete(len(self.spaces))\n\n @property\n def n(self):\n return len(self.spaces)\n\n def sample(self):\n action_index = self.actions_select.sample()\n return {\n \"action\": list(self.spaces.keys())[action_index],\n \"action_args\": list(self.spaces.values())[action_index].sample(),\n }\n\n def contains(self, x):\n if not isinstance(x, dict) and {\"action\", \"action_args\"} not in x:\n return False\n if not self.spaces[x[\"action\"]].contains(x[\"action_args\"]):\n return False\n return True\n\n def __repr__(self):\n return (\n \"ActionSpace(\"\n + \", \".join([k + \":\" + str(s) for k, s in self.spaces.items()])\n + \")\"\n )\n\n\nclass ListSpace(Space):\n \"\"\"\n A ``gym.Space`` that describes a list of other Space. Used to describe\n list of tokens ids, vectors and etc.\n\n .. 
code:: py\n\n observation_space = ListSpace(spaces.Discrete(\n dataset.question_vocab.get_size()))\n \"\"\"\n\n def __init__(self, space, min_seq_length=0, max_seq_length=1 << 15):\n self.min_seq_length = min_seq_length\n self.max_seq_length = max_seq_length\n self.space = space\n self.length_select = gym.spaces.Discrete(\n max_seq_length - min_seq_length\n )\n\n def sample(self):\n seq_length = self.length_select.sample() + self.min_seq_length\n return [self.space.sample() for _ in range(seq_length)]\n\n def contains(self, x):\n if not isinstance(x, Sized):\n return False\n\n if self.min_seq_length <= len(x) <= self.max_seq_length:\n return False\n\n return all([self.space.contains(el) for el in x])\n\n def __repr__(self):\n return (\n f\"ListSpace({self.space}, min_seq_length=\"\n f\"{self.min_seq_length}, max_seq_length={self.max_seq_length})\"\n )\n", "path": "habitat/core/spaces.py"}]} | 2,017 | 434 |
gh_patches_debug_47322 | rasdani/github-patches | git_diff | python-gitlab__python-gitlab-838 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Pickle support: dump() after load() fails
This is a one-liner fix to pickle support: pickle.dump() fails after pickle.load()
```
File "/home/bourgesl/.local/lib/python3.6/site-packages/gitlab/base.py", line 50, in __getstate__
module = state.pop("_module")
KeyError: '_module'
```
Reason:
Former self._module call invokes ```set_attr()``` that stores '_module' attribute in ```self.__dict__["_updated_attrs"]["_module"]``` that is not its correct place: ```self.__dict__["_module"]``` as expected by getstate()
</issue>
<code>
[start of gitlab/base.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Lesser General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with this program. If not, see <http://www.gnu.org/licenses/>.
17
18 import importlib
19
20
21 class RESTObject(object):
22 """Represents an object built from server data.
23
24 It holds the attributes know from the server, and the updated attributes in
25 another. This allows smart updates, if the object allows it.
26
27 You can redefine ``_id_attr`` in child classes to specify which attribute
28 must be used as uniq ID. ``None`` means that the object can be updated
29 without ID in the url.
30 """
31
32 _id_attr = "id"
33
34 def __init__(self, manager, attrs):
35 self.__dict__.update(
36 {
37 "manager": manager,
38 "_attrs": attrs,
39 "_updated_attrs": {},
40 "_module": importlib.import_module(self.__module__),
41 }
42 )
43 self.__dict__["_parent_attrs"] = self.manager.parent_attrs
44 self._create_managers()
45
46 def __getstate__(self):
47 state = self.__dict__.copy()
48 module = state.pop("_module")
49 state["_module_name"] = module.__name__
50 return state
51
52 def __setstate__(self, state):
53 module_name = state.pop("_module_name")
54 self.__dict__.update(state)
55 self._module = importlib.import_module(module_name)
56
57 def __getattr__(self, name):
58 try:
59 return self.__dict__["_updated_attrs"][name]
60 except KeyError:
61 try:
62 value = self.__dict__["_attrs"][name]
63
64 # If the value is a list, we copy it in the _updated_attrs dict
65 # because we are not able to detect changes made on the object
66 # (append, insert, pop, ...). Without forcing the attr
67 # creation __setattr__ is never called, the list never ends up
68 # in the _updated_attrs dict, and the update() and save()
69 # method never push the new data to the server.
70 # See https://github.com/python-gitlab/python-gitlab/issues/306
71 #
72 # note: _parent_attrs will only store simple values (int) so we
73 # don't make this check in the next except block.
74 if isinstance(value, list):
75 self.__dict__["_updated_attrs"][name] = value[:]
76 return self.__dict__["_updated_attrs"][name]
77
78 return value
79
80 except KeyError:
81 try:
82 return self.__dict__["_parent_attrs"][name]
83 except KeyError:
84 raise AttributeError(name)
85
86 def __setattr__(self, name, value):
87 self.__dict__["_updated_attrs"][name] = value
88
89 def __str__(self):
90 data = self._attrs.copy()
91 data.update(self._updated_attrs)
92 return "%s => %s" % (type(self), data)
93
94 def __repr__(self):
95 if self._id_attr:
96 return "<%s %s:%s>" % (
97 self.__class__.__name__,
98 self._id_attr,
99 self.get_id(),
100 )
101 else:
102 return "<%s>" % self.__class__.__name__
103
104 def __eq__(self, other):
105 if self.get_id() and other.get_id():
106 return self.get_id() == other.get_id()
107 return super(RESTObject, self) == other
108
109 def __ne__(self, other):
110 if self.get_id() and other.get_id():
111 return self.get_id() != other.get_id()
112 return super(RESTObject, self) != other
113
114 def __hash__(self):
115 if not self.get_id():
116 return super(RESTObject, self).__hash__()
117 return hash(self.get_id())
118
119 def _create_managers(self):
120 managers = getattr(self, "_managers", None)
121 if managers is None:
122 return
123
124 for attr, cls_name in self._managers:
125 cls = getattr(self._module, cls_name)
126 manager = cls(self.manager.gitlab, parent=self)
127 self.__dict__[attr] = manager
128
129 def _update_attrs(self, new_attrs):
130 self.__dict__["_updated_attrs"] = {}
131 self.__dict__["_attrs"].update(new_attrs)
132
133 def get_id(self):
134 """Returns the id of the resource."""
135 if self._id_attr is None or not hasattr(self, self._id_attr):
136 return None
137 return getattr(self, self._id_attr)
138
139 @property
140 def attributes(self):
141 d = self.__dict__["_updated_attrs"].copy()
142 d.update(self.__dict__["_attrs"])
143 d.update(self.__dict__["_parent_attrs"])
144 return d
145
146
147 class RESTObjectList(object):
148 """Generator object representing a list of RESTObject's.
149
150 This generator uses the Gitlab pagination system to fetch new data when
151 required.
152
153 Note: you should not instanciate such objects, they are returned by calls
154 to RESTManager.list()
155
156 Args:
157 manager: Manager to attach to the created objects
158 obj_cls: Type of objects to create from the json data
159 _list: A GitlabList object
160 """
161
162 def __init__(self, manager, obj_cls, _list):
163 """Creates an objects list from a GitlabList.
164
165 You should not create objects of this type, but use managers list()
166 methods instead.
167
168 Args:
169 manager: the RESTManager to attach to the objects
170 obj_cls: the class of the created objects
171 _list: the GitlabList holding the data
172 """
173 self.manager = manager
174 self._obj_cls = obj_cls
175 self._list = _list
176
177 def __iter__(self):
178 return self
179
180 def __len__(self):
181 return len(self._list)
182
183 def __next__(self):
184 return self.next()
185
186 def next(self):
187 data = self._list.next()
188 return self._obj_cls(self.manager, data)
189
190 @property
191 def current_page(self):
192 """The current page number."""
193 return self._list.current_page
194
195 @property
196 def prev_page(self):
197 """The next page number.
198
199 If None, the current page is the last.
200 """
201 return self._list.prev_page
202
203 @property
204 def next_page(self):
205 """The next page number.
206
207 If None, the current page is the last.
208 """
209 return self._list.next_page
210
211 @property
212 def per_page(self):
213 """The number of items per page."""
214 return self._list.per_page
215
216 @property
217 def total_pages(self):
218 """The total number of pages."""
219 return self._list.total_pages
220
221 @property
222 def total(self):
223 """The total number of items."""
224 return self._list.total
225
226
227 class RESTManager(object):
228 """Base class for CRUD operations on objects.
229
230 Derivated class must define ``_path`` and ``_obj_cls``.
231
232 ``_path``: Base URL path on which requests will be sent (e.g. '/projects')
233 ``_obj_cls``: The class of objects that will be created
234 """
235
236 _path = None
237 _obj_cls = None
238
239 def __init__(self, gl, parent=None):
240 """REST manager constructor.
241
242 Args:
243 gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make
244 requests.
245 parent: REST object to which the manager is attached.
246 """
247 self.gitlab = gl
248 self._parent = parent # for nested managers
249 self._computed_path = self._compute_path()
250
251 @property
252 def parent_attrs(self):
253 return self._parent_attrs
254
255 def _compute_path(self, path=None):
256 self._parent_attrs = {}
257 if path is None:
258 path = self._path
259 if self._parent is None or not hasattr(self, "_from_parent_attrs"):
260 return path
261
262 data = {
263 self_attr: getattr(self._parent, parent_attr, None)
264 for self_attr, parent_attr in self._from_parent_attrs.items()
265 }
266 self._parent_attrs = data
267 return path % data
268
269 @property
270 def path(self):
271 return self._computed_path
272
[end of gitlab/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gitlab/base.py b/gitlab/base.py
--- a/gitlab/base.py
+++ b/gitlab/base.py
@@ -52,7 +52,7 @@
def __setstate__(self, state):
module_name = state.pop("_module_name")
self.__dict__.update(state)
- self._module = importlib.import_module(module_name)
+ self.__dict__["_module"] = importlib.import_module(module_name)
def __getattr__(self, name):
try:
| {"golden_diff": "diff --git a/gitlab/base.py b/gitlab/base.py\n--- a/gitlab/base.py\n+++ b/gitlab/base.py\n@@ -52,7 +52,7 @@\n def __setstate__(self, state):\n module_name = state.pop(\"_module_name\")\n self.__dict__.update(state)\n- self._module = importlib.import_module(module_name)\n+ self.__dict__[\"_module\"] = importlib.import_module(module_name)\n \n def __getattr__(self, name):\n try:\n", "issue": "Fix Pickle support: dump() after load() fails\nThis is a one-liner fix to pickle support: pickle.dump() fails after pickle.load()\r\n```\r\n File \"/home/bourgesl/.local/lib/python3.6/site-packages/gitlab/base.py\", line 50, in __getstate__\r\n module = state.pop(\"_module\")\r\nKeyError: '_module'\r\n```\r\n\r\nReason:\r\nFormer self._module call invokes ```set_attr()``` that stores '_module' attribute in ```self.__dict__[\"_updated_attrs\"][\"_module\"]``` that is not its correct place: ```self.__dict__[\"_module\"]``` as expected by getstate()\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport importlib\n\n\nclass RESTObject(object):\n \"\"\"Represents an object built from server data.\n\n It holds the attributes know from the server, and the updated attributes in\n another. This allows smart updates, if the object allows it.\n\n You can redefine ``_id_attr`` in child classes to specify which attribute\n must be used as uniq ID. ``None`` means that the object can be updated\n without ID in the url.\n \"\"\"\n\n _id_attr = \"id\"\n\n def __init__(self, manager, attrs):\n self.__dict__.update(\n {\n \"manager\": manager,\n \"_attrs\": attrs,\n \"_updated_attrs\": {},\n \"_module\": importlib.import_module(self.__module__),\n }\n )\n self.__dict__[\"_parent_attrs\"] = self.manager.parent_attrs\n self._create_managers()\n\n def __getstate__(self):\n state = self.__dict__.copy()\n module = state.pop(\"_module\")\n state[\"_module_name\"] = module.__name__\n return state\n\n def __setstate__(self, state):\n module_name = state.pop(\"_module_name\")\n self.__dict__.update(state)\n self._module = importlib.import_module(module_name)\n\n def __getattr__(self, name):\n try:\n return self.__dict__[\"_updated_attrs\"][name]\n except KeyError:\n try:\n value = self.__dict__[\"_attrs\"][name]\n\n # If the value is a list, we copy it in the _updated_attrs dict\n # because we are not able to detect changes made on the object\n # (append, insert, pop, ...). 
Without forcing the attr\n # creation __setattr__ is never called, the list never ends up\n # in the _updated_attrs dict, and the update() and save()\n # method never push the new data to the server.\n # See https://github.com/python-gitlab/python-gitlab/issues/306\n #\n # note: _parent_attrs will only store simple values (int) so we\n # don't make this check in the next except block.\n if isinstance(value, list):\n self.__dict__[\"_updated_attrs\"][name] = value[:]\n return self.__dict__[\"_updated_attrs\"][name]\n\n return value\n\n except KeyError:\n try:\n return self.__dict__[\"_parent_attrs\"][name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n self.__dict__[\"_updated_attrs\"][name] = value\n\n def __str__(self):\n data = self._attrs.copy()\n data.update(self._updated_attrs)\n return \"%s => %s\" % (type(self), data)\n\n def __repr__(self):\n if self._id_attr:\n return \"<%s %s:%s>\" % (\n self.__class__.__name__,\n self._id_attr,\n self.get_id(),\n )\n else:\n return \"<%s>\" % self.__class__.__name__\n\n def __eq__(self, other):\n if self.get_id() and other.get_id():\n return self.get_id() == other.get_id()\n return super(RESTObject, self) == other\n\n def __ne__(self, other):\n if self.get_id() and other.get_id():\n return self.get_id() != other.get_id()\n return super(RESTObject, self) != other\n\n def __hash__(self):\n if not self.get_id():\n return super(RESTObject, self).__hash__()\n return hash(self.get_id())\n\n def _create_managers(self):\n managers = getattr(self, \"_managers\", None)\n if managers is None:\n return\n\n for attr, cls_name in self._managers:\n cls = getattr(self._module, cls_name)\n manager = cls(self.manager.gitlab, parent=self)\n self.__dict__[attr] = manager\n\n def _update_attrs(self, new_attrs):\n self.__dict__[\"_updated_attrs\"] = {}\n self.__dict__[\"_attrs\"].update(new_attrs)\n\n def get_id(self):\n \"\"\"Returns the id of the resource.\"\"\"\n if self._id_attr is None or not hasattr(self, self._id_attr):\n return None\n return getattr(self, self._id_attr)\n\n @property\n def attributes(self):\n d = self.__dict__[\"_updated_attrs\"].copy()\n d.update(self.__dict__[\"_attrs\"])\n d.update(self.__dict__[\"_parent_attrs\"])\n return d\n\n\nclass RESTObjectList(object):\n \"\"\"Generator object representing a list of RESTObject's.\n\n This generator uses the Gitlab pagination system to fetch new data when\n required.\n\n Note: you should not instanciate such objects, they are returned by calls\n to RESTManager.list()\n\n Args:\n manager: Manager to attach to the created objects\n obj_cls: Type of objects to create from the json data\n _list: A GitlabList object\n \"\"\"\n\n def __init__(self, manager, obj_cls, _list):\n \"\"\"Creates an objects list from a GitlabList.\n\n You should not create objects of this type, but use managers list()\n methods instead.\n\n Args:\n manager: the RESTManager to attach to the objects\n obj_cls: the class of the created objects\n _list: the GitlabList holding the data\n \"\"\"\n self.manager = manager\n self._obj_cls = obj_cls\n self._list = _list\n\n def __iter__(self):\n return self\n\n def __len__(self):\n return len(self._list)\n\n def __next__(self):\n return self.next()\n\n def next(self):\n data = self._list.next()\n return self._obj_cls(self.manager, data)\n\n @property\n def current_page(self):\n \"\"\"The current page number.\"\"\"\n return self._list.current_page\n\n @property\n def prev_page(self):\n \"\"\"The next page number.\n\n If None, the current page is 
the last.\n \"\"\"\n return self._list.prev_page\n\n @property\n def next_page(self):\n \"\"\"The next page number.\n\n If None, the current page is the last.\n \"\"\"\n return self._list.next_page\n\n @property\n def per_page(self):\n \"\"\"The number of items per page.\"\"\"\n return self._list.per_page\n\n @property\n def total_pages(self):\n \"\"\"The total number of pages.\"\"\"\n return self._list.total_pages\n\n @property\n def total(self):\n \"\"\"The total number of items.\"\"\"\n return self._list.total\n\n\nclass RESTManager(object):\n \"\"\"Base class for CRUD operations on objects.\n\n Derivated class must define ``_path`` and ``_obj_cls``.\n\n ``_path``: Base URL path on which requests will be sent (e.g. '/projects')\n ``_obj_cls``: The class of objects that will be created\n \"\"\"\n\n _path = None\n _obj_cls = None\n\n def __init__(self, gl, parent=None):\n \"\"\"REST manager constructor.\n\n Args:\n gl (Gitlab): :class:`~gitlab.Gitlab` connection to use to make\n requests.\n parent: REST object to which the manager is attached.\n \"\"\"\n self.gitlab = gl\n self._parent = parent # for nested managers\n self._computed_path = self._compute_path()\n\n @property\n def parent_attrs(self):\n return self._parent_attrs\n\n def _compute_path(self, path=None):\n self._parent_attrs = {}\n if path is None:\n path = self._path\n if self._parent is None or not hasattr(self, \"_from_parent_attrs\"):\n return path\n\n data = {\n self_attr: getattr(self._parent, parent_attr, None)\n for self_attr, parent_attr in self._from_parent_attrs.items()\n }\n self._parent_attrs = data\n return path % data\n\n @property\n def path(self):\n return self._computed_path\n", "path": "gitlab/base.py"}]} | 3,333 | 111 |
gh_patches_debug_20970 | rasdani/github-patches | git_diff | fidals__shopelectro-453 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove the category from the feed
In the gm.yml feed, remove the category https://www.shopelectro.ru/catalog/categories/usiliteli-zvuka-dlia-slaboslyshashchikh/
Remove it from this feed only.
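A minimal sketch of one possible direction (an editorial illustration, not necessarily the project's actual change): keep a per-feed ignore list next to the global `IGNORED_CATEGORIES` and consult it only when building the matching feed. The `'GM'` key matches the gm.yml target already defined in `price.py`, and the Russian category name is the title of the category behind the linked URL; the helper function below is purely hypothetical.

```python
# Sketch: per-feed category exclusions on top of the global ignore list.
from collections import defaultdict

# Only the gm.yml ('GM') feed should skip this category.
IGNORED_CATEGORIES_BY_TARGET = defaultdict(list, {
    'GM': ['Усилители звука для слабослышащих'],
})


def ignored_category_names(target, global_ignored):
    """Hypothetical helper: names to exclude for a feed target such as 'GM' or 'YM'."""
    return list(global_ignored) + IGNORED_CATEGORIES_BY_TARGET[target]
```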
</issue>
<code>
[start of shopelectro/management/commands/price.py]
1 import os
2
3 from django.conf import settings
4 from django.core.management.base import BaseCommand
5 from django.template.loader import render_to_string
6 from django.urls import reverse
7
8 from shopelectro.models import Product, Category
9
10
11 class Command(BaseCommand):
12 """Generate yml file for a given vendor (YM or price.ru)."""
13
14 # Online market services, that works with our prices.
15 # Dict keys - url targets for every service
16 TARGETS = {
17 'YM': 'yandex.yml',
18 'priceru': 'priceru.xml',
19 'GM': 'gm.yml',
20 'SE78': 'se78.yml',
21 }
22 # price files will be stored at this dir
23 BASE_DIR = settings.ASSETS_DIR
24
25 IGNORED_CATEGORIES = [
26 'Измерительные приборы', 'Новогодние вращающиеся светодиодные лампы',
27 'Новогодние лазерные проекторы', 'MP3- колонки', 'Беспроводные звонки',
28 'Радиоприёмники', 'Фонари', 'Отвертки', 'Весы электронные портативные',
29 ]
30
31 def create_prices(self):
32 for target in self.TARGETS.items():
33 self.generate_yml(*target)
34
35 def handle(self, *args, **options):
36 self.create_prices()
37
38 @classmethod
39 def get_context_for_yml(cls, utm):
40 """Create context dictionary for rendering files."""
41 def put_utm(product):
42 """Put UTM attribute to product."""
43 utm_marks = [
44 ('utm_source', utm),
45 ('utm_medium', 'cpc'),
46 ('utm_content', product.get_root_category().page.slug),
47 ('utm_term', str(product.vendor_code)),
48 ]
49
50 url = reverse('product', args=(product.vendor_code,))
51 utm_mark_query = '&'.join('{}={}'.format(k, v) for k, v in utm_marks)
52 product.utm_url = '{}{}?{}'.format(settings.BASE_URL, url, utm_mark_query)
53
54 product.prepared_params = list(
55 filter(
56 lambda x: x[0].name != 'Производитель',
57 product.params
58 )
59 )
60
61 return product
62
63 def put_crumbs(product): # Ignore PyDocStyleBear
64 """Crumbs for google merchant. https://goo.gl/b0UJQp"""
65 product.crumbs = ' > '.join(
66 product.page.get_ancestors_fields('h1', include_self=False)[1:]
67 )
68 return product
69
70 def filter_categories(utm):
71 categories_to_exclude = (
72 Category.objects
73 .filter(name__in=cls.IGNORED_CATEGORIES)
74 .get_descendants(include_self=True)
75 )
76
77 result_categories = Category.objects.exclude(id__in=categories_to_exclude)
78
79 if utm == 'YM':
80 """
81 Yandex Market feed requires items in some categories to have pictures
82 To simplify filtering we are excluding all categories
83 which don't contain at least one product with picture
84 """
85 result_categories = result_categories.get_categories_tree_with_pictures()
86
87 return result_categories
88
89 def prepare_products(categories_, utm):
90 """Filter product list and patch it for rendering."""
91 products_except_others = (
92 Product.objects
93 .select_related('page')
94 .prefetch_related('category')
95 .prefetch_related('page__images')
96 .filter(category__in=categories_, price__gt=0, page__is_active=True)
97 )
98
99 if utm == 'YM':
100 """
101 Yandex Market feed requires items in some categories to have pictures
102 To simplify filtering we are excluding all products without pictures
103 """
104 products_except_others = (
105 products_except_others
106 .filter(page__images__isnull=False)
107 .distinct()
108 )
109
110 result_products = [
111 put_crumbs(put_utm(product))
112 for product in products_except_others
113 ]
114
115 return result_products
116
117 categories = (
118 filter_categories(utm) if utm != 'SE78'
119 else Category.objects.all()
120 )
121
122 products = prepare_products(categories, utm)
123
124 return {
125 'base_url': settings.BASE_URL,
126 'categories': categories,
127 'products': products,
128 'shop': settings.SHOP,
129 'utm': utm,
130 }
131
132 @classmethod
133 def generate_yml(cls, utm, file_name):
134 """Generate yml file."""
135 file_to_write = os.path.join(cls.BASE_DIR, file_name)
136 context = cls.get_context_for_yml(utm)
137
138 with open(file_to_write, 'w', encoding='utf-8') as file:
139 file.write(render_to_string('prices/price.yml', context).strip())
140
141 return '{} generated...'.format(file_name)
142
[end of shopelectro/management/commands/price.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shopelectro/management/commands/price.py b/shopelectro/management/commands/price.py
--- a/shopelectro/management/commands/price.py
+++ b/shopelectro/management/commands/price.py
@@ -1,3 +1,4 @@
+from collections import defaultdict
import os
from django.conf import settings
@@ -28,6 +29,10 @@
'Радиоприёмники', 'Фонари', 'Отвертки', 'Весы электронные портативные',
]
+ IGNORED_CATEGORIES_BY_TARGET = defaultdict(list, {
+ 'GM': ['Усилители звука для слабослышащих'],
+ })
+
def create_prices(self):
for target in self.TARGETS.items():
self.generate_yml(*target)
@@ -71,6 +76,7 @@
categories_to_exclude = (
Category.objects
.filter(name__in=cls.IGNORED_CATEGORIES)
+ .filter(name__in=cls.IGNORED_CATEGORIES_BY_TARGET[utm])
.get_descendants(include_self=True)
)
| {"golden_diff": "diff --git a/shopelectro/management/commands/price.py b/shopelectro/management/commands/price.py\n--- a/shopelectro/management/commands/price.py\n+++ b/shopelectro/management/commands/price.py\n@@ -1,3 +1,4 @@\n+from collections import defaultdict\n import os\n \n from django.conf import settings\n@@ -28,6 +29,10 @@\n '\u0420\u0430\u0434\u0438\u043e\u043f\u0440\u0438\u0451\u043c\u043d\u0438\u043a\u0438', '\u0424\u043e\u043d\u0430\u0440\u0438', '\u041e\u0442\u0432\u0435\u0440\u0442\u043a\u0438', '\u0412\u0435\u0441\u044b \u044d\u043b\u0435\u043a\u0442\u0440\u043e\u043d\u043d\u044b\u0435 \u043f\u043e\u0440\u0442\u0430\u0442\u0438\u0432\u043d\u044b\u0435',\n ]\n \n+ IGNORED_CATEGORIES_BY_TARGET = defaultdict(list, {\n+ 'GM': ['\u0423\u0441\u0438\u043b\u0438\u0442\u0435\u043b\u0438 \u0437\u0432\u0443\u043a\u0430 \u0434\u043b\u044f \u0441\u043b\u0430\u0431\u043e\u0441\u043b\u044b\u0448\u0430\u0449\u0438\u0445'],\n+ })\n+\n def create_prices(self):\n for target in self.TARGETS.items():\n self.generate_yml(*target)\n@@ -71,6 +76,7 @@\n categories_to_exclude = (\n Category.objects\n .filter(name__in=cls.IGNORED_CATEGORIES)\n+ .filter(name__in=cls.IGNORED_CATEGORIES_BY_TARGET[utm])\n .get_descendants(include_self=True)\n )\n", "issue": "\u0423\u0431\u0435\u0440\u0438 \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044e \u0438\u0437 \u0444\u0438\u0434\u0430\n\u0418\u0437 \u0444\u0438\u0434\u0430 gm.yml - \u0443\u0431\u0438\u0440\u0430\u0439 \u043a\u0430\u0442\u0435\u0433\u043e\u0440\u0438\u044e https://www.shopelectro.ru/catalog/categories/usiliteli-zvuka-dlia-slaboslyshashchikh/\r\n\u0423\u0431\u0440\u0430\u0442\u044c \u0442\u043e\u043b\u044c\u043a\u043e \u0438\u0437 \u044d\u0442\u043e\u0433\u043e \u0444\u0438\u0434\u0430. \n", "before_files": [{"content": "import os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\n\nfrom shopelectro.models import Product, Category\n\n\nclass Command(BaseCommand):\n \"\"\"Generate yml file for a given vendor (YM or price.ru).\"\"\"\n\n # Online market services, that works with our prices.\n # Dict keys - url targets for every service\n TARGETS = {\n 'YM': 'yandex.yml',\n 'priceru': 'priceru.xml',\n 'GM': 'gm.yml',\n 'SE78': 'se78.yml',\n }\n # price files will be stored at this dir\n BASE_DIR = settings.ASSETS_DIR\n\n IGNORED_CATEGORIES = [\n '\u0418\u0437\u043c\u0435\u0440\u0438\u0442\u0435\u043b\u044c\u043d\u044b\u0435 \u043f\u0440\u0438\u0431\u043e\u0440\u044b', '\u041d\u043e\u0432\u043e\u0433\u043e\u0434\u043d\u0438\u0435 \u0432\u0440\u0430\u0449\u0430\u044e\u0449\u0438\u0435\u0441\u044f \u0441\u0432\u0435\u0442\u043e\u0434\u0438\u043e\u0434\u043d\u044b\u0435 \u043b\u0430\u043c\u043f\u044b',\n '\u041d\u043e\u0432\u043e\u0433\u043e\u0434\u043d\u0438\u0435 \u043b\u0430\u0437\u0435\u0440\u043d\u044b\u0435 \u043f\u0440\u043e\u0435\u043a\u0442\u043e\u0440\u044b', 'MP3- \u043a\u043e\u043b\u043e\u043d\u043a\u0438', '\u0411\u0435\u0441\u043f\u0440\u043e\u0432\u043e\u0434\u043d\u044b\u0435 \u0437\u0432\u043e\u043d\u043a\u0438',\n '\u0420\u0430\u0434\u0438\u043e\u043f\u0440\u0438\u0451\u043c\u043d\u0438\u043a\u0438', '\u0424\u043e\u043d\u0430\u0440\u0438', '\u041e\u0442\u0432\u0435\u0440\u0442\u043a\u0438', '\u0412\u0435\u0441\u044b \u044d\u043b\u0435\u043a\u0442\u0440\u043e\u043d\u043d\u044b\u0435 \u043f\u043e\u0440\u0442\u0430\u0442\u0438\u0432\u043d\u044b\u0435',\n ]\n\n def create_prices(self):\n for target in 
self.TARGETS.items():\n self.generate_yml(*target)\n\n def handle(self, *args, **options):\n self.create_prices()\n\n @classmethod\n def get_context_for_yml(cls, utm):\n \"\"\"Create context dictionary for rendering files.\"\"\"\n def put_utm(product):\n \"\"\"Put UTM attribute to product.\"\"\"\n utm_marks = [\n ('utm_source', utm),\n ('utm_medium', 'cpc'),\n ('utm_content', product.get_root_category().page.slug),\n ('utm_term', str(product.vendor_code)),\n ]\n\n url = reverse('product', args=(product.vendor_code,))\n utm_mark_query = '&'.join('{}={}'.format(k, v) for k, v in utm_marks)\n product.utm_url = '{}{}?{}'.format(settings.BASE_URL, url, utm_mark_query)\n\n product.prepared_params = list(\n filter(\n lambda x: x[0].name != '\u041f\u0440\u043e\u0438\u0437\u0432\u043e\u0434\u0438\u0442\u0435\u043b\u044c',\n product.params\n )\n )\n\n return product\n\n def put_crumbs(product): # Ignore PyDocStyleBear\n \"\"\"Crumbs for google merchant. https://goo.gl/b0UJQp\"\"\"\n product.crumbs = ' > '.join(\n product.page.get_ancestors_fields('h1', include_self=False)[1:]\n )\n return product\n\n def filter_categories(utm):\n categories_to_exclude = (\n Category.objects\n .filter(name__in=cls.IGNORED_CATEGORIES)\n .get_descendants(include_self=True)\n )\n\n result_categories = Category.objects.exclude(id__in=categories_to_exclude)\n\n if utm == 'YM':\n \"\"\"\n Yandex Market feed requires items in some categories to have pictures\n To simplify filtering we are excluding all categories\n which don't contain at least one product with picture\n \"\"\"\n result_categories = result_categories.get_categories_tree_with_pictures()\n\n return result_categories\n\n def prepare_products(categories_, utm):\n \"\"\"Filter product list and patch it for rendering.\"\"\"\n products_except_others = (\n Product.objects\n .select_related('page')\n .prefetch_related('category')\n .prefetch_related('page__images')\n .filter(category__in=categories_, price__gt=0, page__is_active=True)\n )\n\n if utm == 'YM':\n \"\"\"\n Yandex Market feed requires items in some categories to have pictures\n To simplify filtering we are excluding all products without pictures\n \"\"\"\n products_except_others = (\n products_except_others\n .filter(page__images__isnull=False)\n .distinct()\n )\n\n result_products = [\n put_crumbs(put_utm(product))\n for product in products_except_others\n ]\n\n return result_products\n\n categories = (\n filter_categories(utm) if utm != 'SE78'\n else Category.objects.all()\n )\n\n products = prepare_products(categories, utm)\n\n return {\n 'base_url': settings.BASE_URL,\n 'categories': categories,\n 'products': products,\n 'shop': settings.SHOP,\n 'utm': utm,\n }\n\n @classmethod\n def generate_yml(cls, utm, file_name):\n \"\"\"Generate yml file.\"\"\"\n file_to_write = os.path.join(cls.BASE_DIR, file_name)\n context = cls.get_context_for_yml(utm)\n\n with open(file_to_write, 'w', encoding='utf-8') as file:\n file.write(render_to_string('prices/price.yml', context).strip())\n\n return '{} generated...'.format(file_name)\n", "path": "shopelectro/management/commands/price.py"}]} | 1,994 | 258 |
gh_patches_debug_16962 | rasdani/github-patches | git_diff | cal-itp__benefits-921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use the common base Docker image
This is a corollary to work done in cal-itp/eligibility-server#123.
We have a new repository [`cal-itp/docker-python-web`](https://github.com/cal-itp/docker-python-web) that publishes a Docker image to GitHub Container Registry at [`ghcr.io/cal-itp/docker-python-web`](https://github.com/cal-itp/docker-python-web/pkgs/container/docker-python-web).
This image contains a baseline `nginx` and `gunicorn` setup and configuration. We can replace the Benefits app container's base image with this image to simplify the repository and build process for Benefits.
## Acceptance Criteria
<!-- Remember to consider edge cases -->
- [ ] the Benefits app container builds `FROM ghcr.io/cal-itp/docker-python-web`
- [ ] duplicate `nginx` and `gunicorn` setup in this repository is removed
- [ ] the app runs locally as a container `docker compose up client`
- [ ] the devcontainer starts up
- [ ] the app runs in DEBUG mode in the devcontainer with `F5`
## Additional context
See the [`docker-python-web` Docs](https://docs.calitp.org/docker-python-web/) for more information.
</issue>
<code>
[start of appcontainer/gunicorn.conf.py]
1 """
2 The Gunicorn configuration file
3 More information: https://docs.gunicorn.org/en/stable/settings.html
4 """
5
6 import multiprocessing
7
8 # the unix socket defined in nginx.conf
9 bind = "unix:/home/calitp/app/run/gunicorn.sock"
10
11 # Recommend (2 x $num_cores) + 1 as the number of workers to start off with
12 workers = multiprocessing.cpu_count() * 2 + 1
13
14 # send logs to stdout and stderr
15 accesslog = "-"
16 errorlog = "-"
17
18 # Preloading can save some RAM resources as well as speed up server boot times,
19 # at the cost of not being able to reload app code by restarting workers
20 # (in an ECS Fargate environment, this isn't possible anyway)
21 preload_app = True
22
[end of appcontainer/gunicorn.conf.py]
[start of .devcontainer/server/settings.py]
1 # App settings
2
3 LOG_LEVEL = "DEBUG"
4
5 # Eligibility Verification settings
6
7 CLIENT_KEY_PATH = "https://raw.githubusercontent.com/cal-itp/eligibility-server/main/keys/client.pub"
8 SERVER_PRIVATE_KEY_PATH = "https://raw.githubusercontent.com/cal-itp/eligibility-server/main/keys/server.key"
9 SERVER_PUBLIC_KEY_PATH = "https://raw.githubusercontent.com/cal-itp/eligibility-server/main/keys/server.pub"
10 SUB_FORMAT_REGEX = r".+"
11
12 # Data settings
13
14 IMPORT_FILE_PATH = "/.devcontainer/server/data.csv"
15 INPUT_HASH_ALGO = ""
16
17 # CSV-specific settings
18
19 CSV_DELIMITER = ";"
20 CSV_NEWLINE = ""
21 CSV_QUOTING = 3
22 CSV_QUOTECHAR = ""
23
[end of .devcontainer/server/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/.devcontainer/server/settings.py b/.devcontainer/server/settings.py
--- a/.devcontainer/server/settings.py
+++ b/.devcontainer/server/settings.py
@@ -11,7 +11,7 @@
# Data settings
-IMPORT_FILE_PATH = "/.devcontainer/server/data.csv"
+IMPORT_FILE_PATH = "https://raw.githubusercontent.com/cal-itp/eligibility-server/main/data/server.csv"
INPUT_HASH_ALGO = ""
# CSV-specific settings
diff --git a/appcontainer/gunicorn.conf.py b/appcontainer/gunicorn.conf.py
deleted file mode 100644
--- a/appcontainer/gunicorn.conf.py
+++ /dev/null
@@ -1,21 +0,0 @@
-"""
-The Gunicorn configuration file
-More information: https://docs.gunicorn.org/en/stable/settings.html
-"""
-
-import multiprocessing
-
-# the unix socket defined in nginx.conf
-bind = "unix:/home/calitp/app/run/gunicorn.sock"
-
-# Recommend (2 x $num_cores) + 1 as the number of workers to start off with
-workers = multiprocessing.cpu_count() * 2 + 1
-
-# send logs to stdout and stderr
-accesslog = "-"
-errorlog = "-"
-
-# Preloading can save some RAM resources as well as speed up server boot times,
-# at the cost of not being able to reload app code by restarting workers
-# (in an ECS Fargate environment, this isn't possible anyway)
-preload_app = True
| {"golden_diff": "diff --git a/.devcontainer/server/settings.py b/.devcontainer/server/settings.py\n--- a/.devcontainer/server/settings.py\n+++ b/.devcontainer/server/settings.py\n@@ -11,7 +11,7 @@\n \n # Data settings\n \n-IMPORT_FILE_PATH = \"/.devcontainer/server/data.csv\"\n+IMPORT_FILE_PATH = \"https://raw.githubusercontent.com/cal-itp/eligibility-server/main/data/server.csv\"\n INPUT_HASH_ALGO = \"\"\n \n # CSV-specific settings\ndiff --git a/appcontainer/gunicorn.conf.py b/appcontainer/gunicorn.conf.py\ndeleted file mode 100644\n--- a/appcontainer/gunicorn.conf.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-\"\"\"\n-The Gunicorn configuration file\n-More information: https://docs.gunicorn.org/en/stable/settings.html\n-\"\"\"\n-\n-import multiprocessing\n-\n-# the unix socket defined in nginx.conf\n-bind = \"unix:/home/calitp/app/run/gunicorn.sock\"\n-\n-# Recommend (2 x $num_cores) + 1 as the number of workers to start off with\n-workers = multiprocessing.cpu_count() * 2 + 1\n-\n-# send logs to stdout and stderr\n-accesslog = \"-\"\n-errorlog = \"-\"\n-\n-# Preloading can save some RAM resources as well as speed up server boot times,\n-# at the cost of not being able to reload app code by restarting workers\n-# (in an ECS Fargate environment, this isn't possible anyway)\n-preload_app = True\n", "issue": "Use the common base Docker image\nThis is a corollary to work done in cal-itp/eligibility-server#123.\r\n\r\nWe have a new repository [`cal-itp/docker-python-web`](https://github.com/cal-itp/docker-python-web) that publishes a Docker image to GitHub Container Registry at [`ghcr.io/cal-itp/docker-python-web`](https://github.com/cal-itp/docker-python-web/pkgs/container/docker-python-web).\r\n\r\nThis image contains a baseline `nginx` and `gunicorn` setup and configuration. 
We can replace the Benefits app container's base image with this image to simplify the repository and build process for Benefits.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] the Benefits app container builds `FROM ghcr.io/cal-itp/docker-python-web`\r\n- [ ] duplicate `nginx` and `gunicorn` setup in this repository is removed\r\n- [ ] the app runs locally as a container `docker compose up client`\r\n- [ ] the devcontainer starts up\r\n- [ ] the app runs in DEBUG mode in the devcontainer with `F5`\r\n\r\n## Additional context\r\n\r\nSee the [`docker-python-web` Docs](https://docs.calitp.org/docker-python-web/) for more information.\n", "before_files": [{"content": "\"\"\"\nThe Gunicorn configuration file\nMore information: https://docs.gunicorn.org/en/stable/settings.html\n\"\"\"\n\nimport multiprocessing\n\n# the unix socket defined in nginx.conf\nbind = \"unix:/home/calitp/app/run/gunicorn.sock\"\n\n# Recommend (2 x $num_cores) + 1 as the number of workers to start off with\nworkers = multiprocessing.cpu_count() * 2 + 1\n\n# send logs to stdout and stderr\naccesslog = \"-\"\nerrorlog = \"-\"\n\n# Preloading can save some RAM resources as well as speed up server boot times,\n# at the cost of not being able to reload app code by restarting workers\n# (in an ECS Fargate environment, this isn't possible anyway)\npreload_app = True\n", "path": "appcontainer/gunicorn.conf.py"}, {"content": "# App settings\n\nLOG_LEVEL = \"DEBUG\"\n\n# Eligibility Verification settings\n\nCLIENT_KEY_PATH = \"https://raw.githubusercontent.com/cal-itp/eligibility-server/main/keys/client.pub\"\nSERVER_PRIVATE_KEY_PATH = \"https://raw.githubusercontent.com/cal-itp/eligibility-server/main/keys/server.key\"\nSERVER_PUBLIC_KEY_PATH = \"https://raw.githubusercontent.com/cal-itp/eligibility-server/main/keys/server.pub\"\nSUB_FORMAT_REGEX = r\".+\"\n\n# Data settings\n\nIMPORT_FILE_PATH = \"/.devcontainer/server/data.csv\"\nINPUT_HASH_ALGO = \"\"\n\n# CSV-specific settings\n\nCSV_DELIMITER = \";\"\nCSV_NEWLINE = \"\"\nCSV_QUOTING = 3\nCSV_QUOTECHAR = \"\"\n", "path": ".devcontainer/server/settings.py"}]} | 1,198 | 325 |
gh_patches_debug_17197 | rasdani/github-patches | git_diff | DataBiosphere__toil-3625 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type hints to toilDestroyCluster.py
Add type hints to src/toil/utils/toilDestroyCluster.py so it can be checked under mypy during linting.
Refers to #3568.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-905)
┆Issue Number: TOIL-905
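For context, the module's public surface is a single argument-free `main()` that returns nothing, so the annotation itself is small; a sketch of the annotated function (mirroring the module shown further down, assuming `toil` is installed) looks like this. The file is also listed in the ignore list of `contrib/admin/mypy-with-ignore.py`, so that entry would have to be dropped for mypy to actually check it.

```python
# Sketch only: main() takes no arguments and returns nothing, hence `-> None`.
from toil.common import parser_with_common_options
from toil.provisioners import cluster_factory
from toil.statsAndLogging import set_logging_from_options


def main() -> None:
    parser = parser_with_common_options(provisioner_options=True, jobstore_option=False)
    options = parser.parse_args()
    set_logging_from_options(options)
    cluster = cluster_factory(provisioner=options.provisioner,
                              clusterName=options.clusterName,
                              zone=options.zone)
    cluster.destroyCluster()
```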
</issue>
<code>
[start of src/toil/utils/toilDestroyCluster.py]
1 # Copyright (C) 2015-2021 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Terminates the specified cluster and associated resources."""
15 import logging
16 from toil.common import parser_with_common_options
17 from toil.provisioners import cluster_factory
18 from toil.statsAndLogging import set_logging_from_options
19
20
21 logger = logging.getLogger(__name__)
22
23 def main():
24 parser = parser_with_common_options(provisioner_options=True, jobstore_option=False)
25 options = parser.parse_args()
26 set_logging_from_options(options)
27
28 logger.info('Destroying cluster %s', options.clusterName)
29
30 cluster = cluster_factory(provisioner=options.provisioner,
31 clusterName=options.clusterName,
32 zone=options.zone)
33 cluster.destroyCluster()
34
35 logger.info('Cluster %s is now gone.', options.clusterName)
36
[end of src/toil/utils/toilDestroyCluster.py]
[start of contrib/admin/mypy-with-ignore.py]
1 #!/usr/bin/env python3
2 """
3 Runs mypy and ignores files that do not yet have passing type hints.
4
5 Does not type check test files (any path including "src/toil/test").
6 """
7 import os
8 import subprocess
9 import sys
10
11 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa
12 sys.path.insert(0, pkg_root) # noqa
13
14 from src.toil.lib.resources import glob # type: ignore
15
16
17 def main():
18 all_files_to_check = []
19 for d in ['dashboard', 'docker', 'docs', 'src']:
20 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))
21
22 # TODO: Remove these paths as typing is added and mypy conflicts are addressed
23 ignore_paths = [os.path.abspath(f) for f in [
24 'docker/Dockerfile.py',
25 'docs/conf.py',
26 'docs/vendor/sphinxcontrib/fulltoc.py',
27 'docs/vendor/sphinxcontrib/__init__.py',
28 'src/toil/job.py',
29 'src/toil/leader.py',
30 'src/toil/statsAndLogging.py',
31 'src/toil/common.py',
32 'src/toil/realtimeLogger.py',
33 'src/toil/worker.py',
34 'src/toil/serviceManager.py',
35 'src/toil/toilState.py',
36 'src/toil/__init__.py',
37 'src/toil/resource.py',
38 'src/toil/deferred.py',
39 'src/toil/version.py',
40 'src/toil/wdl/utils.py',
41 'src/toil/wdl/wdl_types.py',
42 'src/toil/wdl/wdl_synthesis.py',
43 'src/toil/wdl/wdl_analysis.py',
44 'src/toil/wdl/wdl_functions.py',
45 'src/toil/wdl/toilwdl.py',
46 'src/toil/wdl/versions/draft2.py',
47 'src/toil/wdl/versions/v1.py',
48 'src/toil/wdl/versions/dev.py',
49 'src/toil/provisioners/clusterScaler.py',
50 'src/toil/provisioners/abstractProvisioner.py',
51 'src/toil/provisioners/gceProvisioner.py',
52 'src/toil/provisioners/__init__.py',
53 'src/toil/provisioners/node.py',
54 'src/toil/provisioners/aws/boto2Context.py',
55 'src/toil/provisioners/aws/awsProvisioner.py',
56 'src/toil/provisioners/aws/__init__.py',
57 'src/toil/batchSystems/slurm.py',
58 'src/toil/batchSystems/gridengine.py',
59 'src/toil/batchSystems/singleMachine.py',
60 'src/toil/batchSystems/abstractBatchSystem.py',
61 'src/toil/batchSystems/parasol.py',
62 'src/toil/batchSystems/kubernetes.py',
63 'src/toil/batchSystems/torque.py',
64 'src/toil/batchSystems/options.py',
65 'src/toil/batchSystems/registry.py',
66 'src/toil/batchSystems/lsf.py',
67 'src/toil/batchSystems/__init__.py',
68 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',
69 'src/toil/batchSystems/lsfHelper.py',
70 'src/toil/batchSystems/htcondor.py',
71 'src/toil/batchSystems/mesos/batchSystem.py',
72 'src/toil/batchSystems/mesos/executor.py',
73 'src/toil/batchSystems/mesos/conftest.py',
74 'src/toil/batchSystems/mesos/__init__.py',
75 'src/toil/batchSystems/mesos/test/__init__.py',
76 'src/toil/cwl/conftest.py',
77 'src/toil/cwl/__init__.py',
78 'src/toil/cwl/cwltoil.py',
79 'src/toil/fileStores/cachingFileStore.py',
80 'src/toil/fileStores/abstractFileStore.py',
81 'src/toil/fileStores/nonCachingFileStore.py',
82 'src/toil/fileStores/__init__.py',
83 'src/toil/jobStores/utils.py',
84 'src/toil/jobStores/abstractJobStore.py',
85 'src/toil/jobStores/conftest.py',
86 'src/toil/jobStores/fileJobStore.py',
87 'src/toil/jobStores/__init__.py',
88 'src/toil/jobStores/googleJobStore.py',
89 'src/toil/jobStores/aws/utils.py',
90 'src/toil/jobStores/aws/jobStore.py',
91 'src/toil/jobStores/aws/__init__.py',
92 'src/toil/utils/toilDebugFile.py',
93 'src/toil/utils/toilUpdateEC2Instances.py',
94 'src/toil/utils/toilStatus.py',
95 'src/toil/utils/toilStats.py',
96 'src/toil/utils/toilSshCluster.py',
97 'src/toil/utils/toilMain.py',
98 'src/toil/utils/__init__.py',
99 'src/toil/utils/toilDestroyCluster.py',
100 'src/toil/utils/toilDebugJob.py',
101 'src/toil/utils/toilRsyncCluster.py',
102 'src/toil/utils/toilClean.py',
103 'src/toil/utils/toilLaunchCluster.py',
104 'src/toil/lib/memoize.py',
105 'src/toil/lib/throttle.py',
106 'src/toil/lib/humanize.py',
107 'src/toil/lib/compatibility.py',
108 'src/toil/lib/iterables.py',
109 'src/toil/lib/bioio.py',
110 'src/toil/lib/ec2.py',
111 # 'src/toil/lib/conversions.py',
112 'src/toil/lib/ec2nodes.py',
113 # 'src/toil/lib/misc.py',
114 'src/toil/lib/expando.py',
115 'src/toil/lib/threading.py',
116 'src/toil/lib/exceptions.py',
117 'src/toil/lib/__init__.py',
118 'src/toil/lib/generatedEC2Lists.py',
119 'src/toil/lib/retry.py',
120 'src/toil/lib/objects.py',
121 'src/toil/lib/io.py',
122 'src/toil/lib/docker.py',
123 'src/toil/lib/encryption/_nacl.py',
124 'src/toil/lib/encryption/_dummy.py',
125 'src/toil/lib/encryption/conftest.py',
126 'src/toil/lib/encryption/__init__.py',
127 'src/toil/lib/aws/utils.py',
128 'src/toil/lib/aws/__init__.py'
129 ]]
130
131 filtered_files_to_check = []
132 for file_path in all_files_to_check:
133 if file_path not in ignore_paths and 'src/toil/test' not in file_path:
134 filtered_files_to_check.append(file_path)
135 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?
136 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check
137 p = subprocess.run(args=args, stdout=subprocess.PIPE)
138 result = p.stdout.decode()
139 print(result)
140 if 'Success: no issues found' not in result:
141 exit(1)
142
143
144 if __name__ == '__main__':
145 main()
146
[end of contrib/admin/mypy-with-ignore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py
--- a/contrib/admin/mypy-with-ignore.py
+++ b/contrib/admin/mypy-with-ignore.py
@@ -96,7 +96,6 @@
'src/toil/utils/toilSshCluster.py',
'src/toil/utils/toilMain.py',
'src/toil/utils/__init__.py',
- 'src/toil/utils/toilDestroyCluster.py',
'src/toil/utils/toilDebugJob.py',
'src/toil/utils/toilRsyncCluster.py',
'src/toil/utils/toilClean.py',
diff --git a/src/toil/utils/toilDestroyCluster.py b/src/toil/utils/toilDestroyCluster.py
--- a/src/toil/utils/toilDestroyCluster.py
+++ b/src/toil/utils/toilDestroyCluster.py
@@ -20,7 +20,7 @@
logger = logging.getLogger(__name__)
-def main():
+def main() -> None:
parser = parser_with_common_options(provisioner_options=True, jobstore_option=False)
options = parser.parse_args()
set_logging_from_options(options)
| {"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -96,7 +96,6 @@\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n- 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilClean.py',\ndiff --git a/src/toil/utils/toilDestroyCluster.py b/src/toil/utils/toilDestroyCluster.py\n--- a/src/toil/utils/toilDestroyCluster.py\n+++ b/src/toil/utils/toilDestroyCluster.py\n@@ -20,7 +20,7 @@\n \n logger = logging.getLogger(__name__)\n \n-def main():\n+def main() -> None:\n parser = parser_with_common_options(provisioner_options=True, jobstore_option=False)\n options = parser.parse_args()\n set_logging_from_options(options)\n", "issue": "Add type hints to toilDestroyCluster.py\nAdd type hints to src/toil/utils/toilDestroyCluster.py so it can be checked under mypy during linting. \n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-905)\n\u2506Issue Number: TOIL-905\n\n", "before_files": [{"content": "# Copyright (C) 2015-2021 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Terminates the specified cluster and associated resources.\"\"\"\nimport logging\nfrom toil.common import parser_with_common_options\nfrom toil.provisioners import cluster_factory\nfrom toil.statsAndLogging import set_logging_from_options\n\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n parser = parser_with_common_options(provisioner_options=True, jobstore_option=False)\n options = parser.parse_args()\n set_logging_from_options(options)\n\n logger.info('Destroying cluster %s', options.clusterName)\n\n cluster = cluster_factory(provisioner=options.provisioner,\n clusterName=options.clusterName,\n zone=options.zone)\n cluster.destroyCluster()\n\n logger.info('Cluster %s is now gone.', options.clusterName)\n", "path": "src/toil/utils/toilDestroyCluster.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 
'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilClean.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n # 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 
'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]} | 2,914 | 251 |
gh_patches_debug_14770 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not able to import flash as transformers.__spec__ is returning None
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
I am trying to complete a summarization project and decided to use flash for it, but I am getting an import error because `transformers.__spec__` is returning None. I have opened issue huggingface/transformers#12904 on the transformers repository in the meantime, and would like to know if there is a possible workaround.
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the described issue.
Minimal means having the shortest code but still preserving the bug. -->
```
import transformers
print(transformers.__version__)
print(transformers.__spec__)
import flash
```
**OUTPUT**:
```
4.8.2
None
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-1-d02f1c770adc> in <module>
3 print(transformers.__spec__)
4
----> 5 import flash
/opt/conda/lib/python3.7/site-packages/flash/__init__.py in <module>
16
17 from flash.__about__ import * # noqa: F401 F403
---> 18 from flash.core.utilities.imports import _TORCH_AVAILABLE
19
20 if _TORCH_AVAILABLE:
/opt/conda/lib/python3.7/site-packages/flash/core/utilities/imports.py in <module>
75 _PYTORCHVIDEO_AVAILABLE = _module_available("pytorchvideo")
76 _MATPLOTLIB_AVAILABLE = _module_available("matplotlib")
---> 77 _TRANSFORMERS_AVAILABLE = _module_available("transformers")
78 _PYSTICHE_AVAILABLE = _module_available("pystiche")
79 _FIFTYONE_AVAILABLE = _module_available("fiftyone")
/opt/conda/lib/python3.7/site-packages/flash/core/utilities/imports.py in _module_available(module_path)
36 """
37 try:
---> 38 return find_spec(module_path) is not None
39 except AttributeError:
40 # Python 3.6
/opt/conda/lib/python3.7/importlib/util.py in find_spec(name, package)
112 else:
113 if spec is None:
--> 114 raise ValueError('{}.__spec__ is None'.format(name))
115 return spec
116
ValueError: transformers.__spec__ is None
```
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
Able to import flash
### Environment
- PyTorch Version (e.g., 1.0): 3.7.0/3.8.1
- OS (e.g., Linux):
- Platform: Colab/Kaggle
- How you installed PyTorch (`conda`, `pip`, source):
- Build command you used (if compiling from source):
- Python version:
- CUDA/cuDNN version:
- GPU models and configuration:
- Any other relevant information:
### Additional context
<!-- Add any other context about the problem here. -->
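One user-side workaround suggested by the traceback (a hedged sketch, not an official fix): `importlib.util.find_spec` only inspects `__spec__` for modules that are already in `sys.modules`, so importing `flash` before `transformers` lets the availability check go through the normal finder path and avoids the `ValueError`; importing `transformers` afterwards is then unaffected.

```python
# Hedged workaround sketch: run flash's import-time availability checks while
# transformers is not yet in sys.modules, then import transformers as usual.
import flash          # find_spec("transformers") uses the finders, not a None __spec__
import transformers   # safe to import afterwards

print(transformers.__version__)
```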
</issue>
<code>
[start of flash/core/utilities/imports.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import functools
15 import importlib
16 import operator
17 import types
18 from importlib.util import find_spec
19 from typing import Callable, List, Union
20
21 from pkg_resources import DistributionNotFound
22
23 try:
24 from packaging.version import Version
25 except (ModuleNotFoundError, DistributionNotFound):
26 Version = None
27
28
29 def _module_available(module_path: str) -> bool:
30 """
31 Check if a path is available in your environment
32
33 >>> _module_available('os')
34 True
35 >>> _module_available('bla.bla')
36 False
37 """
38 try:
39 return find_spec(module_path) is not None
40 except AttributeError:
41 # Python 3.6
42 return False
43 except ModuleNotFoundError:
44 # Python 3.7+
45 return False
46
47
48 def _compare_version(package: str, op, version) -> bool:
49 """
50 Compare package version with some requirements
51
52 >>> _compare_version("torch", operator.ge, "0.1")
53 True
54 """
55 try:
56 pkg = importlib.import_module(package)
57 except (ModuleNotFoundError, DistributionNotFound, ValueError):
58 return False
59 try:
60 pkg_version = Version(pkg.__version__)
61 except TypeError:
62 # this is mocked by sphinx, so it shall return True to generate all summaries
63 return True
64 return op(pkg_version, Version(version))
65
66
67 _TORCH_AVAILABLE = _module_available("torch")
68 _BOLTS_AVAILABLE = _module_available("pl_bolts") and _compare_version("torch", operator.lt, "1.9.0")
69 _PANDAS_AVAILABLE = _module_available("pandas")
70 _SKLEARN_AVAILABLE = _module_available("sklearn")
71 _TABNET_AVAILABLE = _module_available("pytorch_tabnet")
72 _KORNIA_AVAILABLE = _module_available("kornia")
73 _COCO_AVAILABLE = _module_available("pycocotools")
74 _TIMM_AVAILABLE = _module_available("timm")
75 _TORCHVISION_AVAILABLE = _module_available("torchvision")
76 _PYTORCHVIDEO_AVAILABLE = _module_available("pytorchvideo")
77 _MATPLOTLIB_AVAILABLE = _module_available("matplotlib")
78 _TRANSFORMERS_AVAILABLE = _module_available("transformers")
79 _PYSTICHE_AVAILABLE = _module_available("pystiche")
80 _FIFTYONE_AVAILABLE = _module_available("fiftyone")
81 _FASTAPI_AVAILABLE = _module_available("fastapi")
82 _PYDANTIC_AVAILABLE = _module_available("pydantic")
83 _GRAPHVIZ_AVAILABLE = _module_available("graphviz")
84 _CYTOOLZ_AVAILABLE = _module_available("cytoolz")
85 _UVICORN_AVAILABLE = _module_available("uvicorn")
86 _PIL_AVAILABLE = _module_available("PIL")
87 _OPEN3D_AVAILABLE = _module_available("open3d")
88 _ASTEROID_AVAILABLE = _module_available("asteroid")
89 _SEGMENTATION_MODELS_AVAILABLE = _module_available("segmentation_models_pytorch")
90 _SOUNDFILE_AVAILABLE = _module_available("soundfile")
91 _TORCH_SCATTER_AVAILABLE = _module_available("torch_scatter")
92 _TORCH_SPARSE_AVAILABLE = _module_available("torch_sparse")
93 _TORCH_GEOMETRIC_AVAILABLE = _module_available("torch_geometric")
94 _TORCHAUDIO_AVAILABLE = _module_available("torchaudio")
95 _ROUGE_SCORE_AVAILABLE = _module_available("rouge_score")
96 _SENTENCEPIECE_AVAILABLE = _module_available("sentencepiece")
97 _DATASETS_AVAILABLE = _module_available("datasets")
98
99 if Version:
100 _TORCHVISION_GREATER_EQUAL_0_9 = _compare_version("torchvision", operator.ge, "0.9.0")
101
102 _TEXT_AVAILABLE = all([
103 _TRANSFORMERS_AVAILABLE,
104 _ROUGE_SCORE_AVAILABLE,
105 _SENTENCEPIECE_AVAILABLE,
106 _DATASETS_AVAILABLE,
107 ])
108 _TABULAR_AVAILABLE = _TABNET_AVAILABLE and _PANDAS_AVAILABLE
109 _VIDEO_AVAILABLE = _PYTORCHVIDEO_AVAILABLE
110 _IMAGE_AVAILABLE = all([
111 _TORCHVISION_AVAILABLE,
112 _TIMM_AVAILABLE,
113 _PIL_AVAILABLE,
114 _KORNIA_AVAILABLE,
115 _PYSTICHE_AVAILABLE,
116 _SEGMENTATION_MODELS_AVAILABLE,
117 ])
118 _SERVE_AVAILABLE = _FASTAPI_AVAILABLE and _PYDANTIC_AVAILABLE and _CYTOOLZ_AVAILABLE and _UVICORN_AVAILABLE
119 _POINTCLOUD_AVAILABLE = _OPEN3D_AVAILABLE and _TORCHVISION_AVAILABLE
120 _AUDIO_AVAILABLE = all([_ASTEROID_AVAILABLE, _TORCHAUDIO_AVAILABLE, _SOUNDFILE_AVAILABLE, _TRANSFORMERS_AVAILABLE])
121 _GRAPH_AVAILABLE = _TORCH_SCATTER_AVAILABLE and _TORCH_SPARSE_AVAILABLE and _TORCH_GEOMETRIC_AVAILABLE
122
123 _EXTRAS_AVAILABLE = {
124 'image': _IMAGE_AVAILABLE,
125 'tabular': _TABULAR_AVAILABLE,
126 'text': _TEXT_AVAILABLE,
127 'video': _VIDEO_AVAILABLE,
128 'pointcloud': _POINTCLOUD_AVAILABLE,
129 'serve': _SERVE_AVAILABLE,
130 'audio': _AUDIO_AVAILABLE,
131 'graph': _GRAPH_AVAILABLE,
132 }
133
134
135 def _requires(
136 module_paths: Union[str, List],
137 module_available: Callable[[str], bool],
138 formatter: Callable[[List[str]], str],
139 ):
140
141 if not isinstance(module_paths, list):
142 module_paths = [module_paths]
143
144 def decorator(func):
145 if not all(module_available(module_path) for module_path in module_paths):
146
147 @functools.wraps(func)
148 def wrapper(*args, **kwargs):
149 raise ModuleNotFoundError(
150 f"Required dependencies not available. Please run: pip install {formatter(module_paths)}"
151 )
152
153 return wrapper
154 else:
155 return func
156
157 return decorator
158
159
160 def requires(module_paths: Union[str, List]):
161 return _requires(module_paths, _module_available, lambda module_paths: " ".join(module_paths))
162
163
164 def requires_extras(extras: Union[str, List]):
165 return _requires(
166 extras, lambda extras: _EXTRAS_AVAILABLE[extras], lambda extras: f"'lightning-flash[{','.join(extras)}]'"
167 )
168
169
170 def lazy_import(module_name, callback=None):
171 """Returns a proxy module object that will lazily import the given module
172 the first time it is used.
173
174 Example usage::
175
176 # Lazy version of `import tensorflow as tf`
177 tf = lazy_import("tensorflow")
178
179 # Other commands
180
181 # Now the module is loaded
182 tf.__version__
183
184 Args:
185 module_name: the fully-qualified module name to import
186 callback (None): a callback function to call before importing the
187 module
188
189 Returns:
190 a proxy module object that will be lazily imported when first used
191 """
192 return LazyModule(module_name, callback=callback)
193
194
195 class LazyModule(types.ModuleType):
196 """Proxy module that lazily imports the underlying module the first time it
197 is actually used.
198
199 Args:
200 module_name: the fully-qualified module name to import
201 callback (None): a callback function to call before importing the
202 module
203 """
204
205 def __init__(self, module_name, callback=None):
206 super().__init__(module_name)
207 self._module = None
208 self._callback = callback
209
210 def __getattr__(self, item):
211 if self._module is None:
212 self._import_module()
213
214 return getattr(self._module, item)
215
216 def __dir__(self):
217 if self._module is None:
218 self._import_module()
219
220 return dir(self._module)
221
222 def _import_module(self):
223 # Execute callback, if any
224 if self._callback is not None:
225 self._callback()
226
227 # Actually import the module
228 module = importlib.import_module(self.__name__)
229 self._module = module
230
231 # Update this object's dict so that attribute references are efficient
232 # (__getattr__ is only called on lookups that fail)
233 self.__dict__.update(module.__dict__)
234
[end of flash/core/utilities/imports.py]
</code>
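Before the task statement, a brief aside on the `lazy_import` helper shown in the listing above: it returns a `LazyModule` proxy, and the real import only happens on first attribute access. The sketch below is a hedged usage example; `numpy` is only a stand-in module name and is not something the file above imports lazily itself.

```python
# Hedged usage sketch of lazy_import from the listing above; "numpy" is only a
# placeholder module name for illustration.
from flash.core.utilities.imports import lazy_import

np = lazy_import("numpy")   # returns a LazyModule proxy; nothing is imported yet

def first_use() -> str:
    # The first attribute access triggers the real import (and any callback).
    return np.__version__
```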
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flash/core/utilities/imports.py b/flash/core/utilities/imports.py
--- a/flash/core/utilities/imports.py
+++ b/flash/core/utilities/imports.py
@@ -43,6 +43,9 @@
except ModuleNotFoundError:
# Python 3.7+
return False
+ except ValueError:
+ # Sometimes __spec__ can be None and gives a ValueError
+ return True
 
 
 def _compare_version(package: str, op, version) -> bool:
@@ -59,7 +62,7 @@
try:
pkg_version = Version(pkg.__version__)
except TypeError:
- # this is mock by sphinx, so it shall return True ro generate all summaries
+ # this is mock by sphinx, so it shall return True to generate all summaries
return True
return op(pkg_version, Version(version))
| {"golden_diff": "diff --git a/flash/core/utilities/imports.py b/flash/core/utilities/imports.py\n--- a/flash/core/utilities/imports.py\n+++ b/flash/core/utilities/imports.py\n@@ -43,6 +43,9 @@\n except ModuleNotFoundError:\n # Python 3.7+\n return False\n+ except ValueError:\n+ # Sometimes __spec__ can be None and gives a ValueError\n+ return True\n \n \n def _compare_version(package: str, op, version) -> bool:\n@@ -59,7 +62,7 @@\n try:\n pkg_version = Version(pkg.__version__)\n except TypeError:\n- # this is mock by sphinx, so it shall return True ro generate all summaries\n+ # this is mock by sphinx, so it shall return True to generate all summaries\n return True\n return op(pkg_version, Version(version))\n", "issue": "Not able to import flash as transformer.__spec__ is returning None\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nI am trying to complete a summarization project and decided to use flash for it but am getting import error as `transformers.__spec__` is returning None. I have opened huggingface/transformers#12904 issue on transformers page meanwhile, would like to know if there's a possible workaround to it.\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n```\r\nimport transformers\r\nprint(transformers.__version__)\r\nprint(transformers.__spec__)\r\n\r\nimport flash\r\n```\r\n\r\n**OUTPUT**:\r\n```\r\n4.8.2\r\nNone\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-1-d02f1c770adc> in <module>\r\n 3 print(transformers.__spec__)\r\n 4 \r\n----> 5 import flash\r\n\r\n/opt/conda/lib/python3.7/site-packages/flash/__init__.py in <module>\r\n 16 \r\n 17 from flash.__about__ import * # noqa: F401 F403\r\n---> 18 from flash.core.utilities.imports import _TORCH_AVAILABLE\r\n 19 \r\n 20 if _TORCH_AVAILABLE:\r\n\r\n/opt/conda/lib/python3.7/site-packages/flash/core/utilities/imports.py in <module>\r\n 75 _PYTORCHVIDEO_AVAILABLE = _module_available(\"pytorchvideo\")\r\n 76 _MATPLOTLIB_AVAILABLE = _module_available(\"matplotlib\")\r\n---> 77 _TRANSFORMERS_AVAILABLE = _module_available(\"transformers\")\r\n 78 _PYSTICHE_AVAILABLE = _module_available(\"pystiche\")\r\n 79 _FIFTYONE_AVAILABLE = _module_available(\"fiftyone\")\r\n\r\n/opt/conda/lib/python3.7/site-packages/flash/core/utilities/imports.py in _module_available(module_path)\r\n 36 \"\"\"\r\n 37 try:\r\n---> 38 return find_spec(module_path) is not None\r\n 39 except AttributeError:\r\n 40 # Python 3.6\r\n\r\n/opt/conda/lib/python3.7/importlib/util.py in find_spec(name, package)\r\n 112 else:\r\n 113 if spec is None:\r\n--> 114 raise ValueError('{}.__spec__ is None'.format(name))\r\n 115 return spec\r\n 116 \r\n\r\nValueError: transformers.__spec__ is None\r\n```\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nAble to import flash\r\n\r\n### Environment\r\n\r\n - PyTorch Version (e.g., 1.0): 3.7.0/3.8.1\r\n - OS (e.g., Linux):\r\n - Platform: Colab/Kaggle\r\n - How you installed PyTorch (`conda`, `pip`, source): \r\n - Build command you used (if compiling from source):\r\n - Python version:\r\n - CUDA/cuDNN version:\r\n - GPU models and configuration:\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport functools\nimport importlib\nimport operator\nimport types\nfrom importlib.util import find_spec\nfrom typing import Callable, List, Union\n\nfrom pkg_resources import DistributionNotFound\n\ntry:\n from packaging.version import Version\nexcept (ModuleNotFoundError, DistributionNotFound):\n Version = None\n\n\ndef _module_available(module_path: str) -> bool:\n \"\"\"\n Check if a path is available in your environment\n\n >>> _module_available('os')\n True\n >>> _module_available('bla.bla')\n False\n \"\"\"\n try:\n return find_spec(module_path) is not None\n except AttributeError:\n # Python 3.6\n return False\n except ModuleNotFoundError:\n # Python 3.7+\n return False\n\n\ndef _compare_version(package: str, op, version) -> bool:\n \"\"\"\n Compare package version with some requirements\n\n >>> _compare_version(\"torch\", operator.ge, \"0.1\")\n True\n \"\"\"\n try:\n pkg = importlib.import_module(package)\n except (ModuleNotFoundError, DistributionNotFound, ValueError):\n return False\n try:\n pkg_version = Version(pkg.__version__)\n except TypeError:\n # this is mock by sphinx, so it shall return True ro generate all summaries\n return True\n return op(pkg_version, Version(version))\n\n\n_TORCH_AVAILABLE = _module_available(\"torch\")\n_BOLTS_AVAILABLE = _module_available(\"pl_bolts\") and _compare_version(\"torch\", operator.lt, \"1.9.0\")\n_PANDAS_AVAILABLE = _module_available(\"pandas\")\n_SKLEARN_AVAILABLE = _module_available(\"sklearn\")\n_TABNET_AVAILABLE = _module_available(\"pytorch_tabnet\")\n_KORNIA_AVAILABLE = _module_available(\"kornia\")\n_COCO_AVAILABLE = _module_available(\"pycocotools\")\n_TIMM_AVAILABLE = _module_available(\"timm\")\n_TORCHVISION_AVAILABLE = _module_available(\"torchvision\")\n_PYTORCHVIDEO_AVAILABLE = _module_available(\"pytorchvideo\")\n_MATPLOTLIB_AVAILABLE = _module_available(\"matplotlib\")\n_TRANSFORMERS_AVAILABLE = _module_available(\"transformers\")\n_PYSTICHE_AVAILABLE = _module_available(\"pystiche\")\n_FIFTYONE_AVAILABLE = _module_available(\"fiftyone\")\n_FASTAPI_AVAILABLE = _module_available(\"fastapi\")\n_PYDANTIC_AVAILABLE = _module_available(\"pydantic\")\n_GRAPHVIZ_AVAILABLE = _module_available(\"graphviz\")\n_CYTOOLZ_AVAILABLE = _module_available(\"cytoolz\")\n_UVICORN_AVAILABLE = _module_available(\"uvicorn\")\n_PIL_AVAILABLE = _module_available(\"PIL\")\n_OPEN3D_AVAILABLE = _module_available(\"open3d\")\n_ASTEROID_AVAILABLE = _module_available(\"asteroid\")\n_SEGMENTATION_MODELS_AVAILABLE = _module_available(\"segmentation_models_pytorch\")\n_SOUNDFILE_AVAILABLE = _module_available(\"soundfile\")\n_TORCH_SCATTER_AVAILABLE = _module_available(\"torch_scatter\")\n_TORCH_SPARSE_AVAILABLE = _module_available(\"torch_sparse\")\n_TORCH_GEOMETRIC_AVAILABLE = _module_available(\"torch_geometric\")\n_TORCHAUDIO_AVAILABLE = _module_available(\"torchaudio\")\n_ROUGE_SCORE_AVAILABLE = 
_module_available(\"rouge_score\")\n_SENTENCEPIECE_AVAILABLE = _module_available(\"sentencepiece\")\n_DATASETS_AVAILABLE = _module_available(\"datasets\")\n\nif Version:\n _TORCHVISION_GREATER_EQUAL_0_9 = _compare_version(\"torchvision\", operator.ge, \"0.9.0\")\n\n_TEXT_AVAILABLE = all([\n _TRANSFORMERS_AVAILABLE,\n _ROUGE_SCORE_AVAILABLE,\n _SENTENCEPIECE_AVAILABLE,\n _DATASETS_AVAILABLE,\n])\n_TABULAR_AVAILABLE = _TABNET_AVAILABLE and _PANDAS_AVAILABLE\n_VIDEO_AVAILABLE = _PYTORCHVIDEO_AVAILABLE\n_IMAGE_AVAILABLE = all([\n _TORCHVISION_AVAILABLE,\n _TIMM_AVAILABLE,\n _PIL_AVAILABLE,\n _KORNIA_AVAILABLE,\n _PYSTICHE_AVAILABLE,\n _SEGMENTATION_MODELS_AVAILABLE,\n])\n_SERVE_AVAILABLE = _FASTAPI_AVAILABLE and _PYDANTIC_AVAILABLE and _CYTOOLZ_AVAILABLE and _UVICORN_AVAILABLE\n_POINTCLOUD_AVAILABLE = _OPEN3D_AVAILABLE and _TORCHVISION_AVAILABLE\n_AUDIO_AVAILABLE = all([_ASTEROID_AVAILABLE, _TORCHAUDIO_AVAILABLE, _SOUNDFILE_AVAILABLE, _TRANSFORMERS_AVAILABLE])\n_GRAPH_AVAILABLE = _TORCH_SCATTER_AVAILABLE and _TORCH_SPARSE_AVAILABLE and _TORCH_GEOMETRIC_AVAILABLE\n\n_EXTRAS_AVAILABLE = {\n 'image': _IMAGE_AVAILABLE,\n 'tabular': _TABULAR_AVAILABLE,\n 'text': _TEXT_AVAILABLE,\n 'video': _VIDEO_AVAILABLE,\n 'pointcloud': _POINTCLOUD_AVAILABLE,\n 'serve': _SERVE_AVAILABLE,\n 'audio': _AUDIO_AVAILABLE,\n 'graph': _GRAPH_AVAILABLE,\n}\n\n\ndef _requires(\n module_paths: Union[str, List],\n module_available: Callable[[str], bool],\n formatter: Callable[[List[str]], str],\n):\n\n if not isinstance(module_paths, list):\n module_paths = [module_paths]\n\n def decorator(func):\n if not all(module_available(module_path) for module_path in module_paths):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n raise ModuleNotFoundError(\n f\"Required dependencies not available. 
Please run: pip install {formatter(module_paths)}\"\n )\n\n return wrapper\n else:\n return func\n\n return decorator\n\n\ndef requires(module_paths: Union[str, List]):\n return _requires(module_paths, _module_available, lambda module_paths: \" \".join(module_paths))\n\n\ndef requires_extras(extras: Union[str, List]):\n return _requires(\n extras, lambda extras: _EXTRAS_AVAILABLE[extras], lambda extras: f\"'lightning-flash[{','.join(extras)}]'\"\n )\n\n\ndef lazy_import(module_name, callback=None):\n \"\"\"Returns a proxy module object that will lazily import the given module\n the first time it is used.\n\n Example usage::\n\n # Lazy version of `import tensorflow as tf`\n tf = lazy_import(\"tensorflow\")\n\n # Other commands\n\n # Now the module is loaded\n tf.__version__\n\n Args:\n module_name: the fully-qualified module name to import\n callback (None): a callback function to call before importing the\n module\n\n Returns:\n a proxy module object that will be lazily imported when first used\n \"\"\"\n return LazyModule(module_name, callback=callback)\n\n\nclass LazyModule(types.ModuleType):\n \"\"\"Proxy module that lazily imports the underlying module the first time it\n is actually used.\n\n Args:\n module_name: the fully-qualified module name to import\n callback (None): a callback function to call before importing the\n module\n \"\"\"\n\n def __init__(self, module_name, callback=None):\n super().__init__(module_name)\n self._module = None\n self._callback = callback\n\n def __getattr__(self, item):\n if self._module is None:\n self._import_module()\n\n return getattr(self._module, item)\n\n def __dir__(self):\n if self._module is None:\n self._import_module()\n\n return dir(self._module)\n\n def _import_module(self):\n # Execute callback, if any\n if self._callback is not None:\n self._callback()\n\n # Actually import the module\n module = importlib.import_module(self.__name__)\n self._module = module\n\n # Update this object's dict so that attribute references are efficient\n # (__getattr__ is only called on lookups that fail)\n self.__dict__.update(module.__dict__)\n", "path": "flash/core/utilities/imports.py"}]} | 3,678 | 195 |
gh_patches_debug_17419 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1386 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Avoid "impossible" ZeroDivisionError we keep seeing
https://github.com/HypothesisWorks/hypothesis/blob/592bc7cd2c642bf273113083457444b4823021c9/hypothesis-python/src/hypothesis/statistics.py#L88-L95
This snippet occasionally leads to problems, when `total_drawtime` is nonzero but `total_runtime` *is* zero, causing test failures [like this one](https://ci.appveyor.com/project/DRMacIver/hypothesis/build/1.0.458/job/hl2d74810901ttf5#L926). I'm not sure why this happens - it's been observed on Python 2 (without) and Python 3 (with) a monotonic clock, so there might be multiple causes - but the solution is pretty simple. All you need to do to fix this is to change the first conditional to
```python
if total_drawtime <= 0.0 or total_runtime <= 0.0:
```
Then write up a short changelog (see guides/documentation.rst), add your name to the contributors list (in - and read! - contributing.rst), and open a pull request! If you have any questions, just let me know and I'd be happy to help out.
</issue>
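As a hedged illustration of the guard the issue proposes, the helper below pulls the draw-time percentage logic into a standalone function. The function name and standalone form are assumptions made for readability only; in the real code this logic sits inline in `Statistics.__init__` (see the listing below), and the merged fix may differ in detail.

```python
# Sketch of the issue's suggested guard; not the actual patch, just the idea.
def draw_time_display(total_drawtime: float, total_runtime: float) -> str:
    # Guard both totals so a zero (or negative) runtime can never divide.
    if total_drawtime <= 0.0 or total_runtime <= 0.0:
        return "~ 0%"
    draw_time_percentage = 100.0 * min(1, total_drawtime / total_runtime)
    return "~ %d%%" % (round(draw_time_percentage),)
```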
<code>
[start of hypothesis-python/src/hypothesis/statistics.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import math
21
22 from hypothesis.utils.dynamicvariables import DynamicVariable
23 from hypothesis.internal.conjecture.data import Status
24 from hypothesis.internal.conjecture.engine import MAX_SHRINKS, ExitReason
25
26 collector = DynamicVariable(None)
27
28
29 class Statistics(object):
30
31 def __init__(self, engine):
32 self.passing_examples = len(
33 engine.status_runtimes.get(Status.VALID, ()))
34 self.invalid_examples = len(
35 engine.status_runtimes.get(Status.INVALID, []) +
36 engine.status_runtimes.get(Status.OVERRUN, [])
37 )
38 self.failing_examples = len(engine.status_runtimes.get(
39 Status.INTERESTING, ()))
40
41 runtimes = sorted(
42 engine.status_runtimes.get(Status.VALID, []) +
43 engine.status_runtimes.get(Status.INVALID, []) +
44 engine.status_runtimes.get(Status.INTERESTING, [])
45 )
46
47 self.has_runs = bool(runtimes)
48 if not self.has_runs:
49 return
50
51 n = max(0, len(runtimes) - 1)
52 lower = int(runtimes[int(math.floor(n * 0.05))] * 1000)
53 upper = int(runtimes[int(math.ceil(n * 0.95))] * 1000)
54 if upper == 0:
55 self.runtimes = '< 1ms'
56 elif lower == upper:
57 self.runtimes = '~ %dms' % (lower,)
58 else:
59 self.runtimes = '%d-%d ms' % (lower, upper)
60
61 if engine.exit_reason == ExitReason.finished:
62 self.exit_reason = 'nothing left to do'
63 elif engine.exit_reason == ExitReason.flaky:
64 self.exit_reason = 'test was flaky'
65 elif engine.exit_reason == ExitReason.max_shrinks:
66 self.exit_reason = 'shrunk example %s times' % (MAX_SHRINKS,)
67 elif engine.exit_reason == ExitReason.max_iterations:
68 self.exit_reason = ((
69 'settings.max_examples={}, but < 10% of examples satisfied '
70 'assumptions').format(engine.settings.max_examples)
71 )
72 else:
73 self.exit_reason = (
74 'settings.%s=%r' % (
75 engine.exit_reason.name,
76 getattr(engine.settings, engine.exit_reason.name)
77 )
78 )
79
80 self.events = [
81 '%.2f%%, %s' % (
82 c / engine.call_count * 100, e
83 ) for e, c in sorted(
84 engine.event_call_counts.items(), key=lambda x: -x[1])
85 ]
86
87 total_runtime = math.fsum(engine.all_runtimes)
88 total_drawtime = math.fsum(engine.all_drawtimes)
89
90 if total_drawtime == 0.0:
91 self.draw_time_percentage = '~ 0%'
92 else:
93 draw_time_percentage = 100.0 * min(
94 1, total_drawtime / total_runtime)
95
96 self.draw_time_percentage = '~ %d%%' % (
97 round(draw_time_percentage),)
98
99
100 def note_engine_for_statistics(engine):
101 callback = collector.value
102 if callback is not None:
103 callback(Statistics(engine))
104
[end of hypothesis-python/src/hypothesis/statistics.py]
</code>
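For clarity, here is a small worked example of the 5th/95th-percentile runtime formatting used in the listing above, with made-up runtimes in seconds; the numbers are illustrative only.

```python
# Worked example of the runtime percentile display above, on synthetic data.
import math

runtimes = sorted([0.002, 0.003, 0.004, 0.010, 0.050])  # seconds, made up
n = max(0, len(runtimes) - 1)
lower = int(runtimes[int(math.floor(n * 0.05))] * 1000)  # -> 2 (ms)
upper = int(runtimes[int(math.ceil(n * 0.95))] * 1000)   # -> 50 (ms)
print("%d-%d ms" % (lower, upper))
```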
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hypothesis-python/src/hypothesis/statistics.py b/hypothesis-python/src/hypothesis/statistics.py
--- a/hypothesis-python/src/hypothesis/statistics.py
+++ b/hypothesis-python/src/hypothesis/statistics.py
@@ -87,8 +87,15 @@
total_runtime = math.fsum(engine.all_runtimes)
total_drawtime = math.fsum(engine.all_drawtimes)
 
-        if total_drawtime == 0.0:
+ if total_drawtime == 0.0 and total_runtime >= 0.0:
self.draw_time_percentage = '~ 0%'
+ elif total_drawtime < 0.0 or total_runtime <= 0.0:
+ # This weird condition is possible in two ways:
+ # 1. drawtime and/or runtime are negative, due to clock changes
+ # on Python 2 or old OSs (we use monotonic() where available)
+ # 2. floating-point issues *very rarely* cause math.fsum to be
+ # off by the lowest bit, so drawtime==0 and runtime!=0, eek!
+ self.draw_time_percentage = 'NaN'
else:
draw_time_percentage = 100.0 * min(
1, total_drawtime / total_runtime)
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/statistics.py b/hypothesis-python/src/hypothesis/statistics.py\n--- a/hypothesis-python/src/hypothesis/statistics.py\n+++ b/hypothesis-python/src/hypothesis/statistics.py\n@@ -87,8 +87,15 @@\n total_runtime = math.fsum(engine.all_runtimes)\n total_drawtime = math.fsum(engine.all_drawtimes)\n \n- if total_drawtime == 0.0:\n+ if total_drawtime == 0.0 and total_runtime >= 0.0:\n self.draw_time_percentage = '~ 0%'\n+ elif total_drawtime < 0.0 or total_runtime <= 0.0:\n+ # This weird condition is possible in two ways:\n+ # 1. drawtime and/or runtime are negative, due to clock changes\n+ # on Python 2 or old OSs (we use monotonic() where available)\n+ # 2. floating-point issues *very rarely* cause math.fsum to be\n+ # off by the lowest bit, so drawtime==0 and runtime!=0, eek!\n+ self.draw_time_percentage = 'NaN'\n else:\n draw_time_percentage = 100.0 * min(\n 1, total_drawtime / total_runtime)\n", "issue": "Avoid \"impossible\" ZeroDivisionError we keep seeing\nhttps://github.com/HypothesisWorks/hypothesis/blob/592bc7cd2c642bf273113083457444b4823021c9/hypothesis-python/src/hypothesis/statistics.py#L88-L95\r\n\r\nThis snippet occasionally leads to problems, when `total_drawtime` is nonzero but `total_runtime` *is* zero, causing test failures [like this one](https://ci.appveyor.com/project/DRMacIver/hypothesis/build/1.0.458/job/hl2d74810901ttf5#L926). I'm not sure why this happens - it's been observed on Python 2 (without) and Python 3 (with) a monotonic clock, so there might be multiple causes - but the solution is pretty simple. All you need to do to fix this is to change the first conditional to\r\n\r\n```python\r\nif total_drawtime <= 0.0 or total_runtime <= 0.0:\r\n```\r\n\r\nThen write up a short changelog (see guides/documentation.rst), add your name to the contributors list (in - and read! - contributing.rst), and open a pull request! If you have any questions, just let me know and I'd be happy to help out.\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport math\n\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\nfrom hypothesis.internal.conjecture.data import Status\nfrom hypothesis.internal.conjecture.engine import MAX_SHRINKS, ExitReason\n\ncollector = DynamicVariable(None)\n\n\nclass Statistics(object):\n\n def __init__(self, engine):\n self.passing_examples = len(\n engine.status_runtimes.get(Status.VALID, ()))\n self.invalid_examples = len(\n engine.status_runtimes.get(Status.INVALID, []) +\n engine.status_runtimes.get(Status.OVERRUN, [])\n )\n self.failing_examples = len(engine.status_runtimes.get(\n Status.INTERESTING, ()))\n\n runtimes = sorted(\n engine.status_runtimes.get(Status.VALID, []) +\n engine.status_runtimes.get(Status.INVALID, []) +\n engine.status_runtimes.get(Status.INTERESTING, [])\n )\n\n self.has_runs = bool(runtimes)\n if not self.has_runs:\n return\n\n n = max(0, len(runtimes) - 1)\n lower = int(runtimes[int(math.floor(n * 0.05))] * 1000)\n upper = int(runtimes[int(math.ceil(n * 0.95))] * 1000)\n if upper == 0:\n self.runtimes = '< 1ms'\n elif lower == upper:\n self.runtimes = '~ %dms' % (lower,)\n else:\n self.runtimes = '%d-%d ms' % (lower, upper)\n\n if engine.exit_reason == ExitReason.finished:\n self.exit_reason = 'nothing left to do'\n elif engine.exit_reason == ExitReason.flaky:\n self.exit_reason = 'test was flaky'\n elif engine.exit_reason == ExitReason.max_shrinks:\n self.exit_reason = 'shrunk example %s times' % (MAX_SHRINKS,)\n elif engine.exit_reason == ExitReason.max_iterations:\n self.exit_reason = ((\n 'settings.max_examples={}, but < 10% of examples satisfied '\n 'assumptions').format(engine.settings.max_examples)\n )\n else:\n self.exit_reason = (\n 'settings.%s=%r' % (\n engine.exit_reason.name,\n getattr(engine.settings, engine.exit_reason.name)\n )\n )\n\n self.events = [\n '%.2f%%, %s' % (\n c / engine.call_count * 100, e\n ) for e, c in sorted(\n engine.event_call_counts.items(), key=lambda x: -x[1])\n ]\n\n total_runtime = math.fsum(engine.all_runtimes)\n total_drawtime = math.fsum(engine.all_drawtimes)\n\n if total_drawtime == 0.0:\n self.draw_time_percentage = '~ 0%'\n else:\n draw_time_percentage = 100.0 * min(\n 1, total_drawtime / total_runtime)\n\n self.draw_time_percentage = '~ %d%%' % (\n round(draw_time_percentage),)\n\n\ndef note_engine_for_statistics(engine):\n callback = collector.value\n if callback is not None:\n callback(Statistics(engine))\n", "path": "hypothesis-python/src/hypothesis/statistics.py"}]} | 1,931 | 293 |
gh_patches_debug_1771 | rasdani/github-patches | git_diff | matrix-org__synapse-11690 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add a capabilities flag for MSC3440
Per https://github.com/matrix-org/matrix-doc/commit/fc81bbd836955876c931c95277249981b3e2778c
</issue>
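Since the linked MSC only says a client-visible capability should be advertised, here is a hedged sketch of the shape such an entry takes. The helper function is illustrative, and the flag and key names are taken from the patch at the end of this record rather than from the issue text itself.

```python
# Illustrative helper only; the real change lives in CapabilitiesRestServlet.on_GET.
def add_thread_capability(response: dict, msc3440_enabled: bool) -> dict:
    if msc3440_enabled:
        capabilities = response.setdefault("capabilities", {})
        capabilities["io.element.thread"] = {"enabled": True}
    return response


print(add_thread_capability({"capabilities": {}}, msc3440_enabled=True))
```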
<code>
[start of synapse/rest/client/capabilities.py]
1 # Copyright 2019 New Vector
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15 from typing import TYPE_CHECKING, Tuple
16
17 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES
18 from synapse.http.server import HttpServer
19 from synapse.http.servlet import RestServlet
20 from synapse.http.site import SynapseRequest
21 from synapse.types import JsonDict
22
23 from ._base import client_patterns
24
25 if TYPE_CHECKING:
26 from synapse.server import HomeServer
27
28 logger = logging.getLogger(__name__)
29
30
31 class CapabilitiesRestServlet(RestServlet):
32 """End point to expose the capabilities of the server."""
33
34 PATTERNS = client_patterns("/capabilities$")
35
36 def __init__(self, hs: "HomeServer"):
37 super().__init__()
38 self.hs = hs
39 self.config = hs.config
40 self.auth = hs.get_auth()
41 self.auth_handler = hs.get_auth_handler()
42
43 async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
44 await self.auth.get_user_by_req(request, allow_guest=True)
45 change_password = self.auth_handler.can_change_password()
46
47 response: JsonDict = {
48 "capabilities": {
49 "m.room_versions": {
50 "default": self.config.server.default_room_version.identifier,
51 "available": {
52 v.identifier: v.disposition
53 for v in KNOWN_ROOM_VERSIONS.values()
54 },
55 },
56 "m.change_password": {"enabled": change_password},
57 }
58 }
59
60 if self.config.experimental.msc3244_enabled:
61 response["capabilities"]["m.room_versions"][
62 "org.matrix.msc3244.room_capabilities"
63 ] = MSC3244_CAPABILITIES
64
65 if self.config.experimental.msc3283_enabled:
66 response["capabilities"]["org.matrix.msc3283.set_displayname"] = {
67 "enabled": self.config.registration.enable_set_displayname
68 }
69 response["capabilities"]["org.matrix.msc3283.set_avatar_url"] = {
70 "enabled": self.config.registration.enable_set_avatar_url
71 }
72 response["capabilities"]["org.matrix.msc3283.3pid_changes"] = {
73 "enabled": self.config.registration.enable_3pid_changes
74 }
75
76 return 200, response
77
78
79 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
80 CapabilitiesRestServlet(hs).register(http_server)
81
[end of synapse/rest/client/capabilities.py]
</code>
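To make the servlet above easier to follow, this is roughly what the JSON body of a `GET /capabilities` response looks like once `on_GET` has assembled it. The concrete room-version strings are placeholders, not an exhaustive list.

```python
# Approximate shape of the response dict assembled in on_GET above; the room
# version identifiers shown are placeholders.
example_response = {
    "capabilities": {
        "m.room_versions": {
            "default": "6",
            "available": {"1": "stable", "6": "stable"},
        },
        "m.change_password": {"enabled": True},
    }
}
```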
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py
--- a/synapse/rest/client/capabilities.py
+++ b/synapse/rest/client/capabilities.py
@@ -73,6 +73,9 @@
"enabled": self.config.registration.enable_3pid_changes
}
 
+        if self.config.experimental.msc3440_enabled:
+ response["capabilities"]["io.element.thread"] = {"enabled": True}
+
return 200, response
| {"golden_diff": "diff --git a/synapse/rest/client/capabilities.py b/synapse/rest/client/capabilities.py\n--- a/synapse/rest/client/capabilities.py\n+++ b/synapse/rest/client/capabilities.py\n@@ -73,6 +73,9 @@\n \"enabled\": self.config.registration.enable_3pid_changes\n }\n \n+ if self.config.experimental.msc3440_enabled:\n+ response[\"capabilities\"][\"io.element.thread\"] = {\"enabled\": True}\n+\n return 200, response\n", "issue": "Add a capabilties flag for MSC3440\nPer https://github.com/matrix-org/matrix-doc/commit/fc81bbd836955876c931c95277249981b3e2778c\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 New Vector\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom synapse.api.room_versions import KNOWN_ROOM_VERSIONS, MSC3244_CAPABILITIES\nfrom synapse.http.server import HttpServer\nfrom synapse.http.servlet import RestServlet\nfrom synapse.http.site import SynapseRequest\nfrom synapse.types import JsonDict\n\nfrom ._base import client_patterns\n\nif TYPE_CHECKING:\n from synapse.server import HomeServer\n\nlogger = logging.getLogger(__name__)\n\n\nclass CapabilitiesRestServlet(RestServlet):\n \"\"\"End point to expose the capabilities of the server.\"\"\"\n\n PATTERNS = client_patterns(\"/capabilities$\")\n\n def __init__(self, hs: \"HomeServer\"):\n super().__init__()\n self.hs = hs\n self.config = hs.config\n self.auth = hs.get_auth()\n self.auth_handler = hs.get_auth_handler()\n\n async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:\n await self.auth.get_user_by_req(request, allow_guest=True)\n change_password = self.auth_handler.can_change_password()\n\n response: JsonDict = {\n \"capabilities\": {\n \"m.room_versions\": {\n \"default\": self.config.server.default_room_version.identifier,\n \"available\": {\n v.identifier: v.disposition\n for v in KNOWN_ROOM_VERSIONS.values()\n },\n },\n \"m.change_password\": {\"enabled\": change_password},\n }\n }\n\n if self.config.experimental.msc3244_enabled:\n response[\"capabilities\"][\"m.room_versions\"][\n \"org.matrix.msc3244.room_capabilities\"\n ] = MSC3244_CAPABILITIES\n\n if self.config.experimental.msc3283_enabled:\n response[\"capabilities\"][\"org.matrix.msc3283.set_displayname\"] = {\n \"enabled\": self.config.registration.enable_set_displayname\n }\n response[\"capabilities\"][\"org.matrix.msc3283.set_avatar_url\"] = {\n \"enabled\": self.config.registration.enable_set_avatar_url\n }\n response[\"capabilities\"][\"org.matrix.msc3283.3pid_changes\"] = {\n \"enabled\": self.config.registration.enable_3pid_changes\n }\n\n return 200, response\n\n\ndef register_servlets(hs: \"HomeServer\", http_server: HttpServer) -> None:\n CapabilitiesRestServlet(hs).register(http_server)\n", "path": "synapse/rest/client/capabilities.py"}]} | 1,410 | 116 |
gh_patches_debug_15597 | rasdani/github-patches | git_diff | qtile__qtile-3073 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pomodoro widget duplicates the prefix and suffix texts of the fmt key
```
widget.Pomodoro(
fmt='prefix{}suffix',
),
widget.TextBox(
fmt='prefix{}suffix',
text='TEXTBOX'
),
```
### 1. Pomodoro widget
_expected to display:_
> **prefix**POMODORO**suffix**
_actual:_
> **prefixprefix**POMODORO**suffixsuffix**
### 2. TextBox widget (for example)
_expected to display = actual:_
>**prefix**TEXTBOX**suffix**
## Description:
- If there is a prefix or suffix in the **fmt** key of the Pomodoro widget, it doubles the prefix or suffix text
`qtile -v: 0.18.1`
`logs: nothing relevant`
- [x] I have searched past issues to see if this bug has already been reported. *
</issue>
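The duplication is easiest to read as a double application of `fmt`: `Pomodoro.poll()` formats its text with `self.fmt`, and the base widget machinery appears to apply `fmt` once more when drawing the polled text, which is why `TextBox` (which relies only on the base behaviour) looks correct. The sketch below merely mimics that call order with plain functions; it is not qtile code, and the assumption that the base class applies `fmt` on its own is inferred from the `TextBox` behaviour shown above.

```python
# Plain-Python mimic of the double formatting; not qtile code.
FMT = "prefix{}suffix"

def poll(text: str) -> str:
    # Mirrors Pomodoro.poll() in the listing below, which formats with self.fmt.
    return FMT.format(text)

def base_update(polled: str) -> str:
    # Stands in for the base widget applying fmt to whatever poll() returned.
    return FMT.format(polled)

print(base_update(poll("POMODORO")))  # -> prefixprefixPOMODOROsuffixsuffix
```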
<code>
[start of libqtile/widget/pomodoro.py]
1 # Copyright (c) 2017 Zordsdavini
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 from datetime import datetime, timedelta
22 from time import time
23
24 from libqtile.utils import send_notification
25 from libqtile.widget import base
26
27
28 class Pomodoro(base.ThreadPoolText):
29 """Pomodoro technique widget"""
30 orientations = base.ORIENTATION_HORIZONTAL
31 defaults = [
32 ("fmt", "{}", "fmt"),
33 ("num_pomodori", 4, "Number of pomodori to do in a cycle"),
34 ("length_pomodori", 25, "Length of one pomodori in minutes"),
35 ("length_short_break", 5, "Length of a short break in minutes"),
36 ("length_long_break", 15, "Length of a long break in minutes"),
37 ("color_inactive", "ff0000", "Colour then pomodoro is inactive"),
38 ("color_active", "00ff00", "Colour then pomodoro is running"),
39 ("color_break", "ffff00", "Colour then it is break time"),
40 ("notification_on", True, "Turn notifications on"),
41 ("prefix_inactive", "POMODORO", "Prefix when app is inactive"),
42 ("prefix_active", "", "Prefix then app is active"),
43 ("prefix_break", "B ", "Prefix during short break"),
44 ("prefix_long_break", "LB ", "Prefix during long break"),
45 ("prefix_paused", "PAUSE", "Prefix during pause"),
46 ("update_interval", 1, "Update interval in seconds, if none, the "
47 "widget updates whenever the event loop is idle."),
48 ]
49
50 STATUS_START = "start"
51 STATUS_INACTIVE = "inactive"
52 STATUS_ACTIVE = "active"
53 STATUS_BREAK = "break"
54 STATUS_LONG_BREAK = "long_break"
55 STATUS_PAUSED = "paused"
56
57 status = "inactive"
58 paused_status = None
59 end_time = datetime.now()
60 time_left = None
61 pomodoros = 1
62
63 def __init__(self, **config):
64 base.ThreadPoolText.__init__(self, "", **config)
65 self.add_defaults(Pomodoro.defaults)
66 self.prefix = {
67 'inactive': self.prefix_inactive,
68 'active': self.prefix_active,
69 'break': self.prefix_break,
70 'long_break': self.prefix_long_break,
71 'paused': self.prefix_paused
72 }
73
74 self.add_callbacks({
75 'Button1': self._toggle_break,
76 'Button3': self._toggle_active,
77 })
78
79 def tick(self):
80 self.update(self.poll())
81 return self.update_interval - time() % self.update_interval
82
83 def _update(self):
84 if self.status in [self.STATUS_INACTIVE, self.STATUS_PAUSED]:
85 return
86
87 if self.end_time > datetime.now() and self.status != self.STATUS_START:
88 return
89
90 if self.status == self.STATUS_ACTIVE and self.pomodoros == self.num_pomodori:
91 self.status = self.STATUS_LONG_BREAK
92 self.end_time = datetime.now() + timedelta(minutes=self.length_long_break)
93 self.pomodoros = 1
94 if self.notification_on:
95 self._send_notification(
96 "normal", "Please take a long break! End Time: " + self.end_time.strftime("%H:%M")
97 )
98 return
99
100 if self.status == self.STATUS_ACTIVE:
101 self.status = self.STATUS_BREAK
102 self.end_time = datetime.now() + timedelta(minutes=self.length_short_break)
103 self.pomodoros += 1
104 if self.notification_on:
105 self._send_notification(
106 "normal", "Please take a short break! End Time: " + self.end_time.strftime("%H:%M")
107 )
108 return
109
110 self.status = self.STATUS_ACTIVE
111 self.end_time = datetime.now() + timedelta(minutes=self.length_pomodori)
112 if self.notification_on:
113 self._send_notification(
114 "critical", "Please start with the next Pomodori! End Time: " + self.end_time.strftime("%H:%M")
115 )
116
117 return
118
119 def _get_text(self):
120 self._update()
121
122 if self.status in [self.STATUS_INACTIVE, self.STATUS_PAUSED]:
123 self.layout.colour = self.color_inactive
124 return self.prefix[self.status]
125
126 time_left = self.end_time - datetime.now()
127
128 if self.status == self.STATUS_ACTIVE:
129 self.layout.colour = self.color_active
130 else:
131 self.layout.colour = self.color_break
132
133 time_string = "%i:%i:%s" % (time_left.seconds // 3600, time_left.seconds % 3600 // 60, time_left.seconds % 60)
134 return self.prefix[self.status] + time_string
135
136 def _toggle_break(self):
137 if self.status == self.STATUS_INACTIVE:
138 self.status = self.STATUS_START
139 return
140
141 if self.paused_status is None:
142 self.paused_status = self.status
143 self.time_left = self.end_time - datetime.now()
144 self.status = self.STATUS_PAUSED
145 if self.notification_on:
146 self._send_notification('low', "Pomodoro has been paused")
147 else:
148 self.status = self.paused_status
149 self.paused_status = None
150 self.end_time = self.time_left + datetime.now()
151 if self.notification_on:
152 if self.status == self.STATUS_ACTIVE:
153 status = 'Pomodoro'
154 else:
155 status = 'break'
156
157 self._send_notification(
158 "normal",
159 "Please continue on %s! End Time: " % status + self.end_time.strftime("%H:%M")
160 )
161
162 def _toggle_active(self):
163 if self.status != self.STATUS_INACTIVE:
164 self.status = self.STATUS_INACTIVE
165 if self.notification_on:
166 self._send_notification('critical', "Pomodoro has been suspended")
167 else:
168 self.status = self.STATUS_START
169
170 def _send_notification(self, urgent, message):
171 send_notification("Pomodoro", message, urgent=urgent)
172
173 def poll(self):
174 return self.fmt.format(self._get_text())
175
[end of libqtile/widget/pomodoro.py]
</code>
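One small detail of the listing above worth spelling out is the return value of `tick()`: it sleeps just long enough to land on the next whole `update_interval` boundary. A tiny worked example, with an arbitrary interval:

```python
# Worked example of the tick() scheduling arithmetic from the listing above.
from time import time

update_interval = 1  # seconds, arbitrary
delay = update_interval - time() % update_interval
assert 0 < delay <= update_interval  # always lands on the next whole boundary
```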
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/pomodoro.py b/libqtile/widget/pomodoro.py
--- a/libqtile/widget/pomodoro.py
+++ b/libqtile/widget/pomodoro.py
@@ -29,7 +29,6 @@
"""Pomodoro technique widget"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
- ("fmt", "{}", "fmt"),
("num_pomodori", 4, "Number of pomodori to do in a cycle"),
("length_pomodori", 25, "Length of one pomodori in minutes"),
("length_short_break", 5, "Length of a short break in minutes"),
@@ -171,4 +170,4 @@
send_notification("Pomodoro", message, urgent=urgent)
 
     def poll(self):
- return self.fmt.format(self._get_text())
+ return self._get_text()
| {"golden_diff": "diff --git a/libqtile/widget/pomodoro.py b/libqtile/widget/pomodoro.py\n--- a/libqtile/widget/pomodoro.py\n+++ b/libqtile/widget/pomodoro.py\n@@ -29,7 +29,6 @@\n \"\"\"Pomodoro technique widget\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n- (\"fmt\", \"{}\", \"fmt\"),\n (\"num_pomodori\", 4, \"Number of pomodori to do in a cycle\"),\n (\"length_pomodori\", 25, \"Length of one pomodori in minutes\"),\n (\"length_short_break\", 5, \"Length of a short break in minutes\"),\n@@ -171,4 +170,4 @@\n send_notification(\"Pomodoro\", message, urgent=urgent)\n \n def poll(self):\n- return self.fmt.format(self._get_text())\n+ return self._get_text()\n", "issue": "Pomodoro widget duplicates the prefix and suffix texts of the fmt key\n```\r\nwidget.Pomodoro(\r\n fmt='prefix{}suffix',\r\n),\r\n\r\nwidget.TextBox(\r\n fmt='prefix{}suffix',\r\n text='TEXTBOX'\r\n),\r\n```\r\n\r\n### 1. Pomodoro widget\r\n\r\n\r\n_expected to display:_\r\n\r\n\r\n> **prefix**POMODORO**suffix**\r\n\r\n_actual:_\r\n\r\n> **prefixprefix**POMODORO**suffixsuffix**\r\n\r\n\r\n### 2. TextBox widget (for example)\r\n\r\n_expected to dispay = actual:_\r\n\r\n>**prefix**TEXTBOX**suffix**\r\n\r\n\r\n## Description:\r\n\r\n- If there is prefix or suffix in **fmt** key at Pomodoro widget, It doubles the text of prefix or suffix\r\n\r\n`qtile -v: 0.18.1`\r\n`logs: nothing relevant`\r\n- [x] I have searched past issues to see if this bug has already been reported. *\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2017 Zordsdavini\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom datetime import datetime, timedelta\nfrom time import time\n\nfrom libqtile.utils import send_notification\nfrom libqtile.widget import base\n\n\nclass Pomodoro(base.ThreadPoolText):\n \"\"\"Pomodoro technique widget\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"fmt\", \"{}\", \"fmt\"),\n (\"num_pomodori\", 4, \"Number of pomodori to do in a cycle\"),\n (\"length_pomodori\", 25, \"Length of one pomodori in minutes\"),\n (\"length_short_break\", 5, \"Length of a short break in minutes\"),\n (\"length_long_break\", 15, \"Length of a long break in minutes\"),\n (\"color_inactive\", \"ff0000\", \"Colour then pomodoro is inactive\"),\n (\"color_active\", \"00ff00\", \"Colour then pomodoro is running\"),\n (\"color_break\", \"ffff00\", \"Colour then it is break time\"),\n (\"notification_on\", True, \"Turn notifications on\"),\n (\"prefix_inactive\", \"POMODORO\", \"Prefix when app is inactive\"),\n (\"prefix_active\", \"\", \"Prefix then app is active\"),\n (\"prefix_break\", \"B \", \"Prefix during short break\"),\n (\"prefix_long_break\", \"LB \", \"Prefix during long break\"),\n (\"prefix_paused\", \"PAUSE\", \"Prefix during pause\"),\n (\"update_interval\", 1, \"Update interval in seconds, if none, the \"\n \"widget updates whenever the event loop is idle.\"),\n ]\n\n STATUS_START = \"start\"\n STATUS_INACTIVE = \"inactive\"\n STATUS_ACTIVE = \"active\"\n STATUS_BREAK = \"break\"\n STATUS_LONG_BREAK = \"long_break\"\n STATUS_PAUSED = \"paused\"\n\n status = \"inactive\"\n paused_status = None\n end_time = datetime.now()\n time_left = None\n pomodoros = 1\n\n def __init__(self, **config):\n base.ThreadPoolText.__init__(self, \"\", **config)\n self.add_defaults(Pomodoro.defaults)\n self.prefix = {\n 'inactive': self.prefix_inactive,\n 'active': self.prefix_active,\n 'break': self.prefix_break,\n 'long_break': self.prefix_long_break,\n 'paused': self.prefix_paused\n }\n\n self.add_callbacks({\n 'Button1': self._toggle_break,\n 'Button3': self._toggle_active,\n })\n\n def tick(self):\n self.update(self.poll())\n return self.update_interval - time() % self.update_interval\n\n def _update(self):\n if self.status in [self.STATUS_INACTIVE, self.STATUS_PAUSED]:\n return\n\n if self.end_time > datetime.now() and self.status != self.STATUS_START:\n return\n\n if self.status == self.STATUS_ACTIVE and self.pomodoros == self.num_pomodori:\n self.status = self.STATUS_LONG_BREAK\n self.end_time = datetime.now() + timedelta(minutes=self.length_long_break)\n self.pomodoros = 1\n if self.notification_on:\n self._send_notification(\n \"normal\", \"Please take a long break! End Time: \" + self.end_time.strftime(\"%H:%M\")\n )\n return\n\n if self.status == self.STATUS_ACTIVE:\n self.status = self.STATUS_BREAK\n self.end_time = datetime.now() + timedelta(minutes=self.length_short_break)\n self.pomodoros += 1\n if self.notification_on:\n self._send_notification(\n \"normal\", \"Please take a short break! End Time: \" + self.end_time.strftime(\"%H:%M\")\n )\n return\n\n self.status = self.STATUS_ACTIVE\n self.end_time = datetime.now() + timedelta(minutes=self.length_pomodori)\n if self.notification_on:\n self._send_notification(\n \"critical\", \"Please start with the next Pomodori! 
End Time: \" + self.end_time.strftime(\"%H:%M\")\n )\n\n return\n\n def _get_text(self):\n self._update()\n\n if self.status in [self.STATUS_INACTIVE, self.STATUS_PAUSED]:\n self.layout.colour = self.color_inactive\n return self.prefix[self.status]\n\n time_left = self.end_time - datetime.now()\n\n if self.status == self.STATUS_ACTIVE:\n self.layout.colour = self.color_active\n else:\n self.layout.colour = self.color_break\n\n time_string = \"%i:%i:%s\" % (time_left.seconds // 3600, time_left.seconds % 3600 // 60, time_left.seconds % 60)\n return self.prefix[self.status] + time_string\n\n def _toggle_break(self):\n if self.status == self.STATUS_INACTIVE:\n self.status = self.STATUS_START\n return\n\n if self.paused_status is None:\n self.paused_status = self.status\n self.time_left = self.end_time - datetime.now()\n self.status = self.STATUS_PAUSED\n if self.notification_on:\n self._send_notification('low', \"Pomodoro has been paused\")\n else:\n self.status = self.paused_status\n self.paused_status = None\n self.end_time = self.time_left + datetime.now()\n if self.notification_on:\n if self.status == self.STATUS_ACTIVE:\n status = 'Pomodoro'\n else:\n status = 'break'\n\n self._send_notification(\n \"normal\",\n \"Please continue on %s! End Time: \" % status + self.end_time.strftime(\"%H:%M\")\n )\n\n def _toggle_active(self):\n if self.status != self.STATUS_INACTIVE:\n self.status = self.STATUS_INACTIVE\n if self.notification_on:\n self._send_notification('critical', \"Pomodoro has been suspended\")\n else:\n self.status = self.STATUS_START\n\n def _send_notification(self, urgent, message):\n send_notification(\"Pomodoro\", message, urgent=urgent)\n\n def poll(self):\n return self.fmt.format(self._get_text())\n", "path": "libqtile/widget/pomodoro.py"}]} | 2,697 | 209 |
gh_patches_debug_18010 | rasdani/github-patches | git_diff | speechbrain__speechbrain-2384 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E TypeError: save() missing 1 required positional argument: 'sample_rate'
Hello,
Getting an error when using
`enhancer = SpectralMaskEnhancement.from_hparams(
source="speechbrain/mtl-mimic-voicebank",
savedir='stream_folder/',
run_opts={"device":"cuda"})
enhanced = enhancer.enhance_file("stream_folder/fr_yourname.wav", output="stream_folder/test.wav")`
test_speechbrain.py:None (test_speechbrain.py)
test_speechbrain.py:13: in <module>
enhanced = enhancer.enhance_file("stream_folder/fr_yourname.wav",
/home/tim/env/python3.8/lib/python3.8/site-packages/speechbrain/pretrained/interfaces.py:1929: in enhance_file
torchaudio.save(output_filename, enhanced, channels_first=False)
E TypeError: save() missing 1 required positional argument: 'sample_rate'
Probably an update to `torchaudio.save` that needs to be handled.
Regards
</issue>
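The traceback comes from the fact that `torchaudio.save` takes the sample rate as a required argument. A hedged sketch of a corrected call is below; the 16 kHz value is only a placeholder assumption, and the proper fix is to take the rate from the model's own configuration (for example its STFT settings), as the patch at the end of this record does.

```python
# Sketch only: saves an enhanced waveform with an explicit sample rate.
# The 16000 default is a placeholder assumption, not the model's real setting.
import torch
import torchaudio

def save_enhanced(enhanced: torch.Tensor, output_filename: str, sample_rate: int = 16000) -> None:
    torchaudio.save(output_filename, enhanced, sample_rate)
```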
<code>
[start of speechbrain/inference/enhancement.py]
1 """ Specifies the inference interfaces for speech enhancement modules.
2
3 Authors:
4 * Aku Rouhe 2021
5 * Peter Plantinga 2021
6 * Loren Lugosch 2020
7 * Mirco Ravanelli 2020
8 * Titouan Parcollet 2021
9 * Abdel Heba 2021
10 * Andreas Nautsch 2022, 2023
11 * Pooneh Mousavi 2023
12 * Sylvain de Langen 2023
13 * Adel Moumen 2023
14 * Pradnya Kandarkar 2023
15 """
16 import torch
17 import torchaudio
18 from speechbrain.inference.interfaces import Pretrained
19 from speechbrain.utils.callchains import lengths_arg_exists
20
21
22 class SpectralMaskEnhancement(Pretrained):
23 """A ready-to-use model for speech enhancement.
24
25 Arguments
26 ---------
27 See ``Pretrained``.
28
29 Example
30 -------
31 >>> import torch
32 >>> from speechbrain.inference.enhancement import SpectralMaskEnhancement
33 >>> # Model is downloaded from the speechbrain HuggingFace repo
34 >>> tmpdir = getfixture("tmpdir")
35 >>> enhancer = SpectralMaskEnhancement.from_hparams(
36 ... source="speechbrain/metricgan-plus-voicebank",
37 ... savedir=tmpdir,
38 ... )
39 >>> enhanced = enhancer.enhance_file(
40 ... "speechbrain/metricgan-plus-voicebank/example.wav"
41 ... )
42 """
43
44 HPARAMS_NEEDED = ["compute_stft", "spectral_magnitude", "resynth"]
45 MODULES_NEEDED = ["enhance_model"]
46
47 def compute_features(self, wavs):
48 """Compute the log spectral magnitude features for masking.
49
50 Arguments
51 ---------
52 wavs : torch.Tensor
53 A batch of waveforms to convert to log spectral mags.
54 """
55 feats = self.hparams.compute_stft(wavs)
56 feats = self.hparams.spectral_magnitude(feats)
57 return torch.log1p(feats)
58
59 def enhance_batch(self, noisy, lengths=None):
60 """Enhance a batch of noisy waveforms.
61
62 Arguments
63 ---------
64 noisy : torch.Tensor
65 A batch of waveforms to perform enhancement on.
66 lengths : torch.Tensor
67 The lengths of the waveforms if the enhancement model handles them.
68
69 Returns
70 -------
71 torch.Tensor
72 A batch of enhanced waveforms of the same shape as input.
73 """
74 noisy = noisy.to(self.device)
75 noisy_features = self.compute_features(noisy)
76
77 # Perform masking-based enhancement, multiplying output with input.
78 if lengths is not None:
79 mask = self.mods.enhance_model(noisy_features, lengths=lengths)
80 else:
81 mask = self.mods.enhance_model(noisy_features)
82 enhanced = torch.mul(mask, noisy_features)
83
84 # Return resynthesized waveforms
85 return self.hparams.resynth(torch.expm1(enhanced), noisy)
86
87 def enhance_file(self, filename, output_filename=None, **kwargs):
88 """Enhance a wav file.
89
90 Arguments
91 ---------
92 filename : str
93 Location on disk to load file for enhancement.
94 output_filename : str
95 If provided, writes enhanced data to this file.
96 """
97 noisy = self.load_audio(filename, **kwargs)
98 noisy = noisy.to(self.device)
99
100 # Fake a batch:
101 batch = noisy.unsqueeze(0)
102 if lengths_arg_exists(self.enhance_batch):
103 enhanced = self.enhance_batch(batch, lengths=torch.tensor([1.0]))
104 else:
105 enhanced = self.enhance_batch(batch)
106
107 if output_filename is not None:
108 torchaudio.save(output_filename, enhanced, channels_first=False)
109
110 return enhanced.squeeze(0)
111
112
113 class WaveformEnhancement(Pretrained):
114 """A ready-to-use model for speech enhancement.
115
116 Arguments
117 ---------
118 See ``Pretrained``.
119
120 Example
121 -------
122 >>> from speechbrain.inference.enhancement import WaveformEnhancement
123 >>> # Model is downloaded from the speechbrain HuggingFace repo
124 >>> tmpdir = getfixture("tmpdir")
125 >>> enhancer = WaveformEnhancement.from_hparams(
126 ... source="speechbrain/mtl-mimic-voicebank",
127 ... savedir=tmpdir,
128 ... )
129 >>> enhanced = enhancer.enhance_file(
130 ... "speechbrain/mtl-mimic-voicebank/example.wav"
131 ... )
132 """
133
134 MODULES_NEEDED = ["enhance_model"]
135
136 def enhance_batch(self, noisy, lengths=None):
137 """Enhance a batch of noisy waveforms.
138
139 Arguments
140 ---------
141 noisy : torch.Tensor
142 A batch of waveforms to perform enhancement on.
143 lengths : torch.Tensor
144 The lengths of the waveforms if the enhancement model handles them.
145
146 Returns
147 -------
148 torch.Tensor
149 A batch of enhanced waveforms of the same shape as input.
150 """
151 noisy = noisy.to(self.device)
152 enhanced_wav, _ = self.mods.enhance_model(noisy)
153 return enhanced_wav
154
155 def enhance_file(self, filename, output_filename=None, **kwargs):
156 """Enhance a wav file.
157
158 Arguments
159 ---------
160 filename : str
161 Location on disk to load file for enhancement.
162 output_filename : str
163 If provided, writes enhanced data to this file.
164 """
165 noisy = self.load_audio(filename, **kwargs)
166
167 # Fake a batch:
168 batch = noisy.unsqueeze(0)
169 enhanced = self.enhance_batch(batch)
170
171 if output_filename is not None:
172 torchaudio.save(output_filename, enhanced, channels_first=False)
173
174 return enhanced.squeeze(0)
175
176 def forward(self, noisy, lengths=None):
177 """Runs enhancement on the noisy input"""
178 return self.enhance_batch(noisy, lengths)
179
[end of speechbrain/inference/enhancement.py]
</code>
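The enhancement path in `SpectralMaskEnhancement` above (log-compressed spectral magnitudes, a predicted mask, then `expm1` before resynthesis) can be summarised in a short standalone sketch; tensor shapes and values here are placeholders, not speechbrain's implementation:

```python
import torch

def spectral_mask_enhance(noisy_magnitude: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    # Same three steps as compute_features + enhance_batch above:
    log_feats = torch.log1p(noisy_magnitude)   # log-compress the spectral magnitudes
    enhanced_log = mask * log_feats            # apply the predicted mask
    return torch.expm1(enhanced_log)           # back to linear magnitude for resynthesis

# Dummy batch: 1 utterance, 100 frames, 257 frequency bins (shapes are assumptions).
mag = torch.rand(1, 100, 257)
mask = torch.rand(1, 100, 257)
enhanced_mag = spectral_mask_enhance(mag, mask)
```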
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/speechbrain/inference/enhancement.py b/speechbrain/inference/enhancement.py
--- a/speechbrain/inference/enhancement.py
+++ b/speechbrain/inference/enhancement.py
@@ -105,7 +105,11 @@
enhanced = self.enhance_batch(batch)
if output_filename is not None:
- torchaudio.save(output_filename, enhanced, channels_first=False)
+ torchaudio.save(
+ uri=output_filename,
+ src=enhanced,
+ sample_rate=self.hparams.compute_stft.sample_rate,
+ )
return enhanced.squeeze(0)
@@ -169,7 +173,11 @@
enhanced = self.enhance_batch(batch)
if output_filename is not None:
- torchaudio.save(output_filename, enhanced, channels_first=False)
+ torchaudio.save(
+ uri=output_filename,
+ src=enhanced,
+ sample_rate=self.audio_normalizer.sample_rate,
+ )
return enhanced.squeeze(0)
| {"golden_diff": "diff --git a/speechbrain/inference/enhancement.py b/speechbrain/inference/enhancement.py\n--- a/speechbrain/inference/enhancement.py\n+++ b/speechbrain/inference/enhancement.py\n@@ -105,7 +105,11 @@\n enhanced = self.enhance_batch(batch)\n \n if output_filename is not None:\n- torchaudio.save(output_filename, enhanced, channels_first=False)\n+ torchaudio.save(\n+ uri=output_filename,\n+ src=enhanced,\n+ sample_rate=self.hparams.compute_stft.sample_rate,\n+ )\n \n return enhanced.squeeze(0)\n \n@@ -169,7 +173,11 @@\n enhanced = self.enhance_batch(batch)\n \n if output_filename is not None:\n- torchaudio.save(output_filename, enhanced, channels_first=False)\n+ torchaudio.save(\n+ uri=output_filename,\n+ src=enhanced,\n+ sample_rate=self.audio_normalizer.sample_rate,\n+ )\n \n return enhanced.squeeze(0)\n", "issue": "E TypeError: save() missing 1 required positional argument: 'sample_rate'\nHello,\r\n\r\nGetting error in using \r\n\r\n`enhancer = SpectralMaskEnhancement.from_hparams(\r\n source=\"speechbrain/mtl-mimic-voicebank\",\r\n savedir='stream_folder/',\r\n run_opts={\"device\":\"cuda\"})\r\n\r\nenhanced = enhancer.enhance_file(\"stream_folder/fr_yourname.wav\", output=\"stream_folder/test.wav)`\r\n\r\ntest_speechbrain.py:None (test_speechbrain.py)\r\ntest_speechbrain.py:13: in <module>\r\n enhanced = enhancer.enhance_file(\"stream_folder/fr_yourname.wav\",\r\n/home/tim/env/python3.8/lib/python3.8/site-packages/speechbrain/pretrained/interfaces.py:1929: in enhance_file\r\n torchaudio.save(output_filename, enhanced, channels_first=False)\r\nE TypeError: save() missing 1 required positional argument: 'sample_rate'\r\n\r\nprobably an update on torchaudio.save you need to handle.\r\n\r\nRegards\n", "before_files": [{"content": "\"\"\" Specifies the inference interfaces for speech enhancement modules.\n\nAuthors:\n * Aku Rouhe 2021\n * Peter Plantinga 2021\n * Loren Lugosch 2020\n * Mirco Ravanelli 2020\n * Titouan Parcollet 2021\n * Abdel Heba 2021\n * Andreas Nautsch 2022, 2023\n * Pooneh Mousavi 2023\n * Sylvain de Langen 2023\n * Adel Moumen 2023\n * Pradnya Kandarkar 2023\n\"\"\"\nimport torch\nimport torchaudio\nfrom speechbrain.inference.interfaces import Pretrained\nfrom speechbrain.utils.callchains import lengths_arg_exists\n\n\nclass SpectralMaskEnhancement(Pretrained):\n \"\"\"A ready-to-use model for speech enhancement.\n\n Arguments\n ---------\n See ``Pretrained``.\n\n Example\n -------\n >>> import torch\n >>> from speechbrain.inference.enhancement import SpectralMaskEnhancement\n >>> # Model is downloaded from the speechbrain HuggingFace repo\n >>> tmpdir = getfixture(\"tmpdir\")\n >>> enhancer = SpectralMaskEnhancement.from_hparams(\n ... source=\"speechbrain/metricgan-plus-voicebank\",\n ... savedir=tmpdir,\n ... )\n >>> enhanced = enhancer.enhance_file(\n ... \"speechbrain/metricgan-plus-voicebank/example.wav\"\n ... 
)\n \"\"\"\n\n HPARAMS_NEEDED = [\"compute_stft\", \"spectral_magnitude\", \"resynth\"]\n MODULES_NEEDED = [\"enhance_model\"]\n\n def compute_features(self, wavs):\n \"\"\"Compute the log spectral magnitude features for masking.\n\n Arguments\n ---------\n wavs : torch.Tensor\n A batch of waveforms to convert to log spectral mags.\n \"\"\"\n feats = self.hparams.compute_stft(wavs)\n feats = self.hparams.spectral_magnitude(feats)\n return torch.log1p(feats)\n\n def enhance_batch(self, noisy, lengths=None):\n \"\"\"Enhance a batch of noisy waveforms.\n\n Arguments\n ---------\n noisy : torch.Tensor\n A batch of waveforms to perform enhancement on.\n lengths : torch.Tensor\n The lengths of the waveforms if the enhancement model handles them.\n\n Returns\n -------\n torch.Tensor\n A batch of enhanced waveforms of the same shape as input.\n \"\"\"\n noisy = noisy.to(self.device)\n noisy_features = self.compute_features(noisy)\n\n # Perform masking-based enhancement, multiplying output with input.\n if lengths is not None:\n mask = self.mods.enhance_model(noisy_features, lengths=lengths)\n else:\n mask = self.mods.enhance_model(noisy_features)\n enhanced = torch.mul(mask, noisy_features)\n\n # Return resynthesized waveforms\n return self.hparams.resynth(torch.expm1(enhanced), noisy)\n\n def enhance_file(self, filename, output_filename=None, **kwargs):\n \"\"\"Enhance a wav file.\n\n Arguments\n ---------\n filename : str\n Location on disk to load file for enhancement.\n output_filename : str\n If provided, writes enhanced data to this file.\n \"\"\"\n noisy = self.load_audio(filename, **kwargs)\n noisy = noisy.to(self.device)\n\n # Fake a batch:\n batch = noisy.unsqueeze(0)\n if lengths_arg_exists(self.enhance_batch):\n enhanced = self.enhance_batch(batch, lengths=torch.tensor([1.0]))\n else:\n enhanced = self.enhance_batch(batch)\n\n if output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\n\n return enhanced.squeeze(0)\n\n\nclass WaveformEnhancement(Pretrained):\n \"\"\"A ready-to-use model for speech enhancement.\n\n Arguments\n ---------\n See ``Pretrained``.\n\n Example\n -------\n >>> from speechbrain.inference.enhancement import WaveformEnhancement\n >>> # Model is downloaded from the speechbrain HuggingFace repo\n >>> tmpdir = getfixture(\"tmpdir\")\n >>> enhancer = WaveformEnhancement.from_hparams(\n ... source=\"speechbrain/mtl-mimic-voicebank\",\n ... savedir=tmpdir,\n ... )\n >>> enhanced = enhancer.enhance_file(\n ... \"speechbrain/mtl-mimic-voicebank/example.wav\"\n ... 
)\n \"\"\"\n\n MODULES_NEEDED = [\"enhance_model\"]\n\n def enhance_batch(self, noisy, lengths=None):\n \"\"\"Enhance a batch of noisy waveforms.\n\n Arguments\n ---------\n noisy : torch.Tensor\n A batch of waveforms to perform enhancement on.\n lengths : torch.Tensor\n The lengths of the waveforms if the enhancement model handles them.\n\n Returns\n -------\n torch.Tensor\n A batch of enhanced waveforms of the same shape as input.\n \"\"\"\n noisy = noisy.to(self.device)\n enhanced_wav, _ = self.mods.enhance_model(noisy)\n return enhanced_wav\n\n def enhance_file(self, filename, output_filename=None, **kwargs):\n \"\"\"Enhance a wav file.\n\n Arguments\n ---------\n filename : str\n Location on disk to load file for enhancement.\n output_filename : str\n If provided, writes enhanced data to this file.\n \"\"\"\n noisy = self.load_audio(filename, **kwargs)\n\n # Fake a batch:\n batch = noisy.unsqueeze(0)\n enhanced = self.enhance_batch(batch)\n\n if output_filename is not None:\n torchaudio.save(output_filename, enhanced, channels_first=False)\n\n return enhanced.squeeze(0)\n\n def forward(self, noisy, lengths=None):\n \"\"\"Runs enhancement on the noisy input\"\"\"\n return self.enhance_batch(noisy, lengths)\n", "path": "speechbrain/inference/enhancement.py"}]} | 2,490 | 233 |
gh_patches_debug_12359 | rasdani/github-patches | git_diff | nautobot__nautobot-5491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The command 'nautobot-server celery result task_id' says "Cannot resolve keyword 'task_id' into field"
### Environment
* Nautobot version : v2.1.4
* Python version: 3.10.12
* Database platform, version: psql (PostgreSQL) 14.11 (Ubuntu 14.11-0ubuntu0.22.04.1)
* Middleware(s): NA
### Steps to Reproduce
1. Open a terminal as the nautobot user
2. Run the command `nautobot-server celery result 505........89f`, where the last argument is the task_id
### Expected Behavior
I expect to see the result of the task whose task_id is mentioned.
### Observed Behavior
Instead, the following exception is raised:
```
Traceback (most recent call last):
File "/opt/nautobot/bin/nautobot-server", line 8, in <module>
sys.exit(main())
File "/opt/nautobot/lib/python3.10/site-packages/nautobot/core/cli/__init__.py", line 52, in main
run_app(
File "/opt/nautobot/lib/python3.10/site-packages/nautobot/core/runner/runner.py", line 297, in run_app
management.execute_from_command_line([runner_name, command, *command_args])
File "/opt/nautobot/lib/python3.10/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line
utility.execute()
File "/opt/nautobot/lib/python3.10/site-packages/django/core/management/__init__.py", line 413, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "/opt/nautobot/lib/python3.10/site-packages/nautobot/core/management/commands/celery.py", line 23, in run_from_argv
celery_main.main()
File "/opt/nautobot/lib/python3.10/site-packages/click/core.py", line 1078, in main
rv = self.invoke(ctx)
File "/opt/nautobot/lib/python3.10/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/nautobot/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/nautobot/lib/python3.10/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/click/decorators.py", line 33, in new_func
return f(get_current_context(), *args, **kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/celery/bin/base.py", line 134, in caller
return f(ctx, *args, **kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/celery/bin/result.py", line 27, in result
value = task_result.traceback if traceback else task_result.get()
File "/opt/nautobot/lib/python3.10/site-packages/celery/result.py", line 251, in get
return self.backend.wait_for_pending(
File "/opt/nautobot/lib/python3.10/site-packages/celery/backends/base.py", line 755, in wait_for_pending
meta = self.wait_for(
File "/opt/nautobot/lib/python3.10/site-packages/celery/backends/base.py", line 782, in wait_for
meta = self.get_task_meta(task_id)
File "/opt/nautobot/lib/python3.10/site-packages/celery/backends/base.py", line 608, in get_task_meta
meta = self._get_task_meta_for(task_id)
File "/opt/nautobot/lib/python3.10/site-packages/django_celery_results/backends/database.py", line 137, in _get_task_meta_for
obj = self.TaskModel._default_manager.get_task(task_id)
File "/opt/nautobot/lib/python3.10/site-packages/django_celery_results/managers.py", line 111, in get_task
return self.get(task_id=task_id)
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py", line 424, in get
clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/nautobot/core/models/querysets.py", line 92, in filter
return super().filter(*args, **self.split_composite_key_into_kwargs(composite_key, **kwargs))
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py", line 941, in filter
return self._filter_or_exclude(False, args, kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py", line 961, in _filter_or_exclude
clone._filter_or_exclude_inplace(negate, args, kwargs)
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py", line 968, in _filter_or_exclude_inplace
self._query.add_q(Q(*args, **kwargs))
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py", line 1416, in add_q
clause, _ = self._add_q(q_object, self.used_aliases)
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py", line 1435, in _add_q
child_clause, needed_inner = self.build_filter(
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py", line 1309, in build_filter
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py", line 1135, in solve_lookup_type
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
File "/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py", line 1562, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
django.core.exceptions.FieldError: Cannot resolve keyword 'task_id' into field. Choices are: _custom_field_data, celery_kwargs, date_created, date_done, files, id, job_log_entries, job_model, job_model_id, meta, name, result, scheduled_job, scheduled_job_id, status, task_args, task_kwargs, task_name, traceback, user, user_id, worker
```

A screenshot showing usage of the command was attached to the original issue (not reproduced here).
</issue>
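The traceback bottoms out in `django_celery_results`' `TaskResultManager.get_task`, which filters on `task_id`, while the field list in the error shows that Nautobot's `JobResult` model only exposes `id`. A minimal sketch of a manager override that redirects the lookup to `id` (illustrative; the project's actual fix may differ in details such as caching and warnings):

```python
from django_celery_results.managers import TaskResultManager


class PatchedJobResultManager(TaskResultManager):
    """Sketch: JobResult keeps the Celery task UUID in ``id`` rather than ``task_id``."""

    def get_task(self, task_id):
        try:
            return self.get(id=task_id)  # primary-key lookup instead of task_id
        except self.model.DoesNotExist:
            # Fall back to an unsaved placeholder, as the upstream manager does.
            return self.model(id=task_id)
```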
<code>
[start of nautobot/extras/managers.py]
1 from celery import states
2 from django.utils import timezone
3 from django_celery_beat.managers import ExtendedManager
4 from django_celery_results.managers import TaskResultManager, transaction_retry
5
6 from nautobot.core.models import BaseManager
7 from nautobot.core.models.querysets import RestrictedQuerySet
8
9
10 class JobResultManager(BaseManager.from_queryset(RestrictedQuerySet), TaskResultManager):
11 @transaction_retry(max_retries=2)
12 def store_result(
13 self,
14 task_id,
15 result,
16 status,
17 traceback=None,
18 meta=None,
19 periodic_task_name=None,
20 task_name=None,
21 task_args=None,
22 task_kwargs=None,
23 celery_kwargs=None,
24 job_model_id=None,
25 scheduled_job_id=None,
26 worker=None,
27 user_id=None,
28 using=None,
29 content_type=None,
30 content_encoding=None,
31 ):
32 """
33 Store the result and status of a Celery task.
34
35 This overloads default model options provided by `django-celery-results` to manage custom
36 behaviors for integration with Nautobot. Specifically these changes are:
37
38 - Ignore incoming `content_type` and `content_encoding` fields as Nautobot explicitly only uses
39 JSON utf-8 encoding for Celery messages.
40 - Ensure that `name` is set to `task_name` if not otherwise set.
41 - Only set `date_done` if the task has reached a ready state (execution completed),
42 otherwise keep it null.
43
44 Args:
45 task_id (uuid): UUID of task.
46 periodic_task_name (str): Celery periodic task name. (not used by Nautobot)
47 task_name (str): Celery task name.
48 task_args (list): Task arguments.
49 task_kwargs (dict): Task kwargs.
50 celery_kwargs (dict): Celery kwargs (kwargs passed to apply_async).
51 job_model_id (uuid): UUID of the Job model instance of the task being run.
52 scheduled_job_id (uuid): UUID of the ScheduledJob model instance that initiated
53 this task, or None if not scheduled.
54 result (obj): Return value of the task, or an exception instance raised
55 by the task.
56 status (str): Task status. See `JobResultStatusChoices` for a list of possible status
57 values.
58 worker (str): Worker that executes the task.
59 user_id (uuid): UUID of the user that initiated the task.
60 using (str): Django database connection to use.
61 traceback (str): Traceback string taken at the point of exception (only passed if the
62 task failed).
63 meta (json): JSON-serialized result meta data (this contains e.g. children).
64 content_type: Ignored. Kept for interface compatibility.
65 content_encoding: Ignored. Kept for interface compatibility.
66
67 Returns:
68 JobResult
69 """
70
71 # Prepare the fields for creating/updating a `JobResult`.
72 fields = {
73 "status": status,
74 "result": result,
75 "traceback": traceback,
76 "meta": meta,
77 "date_done": None,
78 "task_name": task_name,
79 "task_args": task_args,
80 "task_kwargs": task_kwargs,
81 "celery_kwargs": celery_kwargs,
82 "job_model_id": job_model_id,
83 "scheduled_job_id": scheduled_job_id,
84 "user_id": user_id,
85 "worker": worker,
86 }
87 from nautobot.extras.models.jobs import Job
88
89 # Need to have a try/except block here
90 # because sometimes job_model_id will be None.
91 try:
92 job = Job.objects.get(id=job_model_id)
93 if job.has_sensitive_variables:
94 del fields["task_args"]
95 del fields["task_kwargs"]
96 except Job.DoesNotExist:
97 pass
98
99 obj, created = self.using(using).get_or_create(id=task_id, defaults=fields)
100
101 if not created:
102             # Make sure `date_done` is allowed to stay null until the task reaches a ready state.
103 #
104 # Default behavior in `django-celery-results` has this field as a
105 # `DateField(auto_now=True)` which just automatically updates the `date_done` field on every
106 # state transition. This is different than Celery's default behavior (and the current
107 # behavior of Nautobot) to keep it null until there is a state transition to a ready state
108 # (e.g. `SUCCESS`, `REVOKED`, `FAILURE`).
109 if fields["status"] in states.READY_STATES:
110 fields["date_done"] = timezone.now()
111
112 # Always make sure the Job `name` is set.
113 if not obj.name and fields["task_name"]:
114 fields["name"] = fields["task_name"]
115
116 # Set the field values on the model instance.
117 for k, v in fields.items():
118 setattr(obj, k, v)
119
120 obj.save(using=using)
121
122 return obj
123
124
125 class ScheduledJobsManager(BaseManager.from_queryset(RestrictedQuerySet), ExtendedManager):
126 pass
127
[end of nautobot/extras/managers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/extras/managers.py b/nautobot/extras/managers.py
--- a/nautobot/extras/managers.py
+++ b/nautobot/extras/managers.py
@@ -8,6 +8,20 @@
class JobResultManager(BaseManager.from_queryset(RestrictedQuerySet), TaskResultManager):
+ def get_task(self, task_id):
+ """Get result for task by ``task_id``.
+
+ This overloads `TaskResultManager.get_task` provided by `django-celery-results` to manage custom
+ behaviors for integration with Nautobot.
+ """
+ try:
+ return self.get(id=task_id)
+ except self.model.DoesNotExist:
+ if self._last_id == task_id:
+ self.warn_if_repeatable_read()
+ self._last_id = task_id
+ return self.model(id=task_id)
+
@transaction_retry(max_retries=2)
def store_result(
self,
| {"golden_diff": "diff --git a/nautobot/extras/managers.py b/nautobot/extras/managers.py\n--- a/nautobot/extras/managers.py\n+++ b/nautobot/extras/managers.py\n@@ -8,6 +8,20 @@\n \n \n class JobResultManager(BaseManager.from_queryset(RestrictedQuerySet), TaskResultManager):\n+ def get_task(self, task_id):\n+ \"\"\"Get result for task by ``task_id``.\n+\n+ This overloads `TaskResultManager.get_task` provided by `django-celery-results` to manage custom\n+ behaviors for integration with Nautobot.\n+ \"\"\"\n+ try:\n+ return self.get(id=task_id)\n+ except self.model.DoesNotExist:\n+ if self._last_id == task_id:\n+ self.warn_if_repeatable_read()\n+ self._last_id = task_id\n+ return self.model(id=task_id)\n+\n @transaction_retry(max_retries=2)\n def store_result(\n self,\n", "issue": "The command 'nautobot-server celery result task_id' says \"Cannot resolve keyword 'task_id' into field\"\n### Environment\r\n* Nautobot version : v2.1.4\r\n* Python version: 3.10.12\r\n* Database platform, version: psql (PostgreSQL) 14.11 (Ubuntu 14.11-0ubuntu0.22.04.1)\r\n* Middleware(s): NA\r\n\r\n### Steps to Reproduce\r\n1. open terminal as nautobot user \r\n2. enter command :- 'nautobot-server celery result 505........89f' where the last argument would be task_id\r\n\r\n### Expected Behavior\r\nI expect to see the result of the task whose task_id is mentioned.\r\n\r\n### Observed Behavior\r\nInstead the following exception is returned\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/nautobot/bin/nautobot-server\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/opt/nautobot/lib/python3.10/site-packages/nautobot/core/cli/__init__.py\", line 52, in main\r\n run_app(\r\n File \"/opt/nautobot/lib/python3.10/site-packages/nautobot/core/runner/runner.py\", line 297, in run_app\r\n management.execute_from_command_line([runner_name, command, *command_args])\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/nautobot/core/management/commands/celery.py\", line 23, in run_from_argv\r\n celery_main.main()\r\n File \"/opt/nautobot/lib/python3.10/site-packages/click/core.py\", line 1078, in main\r\n rv = self.invoke(ctx)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/opt/nautobot/lib/python3.10/site-packages/click/core.py\", line 1434, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/click/core.py\", line 783, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/click/decorators.py\", line 33, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/celery/bin/base.py\", line 134, in caller\r\n return f(ctx, *args, **kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/celery/bin/result.py\", line 27, in result\r\n value = task_result.traceback if traceback else task_result.get()\r\n File \"/opt/nautobot/lib/python3.10/site-packages/celery/result.py\", line 251, in get\r\n return self.backend.wait_for_pending(\r\n File 
\"/opt/nautobot/lib/python3.10/site-packages/celery/backends/base.py\", line 755, in wait_for_pending\r\n meta = self.wait_for(\r\n File \"/opt/nautobot/lib/python3.10/site-packages/celery/backends/base.py\", line 782, in wait_for\r\n meta = self.get_task_meta(task_id)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/celery/backends/base.py\", line 608, in get_task_meta\r\n meta = self._get_task_meta_for(task_id)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django_celery_results/backends/database.py\", line 137, in _get_task_meta_for\r\n obj = self.TaskModel._default_manager.get_task(task_id)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django_celery_results/managers.py\", line 111, in get_task\r\n return self.get(task_id=task_id)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/manager.py\", line 85, in manager_method\r\n return getattr(self.get_queryset(), name)(*args, **kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py\", line 424, in get\r\n clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/nautobot/core/models/querysets.py\", line 92, in filter\r\n return super().filter(*args, **self.split_composite_key_into_kwargs(composite_key, **kwargs))\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py\", line 941, in filter\r\n return self._filter_or_exclude(False, args, kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py\", line 961, in _filter_or_exclude\r\n clone._filter_or_exclude_inplace(negate, args, kwargs)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/query.py\", line 968, in _filter_or_exclude_inplace\r\n self._query.add_q(Q(*args, **kwargs))\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py\", line 1416, in add_q\r\n clause, _ = self._add_q(q_object, self.used_aliases)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py\", line 1435, in _add_q\r\n child_clause, needed_inner = self.build_filter(\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py\", line 1309, in build_filter\r\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py\", line 1135, in solve_lookup_type\r\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\r\n File \"/opt/nautobot/lib/python3.10/site-packages/django/db/models/sql/query.py\", line 1562, in names_to_path\r\n raise FieldError(\"Cannot resolve keyword '%s' into field. \"\r\ndjango.core.exceptions.FieldError: Cannot resolve keyword 'task_id' into field. 
Choices are: _custom_field_data, celery_kwargs, date_created, date_done, files, id, job_log_entries, job_model, job_model_id, meta, name, result, scheduled_job, scheduled_job_id, status, task_args, task_kwargs, task_name, traceback, user, user_id, worker\r\n```\r\n\r\n\r\nHere is the image which shows usage of the command\n", "before_files": [{"content": "from celery import states\nfrom django.utils import timezone\nfrom django_celery_beat.managers import ExtendedManager\nfrom django_celery_results.managers import TaskResultManager, transaction_retry\n\nfrom nautobot.core.models import BaseManager\nfrom nautobot.core.models.querysets import RestrictedQuerySet\n\n\nclass JobResultManager(BaseManager.from_queryset(RestrictedQuerySet), TaskResultManager):\n @transaction_retry(max_retries=2)\n def store_result(\n self,\n task_id,\n result,\n status,\n traceback=None,\n meta=None,\n periodic_task_name=None,\n task_name=None,\n task_args=None,\n task_kwargs=None,\n celery_kwargs=None,\n job_model_id=None,\n scheduled_job_id=None,\n worker=None,\n user_id=None,\n using=None,\n content_type=None,\n content_encoding=None,\n ):\n \"\"\"\n Store the result and status of a Celery task.\n\n This overloads default model options provided by `django-celery-results` to manage custom\n behaviors for integration with Nautobot. Specifically these changes are:\n\n - Ignore incoming `content_type` and `content_encoding` fields as Nautobot explicitly only uses\n JSON utf-8 encoding for Celery messages.\n - Ensure that `name` is set to `task_name` if not otherwise set.\n - Only set `date_done` if the task has reached a ready state (execution completed),\n otherwise keep it null.\n\n Args:\n task_id (uuid): UUID of task.\n periodic_task_name (str): Celery periodic task name. (not used by Nautobot)\n task_name (str): Celery task name.\n task_args (list): Task arguments.\n task_kwargs (dict): Task kwargs.\n celery_kwargs (dict): Celery kwargs (kwargs passed to apply_async).\n job_model_id (uuid): UUID of the Job model instance of the task being run.\n scheduled_job_id (uuid): UUID of the ScheduledJob model instance that initiated\n this task, or None if not scheduled.\n result (obj): Return value of the task, or an exception instance raised\n by the task.\n status (str): Task status. See `JobResultStatusChoices` for a list of possible status\n values.\n worker (str): Worker that executes the task.\n user_id (uuid): UUID of the user that initiated the task.\n using (str): Django database connection to use.\n traceback (str): Traceback string taken at the point of exception (only passed if the\n task failed).\n meta (json): JSON-serialized result meta data (this contains e.g. children).\n content_type: Ignored. Kept for interface compatibility.\n content_encoding: Ignored. 
Kept for interface compatibility.\n\n Returns:\n JobResult\n \"\"\"\n\n # Prepare the fields for creating/updating a `JobResult`.\n fields = {\n \"status\": status,\n \"result\": result,\n \"traceback\": traceback,\n \"meta\": meta,\n \"date_done\": None,\n \"task_name\": task_name,\n \"task_args\": task_args,\n \"task_kwargs\": task_kwargs,\n \"celery_kwargs\": celery_kwargs,\n \"job_model_id\": job_model_id,\n \"scheduled_job_id\": scheduled_job_id,\n \"user_id\": user_id,\n \"worker\": worker,\n }\n from nautobot.extras.models.jobs import Job\n\n # Need to have a try/except block here\n # because sometimes job_model_id will be None.\n try:\n job = Job.objects.get(id=job_model_id)\n if job.has_sensitive_variables:\n del fields[\"task_args\"]\n del fields[\"task_kwargs\"]\n except Job.DoesNotExist:\n pass\n\n obj, created = self.using(using).get_or_create(id=task_id, defaults=fields)\n\n if not created:\n # Make sure `date_done` is allowed to stay null until the task reacheas a ready state.\n #\n # Default behavior in `django-celery-results` has this field as a\n # `DateField(auto_now=True)` which just automatically updates the `date_done` field on every\n # state transition. This is different than Celery's default behavior (and the current\n # behavior of Nautobot) to keep it null until there is a state transition to a ready state\n # (e.g. `SUCCESS`, `REVOKED`, `FAILURE`).\n if fields[\"status\"] in states.READY_STATES:\n fields[\"date_done\"] = timezone.now()\n\n # Always make sure the Job `name` is set.\n if not obj.name and fields[\"task_name\"]:\n fields[\"name\"] = fields[\"task_name\"]\n\n # Set the field values on the model instance.\n for k, v in fields.items():\n setattr(obj, k, v)\n\n obj.save(using=using)\n\n return obj\n\n\nclass ScheduledJobsManager(BaseManager.from_queryset(RestrictedQuerySet), ExtendedManager):\n pass\n", "path": "nautobot/extras/managers.py"}]} | 3,581 | 216 |
gh_patches_debug_22459 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-2807 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AUS-TAS-KI reports negative wind generation sometimes
Small values, so probably self-consumption. I'll prepare a fix
[Kibana](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:AUS-TAS-KI,type:phrase),type:phrase,value:AUS-TAS-KI),query:(match:(extra.key:(query:AUS-TAS-KI,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))):
>November 17th 2020, 17:23:16.743
invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 3, 23, 16, 425060, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.338, 'solar': 0.0, 'wind': -0.003, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.003
AUS-TAS-KI
ERROR
>November 17th 2020, 17:02:28.794invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 3, 2, 27, 648934, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.217, 'solar': 0.0, 'wind': -0.006, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.006
AUS-TAS-KI
ERROR
>November 17th 2020, 16:49:52.790invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 2, 49, 49, 351083, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.264, 'solar': 0.0, 'wind': -0.017, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.017
AUS-TAS-KI
ERROR
</issue>
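The reported magnitudes are only a few kW, which fits the self-consumption explanation above. A minimal sketch of clamping such readings to zero; the 0.1 MW cut-off is an assumption for illustration, not an official threshold:

```python
def clamp_self_consumption(wind_mw: float, cutoff_mw: float = 0.1) -> float:
    # Treat small negative generation readings (likely self-consumption) as zero,
    # while leaving genuinely large negative values visible for validation.
    if -cutoff_mw < wind_mw < 0:
        return 0.0
    return wind_mw

assert clamp_self_consumption(-0.003) == 0.0    # value seen in the logs above
assert clamp_self_consumption(1.338) == 1.338   # normal positive output is untouched
```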
<code>
[start of parsers/AUS_TAS_KI.py]
1 # Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456
2 # Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636
3 # A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island
4 # As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html
5 # About the data, the feed we get seems to be counters with a 2-second interval.
6 # That means that if we fetch these counters every 15 minutes, we are only reading "instantaneous" meters that could differ from the total quantity of energy at play. To get the exact data, we would need to have a parser running constantly to collect those 2-sec interval counters.
7
8 import asyncio
9 import json
10 import logging
11 import arrow
12 from signalr import Connection
13 from requests import Session
14
15 class SignalR:
16 def __init__(self, url):
17 self.url = url
18
19 def update_res(self, msg):
20 if (msg != {}):
21 self.res = msg
22
23 def get_value(self, hub, method):
24 self.res = {}
25 with Session() as session:
26 #create a connection
27 connection = Connection(self.url, session)
28 chat = connection.register_hub(hub)
29 chat.client.on(method, self.update_res)
30 connection.start()
31 connection.wait(3)
32 connection.close()
33 return self.res
34
35 def parse_payload(logger, payload):
36 technologies_parsed = {}
37 logger.debug(f"serie : {json.dumps(payload)}")
38 for technology in payload['technologies']:
39 assert technology['unit'] == 'kW'
40 # The upstream API gives us kW, we need MW
41 technologies_parsed[technology['id']] = int(technology['value'])/1000
42 logger.debug(f"production : {json.dumps(technologies_parsed)}")
43
44 biodiesel_percent = payload['biodiesel']['percent']
45
46 return technologies_parsed, biodiesel_percent
47
48 # Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid
49 def format_storage_techs(technologies_parsed):
50 storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']
51 battery_production = storage_techs if storage_techs > 0 else 0
52 battery_storage = storage_techs if storage_techs < 0 else 0
53
54 return battery_production, battery_storage
55
56 def fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):
57
58 if target_datetime is not None:
59 raise NotImplementedError('The datasource currently implemented is only real time')
60
61 payload = SignalR("https://data.ajenti.com.au/live/signalr").get_value("TagHub", "Dashboard")
62 technologies_parsed, biodiesel_percent = parse_payload(logger, payload)
63 battery_production, battery_storage = format_storage_techs(technologies_parsed)
64 return {
65 'zoneKey': zone_key,
66 'datetime': arrow.now(tz='Australia/Currie').datetime,
67 'production': {
68 'battery discharge': battery_production,
69 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,
70 'coal': 0,
71 'gas': 0,
72 'hydro': 0,
73 'nuclear': 0,
74 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,
75 'solar': technologies_parsed['solar'],
76 'wind': technologies_parsed['wind'],
77 'geothermal': 0,
78 'unknown': 0
79 },
80 'storage': {
81 'battery': battery_storage*-1
82 },
83 'source': 'https://data.ajenti.com.au/KIREIP/index.html'
84 }
85
86 if __name__ == '__main__':
87 print(fetch_production())
88
[end of parsers/AUS_TAS_KI.py]
</code>
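The `format_storage_techs` helper above relies on a sign convention: battery and flywheel report negative power while charging and positive while discharging. A standalone restatement of that split (the return shape is illustrative, not the parser's exact output):

```python
def split_storage(battery_kw: float, flywheel_kw: float):
    net = battery_kw + flywheel_kw
    discharge_kw = net if net > 0 else 0.0   # feeding the grid counts as production
    charging_kw = -net if net < 0 else 0.0   # charging reported as a positive storage value
    return discharge_kw, charging_kw

# A battery absorbing 4 kW while the flywheel is idle:
assert split_storage(-4.0, 0.0) == (0.0, 4.0)
```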
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/AUS_TAS_KI.py b/parsers/AUS_TAS_KI.py
--- a/parsers/AUS_TAS_KI.py
+++ b/parsers/AUS_TAS_KI.py
@@ -34,7 +34,13 @@
def parse_payload(logger, payload):
technologies_parsed = {}
- logger.debug(f"serie : {json.dumps(payload)}")
+ if not 'technologies' in payload:
+ raise KeyError(
+ f"No 'technologies' in payload\n"
+ f"serie : {json.dumps(payload)}"
+ )
+ else:
+ logger.debug(f"serie : {json.dumps(payload)}")
for technology in payload['technologies']:
assert technology['unit'] == 'kW'
# The upstream API gives us kW, we need MW
@@ -73,7 +79,7 @@
'nuclear': 0,
'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,
'solar': technologies_parsed['solar'],
- 'wind': technologies_parsed['wind'],
+ 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption
'geothermal': 0,
'unknown': 0
},
| {"golden_diff": "diff --git a/parsers/AUS_TAS_KI.py b/parsers/AUS_TAS_KI.py\n--- a/parsers/AUS_TAS_KI.py\n+++ b/parsers/AUS_TAS_KI.py\n@@ -34,7 +34,13 @@\n \n def parse_payload(logger, payload):\n technologies_parsed = {}\n- logger.debug(f\"serie : {json.dumps(payload)}\")\n+ if not 'technologies' in payload:\n+ raise KeyError(\n+ f\"No 'technologies' in payload\\n\"\n+ f\"serie : {json.dumps(payload)}\"\n+ )\n+ else:\n+ logger.debug(f\"serie : {json.dumps(payload)}\")\n for technology in payload['technologies']:\n assert technology['unit'] == 'kW'\n # The upstream API gives us kW, we need MW\n@@ -73,7 +79,7 @@\n 'nuclear': 0,\n 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,\n 'solar': technologies_parsed['solar'],\n- 'wind': technologies_parsed['wind'],\n+ 'wind': 0 if technologies_parsed['wind'] < 0 and technologies_parsed['wind'] > -0.1 else technologies_parsed['wind'], #If wind between 0 and -0.1 set to 0 to ignore self-consumption\n 'geothermal': 0,\n 'unknown': 0\n },\n", "issue": "AUS-TAS-KI reports negative wind generation sometimes\nSmall values, so probably self-consumption. I'll prepare a fix\r\n\r\n[Kibana](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:AUS-TAS-KI,type:phrase),type:phrase,value:AUS-TAS-KI),query:(match:(extra.key:(query:AUS-TAS-KI,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))):\r\n\r\n>November 17th 2020, 17:23:16.743\r\ninvalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 3, 23, 16, 425060, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.338, 'solar': 0.0, 'wind': -0.003, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.003\r\nAUS-TAS-KI\r\nERROR\r\n>November 17th 2020, 17:02:28.794invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 3, 2, 27, 648934, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.217, 'solar': 0.0, 'wind': -0.006, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.006\r\nAUS-TAS-KI\r\nERROR\r\n>November 17th 2020, 16:49:52.790invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 2, 49, 49, 351083, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.264, 'solar': 0.0, 'wind': -0.017, 'geothermal': 0, 'unknown': 0}, 'storage': 
{'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.017\r\nAUS-TAS-KI\r\nERROR\nAUS-TAS-KI reports negative wind generation sometimes\nSmall values, so probably self-consumption. I'll prepare a fix\r\n\r\n[Kibana](https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:AUS-TAS-KI,type:phrase),type:phrase,value:AUS-TAS-KI),query:(match:(extra.key:(query:AUS-TAS-KI,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))):\r\n\r\n>November 17th 2020, 17:23:16.743\r\ninvalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 3, 23, 16, 425060, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.338, 'solar': 0.0, 'wind': -0.003, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.003\r\nAUS-TAS-KI\r\nERROR\r\n>November 17th 2020, 17:02:28.794invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 3, 2, 27, 648934, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.217, 'solar': 0.0, 'wind': -0.006, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.006\r\nAUS-TAS-KI\r\nERROR\r\n>November 17th 2020, 16:49:52.790invalid point: {'zoneKey': 'AUS-TAS-KI', 'datetime': datetime.datetime(2020, 11, 18, 2, 49, 49, 351083, tzinfo=tzfile('/usr/share/zoneinfo/Australia/Currie')), 'production': {'battery discharge': 0, 'biomass': 0.0, 'coal': 0, 'gas': 0, 'hydro': 0, 'nuclear': 0, 'oil': 1.264, 'solar': 0.0, 'wind': -0.017, 'geothermal': 0, 'unknown': 0}, 'storage': {'battery': 0.004}, 'source': 'https://data.ajenti.com.au/KIREIP/index.html', 'schemaVersion': 3}, reason:AUS-TAS-KI: key wind has negative value -0.017\r\nAUS-TAS-KI\r\nERROR\n", "before_files": [{"content": "# Initial PR https://github.com/tmrowco/electricitymap-contrib/pull/2456\n# Discussion thread https://github.com/tmrowco/electricitymap-contrib/issues/636\n# A promotion webpage for King's Island energy production is here : https://www.hydro.com.au/clean-energy/hybrid-energy-solutions/success-stories/king-island\n# As of 09/2020, it embeds with <iframe> the URI https://data.ajenti.com.au/KIREIP/index.html\n# About the data, the feed we get seems to be counters with a 2 seconds interval.\n# That means that if we fetch these counters every 15 minutes, we only are reading \"instantaneous\" metters that could differ from the total 
quantity of energies at play. To get the very exact data, we would need to have a parser running constanty to collect those 2-sec interval counters.\n\nimport asyncio\nimport json\nimport logging\nimport arrow\nfrom signalr import Connection\nfrom requests import Session\n\nclass SignalR:\n def __init__(self, url):\n self.url = url\n \n def update_res(self, msg):\n if (msg != {}):\n self.res = msg\n\n def get_value(self, hub, method):\n self.res = {}\n with Session() as session:\n #create a connection\n connection = Connection(self.url, session)\n chat = connection.register_hub(hub)\n chat.client.on(method, self.update_res)\n connection.start()\n connection.wait(3)\n connection.close()\n return self.res\n \ndef parse_payload(logger, payload):\n technologies_parsed = {}\n logger.debug(f\"serie : {json.dumps(payload)}\")\n for technology in payload['technologies']:\n assert technology['unit'] == 'kW'\n # The upstream API gives us kW, we need MW\n technologies_parsed[technology['id']] = int(technology['value'])/1000\n logger.debug(f\"production : {json.dumps(technologies_parsed)}\")\n\n biodiesel_percent = payload['biodiesel']['percent']\n\n return technologies_parsed, biodiesel_percent\n\n# Both keys battery and flywheel are negative when storing energy, and positive when feeding energy to the grid\ndef format_storage_techs(technologies_parsed):\n storage_techs = technologies_parsed['battery']+technologies_parsed['flywheel']\n battery_production = storage_techs if storage_techs > 0 else 0\n battery_storage = storage_techs if storage_techs < 0 else 0\n\n return battery_production, battery_storage\n\ndef fetch_production(zone_key='AUS-TAS-KI', session=None, target_datetime=None, logger: logging.Logger = logging.getLogger(__name__)):\n\n if target_datetime is not None:\n raise NotImplementedError('The datasource currently implemented is only real time')\n \n payload = SignalR(\"https://data.ajenti.com.au/live/signalr\").get_value(\"TagHub\", \"Dashboard\")\n technologies_parsed, biodiesel_percent = parse_payload(logger, payload)\n battery_production, battery_storage = format_storage_techs(technologies_parsed)\n return {\n 'zoneKey': zone_key,\n 'datetime': arrow.now(tz='Australia/Currie').datetime,\n 'production': {\n 'battery discharge': battery_production,\n 'biomass': technologies_parsed['diesel']*biodiesel_percent/100,\n 'coal': 0,\n 'gas': 0,\n 'hydro': 0,\n 'nuclear': 0,\n 'oil': technologies_parsed['diesel']*(100-biodiesel_percent)/100,\n 'solar': technologies_parsed['solar'],\n 'wind': technologies_parsed['wind'],\n 'geothermal': 0,\n 'unknown': 0\n },\n 'storage': {\n 'battery': battery_storage*-1\n },\n 'source': 'https://data.ajenti.com.au/KIREIP/index.html'\n }\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AUS_TAS_KI.py"}]} | 3,916 | 317 |
gh_patches_debug_35477 | rasdani/github-patches | git_diff | sunpy__sunpy-4485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove sunpy.instr.aia.aiaprep
The `aiaprep` function should be removed from the `sunpy.instr` subpackage. Any AIA specific functionality should transition to the new [`aiapy` package](https://gitlab.com/LMSAL_HUB/aia_hub/aiapy).
This is part of the broader goal of moving instrument-specific functionality out of the core sunpy package and into affiliated packages.
</issue>
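For downstream users, the deprecation message in the module below already names `aiapy`'s `register` as the replacement; a migration sketch, assuming `aiapy` is installed and a level 1 AIA FITS file is available (the file name is a placeholder):

```python
import sunpy.map
from aiapy.calibrate import register

# Placeholder path to a level 1 AIA observation.
aia_map = sunpy.map.Map("aia_lev1_171a_image.fits")

# register() rotates, rescales to 0.6 arcsec/pixel and recenters,
# returning the level 1.5 map that aiaprep() used to produce.
aia_map_l15 = register(aia_map)
```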
<code>
[start of sunpy/instr/aia.py]
1 """
2 This module provides processing routines for data captured with the AIA
3 instrument on SDO.
4 """
5 import numpy as np
6
7 import astropy.units as u
8
9 from sunpy.map.sources.sdo import AIAMap, HMIMap
10 from sunpy.util.decorators import deprecated
11
12 __all__ = ['aiaprep']
13
14
15 @deprecated("2.0", alternative="`register` in aiapy (https://aiapy.readthedocs.io) for converting \
16 AIA images to level 1.5")
17 def aiaprep(aiamap):
18 """
19 Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5
20 `~sunpy.map.sources.sdo.AIAMap`.
21
22 Rotates, scales and translates the image so that solar North is aligned
23 with the y axis, each pixel is 0.6 arcsec across, and the center of the
24 Sun is at the center of the image. The actual transformation is done by Map's
25 `~sunpy.map.mapbase.GenericMap.rotate` method.
26
27 This function is similar in functionality to ``aia_prep`` in SSWIDL, but
28 it does not use the same transformation to rotate the image and it handles
29 the meta data differently. It should therefore not be expected to produce
30 the same results.
31
32 Parameters
33 ----------
34 aiamap : `~sunpy.map.sources.sdo.AIAMap`
35 A `sunpy.map.Map` from AIA.
36
37 Returns
38 -------
39 `~sunpy.map.sources.sdo.AIAMap`:
40 A level 1.5 copy of `~sunpy.map.sources.sdo.AIAMap`.
41
42 Notes
43 -----
44 This routine modifies the header information to the standard PCi_j WCS
45 formalism. The FITS header resulting in saving a file after this
46 procedure will therefore differ from the original file.
47 """
48
49 if not isinstance(aiamap, (AIAMap, HMIMap)):
50 raise ValueError("Input must be an AIAMap or HMIMap.")
51
52 # Target scale is 0.6 arcsec/pixel, but this needs to be adjusted if the map
53 # has already been rescaled.
54 if ((aiamap.scale[0] / 0.6).round() != 1.0 * u.arcsec / u.pix
55 and aiamap.data.shape != (4096, 4096)):
56 scale = (aiamap.scale[0] / 0.6).round() * 0.6 * u.arcsec
57 else:
58 scale = 0.6 * u.arcsec # pragma: no cover # can't test this because it needs a full res image
59 scale_factor = aiamap.scale[0] / scale
60
61 tempmap = aiamap.rotate(recenter=True, scale=scale_factor.value, missing=aiamap.min())
62
63 # extract center from padded aiamap.rotate output
64 # crpix1 and crpix2 will be equal (recenter=True), as aiaprep does not work with submaps
65 center = np.floor(tempmap.meta['crpix1'])
66 range_side = (center + np.array([-1, 1]) * aiamap.data.shape[0] / 2) * u.pix
67 newmap = tempmap.submap(u.Quantity([range_side[0], range_side[0]]),
68 u.Quantity([range_side[1] - 1 * u.pix,
69 range_side[1] - 1 * u.pix]))
70
71 newmap.meta['r_sun'] = newmap.meta['rsun_obs'] / newmap.meta['cdelt1']
72 newmap.meta['lvl_num'] = 1.5
73 newmap.meta['bitpix'] = -64
74
75 return newmap
76
[end of sunpy/instr/aia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sunpy/instr/aia.py b/sunpy/instr/aia.py
deleted file mode 100644
--- a/sunpy/instr/aia.py
+++ /dev/null
@@ -1,75 +0,0 @@
-"""
-This module provides processing routines for data captured with the AIA
-instrument on SDO.
-"""
-import numpy as np
-
-import astropy.units as u
-
-from sunpy.map.sources.sdo import AIAMap, HMIMap
-from sunpy.util.decorators import deprecated
-
-__all__ = ['aiaprep']
-
-
-@deprecated("2.0", alternative="`register` in aiapy (https://aiapy.readthedocs.io) for converting \
-AIA images to level 1.5")
-def aiaprep(aiamap):
- """
- Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5
- `~sunpy.map.sources.sdo.AIAMap`.
-
- Rotates, scales and translates the image so that solar North is aligned
- with the y axis, each pixel is 0.6 arcsec across, and the center of the
- Sun is at the center of the image. The actual transformation is done by Map's
- `~sunpy.map.mapbase.GenericMap.rotate` method.
-
- This function is similar in functionality to ``aia_prep`` in SSWIDL, but
- it does not use the same transformation to rotate the image and it handles
- the meta data differently. It should therefore not be expected to produce
- the same results.
-
- Parameters
- ----------
- aiamap : `~sunpy.map.sources.sdo.AIAMap`
- A `sunpy.map.Map` from AIA.
-
- Returns
- -------
- `~sunpy.map.sources.sdo.AIAMap`:
- A level 1.5 copy of `~sunpy.map.sources.sdo.AIAMap`.
-
- Notes
- -----
- This routine modifies the header information to the standard PCi_j WCS
- formalism. The FITS header resulting in saving a file after this
- procedure will therefore differ from the original file.
- """
-
- if not isinstance(aiamap, (AIAMap, HMIMap)):
- raise ValueError("Input must be an AIAMap or HMIMap.")
-
- # Target scale is 0.6 arcsec/pixel, but this needs to be adjusted if the map
- # has already been rescaled.
- if ((aiamap.scale[0] / 0.6).round() != 1.0 * u.arcsec / u.pix
- and aiamap.data.shape != (4096, 4096)):
- scale = (aiamap.scale[0] / 0.6).round() * 0.6 * u.arcsec
- else:
- scale = 0.6 * u.arcsec # pragma: no cover # can't test this because it needs a full res image
- scale_factor = aiamap.scale[0] / scale
-
- tempmap = aiamap.rotate(recenter=True, scale=scale_factor.value, missing=aiamap.min())
-
- # extract center from padded aiamap.rotate output
- # crpix1 and crpix2 will be equal (recenter=True), as aiaprep does not work with submaps
- center = np.floor(tempmap.meta['crpix1'])
- range_side = (center + np.array([-1, 1]) * aiamap.data.shape[0] / 2) * u.pix
- newmap = tempmap.submap(u.Quantity([range_side[0], range_side[0]]),
- u.Quantity([range_side[1] - 1 * u.pix,
- range_side[1] - 1 * u.pix]))
-
- newmap.meta['r_sun'] = newmap.meta['rsun_obs'] / newmap.meta['cdelt1']
- newmap.meta['lvl_num'] = 1.5
- newmap.meta['bitpix'] = -64
-
- return newmap
| {"golden_diff": "diff --git a/sunpy/instr/aia.py b/sunpy/instr/aia.py\ndeleted file mode 100644\n--- a/sunpy/instr/aia.py\n+++ /dev/null\n@@ -1,75 +0,0 @@\n-\"\"\"\n-This module provides processing routines for data captured with the AIA\n-instrument on SDO.\n-\"\"\"\n-import numpy as np\n-\n-import astropy.units as u\n-\n-from sunpy.map.sources.sdo import AIAMap, HMIMap\n-from sunpy.util.decorators import deprecated\n-\n-__all__ = ['aiaprep']\n-\n-\n-@deprecated(\"2.0\", alternative=\"`register` in aiapy (https://aiapy.readthedocs.io) for converting \\\n-AIA images to level 1.5\")\n-def aiaprep(aiamap):\n- \"\"\"\n- Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5\n- `~sunpy.map.sources.sdo.AIAMap`.\n-\n- Rotates, scales and translates the image so that solar North is aligned\n- with the y axis, each pixel is 0.6 arcsec across, and the center of the\n- Sun is at the center of the image. The actual transformation is done by Map's\n- `~sunpy.map.mapbase.GenericMap.rotate` method.\n-\n- This function is similar in functionality to ``aia_prep`` in SSWIDL, but\n- it does not use the same transformation to rotate the image and it handles\n- the meta data differently. It should therefore not be expected to produce\n- the same results.\n-\n- Parameters\n- ----------\n- aiamap : `~sunpy.map.sources.sdo.AIAMap`\n- A `sunpy.map.Map` from AIA.\n-\n- Returns\n- -------\n- `~sunpy.map.sources.sdo.AIAMap`:\n- A level 1.5 copy of `~sunpy.map.sources.sdo.AIAMap`.\n-\n- Notes\n- -----\n- This routine modifies the header information to the standard PCi_j WCS\n- formalism. The FITS header resulting in saving a file after this\n- procedure will therefore differ from the original file.\n- \"\"\"\n-\n- if not isinstance(aiamap, (AIAMap, HMIMap)):\n- raise ValueError(\"Input must be an AIAMap or HMIMap.\")\n-\n- # Target scale is 0.6 arcsec/pixel, but this needs to be adjusted if the map\n- # has already been rescaled.\n- if ((aiamap.scale[0] / 0.6).round() != 1.0 * u.arcsec / u.pix\n- and aiamap.data.shape != (4096, 4096)):\n- scale = (aiamap.scale[0] / 0.6).round() * 0.6 * u.arcsec\n- else:\n- scale = 0.6 * u.arcsec # pragma: no cover # can't test this because it needs a full res image\n- scale_factor = aiamap.scale[0] / scale\n-\n- tempmap = aiamap.rotate(recenter=True, scale=scale_factor.value, missing=aiamap.min())\n-\n- # extract center from padded aiamap.rotate output\n- # crpix1 and crpix2 will be equal (recenter=True), as aiaprep does not work with submaps\n- center = np.floor(tempmap.meta['crpix1'])\n- range_side = (center + np.array([-1, 1]) * aiamap.data.shape[0] / 2) * u.pix\n- newmap = tempmap.submap(u.Quantity([range_side[0], range_side[0]]),\n- u.Quantity([range_side[1] - 1 * u.pix,\n- range_side[1] - 1 * u.pix]))\n-\n- newmap.meta['r_sun'] = newmap.meta['rsun_obs'] / newmap.meta['cdelt1']\n- newmap.meta['lvl_num'] = 1.5\n- newmap.meta['bitpix'] = -64\n-\n- return newmap\n", "issue": "Remove sunpy.instr.aia.aiaprep\nThe `aiaprep` function should be removed from the `sunpy.instr` subpackage. 
Any AIA specific functionality should transition to the new [`aiapy` package](https://gitlab.com/LMSAL_HUB/aia_hub/aiapy).\r\n\r\nThis is part of the broader goal of moving instrument-specific functionality out of the core sunpy package and into affiliated packages.\n", "before_files": [{"content": "\"\"\"\nThis module provides processing routines for data captured with the AIA\ninstrument on SDO.\n\"\"\"\nimport numpy as np\n\nimport astropy.units as u\n\nfrom sunpy.map.sources.sdo import AIAMap, HMIMap\nfrom sunpy.util.decorators import deprecated\n\n__all__ = ['aiaprep']\n\n\n@deprecated(\"2.0\", alternative=\"`register` in aiapy (https://aiapy.readthedocs.io) for converting \\\nAIA images to level 1.5\")\ndef aiaprep(aiamap):\n \"\"\"\n Processes a level 1 `~sunpy.map.sources.sdo.AIAMap` into a level 1.5\n `~sunpy.map.sources.sdo.AIAMap`.\n\n Rotates, scales and translates the image so that solar North is aligned\n with the y axis, each pixel is 0.6 arcsec across, and the center of the\n Sun is at the center of the image. The actual transformation is done by Map's\n `~sunpy.map.mapbase.GenericMap.rotate` method.\n\n This function is similar in functionality to ``aia_prep`` in SSWIDL, but\n it does not use the same transformation to rotate the image and it handles\n the meta data differently. It should therefore not be expected to produce\n the same results.\n\n Parameters\n ----------\n aiamap : `~sunpy.map.sources.sdo.AIAMap`\n A `sunpy.map.Map` from AIA.\n\n Returns\n -------\n `~sunpy.map.sources.sdo.AIAMap`:\n A level 1.5 copy of `~sunpy.map.sources.sdo.AIAMap`.\n\n Notes\n -----\n This routine modifies the header information to the standard PCi_j WCS\n formalism. The FITS header resulting in saving a file after this\n procedure will therefore differ from the original file.\n \"\"\"\n\n if not isinstance(aiamap, (AIAMap, HMIMap)):\n raise ValueError(\"Input must be an AIAMap or HMIMap.\")\n\n # Target scale is 0.6 arcsec/pixel, but this needs to be adjusted if the map\n # has already been rescaled.\n if ((aiamap.scale[0] / 0.6).round() != 1.0 * u.arcsec / u.pix\n and aiamap.data.shape != (4096, 4096)):\n scale = (aiamap.scale[0] / 0.6).round() * 0.6 * u.arcsec\n else:\n scale = 0.6 * u.arcsec # pragma: no cover # can't test this because it needs a full res image\n scale_factor = aiamap.scale[0] / scale\n\n tempmap = aiamap.rotate(recenter=True, scale=scale_factor.value, missing=aiamap.min())\n\n # extract center from padded aiamap.rotate output\n # crpix1 and crpix2 will be equal (recenter=True), as aiaprep does not work with submaps\n center = np.floor(tempmap.meta['crpix1'])\n range_side = (center + np.array([-1, 1]) * aiamap.data.shape[0] / 2) * u.pix\n newmap = tempmap.submap(u.Quantity([range_side[0], range_side[0]]),\n u.Quantity([range_side[1] - 1 * u.pix,\n range_side[1] - 1 * u.pix]))\n\n newmap.meta['r_sun'] = newmap.meta['rsun_obs'] / newmap.meta['cdelt1']\n newmap.meta['lvl_num'] = 1.5\n newmap.meta['bitpix'] = -64\n\n return newmap\n", "path": "sunpy/instr/aia.py"}]} | 1,601 | 942 |
gh_patches_debug_4218 | rasdani/github-patches | git_diff | biolab__orange3-4252 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Validation: Remove deprecated constructor calls
**Describe the bug**
Our tests (and code?) use deprecated calls, resulting in many occurrences of the following line in the test output:
`DeprecationWarning: calling Validation's constructor with data and learners is deprecated;`
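For context, the call-style change the warning points to looks roughly like this (a sketch only, assuming the newer callable-style `CrossValidation` API; names are illustrative):

```python
# Sketch of migrating off the deprecated constructor call.
from Orange.data import Table
from Orange.evaluation import CrossValidation
from Orange.classification import LogisticRegressionLearner

data = Table("iris")
learners = [LogisticRegressionLearner()]

# Deprecated: passing data and learners straight to the constructor
# results = CrossValidation(data, learners, k=5)

# Preferred: configure the validation first, then call it with data and learners
cv = CrossValidation(k=5)
results = cv(data, learners)
```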
**To Reproduce**
Run the tests or look at the Travis CI output.
</issue>
<code>
[start of Orange/ensembles/stack.py]
1 import numpy as np
2
3 from Orange.base import Learner, Model
4 from Orange.modelling import Fitter
5 from Orange.classification import LogisticRegressionLearner
6 from Orange.classification.base_classification import LearnerClassification
7 from Orange.data import Domain, ContinuousVariable, Table
8 from Orange.evaluation import CrossValidation
9 from Orange.regression import RidgeRegressionLearner
10 from Orange.regression.base_regression import LearnerRegression
11
12
13 __all__ = ['StackedLearner', 'StackedClassificationLearner',
14 'StackedRegressionLearner', 'StackedFitter']
15
16
17 class StackedModel(Model):
18 def __init__(self, models, aggregate, use_prob=True, domain=None):
19 super().__init__(domain=domain)
20 self.models = models
21 self.aggregate = aggregate
22 self.use_prob = use_prob
23
24 def predict_storage(self, data):
25 if self.use_prob:
26 probs = [m(data, Model.Probs) for m in self.models]
27 X = np.hstack(probs)
28 else:
29 pred = [m(data) for m in self.models]
30 X = np.column_stack(pred)
31 Y = np.repeat(np.nan, X.shape[0])
32 stacked_data = data.transform(self.aggregate.domain)
33 stacked_data.X = X
34 stacked_data.Y = Y
35 return self.aggregate(
36 stacked_data, Model.ValueProbs if self.use_prob else Model.Value)
37
38
39 class StackedLearner(Learner):
40 """
41 Constructs a stacked model by fitting an aggregator
42 over the results of base models.
43
44 K-fold cross-validation is used to get predictions of the base learners
45 and fit the aggregator to obtain a stacked model.
46
47 Args:
48 learners (list):
49 list of `Learner`s used for base models
50
51 aggregate (Learner):
52 Learner used to fit the meta model, aggregating predictions
53 of base models
54
55 k (int):
56 number of folds for cross-validation
57
58 Returns:
59 instance of StackedModel
60 """
61
62 __returns__ = StackedModel
63
64 def __init__(self, learners, aggregate, k=5, preprocessors=None):
65 super().__init__(preprocessors=preprocessors)
66 self.learners = learners
67 self.aggregate = aggregate
68 self.k = k
69 self.params = vars()
70
71 def fit_storage(self, data):
72 res = CrossValidation(data, self.learners, k=self.k)
73 if data.domain.class_var.is_discrete:
74 X = np.hstack(res.probabilities)
75 use_prob = True
76 else:
77 X = res.predicted.T
78 use_prob = False
79 dom = Domain([ContinuousVariable('f{}'.format(i + 1))
80 for i in range(X.shape[1])],
81 data.domain.class_var)
82 stacked_data = data.transform(dom)
83 stacked_data.X = X
84 stacked_data.Y = res.actual
85 models = [l(data) for l in self.learners]
86 aggregate_model = self.aggregate(stacked_data)
87 return StackedModel(models, aggregate_model, use_prob=use_prob,
88 domain=data.domain)
89
90
91 class StackedClassificationLearner(StackedLearner, LearnerClassification):
92 """
93 Subclass of StackedLearner intended for classification tasks.
94
95 Same as the super class, but has a default
96 classification-specific aggregator (`LogisticRegressionLearner`).
97 """
98
99 def __init__(self, learners, aggregate=LogisticRegressionLearner(), k=5,
100 preprocessors=None):
101 super().__init__(learners, aggregate, k=k, preprocessors=preprocessors)
102
103
104 class StackedRegressionLearner(StackedLearner, LearnerRegression):
105 """
106 Subclass of StackedLearner intended for regression tasks.
107
108 Same as the super class, but has a default
109 regression-specific aggregator (`RidgeRegressionLearner`).
110 """
111 def __init__(self, learners, aggregate=RidgeRegressionLearner(), k=5,
112 preprocessors=None):
113 super().__init__(learners, aggregate, k=k, preprocessors=preprocessors)
114
115
116 class StackedFitter(Fitter):
117 __fits__ = {'classification': StackedClassificationLearner,
118 'regression': StackedRegressionLearner}
119
120 def __init__(self, learners, **kwargs):
121 kwargs['learners'] = learners
122 super().__init__(**kwargs)
123
124
125 if __name__ == '__main__':
126 import Orange
127 iris = Table('iris')
128 knn = Orange.modelling.KNNLearner()
129 tree = Orange.modelling.TreeLearner()
130 sl = StackedFitter([tree, knn])
131 m = sl(iris[::2])
132 print(m(iris[1::2], Model.Value))
133
134 housing = Table('housing')
135 sl = StackedFitter([tree, knn])
136 m = sl(housing[::2])
137 print(list(zip(housing[1:10:2].Y, m(housing[1:10:2], Model.Value))))
138
[end of Orange/ensembles/stack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Orange/ensembles/stack.py b/Orange/ensembles/stack.py
--- a/Orange/ensembles/stack.py
+++ b/Orange/ensembles/stack.py
@@ -69,7 +69,8 @@
self.params = vars()
def fit_storage(self, data):
- res = CrossValidation(data, self.learners, k=self.k)
+ cv = CrossValidation(k=self.k)
+ res = cv(data, self.learners)
if data.domain.class_var.is_discrete:
X = np.hstack(res.probabilities)
use_prob = True
| {"golden_diff": "diff --git a/Orange/ensembles/stack.py b/Orange/ensembles/stack.py\n--- a/Orange/ensembles/stack.py\n+++ b/Orange/ensembles/stack.py\n@@ -69,7 +69,8 @@\n self.params = vars()\n \n def fit_storage(self, data):\n- res = CrossValidation(data, self.learners, k=self.k)\n+ cv = CrossValidation(k=self.k)\n+ res = cv(data, self.learners)\n if data.domain.class_var.is_discrete:\n X = np.hstack(res.probabilities)\n use_prob = True\n", "issue": "Validation: Remove deprecated constructor calls\n**Describe the bug**\r\nOur tests (and code?) use deprecated calls, resulting in many of the following lines in output of tests:\r\n`DeprecationWarning: calling Validation's constructor with data and learners is deprecated;`\r\n\r\n**To Reproduce**\r\nRun tests or look at travis.\n", "before_files": [{"content": "import numpy as np\n\nfrom Orange.base import Learner, Model\nfrom Orange.modelling import Fitter\nfrom Orange.classification import LogisticRegressionLearner\nfrom Orange.classification.base_classification import LearnerClassification\nfrom Orange.data import Domain, ContinuousVariable, Table\nfrom Orange.evaluation import CrossValidation\nfrom Orange.regression import RidgeRegressionLearner\nfrom Orange.regression.base_regression import LearnerRegression\n\n\n__all__ = ['StackedLearner', 'StackedClassificationLearner',\n 'StackedRegressionLearner', 'StackedFitter']\n\n\nclass StackedModel(Model):\n def __init__(self, models, aggregate, use_prob=True, domain=None):\n super().__init__(domain=domain)\n self.models = models\n self.aggregate = aggregate\n self.use_prob = use_prob\n\n def predict_storage(self, data):\n if self.use_prob:\n probs = [m(data, Model.Probs) for m in self.models]\n X = np.hstack(probs)\n else:\n pred = [m(data) for m in self.models]\n X = np.column_stack(pred)\n Y = np.repeat(np.nan, X.shape[0])\n stacked_data = data.transform(self.aggregate.domain)\n stacked_data.X = X\n stacked_data.Y = Y\n return self.aggregate(\n stacked_data, Model.ValueProbs if self.use_prob else Model.Value)\n\n\nclass StackedLearner(Learner):\n \"\"\"\n Constructs a stacked model by fitting an aggregator\n over the results of base models.\n\n K-fold cross-validation is used to get predictions of the base learners\n and fit the aggregator to obtain a stacked model.\n\n Args:\n learners (list):\n list of `Learner`s used for base models\n\n aggregate (Learner):\n Learner used to fit the meta model, aggregating predictions\n of base models\n\n k (int):\n number of folds for cross-validation\n\n Returns:\n instance of StackedModel\n \"\"\"\n\n __returns__ = StackedModel\n\n def __init__(self, learners, aggregate, k=5, preprocessors=None):\n super().__init__(preprocessors=preprocessors)\n self.learners = learners\n self.aggregate = aggregate\n self.k = k\n self.params = vars()\n\n def fit_storage(self, data):\n res = CrossValidation(data, self.learners, k=self.k)\n if data.domain.class_var.is_discrete:\n X = np.hstack(res.probabilities)\n use_prob = True\n else:\n X = res.predicted.T\n use_prob = False\n dom = Domain([ContinuousVariable('f{}'.format(i + 1))\n for i in range(X.shape[1])],\n data.domain.class_var)\n stacked_data = data.transform(dom)\n stacked_data.X = X\n stacked_data.Y = res.actual\n models = [l(data) for l in self.learners]\n aggregate_model = self.aggregate(stacked_data)\n return StackedModel(models, aggregate_model, use_prob=use_prob,\n domain=data.domain)\n\n\nclass StackedClassificationLearner(StackedLearner, LearnerClassification):\n \"\"\"\n Subclass of StackedLearner 
intended for classification tasks.\n\n Same as the super class, but has a default\n classification-specific aggregator (`LogisticRegressionLearner`).\n \"\"\"\n\n def __init__(self, learners, aggregate=LogisticRegressionLearner(), k=5,\n preprocessors=None):\n super().__init__(learners, aggregate, k=k, preprocessors=preprocessors)\n\n\nclass StackedRegressionLearner(StackedLearner, LearnerRegression):\n \"\"\"\n Subclass of StackedLearner intended for regression tasks.\n\n Same as the super class, but has a default\n regression-specific aggregator (`RidgeRegressionLearner`).\n \"\"\"\n def __init__(self, learners, aggregate=RidgeRegressionLearner(), k=5,\n preprocessors=None):\n super().__init__(learners, aggregate, k=k, preprocessors=preprocessors)\n\n\nclass StackedFitter(Fitter):\n __fits__ = {'classification': StackedClassificationLearner,\n 'regression': StackedRegressionLearner}\n\n def __init__(self, learners, **kwargs):\n kwargs['learners'] = learners\n super().__init__(**kwargs)\n\n\nif __name__ == '__main__':\n import Orange\n iris = Table('iris')\n knn = Orange.modelling.KNNLearner()\n tree = Orange.modelling.TreeLearner()\n sl = StackedFitter([tree, knn])\n m = sl(iris[::2])\n print(m(iris[1::2], Model.Value))\n\n housing = Table('housing')\n sl = StackedFitter([tree, knn])\n m = sl(housing[::2])\n print(list(zip(housing[1:10:2].Y, m(housing[1:10:2], Model.Value))))\n", "path": "Orange/ensembles/stack.py"}]} | 2,010 | 139 |
gh_patches_debug_28538 | rasdani/github-patches | git_diff | PrefectHQ__prefect-2136 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flow S3ResultHandler Fails for Dask Worker with nthreads > 1
## Description
Specifying S3ResultHandler for a Flow running on Dask worker(s) with nthreads > 1 fails with: `KeyError: 'credential_provider'`, likely due to a race condition in using the global boto3 session (boto3.client) between threads.
## Expected Behavior
In a multithreaded environment, boto3 recommends creating a session per thread rather than sharing the default boto3 session (i.e. the module-level `boto3.client`). See the boto3 documentation at: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing
This thread in Prefect's Community Slack describes using this session-per-thread approach to successfully fix a similar issue when using boto3 in Prefect tasks: https://prefect-community.slack.com/archives/CM28LL405/p1581434710167100
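A minimal sketch of that session-per-thread pattern (illustrative names, not Prefect code):

```python
# Each thread builds its own boto3 Session instead of sharing the module-level default.
import threading
import boto3

_local = threading.local()

def s3_client():
    if not hasattr(_local, "client"):
        session = boto3.session.Session()      # per-thread session
        _local.client = session.client("s3")   # client created from that session
    return _local.client
```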
## Reproduction
A Flow with tasks that can run in parallel (e.g. mapped tasks or different Flow branches) and where the Flow-level result_handler is set to S3ResultHandler should reproduce this behavior.
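A rough shape of such a flow is sketched below (hedged: Prefect 0.x import paths and keyword names may differ slightly between versions, so treat this as a hypothetical reproduction outline rather than a verified script):

```python
from prefect import Flow, task
from prefect.engine.result_handlers import S3ResultHandler
from prefect.engine.executors import DaskExecutor

@task
def add_one(x):
    return x + 1

with Flow("s3-handler-repro", result_handler=S3ResultHandler(bucket="my-bucket")) as flow:
    add_one.map(list(range(20)))  # mapped tasks can run concurrently on one worker's threads

# Running against Dask workers started with nthreads > 1 surfaces the KeyError above.
flow.run(executor=DaskExecutor(address="tcp://dask-scheduler:8786"))
```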
Full stack trace:
```
February 29th 2020 at 8:09:43am | prefect.CloudTaskRunner
ERROR
Failed to set task state with error: KeyError('credential_provider')
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/task_runner.py", line 117, in call_runner_target_handlers
cloud_state = prepare_state_for_cloud(new_state)
File "/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/utilities.py", line 21, in prepare_state_for_cloud
res.store_safe_value()
File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result.py", line 93, in store_safe_value
value = self.result_handler.write(self.value)
File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 103, in write
self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri)
File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 67, in client
self.initialize_client()
File "/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py", line 60, in initialize_client
aws_secret_access_key=aws_secret_access_key,
File "/opt/conda/lib/python3.7/site-packages/boto3/__init__.py", line 91, in client
return _get_default_session().client(*args, **kwargs)
File "/opt/conda/lib/python3.7/site-packages/boto3/session.py", line 263, in client
aws_session_token=aws_session_token, config=config)
File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 823, in create_client
credentials = self.get_credentials()
File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 428, in get_credentials
'credential_provider').load_credentials()
File "/opt/conda/lib/python3.7/site-packages/botocore/session.py", line 923, in get_component
del self._deferred[name]
KeyError: 'credential_provider'
```
## Environment
We create a long-running Dask cluster where our Dask workers are started with --nprocs 1 --nthreads 3.
(Thanks to @JLouSRM for identifying this issue and capturing log evidence!)
</issue>
<code>
[start of src/prefect/engine/result_handlers/s3_result_handler.py]
1 import base64
2 import io
3 import json
4 import uuid
5 from typing import TYPE_CHECKING, Any
6
7 import cloudpickle
8 import pendulum
9
10 from prefect.client import Secret
11 from prefect.engine.result_handlers import ResultHandler
12
13 if TYPE_CHECKING:
14 import boto3
15
16
17 class S3ResultHandler(ResultHandler):
18 """
19 Result Handler for writing to and reading from an AWS S3 Bucket.
20
21 For authentication, there are two options: you can set a Prefect Secret containing
22 your AWS access keys which will be passed directly to the `boto3` client, or you can
23 [configure your flow's runtime environment](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#guide-configuration)
24 for `boto3`.
25
26 Args:
27 - bucket (str): the name of the bucket to write to / read from
28 - aws_credentials_secret (str, optional): the name of the Prefect Secret
29 that stores your AWS credentials; this Secret must be a JSON string
30 with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY` which will be
31 passed directly to `boto3`. If not provided, `boto3`
32 will fall back on standard AWS rules for authentication.
33 """
34
35 def __init__(self, bucket: str, aws_credentials_secret: str = None) -> None:
36 self.bucket = bucket
37 self.aws_credentials_secret = aws_credentials_secret
38 super().__init__()
39
40 def initialize_client(self) -> None:
41 """
42 Initializes an S3 Client.
43 """
44 import boto3
45
46 aws_access_key = None
47 aws_secret_access_key = None
48
49 if self.aws_credentials_secret:
50 aws_credentials = Secret(self.aws_credentials_secret).get()
51 if isinstance(aws_credentials, str):
52 aws_credentials = json.loads(aws_credentials)
53
54 aws_access_key = aws_credentials["ACCESS_KEY"]
55 aws_secret_access_key = aws_credentials["SECRET_ACCESS_KEY"]
56
57 s3_client = boto3.client(
58 "s3",
59 aws_access_key_id=aws_access_key,
60 aws_secret_access_key=aws_secret_access_key,
61 )
62 self.client = s3_client
63
64 @property
65 def client(self) -> "boto3.client":
66 if not hasattr(self, "_client"):
67 self.initialize_client()
68 return self._client
69
70 @client.setter
71 def client(self, val: Any) -> None:
72 self._client = val
73
74 def __getstate__(self) -> dict:
75 state = self.__dict__.copy()
76 if "_client" in state:
77 del state["_client"]
78 return state
79
80 def __setstate__(self, state: dict) -> None:
81 self.__dict__.update(state)
82
83 def write(self, result: Any) -> str:
84 """
85 Given a result, writes the result to a location in S3
86 and returns the resulting URI.
87
88 Args:
89 - result (Any): the written result
90
91 Returns:
92 - str: the S3 URI
93 """
94 date = pendulum.now("utc").format("Y/M/D") # type: ignore
95 uri = "{date}/{uuid}.prefect_result".format(date=date, uuid=uuid.uuid4())
96 self.logger.debug("Starting to upload result to {}...".format(uri))
97
98 ## prepare data
99 binary_data = base64.b64encode(cloudpickle.dumps(result))
100 stream = io.BytesIO(binary_data)
101
102 ## upload
103 self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri)
104 self.logger.debug("Finished uploading result to {}.".format(uri))
105 return uri
106
107 def read(self, uri: str) -> Any:
108 """
109 Given a uri, reads a result from S3, reads it and returns it
110
111 Args:
112 - uri (str): the S3 URI
113
114 Returns:
115 - Any: the read result
116 """
117 try:
118 self.logger.debug("Starting to download result from {}...".format(uri))
119 stream = io.BytesIO()
120
121 ## download
122 self.client.download_fileobj(Bucket=self.bucket, Key=uri, Fileobj=stream)
123 stream.seek(0)
124
125 try:
126 return_val = cloudpickle.loads(base64.b64decode(stream.read()))
127 except EOFError:
128 return_val = None
129 self.logger.debug("Finished downloading result from {}.".format(uri))
130
131 except Exception as exc:
132 self.logger.exception(
133 "Unexpected error while reading from result handler: {}".format(
134 repr(exc)
135 )
136 )
137 return_val = None
138
139 return return_val
140
[end of src/prefect/engine/result_handlers/s3_result_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/engine/result_handlers/s3_result_handler.py b/src/prefect/engine/result_handlers/s3_result_handler.py
--- a/src/prefect/engine/result_handlers/s3_result_handler.py
+++ b/src/prefect/engine/result_handlers/s3_result_handler.py
@@ -7,6 +7,7 @@
import cloudpickle
import pendulum
+import prefect
from prefect.client import Secret
from prefect.engine.result_handlers import ResultHandler
@@ -54,7 +55,10 @@
aws_access_key = aws_credentials["ACCESS_KEY"]
aws_secret_access_key = aws_credentials["SECRET_ACCESS_KEY"]
- s3_client = boto3.client(
+ # use a new boto session when initializing in case we are in a new thread
+ # see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?#multithreading-multiprocessing
+ session = boto3.session.Session()
+ s3_client = session.client(
"s3",
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key,
@@ -63,8 +67,13 @@
@property
def client(self) -> "boto3.client":
- if not hasattr(self, "_client"):
+ """
+ Initializes a client if we believe we are in a new thread.
+ We consider ourselves in a new thread if we haven't stored a client yet in the current context.
+ """
+ if not prefect.context.get("boto3client"):
self.initialize_client()
+ prefect.context["boto3client"] = self._client
return self._client
@client.setter
| {"golden_diff": "diff --git a/src/prefect/engine/result_handlers/s3_result_handler.py b/src/prefect/engine/result_handlers/s3_result_handler.py\n--- a/src/prefect/engine/result_handlers/s3_result_handler.py\n+++ b/src/prefect/engine/result_handlers/s3_result_handler.py\n@@ -7,6 +7,7 @@\n import cloudpickle\n import pendulum\n \n+import prefect\n from prefect.client import Secret\n from prefect.engine.result_handlers import ResultHandler\n \n@@ -54,7 +55,10 @@\n aws_access_key = aws_credentials[\"ACCESS_KEY\"]\n aws_secret_access_key = aws_credentials[\"SECRET_ACCESS_KEY\"]\n \n- s3_client = boto3.client(\n+ # use a new boto session when initializing in case we are in a new thread\n+ # see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?#multithreading-multiprocessing\n+ session = boto3.session.Session()\n+ s3_client = session.client(\n \"s3\",\n aws_access_key_id=aws_access_key,\n aws_secret_access_key=aws_secret_access_key,\n@@ -63,8 +67,13 @@\n \n @property\n def client(self) -> \"boto3.client\":\n- if not hasattr(self, \"_client\"):\n+ \"\"\"\n+ Initializes a client if we believe we are in a new thread.\n+ We consider ourselves in a new thread if we haven't stored a client yet in the current context.\n+ \"\"\"\n+ if not prefect.context.get(\"boto3client\"):\n self.initialize_client()\n+ prefect.context[\"boto3client\"] = self._client\n return self._client\n \n @client.setter\n", "issue": "Flow S3ResultHandler Fails for Dask Worker with nthreads > 1\n## Description\r\nSpecifying S3ResultHandler for a Flow running on Dask worker(s) with nthreads > 1 fails with: `KeyError: 'credential_provider'`, likely due to a race condition in using the global boto3 session (boto3.client) between threads. \r\n\r\n## Expected Behavior\r\nIn a multithreaded environment, boto3 recommends creating a session per thread rather than sharing the default boto3 session, i.e. boto3.client. See boto3 documentation at: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html?highlight=multithreading#multithreading-multiprocessing\r\n\r\nThis thread in Prefect's Community Slack describes using this session-per-thread approach to successfully fix a similar issue when using boto3 in Prefect tasks: https://prefect-community.slack.com/archives/CM28LL405/p1581434710167100\r\n\r\n## Reproduction\r\nA Flow with tasks that can run in parallel (e.g. 
mapped tasks or different Flow branches) and where the Flow-level result_handler is set to S3ResultHandler should reproduce this behavior.\r\n\r\nFull stack trace:\r\n```\r\nFebruary 29th 2020 at 8:09:43am | prefect.CloudTaskRunner\r\nERROR \r\nFailed to set task state with error: KeyError('credential_provider')\r\nTraceback (most recent call last):\r\n File \"/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/task_runner.py\", line 117, in call_runner_target_handlers\r\n cloud_state = prepare_state_for_cloud(new_state)\r\n File \"/opt/conda/lib/python3.7/site-packages/prefect/engine/cloud/utilities.py\", line 21, in prepare_state_for_cloud\r\n res.store_safe_value()\r\n File \"/opt/conda/lib/python3.7/site-packages/prefect/engine/result.py\", line 93, in store_safe_value\r\n value = self.result_handler.write(self.value)\r\n File \"/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py\", line 103, in write\r\n self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri)\r\n File \"/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py\", line 67, in client\r\n self.initialize_client()\r\n File \"/opt/conda/lib/python3.7/site-packages/prefect/engine/result_handlers/s3_result_handler.py\", line 60, in initialize_client\r\n aws_secret_access_key=aws_secret_access_key,\r\n File \"/opt/conda/lib/python3.7/site-packages/boto3/__init__.py\", line 91, in client\r\n return _get_default_session().client(*args, **kwargs)\r\n File \"/opt/conda/lib/python3.7/site-packages/boto3/session.py\", line 263, in client\r\n aws_session_token=aws_session_token, config=config)\r\n File \"/opt/conda/lib/python3.7/site-packages/botocore/session.py\", line 823, in create_client\r\n credentials = self.get_credentials()\r\n File \"/opt/conda/lib/python3.7/site-packages/botocore/session.py\", line 428, in get_credentials\r\n 'credential_provider').load_credentials()\r\n File \"/opt/conda/lib/python3.7/site-packages/botocore/session.py\", line 923, in get_component\r\n del self._deferred[name]\r\nKeyError: 'credential_provider'\r\n```\r\n\r\n## Environment\r\nWe create a long-running Dask cluster where our Dask workers are started with --nprocs 1 --nthreads 3.\r\n\r\n(Thanks to @JLouSRM for identifying this issue and capturing log evidence!)\r\n\n", "before_files": [{"content": "import base64\nimport io\nimport json\nimport uuid\nfrom typing import TYPE_CHECKING, Any\n\nimport cloudpickle\nimport pendulum\n\nfrom prefect.client import Secret\nfrom prefect.engine.result_handlers import ResultHandler\n\nif TYPE_CHECKING:\n import boto3\n\n\nclass S3ResultHandler(ResultHandler):\n \"\"\"\n Result Handler for writing to and reading from an AWS S3 Bucket.\n\n For authentication, there are two options: you can set a Prefect Secret containing\n your AWS access keys which will be passed directly to the `boto3` client, or you can\n [configure your flow's runtime environment](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#guide-configuration)\n for `boto3`.\n\n Args:\n - bucket (str): the name of the bucket to write to / read from\n - aws_credentials_secret (str, optional): the name of the Prefect Secret\n that stores your AWS credentials; this Secret must be a JSON string\n with two keys: `ACCESS_KEY` and `SECRET_ACCESS_KEY` which will be\n passed directly to `boto3`. 
If not provided, `boto3`\n will fall back on standard AWS rules for authentication.\n \"\"\"\n\n def __init__(self, bucket: str, aws_credentials_secret: str = None) -> None:\n self.bucket = bucket\n self.aws_credentials_secret = aws_credentials_secret\n super().__init__()\n\n def initialize_client(self) -> None:\n \"\"\"\n Initializes an S3 Client.\n \"\"\"\n import boto3\n\n aws_access_key = None\n aws_secret_access_key = None\n\n if self.aws_credentials_secret:\n aws_credentials = Secret(self.aws_credentials_secret).get()\n if isinstance(aws_credentials, str):\n aws_credentials = json.loads(aws_credentials)\n\n aws_access_key = aws_credentials[\"ACCESS_KEY\"]\n aws_secret_access_key = aws_credentials[\"SECRET_ACCESS_KEY\"]\n\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=aws_access_key,\n aws_secret_access_key=aws_secret_access_key,\n )\n self.client = s3_client\n\n @property\n def client(self) -> \"boto3.client\":\n if not hasattr(self, \"_client\"):\n self.initialize_client()\n return self._client\n\n @client.setter\n def client(self, val: Any) -> None:\n self._client = val\n\n def __getstate__(self) -> dict:\n state = self.__dict__.copy()\n if \"_client\" in state:\n del state[\"_client\"]\n return state\n\n def __setstate__(self, state: dict) -> None:\n self.__dict__.update(state)\n\n def write(self, result: Any) -> str:\n \"\"\"\n Given a result, writes the result to a location in S3\n and returns the resulting URI.\n\n Args:\n - result (Any): the written result\n\n Returns:\n - str: the S3 URI\n \"\"\"\n date = pendulum.now(\"utc\").format(\"Y/M/D\") # type: ignore\n uri = \"{date}/{uuid}.prefect_result\".format(date=date, uuid=uuid.uuid4())\n self.logger.debug(\"Starting to upload result to {}...\".format(uri))\n\n ## prepare data\n binary_data = base64.b64encode(cloudpickle.dumps(result))\n stream = io.BytesIO(binary_data)\n\n ## upload\n self.client.upload_fileobj(stream, Bucket=self.bucket, Key=uri)\n self.logger.debug(\"Finished uploading result to {}.\".format(uri))\n return uri\n\n def read(self, uri: str) -> Any:\n \"\"\"\n Given a uri, reads a result from S3, reads it and returns it\n\n Args:\n - uri (str): the S3 URI\n\n Returns:\n - Any: the read result\n \"\"\"\n try:\n self.logger.debug(\"Starting to download result from {}...\".format(uri))\n stream = io.BytesIO()\n\n ## download\n self.client.download_fileobj(Bucket=self.bucket, Key=uri, Fileobj=stream)\n stream.seek(0)\n\n try:\n return_val = cloudpickle.loads(base64.b64decode(stream.read()))\n except EOFError:\n return_val = None\n self.logger.debug(\"Finished downloading result from {}.\".format(uri))\n\n except Exception as exc:\n self.logger.exception(\n \"Unexpected error while reading from result handler: {}\".format(\n repr(exc)\n )\n )\n return_val = None\n\n return return_val\n", "path": "src/prefect/engine/result_handlers/s3_result_handler.py"}]} | 2,684 | 364 |
gh_patches_debug_20030 | rasdani/github-patches | git_diff | getsentry__sentry-python-2405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support upcoming version of RQ
### Problem Statement
I'm [RQ](https://github.com/rq/rq)'s maintainer and I use Sentry in production. I plan on releasing a major improvement to RQ sometime in the next few weeks, but this upcoming change breaks Sentry's integration.
### Solution Brainstorm
This [pull request](https://github.com/rq/rq/pull/1964) introduces a change that causes Sentry's RQ integration to fail. From what I can tell, the failure is caused by [this change](https://github.com/rq/rq/pull/1964/commits/fadbb233d81c5c1a53bfcae7149b80888c1e7bdd).
Can we get Sentry to support this change prior to the next major release of RQ? I plan on releasing this sometime in the next few weeks.
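One way to make the integration's failure check robust to the reordering is sketched below (a hedged sketch in line with the fix shown later in this record; ordering matters because `job.is_failed` can refresh the cached `_status`):

```python
# Sketch only: read the cached status before falling back to job.is_failed.
from rq.job import JobStatus

def job_has_failed(job):
    return job._status == JobStatus.FAILED or job.is_failed
```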
</issue>
<code>
[start of sentry_sdk/integrations/rq.py]
1 from __future__ import absolute_import
2
3 import weakref
4 from sentry_sdk.consts import OP
5
6 from sentry_sdk.api import continue_trace
7 from sentry_sdk.hub import Hub
8 from sentry_sdk.integrations import DidNotEnable, Integration
9 from sentry_sdk.integrations.logging import ignore_logger
10 from sentry_sdk.tracing import TRANSACTION_SOURCE_TASK
11 from sentry_sdk.utils import (
12 capture_internal_exceptions,
13 event_from_exception,
14 format_timestamp,
15 parse_version,
16 )
17
18 try:
19 from rq.queue import Queue
20 from rq.timeouts import JobTimeoutException
21 from rq.version import VERSION as RQ_VERSION
22 from rq.worker import Worker
23 except ImportError:
24 raise DidNotEnable("RQ not installed")
25
26 from sentry_sdk._types import TYPE_CHECKING
27
28 if TYPE_CHECKING:
29 from typing import Any, Callable, Dict
30
31 from sentry_sdk._types import EventProcessor
32 from sentry_sdk.utils import ExcInfo
33
34 from rq.job import Job
35
36
37 class RqIntegration(Integration):
38 identifier = "rq"
39
40 @staticmethod
41 def setup_once():
42 # type: () -> None
43
44 version = parse_version(RQ_VERSION)
45
46 if version is None:
47 raise DidNotEnable("Unparsable RQ version: {}".format(RQ_VERSION))
48
49 if version < (0, 6):
50 raise DidNotEnable("RQ 0.6 or newer is required.")
51
52 old_perform_job = Worker.perform_job
53
54 def sentry_patched_perform_job(self, job, *args, **kwargs):
55 # type: (Any, Job, *Queue, **Any) -> bool
56 hub = Hub.current
57 integration = hub.get_integration(RqIntegration)
58
59 if integration is None:
60 return old_perform_job(self, job, *args, **kwargs)
61
62 client = hub.client
63 assert client is not None
64
65 with hub.push_scope() as scope:
66 scope.clear_breadcrumbs()
67 scope.add_event_processor(_make_event_processor(weakref.ref(job)))
68
69 transaction = continue_trace(
70 job.meta.get("_sentry_trace_headers") or {},
71 op=OP.QUEUE_TASK_RQ,
72 name="unknown RQ task",
73 source=TRANSACTION_SOURCE_TASK,
74 )
75
76 with capture_internal_exceptions():
77 transaction.name = job.func_name
78
79 with hub.start_transaction(
80 transaction, custom_sampling_context={"rq_job": job}
81 ):
82 rv = old_perform_job(self, job, *args, **kwargs)
83
84 if self.is_horse:
85 # We're inside of a forked process and RQ is
86 # about to call `os._exit`. Make sure that our
87 # events get sent out.
88 client.flush()
89
90 return rv
91
92 Worker.perform_job = sentry_patched_perform_job
93
94 old_handle_exception = Worker.handle_exception
95
96 def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):
97 # type: (Worker, Any, *Any, **Any) -> Any
98 if job.is_failed:
99 _capture_exception(exc_info) # type: ignore
100
101 return old_handle_exception(self, job, *exc_info, **kwargs)
102
103 Worker.handle_exception = sentry_patched_handle_exception
104
105 old_enqueue_job = Queue.enqueue_job
106
107 def sentry_patched_enqueue_job(self, job, **kwargs):
108 # type: (Queue, Any, **Any) -> Any
109 hub = Hub.current
110 if hub.get_integration(RqIntegration) is not None:
111 if hub.scope.span is not None:
112 job.meta["_sentry_trace_headers"] = dict(
113 hub.iter_trace_propagation_headers()
114 )
115
116 return old_enqueue_job(self, job, **kwargs)
117
118 Queue.enqueue_job = sentry_patched_enqueue_job
119
120 ignore_logger("rq.worker")
121
122
123 def _make_event_processor(weak_job):
124 # type: (Callable[[], Job]) -> EventProcessor
125 def event_processor(event, hint):
126 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
127 job = weak_job()
128 if job is not None:
129 with capture_internal_exceptions():
130 extra = event.setdefault("extra", {})
131 extra["rq-job"] = {
132 "job_id": job.id,
133 "func": job.func_name,
134 "args": job.args,
135 "kwargs": job.kwargs,
136 "description": job.description,
137 }
138
139 if job.enqueued_at:
140 extra["rq-job"]["enqueued_at"] = format_timestamp(job.enqueued_at)
141 if job.started_at:
142 extra["rq-job"]["started_at"] = format_timestamp(job.started_at)
143
144 if "exc_info" in hint:
145 with capture_internal_exceptions():
146 if issubclass(hint["exc_info"][0], JobTimeoutException):
147 event["fingerprint"] = ["rq", "JobTimeoutException", job.func_name]
148
149 return event
150
151 return event_processor
152
153
154 def _capture_exception(exc_info, **kwargs):
155 # type: (ExcInfo, **Any) -> None
156 hub = Hub.current
157 if hub.get_integration(RqIntegration) is None:
158 return
159
160 # If an integration is there, a client has to be there.
161 client = hub.client # type: Any
162
163 event, hint = event_from_exception(
164 exc_info,
165 client_options=client.options,
166 mechanism={"type": "rq", "handled": False},
167 )
168
169 hub.capture_event(event, hint=hint)
170
[end of sentry_sdk/integrations/rq.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/rq.py b/sentry_sdk/integrations/rq.py
--- a/sentry_sdk/integrations/rq.py
+++ b/sentry_sdk/integrations/rq.py
@@ -20,6 +20,7 @@
from rq.timeouts import JobTimeoutException
from rq.version import VERSION as RQ_VERSION
from rq.worker import Worker
+ from rq.job import JobStatus
except ImportError:
raise DidNotEnable("RQ not installed")
@@ -95,7 +96,9 @@
def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):
# type: (Worker, Any, *Any, **Any) -> Any
- if job.is_failed:
+ # Note, the order of the `or` here is important,
+ # because calling `job.is_failed` will change `_status`.
+ if job._status == JobStatus.FAILED or job.is_failed:
_capture_exception(exc_info) # type: ignore
return old_handle_exception(self, job, *exc_info, **kwargs)
| {"golden_diff": "diff --git a/sentry_sdk/integrations/rq.py b/sentry_sdk/integrations/rq.py\n--- a/sentry_sdk/integrations/rq.py\n+++ b/sentry_sdk/integrations/rq.py\n@@ -20,6 +20,7 @@\n from rq.timeouts import JobTimeoutException\n from rq.version import VERSION as RQ_VERSION\n from rq.worker import Worker\n+ from rq.job import JobStatus\n except ImportError:\n raise DidNotEnable(\"RQ not installed\")\n \n@@ -95,7 +96,9 @@\n \n def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):\n # type: (Worker, Any, *Any, **Any) -> Any\n- if job.is_failed:\n+ # Note, the order of the `or` here is important,\n+ # because calling `job.is_failed` will change `_status`.\n+ if job._status == JobStatus.FAILED or job.is_failed:\n _capture_exception(exc_info) # type: ignore\n \n return old_handle_exception(self, job, *exc_info, **kwargs)\n", "issue": "Support upcoming version of RQ\n### Problem Statement\n\nI'm [RQ](https://github.com/rq/rq)'s maintainer and I use Sentry in production. I plan on releasing a major improvement to RQ sometime in the next few weeks, but this upcoming change breaks Sentry's integration.\r\n\n\n### Solution Brainstorm\n\nThis [pull request](https://github.com/rq/rq/pull/1964) introduces a change that causes Sentry's RQ integration to fail. From what I can tell, the failure is caused by [this change](https://github.com/rq/rq/pull/1964/commits/fadbb233d81c5c1a53bfcae7149b80888c1e7bdd).\r\n\r\nCan we get Sentry to support this change prior to the next major release of RQ? I plan on releasing this sometime in the next few weeks.\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport weakref\nfrom sentry_sdk.consts import OP\n\nfrom sentry_sdk.api import continue_trace\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import DidNotEnable, Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk.tracing import TRANSACTION_SOURCE_TASK\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n format_timestamp,\n parse_version,\n)\n\ntry:\n from rq.queue import Queue\n from rq.timeouts import JobTimeoutException\n from rq.version import VERSION as RQ_VERSION\n from rq.worker import Worker\nexcept ImportError:\n raise DidNotEnable(\"RQ not installed\")\n\nfrom sentry_sdk._types import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import Any, Callable, Dict\n\n from sentry_sdk._types import EventProcessor\n from sentry_sdk.utils import ExcInfo\n\n from rq.job import Job\n\n\nclass RqIntegration(Integration):\n identifier = \"rq\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n version = parse_version(RQ_VERSION)\n\n if version is None:\n raise DidNotEnable(\"Unparsable RQ version: {}\".format(RQ_VERSION))\n\n if version < (0, 6):\n raise DidNotEnable(\"RQ 0.6 or newer is required.\")\n\n old_perform_job = Worker.perform_job\n\n def sentry_patched_perform_job(self, job, *args, **kwargs):\n # type: (Any, Job, *Queue, **Any) -> bool\n hub = Hub.current\n integration = hub.get_integration(RqIntegration)\n\n if integration is None:\n return old_perform_job(self, job, *args, **kwargs)\n\n client = hub.client\n assert client is not None\n\n with hub.push_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_event_processor(weakref.ref(job)))\n\n transaction = continue_trace(\n job.meta.get(\"_sentry_trace_headers\") or {},\n op=OP.QUEUE_TASK_RQ,\n name=\"unknown RQ task\",\n source=TRANSACTION_SOURCE_TASK,\n )\n\n with 
capture_internal_exceptions():\n transaction.name = job.func_name\n\n with hub.start_transaction(\n transaction, custom_sampling_context={\"rq_job\": job}\n ):\n rv = old_perform_job(self, job, *args, **kwargs)\n\n if self.is_horse:\n # We're inside of a forked process and RQ is\n # about to call `os._exit`. Make sure that our\n # events get sent out.\n client.flush()\n\n return rv\n\n Worker.perform_job = sentry_patched_perform_job\n\n old_handle_exception = Worker.handle_exception\n\n def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):\n # type: (Worker, Any, *Any, **Any) -> Any\n if job.is_failed:\n _capture_exception(exc_info) # type: ignore\n\n return old_handle_exception(self, job, *exc_info, **kwargs)\n\n Worker.handle_exception = sentry_patched_handle_exception\n\n old_enqueue_job = Queue.enqueue_job\n\n def sentry_patched_enqueue_job(self, job, **kwargs):\n # type: (Queue, Any, **Any) -> Any\n hub = Hub.current\n if hub.get_integration(RqIntegration) is not None:\n if hub.scope.span is not None:\n job.meta[\"_sentry_trace_headers\"] = dict(\n hub.iter_trace_propagation_headers()\n )\n\n return old_enqueue_job(self, job, **kwargs)\n\n Queue.enqueue_job = sentry_patched_enqueue_job\n\n ignore_logger(\"rq.worker\")\n\n\ndef _make_event_processor(weak_job):\n # type: (Callable[[], Job]) -> EventProcessor\n def event_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n job = weak_job()\n if job is not None:\n with capture_internal_exceptions():\n extra = event.setdefault(\"extra\", {})\n extra[\"rq-job\"] = {\n \"job_id\": job.id,\n \"func\": job.func_name,\n \"args\": job.args,\n \"kwargs\": job.kwargs,\n \"description\": job.description,\n }\n\n if job.enqueued_at:\n extra[\"rq-job\"][\"enqueued_at\"] = format_timestamp(job.enqueued_at)\n if job.started_at:\n extra[\"rq-job\"][\"started_at\"] = format_timestamp(job.started_at)\n\n if \"exc_info\" in hint:\n with capture_internal_exceptions():\n if issubclass(hint[\"exc_info\"][0], JobTimeoutException):\n event[\"fingerprint\"] = [\"rq\", \"JobTimeoutException\", job.func_name]\n\n return event\n\n return event_processor\n\n\ndef _capture_exception(exc_info, **kwargs):\n # type: (ExcInfo, **Any) -> None\n hub = Hub.current\n if hub.get_integration(RqIntegration) is None:\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n event, hint = event_from_exception(\n exc_info,\n client_options=client.options,\n mechanism={\"type\": \"rq\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n", "path": "sentry_sdk/integrations/rq.py"}]} | 2,342 | 246 |
gh_patches_debug_31571 | rasdani/github-patches | git_diff | svthalia__concrexit-3462 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change the header photos
### Is your feature request related to a problem? Please describe.
The header photos are a bit outdated
### Describe the solution you'd like
Choose some new photos
### Motivation
<img width="1306" alt="image" src="https://user-images.githubusercontent.com/7915741/164710169-e2d4ac27-04f0-48e6-bceb-bc05a1d3c30f.png">
these guys are old
<img width="1306" alt="image" src="https://user-images.githubusercontent.com/7915741/164710211-ad6fa23f-b021-4dab-9f4f-de45b11c56a2.png">
this image is lowres and old
### Describe alternatives you've considered
Not
### Additional context
I have access to the RU Brandportal with nice stock images of the university that we can use
</issue>
<code>
[start of website/thaliawebsite/templatetags/pick_header_image.py]
1 """Get a random header image."""
2 import random
3
4 from django import template
5 from django.contrib.staticfiles.storage import staticfiles_storage
6
7 register = template.Library()
8
9 HEADERS = [
10 "img/headers/banner_default.jpg",
11 "img/headers/banner2.jpg",
12 "img/headers/banner4.jpg",
13 "img/headers/banner5.jpg",
14 "img/headers/banner6.jpg",
15 ]
16
17
18 @register.simple_tag
19 def pick_header_image():
20 """Render a random header image."""
21 return staticfiles_storage.url(random.choice(HEADERS))
22
[end of website/thaliawebsite/templatetags/pick_header_image.py]
[start of website/photos/models.py]
1 import hashlib
2 import logging
3 import os
4 import random
5 from secrets import token_hex
6
7 from django.conf import settings
8 from django.core.exceptions import ValidationError
9 from django.db import models
10 from django.db.models import Count, IntegerField, Value
11 from django.db.models.functions import Coalesce
12 from django.urls import reverse
13 from django.utils.functional import cached_property
14 from django.utils.translation import gettext_lazy as _
15
16 from queryable_properties.managers import QueryablePropertiesManager
17 from queryable_properties.properties import AnnotationProperty
18 from thumbnails.fields import ImageField
19
20 from members.models import Member
21
22 COVER_FILENAME = "cover.jpg"
23
24
25 logger = logging.getLogger(__name__)
26
27
28 def photo_uploadto(instance, filename):
29 ext = os.path.splitext(filename)[1]
30 return f"photos/{instance.album.dirname}/{token_hex(8)}{ext}"
31
32
33 class DuplicatePhotoException(Exception):
34 """Raised when a photo with the same digest already exists in a given album."""
35
36
37 class Photo(models.Model):
38 """Model for a Photo object."""
39
40 objects = QueryablePropertiesManager()
41
42 album = models.ForeignKey(
43 "Album", on_delete=models.CASCADE, verbose_name=_("album")
44 )
45
46 file = ImageField(
47 _("file"),
48 upload_to=photo_uploadto,
49 resize_source_to="source",
50 )
51
52 rotation = models.IntegerField(
53 verbose_name=_("rotation"),
54 default=0,
55 choices=((x, x) for x in (0, 90, 180, 270)),
56 help_text=_("This does not modify the original image file."),
57 )
58
59 _digest = models.CharField(
60 "digest",
61 max_length=40,
62 blank=True,
63 editable=False,
64 )
65
66 num_likes = AnnotationProperty(
67 Coalesce(Count("likes"), Value(0), output_field=IntegerField())
68 )
69
70 def __init__(self, *args, **kwargs):
71 """Initialize Photo object and set the file if it exists."""
72 super().__init__(*args, **kwargs)
73 if self.file:
74 self.original_file = self.file.name
75 else:
76 self.original_file = ""
77
78 def __str__(self):
79 """Return the filename of a Photo object."""
80 return os.path.basename(self.file.name)
81
82 def clean(self):
83 if not self.file._committed:
84 hash_sha1 = hashlib.sha1()
85 for chunk in iter(lambda: self.file.read(4096), b""):
86 hash_sha1.update(chunk)
87 digest = hash_sha1.hexdigest()
88 self._digest = digest
89
90 if (
91 Photo.objects.filter(album=self.album, _digest=digest)
92 .exclude(pk=self.pk)
93 .exists()
94 ):
95 raise ValidationError(
96 {"file": "This photo already exists in this album."}
97 )
98
99 return super().clean()
100
101 def delete(self, using=None, keep_parents=False):
102 removed = super().delete(using, keep_parents)
103 if self.file.name:
104 self.file.delete()
105 return removed
106
107 class Meta:
108 """Meta class for Photo."""
109
110 # Photos are created in order of their filename.
111 ordering = ("pk",)
112
113
114 class Like(models.Model):
115 photo = models.ForeignKey(
116 Photo, null=False, blank=False, related_name="likes", on_delete=models.CASCADE
117 )
118 member = models.ForeignKey(
119 Member, null=True, blank=False, on_delete=models.SET_NULL
120 )
121
122 def __str__(self):
123 return str(self.member) + " " + _("likes") + " " + str(self.photo)
124
125 class Meta:
126 unique_together = ["photo", "member"]
127
128
129 class Album(models.Model):
130 """Model for Album objects."""
131
132 title = models.CharField(
133 _("title"),
134 blank=True,
135 max_length=200,
136 help_text=_("Leave empty to take over the title of the event"),
137 )
138
139 dirname = models.CharField(
140 verbose_name=_("directory name"),
141 max_length=200,
142 )
143
144 date = models.DateField(
145 verbose_name=_("date"),
146 blank=True,
147 help_text=_("Leave empty to take over the date of the event"),
148 )
149
150 slug = models.SlugField(
151 verbose_name=_("slug"),
152 unique=True,
153 )
154
155 hidden = models.BooleanField(verbose_name=_("hidden"), default=False)
156
157 event = models.ForeignKey(
158 "events.Event",
159 on_delete=models.SET_NULL,
160 blank=True,
161 null=True,
162 )
163
164 _cover = models.OneToOneField(
165 Photo,
166 on_delete=models.SET_NULL,
167 blank=True,
168 null=True,
169 related_name="covered_album",
170 verbose_name=_("cover image"),
171 )
172
173 shareable = models.BooleanField(verbose_name=_("shareable"), default=False)
174
175 photosdir = "photos"
176 photospath = os.path.join(settings.MEDIA_ROOT, photosdir)
177
178 @cached_property
179 def cover(self):
180 """Return cover of Album.
181
182 If a cover is not set, return a random photo or None if there are no photos.
183 """
184 cover = None
185 if self._cover is not None:
186 return self._cover
187
188 # Not prefetched because this should be rare and is a lot of data
189 # `exists` is faster in theory, but requires everything to be fetched later anyways
190 if self.photo_set.exists():
191 random.seed(self.dirname)
192 cover = random.choice(self.photo_set.all())
193 return cover
194
195 def __str__(self):
196 """Get string representation of Album."""
197 return f"{self.date:%Y-%m-%d} {self.title}"
198
199 def get_absolute_url(self):
200 """Get url of Album."""
201 return reverse("photos:album", args=[str(self.slug)])
202
203 def clean(self):
204 super().clean()
205 errors = {}
206
207 if not self.title and not self.event:
208 errors.update(
209 {"title": _("This field is required if there is no event selected.")}
210 )
211
212 if not self.date and not self.event:
213 errors.update(
214 {"date": _("This field is required if there is no event selected.")}
215 )
216
217 if errors:
218 raise ValidationError(errors)
219
220 def save(self, **kwargs):
221 """Save album and send appropriate notifications."""
222 # dirname is only set for new objects, to avoid ever changing it
223 if self.pk is None:
224 self.dirname = self.slug
225
226 if not self.title and self.event:
227 self.title = self.event.title
228
229 if not self.date:
230 self.date = self.event.start.date()
231
232 super().save(**kwargs)
233
234 @property
235 def access_token(self):
236 """Return access token for album."""
237 return hashlib.sha256(
238 f"{settings.SECRET_KEY}album{self.pk}".encode()
239 ).hexdigest()
240
241 class Meta:
242 """Meta class for Album."""
243
244 ordering = ("-date", "title")
245
[end of website/photos/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/photos/models.py b/website/photos/models.py
--- a/website/photos/models.py
+++ b/website/photos/models.py
@@ -188,8 +188,8 @@
# Not prefetched because this should be rare and is a lot of data
# `exists` is faster in theory, but requires everything to be fetched later anyways
if self.photo_set.exists():
- random.seed(self.dirname)
- cover = random.choice(self.photo_set.all())
+ r = random.Random(self.dirname)
+ cover = r.choice(self.photo_set.all())
return cover
def __str__(self):
diff --git a/website/thaliawebsite/templatetags/pick_header_image.py b/website/thaliawebsite/templatetags/pick_header_image.py
--- a/website/thaliawebsite/templatetags/pick_header_image.py
+++ b/website/thaliawebsite/templatetags/pick_header_image.py
@@ -3,19 +3,40 @@
from django import template
from django.contrib.staticfiles.storage import staticfiles_storage
+from django.utils.safestring import mark_safe
register = template.Library()
HEADERS = [
- "img/headers/banner_default.jpg",
- "img/headers/banner2.jpg",
- "img/headers/banner4.jpg",
- "img/headers/banner5.jpg",
- "img/headers/banner6.jpg",
+ "img/headers/banner_default",
+ "img/headers/banner_bicycles",
+ "img/headers/banner_huygenshall",
+ "img/headers/banner_huygensfront",
+ "img/headers/banner_huygens",
+ "img/headers/banner_huygenstent",
+ "img/headers/banner_robot",
+ "img/headers/banner_tent",
+]
+
+HEADERS_FUN = [
+ "img/headers/banner_wine",
+ "img/headers/banner_winetasting",
+ "img/headers/banner_christmas",
+ "img/headers/banner_huygenstent",
+ "img/headers/banner_bingo",
+ "img/headers/banner_food",
]
@register.simple_tag
-def pick_header_image():
+def pick_header_image(type="normal"):
"""Render a random header image."""
- return staticfiles_storage.url(random.choice(HEADERS))
+ if type == "fun":
+ headers = HEADERS_FUN
+ else:
+ headers = HEADERS
+ header = random.choice(headers)
+ header_2k = staticfiles_storage.url(header + "-2k.webp")
+ header_5k = staticfiles_storage.url(header + "-5k.webp")
+
+ return mark_safe(f"{header_2k}, {header_5k} 3x")
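A note on the `random.Random(self.dirname)` change above: `random.seed()` reseeds the module-level generator that all other code shares, whereas a dedicated `random.Random` instance keeps the deterministic cover choice local to the album. A minimal, self-contained sketch of the difference (the album name and file names are made up for illustration):

```python
import random

photos = ["a.jpg", "b.jpg", "c.jpg"]

# Module-level seeding mutates the RNG shared by every other caller:
random.seed("album-2023-intro")
print(random.choice(photos))

# A private generator gives an equally deterministic pick without side effects:
rng = random.Random("album-2023-intro")
print(rng.choice(photos))
```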
| {"golden_diff": "diff --git a/website/photos/models.py b/website/photos/models.py\n--- a/website/photos/models.py\n+++ b/website/photos/models.py\n@@ -188,8 +188,8 @@\n # Not prefetched because this should be rare and is a lot of data\n # `exists` is faster in theory, but requires everything to be fetched later anyways\n if self.photo_set.exists():\n- random.seed(self.dirname)\n- cover = random.choice(self.photo_set.all())\n+ r = random.Random(self.dirname)\n+ cover = r.choice(self.photo_set.all())\n return cover\n \n def __str__(self):\ndiff --git a/website/thaliawebsite/templatetags/pick_header_image.py b/website/thaliawebsite/templatetags/pick_header_image.py\n--- a/website/thaliawebsite/templatetags/pick_header_image.py\n+++ b/website/thaliawebsite/templatetags/pick_header_image.py\n@@ -3,19 +3,40 @@\n \n from django import template\n from django.contrib.staticfiles.storage import staticfiles_storage\n+from django.utils.safestring import mark_safe\n \n register = template.Library()\n \n HEADERS = [\n- \"img/headers/banner_default.jpg\",\n- \"img/headers/banner2.jpg\",\n- \"img/headers/banner4.jpg\",\n- \"img/headers/banner5.jpg\",\n- \"img/headers/banner6.jpg\",\n+ \"img/headers/banner_default\",\n+ \"img/headers/banner_bicycles\",\n+ \"img/headers/banner_huygenshall\",\n+ \"img/headers/banner_huygensfront\",\n+ \"img/headers/banner_huygens\",\n+ \"img/headers/banner_huygenstent\",\n+ \"img/headers/banner_robot\",\n+ \"img/headers/banner_tent\",\n+]\n+\n+HEADERS_FUN = [\n+ \"img/headers/banner_wine\",\n+ \"img/headers/banner_winetasting\",\n+ \"img/headers/banner_christmas\",\n+ \"img/headers/banner_huygenstent\",\n+ \"img/headers/banner_bingo\",\n+ \"img/headers/banner_food\",\n ]\n \n \n @register.simple_tag\n-def pick_header_image():\n+def pick_header_image(type=\"normal\"):\n \"\"\"Render a random header image.\"\"\"\n- return staticfiles_storage.url(random.choice(HEADERS))\n+ if type == \"fun\":\n+ headers = HEADERS_FUN\n+ else:\n+ headers = HEADERS\n+ header = random.choice(headers)\n+ header_2k = staticfiles_storage.url(header + \"-2k.webp\")\n+ header_5k = staticfiles_storage.url(header + \"-5k.webp\")\n+\n+ return mark_safe(f\"{header_2k}, {header_5k} 3x\")\n", "issue": "Change the header photos\n### Is your feature request related to a problem? 
Please describe.\r\nThe header photos are a bit outdated \r\n\r\n### Describe the solution you'd like\r\nChoose some new photos\r\n\r\n### Motivation\r\n<img width=\"1306\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/164710169-e2d4ac27-04f0-48e6-bceb-bc05a1d3c30f.png\">\r\nthese guys are old\r\n<img width=\"1306\" alt=\"image\" src=\"https://user-images.githubusercontent.com/7915741/164710211-ad6fa23f-b021-4dab-9f4f-de45b11c56a2.png\">\r\nthis image is lowres and old\r\n\r\n### Describe alternatives you've considered\r\nNot\r\n\r\n### Additional context\r\nI have access to the RU Brandportal with nice stock images of the university that we can use\n", "before_files": [{"content": "\"\"\"Get a random header image.\"\"\"\nimport random\n\nfrom django import template\nfrom django.contrib.staticfiles.storage import staticfiles_storage\n\nregister = template.Library()\n\nHEADERS = [\n \"img/headers/banner_default.jpg\",\n \"img/headers/banner2.jpg\",\n \"img/headers/banner4.jpg\",\n \"img/headers/banner5.jpg\",\n \"img/headers/banner6.jpg\",\n]\n\n\[email protected]_tag\ndef pick_header_image():\n \"\"\"Render a random header image.\"\"\"\n return staticfiles_storage.url(random.choice(HEADERS))\n", "path": "website/thaliawebsite/templatetags/pick_header_image.py"}, {"content": "import hashlib\nimport logging\nimport os\nimport random\nfrom secrets import token_hex\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Count, IntegerField, Value\nfrom django.db.models.functions import Coalesce\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\n\nfrom queryable_properties.managers import QueryablePropertiesManager\nfrom queryable_properties.properties import AnnotationProperty\nfrom thumbnails.fields import ImageField\n\nfrom members.models import Member\n\nCOVER_FILENAME = \"cover.jpg\"\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef photo_uploadto(instance, filename):\n ext = os.path.splitext(filename)[1]\n return f\"photos/{instance.album.dirname}/{token_hex(8)}{ext}\"\n\n\nclass DuplicatePhotoException(Exception):\n \"\"\"Raised when a photo with the same digest already exists in a given album.\"\"\"\n\n\nclass Photo(models.Model):\n \"\"\"Model for a Photo object.\"\"\"\n\n objects = QueryablePropertiesManager()\n\n album = models.ForeignKey(\n \"Album\", on_delete=models.CASCADE, verbose_name=_(\"album\")\n )\n\n file = ImageField(\n _(\"file\"),\n upload_to=photo_uploadto,\n resize_source_to=\"source\",\n )\n\n rotation = models.IntegerField(\n verbose_name=_(\"rotation\"),\n default=0,\n choices=((x, x) for x in (0, 90, 180, 270)),\n help_text=_(\"This does not modify the original image file.\"),\n )\n\n _digest = models.CharField(\n \"digest\",\n max_length=40,\n blank=True,\n editable=False,\n )\n\n num_likes = AnnotationProperty(\n Coalesce(Count(\"likes\"), Value(0), output_field=IntegerField())\n )\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize Photo object and set the file if it exists.\"\"\"\n super().__init__(*args, **kwargs)\n if self.file:\n self.original_file = self.file.name\n else:\n self.original_file = \"\"\n\n def __str__(self):\n \"\"\"Return the filename of a Photo object.\"\"\"\n return os.path.basename(self.file.name)\n\n def clean(self):\n if not self.file._committed:\n hash_sha1 = hashlib.sha1()\n for chunk in iter(lambda: self.file.read(4096), 
b\"\"):\n hash_sha1.update(chunk)\n digest = hash_sha1.hexdigest()\n self._digest = digest\n\n if (\n Photo.objects.filter(album=self.album, _digest=digest)\n .exclude(pk=self.pk)\n .exists()\n ):\n raise ValidationError(\n {\"file\": \"This photo already exists in this album.\"}\n )\n\n return super().clean()\n\n def delete(self, using=None, keep_parents=False):\n removed = super().delete(using, keep_parents)\n if self.file.name:\n self.file.delete()\n return removed\n\n class Meta:\n \"\"\"Meta class for Photo.\"\"\"\n\n # Photos are created in order of their filename.\n ordering = (\"pk\",)\n\n\nclass Like(models.Model):\n photo = models.ForeignKey(\n Photo, null=False, blank=False, related_name=\"likes\", on_delete=models.CASCADE\n )\n member = models.ForeignKey(\n Member, null=True, blank=False, on_delete=models.SET_NULL\n )\n\n def __str__(self):\n return str(self.member) + \" \" + _(\"likes\") + \" \" + str(self.photo)\n\n class Meta:\n unique_together = [\"photo\", \"member\"]\n\n\nclass Album(models.Model):\n \"\"\"Model for Album objects.\"\"\"\n\n title = models.CharField(\n _(\"title\"),\n blank=True,\n max_length=200,\n help_text=_(\"Leave empty to take over the title of the event\"),\n )\n\n dirname = models.CharField(\n verbose_name=_(\"directory name\"),\n max_length=200,\n )\n\n date = models.DateField(\n verbose_name=_(\"date\"),\n blank=True,\n help_text=_(\"Leave empty to take over the date of the event\"),\n )\n\n slug = models.SlugField(\n verbose_name=_(\"slug\"),\n unique=True,\n )\n\n hidden = models.BooleanField(verbose_name=_(\"hidden\"), default=False)\n\n event = models.ForeignKey(\n \"events.Event\",\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n )\n\n _cover = models.OneToOneField(\n Photo,\n on_delete=models.SET_NULL,\n blank=True,\n null=True,\n related_name=\"covered_album\",\n verbose_name=_(\"cover image\"),\n )\n\n shareable = models.BooleanField(verbose_name=_(\"shareable\"), default=False)\n\n photosdir = \"photos\"\n photospath = os.path.join(settings.MEDIA_ROOT, photosdir)\n\n @cached_property\n def cover(self):\n \"\"\"Return cover of Album.\n\n If a cover is not set, return a random photo or None if there are no photos.\n \"\"\"\n cover = None\n if self._cover is not None:\n return self._cover\n\n # Not prefetched because this should be rare and is a lot of data\n # `exists` is faster in theory, but requires everything to be fetched later anyways\n if self.photo_set.exists():\n random.seed(self.dirname)\n cover = random.choice(self.photo_set.all())\n return cover\n\n def __str__(self):\n \"\"\"Get string representation of Album.\"\"\"\n return f\"{self.date:%Y-%m-%d} {self.title}\"\n\n def get_absolute_url(self):\n \"\"\"Get url of Album.\"\"\"\n return reverse(\"photos:album\", args=[str(self.slug)])\n\n def clean(self):\n super().clean()\n errors = {}\n\n if not self.title and not self.event:\n errors.update(\n {\"title\": _(\"This field is required if there is no event selected.\")}\n )\n\n if not self.date and not self.event:\n errors.update(\n {\"date\": _(\"This field is required if there is no event selected.\")}\n )\n\n if errors:\n raise ValidationError(errors)\n\n def save(self, **kwargs):\n \"\"\"Save album and send appropriate notifications.\"\"\"\n # dirname is only set for new objects, to avoid ever changing it\n if self.pk is None:\n self.dirname = self.slug\n\n if not self.title and self.event:\n self.title = self.event.title\n\n if not self.date:\n self.date = self.event.start.date()\n\n super().save(**kwargs)\n\n 
@property\n def access_token(self):\n \"\"\"Return access token for album.\"\"\"\n return hashlib.sha256(\n f\"{settings.SECRET_KEY}album{self.pk}\".encode()\n ).hexdigest()\n\n class Meta:\n \"\"\"Meta class for Album.\"\"\"\n\n ordering = (\"-date\", \"title\")\n", "path": "website/photos/models.py"}]} | 3,044 | 612 |
gh_patches_debug_14328 | rasdani/github-patches | git_diff | common-workflow-language__cwltool-1239 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Workflow that passes validation fails to pack
## Expected Behavior
If a workflow passes validation (`cwltool --validate`), there should be no errors when packing it.
## Actual Behavior
In this specific case, I tested the workflow https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl .
It passes validation (`cwltool --validate https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl`) but fails when running:
```
cwltool --pack --debug \
https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl > /dev/null
```
Also see CompEpigen/CWLab#38.
(@Athanaseus please have a look.)
## Workflow Code
Please find it at https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl.
## Full Traceback
The error thrown was:
```
INFO /usr/local/bin/cwltool 1.0.20180809224403
ERROR I'm sorry, I couldn't load this CWL file.
The error was:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/cwltool/main.py", line 916, in main
print_pack(loadingContext.loader, processobj, uri, metadata)
File "/usr/local/lib/python3.6/dist-packages/cwltool/main.py", line 563, in print_pack
packed = pack(document_loader, processobj, uri, metadata)
File "/usr/local/lib/python3.6/dist-packages/cwltool/pack.py", line 222, in pack
import_embed(packed, set())
File "/usr/local/lib/python3.6/dist-packages/cwltool/pack.py", line 118, in import_embed
import_embed(d[k], seen)
File "/usr/local/lib/python3.6/dist-packages/cwltool/pack.py", line 104, in import_embed
import_embed(v, seen)
File "/usr/local/lib/python3.6/dist-packages/cwltool/pack.py", line 118, in import_embed
import_embed(d[k], seen)
File "/usr/local/lib/python3.6/dist-packages/cwltool/pack.py", line 108, in import_embed
if d[n] in seen:
TypeError: unhashable type: 'CommentedMap'
```
## Your Environment
* cwltool version: 1.0.20180809224403
</issue>
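The traceback above comes down to using a dict-like value in a `set` membership test: `CommentedMap` subclasses `dict` and is therefore unhashable, so `d[n] in seen` raises as soon as an `id` field holds an embedded mapping rather than a plain string. A minimal sketch of the failure and of the type guard that avoids it (assumes `ruamel.yaml` is installed; the id value is made up):

```python
from ruamel.yaml.comments import CommentedMap

seen = set()
value = CommentedMap()    # an embedded mapping where a plain string id was expected
value["id"] = "#main"

try:
    print(value in seen)  # the membership test needs hash(value)
except TypeError as err:
    print(err)            # unhashable type: 'CommentedMap'

# Only string ids are sensible $import candidates, so checking the type first is enough:
if isinstance(value, str) and value in seen:
    print("already embedded")
```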
<code>
[start of cwltool/pack.py]
1 """Reformat a CWL document and all its references to be a single stream."""
2
3 import copy
4 import urllib
5 from typing import (
6 Any,
7 Callable,
8 Dict,
9 List,
10 MutableMapping,
11 MutableSequence,
12 Optional,
13 Set,
14 Union,
15 cast,
16 )
17
18 from ruamel.yaml.comments import CommentedMap, CommentedSeq
19 from schema_salad.ref_resolver import Loader, SubLoader
20 from schema_salad.sourceline import cmap
21
22 from .process import shortname, uniquename
23
24
25 def flatten_deps(d, files): # type: (Any, Set[str]) -> None
26 if isinstance(d, MutableSequence):
27 for s in d:
28 flatten_deps(s, files)
29 elif isinstance(d, MutableMapping):
30 if d["class"] == "File":
31 files.add(d["location"])
32 if "secondaryFiles" in d:
33 flatten_deps(d["secondaryFiles"], files)
34 if "listing" in d:
35 flatten_deps(d["listing"], files)
36
37
38 LoadRefType = Callable[
39 [Optional[str], str], Union[Dict[str, Any], List[Dict[str, Any]], str, None]
40 ]
41
42
43 def find_run(
44 d, # type: Any
45 loadref, # type: LoadRefType
46 runs, # type: Set[str]
47 ): # type: (...) -> None
48 if isinstance(d, MutableSequence):
49 for s in d:
50 find_run(s, loadref, runs)
51 elif isinstance(d, MutableMapping):
52 if "run" in d and isinstance(d["run"], str):
53 if d["run"] not in runs:
54 runs.add(d["run"])
55 find_run(loadref(None, d["run"]), loadref, runs)
56 for s in d.values():
57 find_run(s, loadref, runs)
58
59
60 def find_ids(d, ids): # type: (Any, Set[str]) -> None
61 if isinstance(d, MutableSequence):
62 for s in d:
63 find_ids(s, ids)
64 elif isinstance(d, MutableMapping):
65 for i in ("id", "name"):
66 if i in d and isinstance(d[i], str):
67 ids.add(d[i])
68 for s in d.values():
69 find_ids(s, ids)
70
71
72 def replace_refs(d, rewrite, stem, newstem):
73 # type: (Any, Dict[str, str], str, str) -> None
74 if isinstance(d, MutableSequence):
75 for s, v in enumerate(d):
76 if isinstance(v, str):
77 if v in rewrite:
78 d[s] = rewrite[v]
79 elif v.startswith(stem):
80 d[s] = newstem + v[len(stem) :]
81 rewrite[v] = d[s]
82 else:
83 replace_refs(v, rewrite, stem, newstem)
84 elif isinstance(d, MutableMapping):
85 for s, v in d.items():
86 if isinstance(v, str):
87 if v in rewrite:
88 d[s] = rewrite[v]
89 elif v.startswith(stem):
90 id_ = v[len(stem) :]
91 # prevent appending newstems if tool is already packed
92 if id_.startswith(newstem.strip("#")):
93 d[s] = "#" + id_
94 else:
95 d[s] = newstem + id_
96 rewrite[v] = d[s]
97 replace_refs(v, rewrite, stem, newstem)
98
99
100 def import_embed(d, seen):
101 # type: (Any, Set[str]) -> None
102 if isinstance(d, MutableSequence):
103 for v in d:
104 import_embed(v, seen)
105 elif isinstance(d, MutableMapping):
106 for n in ("id", "name"):
107 if n in d:
108 if d[n] in seen:
109 this = d[n]
110 d.clear()
111 d["$import"] = this
112 else:
113 this = d[n]
114 seen.add(this)
115 break
116
117 for k in sorted(d.keys()):
118 import_embed(d[k], seen)
119
120
121 def pack(
122 document_loader: Loader,
123 processobj, # type: Union[Dict[str, Any], List[Dict[str, Any]]]
124 uri, # type: str
125 metadata, # type: Dict[str, str]
126 rewrite_out=None, # type: Optional[Dict[str, str]]
127 ): # type: (...) -> Dict[str, Any]
128
129 document_loader = SubLoader(document_loader)
130 document_loader.idx = {}
131 if isinstance(processobj, MutableMapping):
132 document_loader.idx[processobj["id"]] = CommentedMap(processobj.items())
133 elif isinstance(processobj, MutableSequence):
134 _, frag = urllib.parse.urldefrag(uri)
135 for po in processobj:
136 if not frag:
137 if po["id"].endswith("#main"):
138 uri = po["id"]
139 document_loader.idx[po["id"]] = CommentedMap(po.items())
140 document_loader.idx[metadata["id"]] = CommentedMap(metadata.items())
141
142 def loadref(base, uri):
143 # type: (Optional[str], str) -> Union[Dict[str, Any], List[Dict[str, Any]], str, None]
144 return document_loader.resolve_ref(uri, base_url=base)[0]
145
146 ids = set() # type: Set[str]
147 find_ids(processobj, ids)
148
149 runs = {uri}
150 find_run(processobj, loadref, runs)
151
152 for f in runs:
153 find_ids(document_loader.resolve_ref(f)[0], ids)
154
155 names = set() # type: Set[str]
156 if rewrite_out is None:
157 rewrite = {} # type: Dict[str, str]
158 else:
159 rewrite = rewrite_out
160
161 mainpath, _ = urllib.parse.urldefrag(uri)
162
163 def rewrite_id(r, mainuri):
164 # type: (str, str) -> None
165 if r == mainuri:
166 rewrite[r] = "#main"
167 elif r.startswith(mainuri) and r[len(mainuri)] in ("#", "/"):
168 if r[len(mainuri) :].startswith("#main/"):
169 rewrite[r] = "#" + uniquename(r[len(mainuri) + 1 :], names)
170 else:
171 rewrite[r] = "#" + uniquename("main/" + r[len(mainuri) + 1 :], names)
172 else:
173 path, frag = urllib.parse.urldefrag(r)
174 if path == mainpath:
175 rewrite[r] = "#" + uniquename(frag, names)
176 else:
177 if path not in rewrite:
178 rewrite[path] = "#" + uniquename(shortname(path), names)
179
180 sortedids = sorted(ids)
181
182 for r in sortedids:
183 rewrite_id(r, uri)
184
185 packed = CommentedMap(
186 (("$graph", CommentedSeq()), ("cwlVersion", metadata["cwlVersion"]))
187 )
188 namespaces = metadata.get("$namespaces", None)
189
190 schemas = set() # type: Set[str]
191 if "$schemas" in metadata:
192 for each_schema in metadata["$schemas"]:
193 schemas.add(each_schema)
194 for r in sorted(runs):
195 dcr, metadata = document_loader.resolve_ref(r)
196 if isinstance(dcr, CommentedSeq):
197 dcr = dcr[0]
198 dcr = cast(CommentedMap, dcr)
199 if not isinstance(dcr, MutableMapping):
200 continue
201 metadata = cast(Dict[str, Any], metadata)
202 if "$schemas" in metadata:
203 for s in metadata["$schemas"]:
204 schemas.add(s)
205 if dcr.get("class") not in ("Workflow", "CommandLineTool", "ExpressionTool"):
206 continue
207 dc = cast(Dict[str, Any], copy.deepcopy(dcr))
208 v = rewrite[r]
209 dc["id"] = v
210 for n in ("name", "cwlVersion", "$namespaces", "$schemas"):
211 if n in dc:
212 del dc[n]
213 packed["$graph"].append(dc)
214
215 if schemas:
216 packed["$schemas"] = list(schemas)
217
218 for r in list(rewrite.keys()):
219 v = rewrite[r]
220 replace_refs(packed, rewrite, r + "/" if "#" in r else r + "#", v + "/")
221
222 import_embed(packed, set())
223
224 if len(packed["$graph"]) == 1:
225 # duplicate 'cwlVersion' and $schemas inside $graph when there is only
226 # a single item because we will print the contents inside '$graph'
227 # rather than whole dict
228 packed["$graph"][0]["cwlVersion"] = packed["cwlVersion"]
229 if schemas:
230 packed["$graph"][0]["$schemas"] = list(schemas)
231 # always include $namespaces in the #main
232 if namespaces:
233 packed["$graph"][0]["$namespaces"] = namespaces
234
235 return packed
236
[end of cwltool/pack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cwltool/pack.py b/cwltool/pack.py
--- a/cwltool/pack.py
+++ b/cwltool/pack.py
@@ -105,14 +105,15 @@
elif isinstance(d, MutableMapping):
for n in ("id", "name"):
if n in d:
- if d[n] in seen:
- this = d[n]
- d.clear()
- d["$import"] = this
- else:
- this = d[n]
- seen.add(this)
- break
+ if isinstance(d[n], str):
+ if d[n] in seen:
+ this = d[n]
+ d.clear()
+ d["$import"] = this
+ else:
+ this = d[n]
+ seen.add(this)
+ break
for k in sorted(d.keys()):
import_embed(d[k], seen)
| {"golden_diff": "diff --git a/cwltool/pack.py b/cwltool/pack.py\n--- a/cwltool/pack.py\n+++ b/cwltool/pack.py\n@@ -105,14 +105,15 @@\n elif isinstance(d, MutableMapping):\n for n in (\"id\", \"name\"):\n if n in d:\n- if d[n] in seen:\n- this = d[n]\n- d.clear()\n- d[\"$import\"] = this\n- else:\n- this = d[n]\n- seen.add(this)\n- break\n+ if isinstance(d[n], str):\n+ if d[n] in seen:\n+ this = d[n]\n+ d.clear()\n+ d[\"$import\"] = this\n+ else:\n+ this = d[n]\n+ seen.add(this)\n+ break\n \n for k in sorted(d.keys()):\n import_embed(d[k], seen)\n", "issue": "Workflow that passes validation fails to pack\n## Expected Behavior\r\nIf a workflow that passes validation (`cwltool --validate`) there should be no errors when packing.\r\n\r\n## Actual Behavior\r\nIn this specific case, I tested the workflow https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl .\r\n\r\nIt passes the validation (`cwltool --validate https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl`) but fails when running:\r\n```\r\ncwltool --pack --debug \\\r\nhttps://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl > /dev/null\r\n```\r\n\r\nAlso see CompEpigen/CWLab#38.\r\n(@Athanaseus please have a look.)\r\n## Workflow Code\r\nPlease find it at https://raw.githubusercontent.com/Athanaseus/2gc-pipeline/master/selfcal_simulation.cwl.\r\n\r\n## Full Traceback\r\nThe error thrown was:\r\n```\r\nINFO /usr/local/bin/cwltool 1.0.20180809224403\r\nERROR I'm sorry, I couldn't load this CWL file.\r\nThe error was:\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/main.py\", line 916, in main\r\n print_pack(loadingContext.loader, processobj, uri, metadata)\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/main.py\", line 563, in print_pack\r\n packed = pack(document_loader, processobj, uri, metadata)\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/pack.py\", line 222, in pack\r\n import_embed(packed, set())\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/pack.py\", line 118, in import_embed\r\n import_embed(d[k], seen)\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/pack.py\", line 104, in import_embed\r\n import_embed(v, seen)\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/pack.py\", line 118, in import_embed\r\n import_embed(d[k], seen)\r\n File \"/usr/local/lib/python3.6/dist-packages/cwltool/pack.py\", line 108, in import_embed\r\n if d[n] in seen:\r\nTypeError: unhashable type: 'CommentedMap'\r\n```\r\n\r\n## Your Environment\r\n* cwltool version: 1.0.20180809224403\r\n\n", "before_files": [{"content": "\"\"\"Reformat a CWL document and all its references to be a single stream.\"\"\"\n\nimport copy\nimport urllib\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n MutableMapping,\n MutableSequence,\n Optional,\n Set,\n Union,\n cast,\n)\n\nfrom ruamel.yaml.comments import CommentedMap, CommentedSeq\nfrom schema_salad.ref_resolver import Loader, SubLoader\nfrom schema_salad.sourceline import cmap\n\nfrom .process import shortname, uniquename\n\n\ndef flatten_deps(d, files): # type: (Any, Set[str]) -> None\n if isinstance(d, MutableSequence):\n for s in d:\n flatten_deps(s, files)\n elif isinstance(d, MutableMapping):\n if d[\"class\"] == \"File\":\n files.add(d[\"location\"])\n if \"secondaryFiles\" in d:\n flatten_deps(d[\"secondaryFiles\"], files)\n if \"listing\" in d:\n flatten_deps(d[\"listing\"], files)\n\n\nLoadRefType = 
Callable[\n [Optional[str], str], Union[Dict[str, Any], List[Dict[str, Any]], str, None]\n]\n\n\ndef find_run(\n d, # type: Any\n loadref, # type: LoadRefType\n runs, # type: Set[str]\n): # type: (...) -> None\n if isinstance(d, MutableSequence):\n for s in d:\n find_run(s, loadref, runs)\n elif isinstance(d, MutableMapping):\n if \"run\" in d and isinstance(d[\"run\"], str):\n if d[\"run\"] not in runs:\n runs.add(d[\"run\"])\n find_run(loadref(None, d[\"run\"]), loadref, runs)\n for s in d.values():\n find_run(s, loadref, runs)\n\n\ndef find_ids(d, ids): # type: (Any, Set[str]) -> None\n if isinstance(d, MutableSequence):\n for s in d:\n find_ids(s, ids)\n elif isinstance(d, MutableMapping):\n for i in (\"id\", \"name\"):\n if i in d and isinstance(d[i], str):\n ids.add(d[i])\n for s in d.values():\n find_ids(s, ids)\n\n\ndef replace_refs(d, rewrite, stem, newstem):\n # type: (Any, Dict[str, str], str, str) -> None\n if isinstance(d, MutableSequence):\n for s, v in enumerate(d):\n if isinstance(v, str):\n if v in rewrite:\n d[s] = rewrite[v]\n elif v.startswith(stem):\n d[s] = newstem + v[len(stem) :]\n rewrite[v] = d[s]\n else:\n replace_refs(v, rewrite, stem, newstem)\n elif isinstance(d, MutableMapping):\n for s, v in d.items():\n if isinstance(v, str):\n if v in rewrite:\n d[s] = rewrite[v]\n elif v.startswith(stem):\n id_ = v[len(stem) :]\n # prevent appending newstems if tool is already packed\n if id_.startswith(newstem.strip(\"#\")):\n d[s] = \"#\" + id_\n else:\n d[s] = newstem + id_\n rewrite[v] = d[s]\n replace_refs(v, rewrite, stem, newstem)\n\n\ndef import_embed(d, seen):\n # type: (Any, Set[str]) -> None\n if isinstance(d, MutableSequence):\n for v in d:\n import_embed(v, seen)\n elif isinstance(d, MutableMapping):\n for n in (\"id\", \"name\"):\n if n in d:\n if d[n] in seen:\n this = d[n]\n d.clear()\n d[\"$import\"] = this\n else:\n this = d[n]\n seen.add(this)\n break\n\n for k in sorted(d.keys()):\n import_embed(d[k], seen)\n\n\ndef pack(\n document_loader: Loader,\n processobj, # type: Union[Dict[str, Any], List[Dict[str, Any]]]\n uri, # type: str\n metadata, # type: Dict[str, str]\n rewrite_out=None, # type: Optional[Dict[str, str]]\n): # type: (...) 
-> Dict[str, Any]\n\n document_loader = SubLoader(document_loader)\n document_loader.idx = {}\n if isinstance(processobj, MutableMapping):\n document_loader.idx[processobj[\"id\"]] = CommentedMap(processobj.items())\n elif isinstance(processobj, MutableSequence):\n _, frag = urllib.parse.urldefrag(uri)\n for po in processobj:\n if not frag:\n if po[\"id\"].endswith(\"#main\"):\n uri = po[\"id\"]\n document_loader.idx[po[\"id\"]] = CommentedMap(po.items())\n document_loader.idx[metadata[\"id\"]] = CommentedMap(metadata.items())\n\n def loadref(base, uri):\n # type: (Optional[str], str) -> Union[Dict[str, Any], List[Dict[str, Any]], str, None]\n return document_loader.resolve_ref(uri, base_url=base)[0]\n\n ids = set() # type: Set[str]\n find_ids(processobj, ids)\n\n runs = {uri}\n find_run(processobj, loadref, runs)\n\n for f in runs:\n find_ids(document_loader.resolve_ref(f)[0], ids)\n\n names = set() # type: Set[str]\n if rewrite_out is None:\n rewrite = {} # type: Dict[str, str]\n else:\n rewrite = rewrite_out\n\n mainpath, _ = urllib.parse.urldefrag(uri)\n\n def rewrite_id(r, mainuri):\n # type: (str, str) -> None\n if r == mainuri:\n rewrite[r] = \"#main\"\n elif r.startswith(mainuri) and r[len(mainuri)] in (\"#\", \"/\"):\n if r[len(mainuri) :].startswith(\"#main/\"):\n rewrite[r] = \"#\" + uniquename(r[len(mainuri) + 1 :], names)\n else:\n rewrite[r] = \"#\" + uniquename(\"main/\" + r[len(mainuri) + 1 :], names)\n else:\n path, frag = urllib.parse.urldefrag(r)\n if path == mainpath:\n rewrite[r] = \"#\" + uniquename(frag, names)\n else:\n if path not in rewrite:\n rewrite[path] = \"#\" + uniquename(shortname(path), names)\n\n sortedids = sorted(ids)\n\n for r in sortedids:\n rewrite_id(r, uri)\n\n packed = CommentedMap(\n ((\"$graph\", CommentedSeq()), (\"cwlVersion\", metadata[\"cwlVersion\"]))\n )\n namespaces = metadata.get(\"$namespaces\", None)\n\n schemas = set() # type: Set[str]\n if \"$schemas\" in metadata:\n for each_schema in metadata[\"$schemas\"]:\n schemas.add(each_schema)\n for r in sorted(runs):\n dcr, metadata = document_loader.resolve_ref(r)\n if isinstance(dcr, CommentedSeq):\n dcr = dcr[0]\n dcr = cast(CommentedMap, dcr)\n if not isinstance(dcr, MutableMapping):\n continue\n metadata = cast(Dict[str, Any], metadata)\n if \"$schemas\" in metadata:\n for s in metadata[\"$schemas\"]:\n schemas.add(s)\n if dcr.get(\"class\") not in (\"Workflow\", \"CommandLineTool\", \"ExpressionTool\"):\n continue\n dc = cast(Dict[str, Any], copy.deepcopy(dcr))\n v = rewrite[r]\n dc[\"id\"] = v\n for n in (\"name\", \"cwlVersion\", \"$namespaces\", \"$schemas\"):\n if n in dc:\n del dc[n]\n packed[\"$graph\"].append(dc)\n\n if schemas:\n packed[\"$schemas\"] = list(schemas)\n\n for r in list(rewrite.keys()):\n v = rewrite[r]\n replace_refs(packed, rewrite, r + \"/\" if \"#\" in r else r + \"#\", v + \"/\")\n\n import_embed(packed, set())\n\n if len(packed[\"$graph\"]) == 1:\n # duplicate 'cwlVersion' and $schemas inside $graph when there is only\n # a single item because we will print the contents inside '$graph'\n # rather than whole dict\n packed[\"$graph\"][0][\"cwlVersion\"] = packed[\"cwlVersion\"]\n if schemas:\n packed[\"$graph\"][0][\"$schemas\"] = list(schemas)\n # always include $namespaces in the #main\n if namespaces:\n packed[\"$graph\"][0][\"$namespaces\"] = namespaces\n\n return packed\n", "path": "cwltool/pack.py"}]} | 3,587 | 203 |
gh_patches_debug_26061 | rasdani/github-patches | git_diff | huggingface__transformers-7153 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Convert 12-1 and 6-1 en-de models from AllenNLP
https://github.com/jungokasai/deep-shallow#download-trained-deep-shallow-models
+ These should be FSMT models, so they can be part of #6940 or done after.
+ They should be uploaded to the AllenNLP namespace. If stas takes this, they can start in stas/ and I will move them.
+ model card(s) should link to the original repo and paper.
+ I hope the same en-de tokenizer has already been ported.
+ It would be interesting to compare BLEU to the initial models in that PR. There is no ensemble, so we should be able to reproduce the reported scores pretty well.
+ **Ideally** this requires 0 lines of checked-in Python code, besides maybe an integration test.
Desired Signature:
```python
model = FSMT.from_pretrained('allen_nlp/en-de-12-1')
```
Weights can be downloaded with gdown https://pypi.org/project/gdown/
```bash
pip install gdown
gdown https://drive.google.com/uc?id=1x_G2cjvM1nW5hjAB8-vWxRqtQTlmIaQU
```
@stas00 if you are blocked in the late stages of #6940 and have extra cycles, you could give this a whirl. We could also wait for that to be finalized and then either of us can take this.
</issue>
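For a sense of what the finished port should support, here is a rough usage sketch built on the FSMT classes already used elsewhere in this entry; the `allenai/wmt19-de-en-6-6-base` checkpoint name is an assumption until the converted weights are actually uploaded:

```python
# Sketch only: assumes the converted checkpoint has been published under the allenai/ namespace.
from transformers.tokenization_fsmt import FSMTTokenizer
from transformers.modeling_fsmt import FSMTForConditionalGeneration

mname = "allenai/wmt19-de-en-6-6-base"  # assumed final name
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input_ids = tokenizer.encode("Maschinelles Lernen ist großartig, nicht wahr?", return_tensors="pt")
outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # -> English translation
```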
<code>
[start of scripts/fsmt/gen-card-allenai-wmt19.py]
1 #!/usr/bin/env python
2
3 # Usage:
4 # ./gen-card-allenai-wmt19.py
5
6 import os
7 from pathlib import Path
8
9 def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
10
11 texts = {
12 "en": "Machine learning is great, isn't it?",
13 "ru": "Машинное обучение - это здорово, не так ли?",
14 "de": "Maschinelles Lernen ist großartig, nicht wahr?",
15 }
16
17 # BLEU scores as follows:
18 # "pair": [fairseq, transformers]
19 scores = {
20 "wmt19-de-en-6-6-base": [0, 38.37],
21 "wmt19-de-en-6-6-big": [0, 39.90],
22 }
23 pair = f"{src_lang}-{tgt_lang}"
24
25 readme = f"""
26 ---
27
28 language: {src_lang}, {tgt_lang}
29 thumbnail:
30 tags:
31 - translation
32 - wmt19
33 - allenai
34 license: Apache 2.0
35 datasets:
36 - http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))
37 metrics:
38 - http://www.statmt.org/wmt19/metrics-task.html
39 ---
40
41 # FSMT
42
43 ## Model description
44
45 This is a ported version of fairseq-based wmt19 transformer created by [jungokasai]](https://github.com/jungokasai/) @ allenai for {src_lang}-{tgt_lang}.
46
47 2 models are available:
48
49 * [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big)
50 * [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base)
51
52 ## Intended uses & limitations
53
54 #### How to use
55
56 ```python
57 from transformers.tokenization_fsmt import FSMTTokenizer
58 from transformers.modeling_fsmt import FSMTForConditionalGeneration
59 mname = "allenai/{model_name}"
60 tokenizer = FSMTTokenizer.from_pretrained(mname)
61 model = FSMTForConditionalGeneration.from_pretrained(mname)
62
63 input = "{texts[src_lang]}"
64 input_ids = tokenizer.encode(input, return_tensors="pt")
65 outputs = model.generate(input_ids)
66 decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
67 print(decoded) # {texts[tgt_lang]}
68
69 ```
70
71 #### Limitations and bias
72
73
74 ## Training data
75
76 Pretrained weights were left identical to the original model released by the researcher.
77
78 ## Eval results
79
80 Here are the BLEU scores:
81
82 model | transformers
83 -------|---------|----------
84 -------|---------
85
86 The score was calculated using this code:
87
88 ```bash
89 git clone https://github.com/huggingface/transformers
90 cd transformers
91 export PAIR={pair}
92 export DATA_DIR=data/$PAIR
93 export SAVE_DIR=data/$PAIR
94 export BS=8
95 export NUM_BEAMS=5
96 mkdir -p $DATA_DIR
97 sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
98 sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
99 echo $PAIR
100 PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
101 ```
102
103 """
104 model_card_dir.mkdir(parents=True, exist_ok=True)
105 path = os.path.join(model_card_dir, "README.md")
106 print(f"Generating {path}")
107 with open(path, "w", encoding="utf-8") as f:
108 f.write(readme)
109
110 # make sure we are under the root of the project
111 repo_dir = Path(__file__).resolve().parent.parent.parent
112 model_cards_dir = repo_dir / "model_cards"
113
114 for model_name in ["wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big"]:
115 model_card_dir = model_cards_dir / "allenai" / model_name
116 write_model_card(model_card_dir, src_lang="de", tgt_lang="en", model_name=model_name)
117
[end of scripts/fsmt/gen-card-allenai-wmt19.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/fsmt/gen-card-allenai-wmt19.py b/scripts/fsmt/gen-card-allenai-wmt19.py
--- a/scripts/fsmt/gen-card-allenai-wmt19.py
+++ b/scripts/fsmt/gen-card-allenai-wmt19.py
@@ -42,13 +42,26 @@
## Model description
-This is a ported version of fairseq-based wmt19 transformer created by [jungokasai]](https://github.com/jungokasai/) @ allenai for {src_lang}-{tgt_lang}.
+This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
+
+For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
2 models are available:
* [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big)
* [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base)
+```
+@misc{{kasai2020deep,
+ title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
+ author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
+ year={{2020}},
+ eprint={{2006.10369}},
+ archivePrefix={{arXiv}},
+ primaryClass={{cs.CL}}
+}}
+```
+
## Intended uses & limitations
#### How to use
@@ -73,7 +86,7 @@
## Training data
-Pretrained weights were left identical to the original model released by the researcher.
+Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
| {"golden_diff": "diff --git a/scripts/fsmt/gen-card-allenai-wmt19.py b/scripts/fsmt/gen-card-allenai-wmt19.py\n--- a/scripts/fsmt/gen-card-allenai-wmt19.py\n+++ b/scripts/fsmt/gen-card-allenai-wmt19.py\n@@ -42,13 +42,26 @@\n \n ## Model description\n \n-This is a ported version of fairseq-based wmt19 transformer created by [jungokasai]](https://github.com/jungokasai/) @ allenai for {src_lang}-{tgt_lang}.\n+This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n+\n+For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n \n 2 models are available:\n \n * [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big)\n * [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base)\n \n+```\n+@misc{{kasai2020deep,\n+ title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n+ author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n+ year={{2020}},\n+ eprint={{2006.10369}},\n+ archivePrefix={{arXiv}},\n+ primaryClass={{cs.CL}}\n+}}\n+```\n+\n ## Intended uses & limitations\n \n #### How to use\n@@ -73,7 +86,7 @@\n \n ## Training data\n \n-Pretrained weights were left identical to the original model released by the researcher.\n+Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n \n ## Eval results\n", "issue": "Convert 12-1 and 6-1 en-de models from AllenNLP\nhttps://github.com/jungokasai/deep-shallow#download-trained-deep-shallow-models\r\n\r\n+ These should be FSMT models, so can be part of #6940 or done after. \r\n+ They should be uploaded to the AllenNLP namespace. If stas takes this, they can start in stas/ and I will move them.\r\n+ model card(s) should link to the original repo and paper.\r\n+ I hope same en-de tokenizer already ported.\r\n+ Would be interesting to compare BLEU to the initial models in that PR. There is no ensemble so we should be able to reported scores pretty well.\r\n+ **Ideally** this requires 0 lines of checked in python code, besides maybe an integration test.\r\n\r\nDesired Signature:\r\n```python\r\nmodel = FSMT.from_pretrained('allen_nlp/en-de-12-1')\r\n```\r\n\r\nWeights can be downloaded with gdown https://pypi.org/project/gdown/\r\n\r\n```bash\r\npip install gdown\r\ngdown https://drive.google.com/uc?id=1x_G2cjvM1nW5hjAB8-vWxRqtQTlmIaQU\r\n```\r\n\r\n@stas00 if you are blocked in the late stages of #6940 and have extra cycles, you could give this a whirl. 
We could also wait for that to be finalized and then either of us can take this.\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Usage:\n# ./gen-card-allenai-wmt19.py\n\nimport os\nfrom pathlib import Path\n\ndef write_model_card(model_card_dir, src_lang, tgt_lang, model_name):\n\n texts = {\n \"en\": \"Machine learning is great, isn't it?\",\n \"ru\": \"\u041c\u0430\u0448\u0438\u043d\u043d\u043e\u0435 \u043e\u0431\u0443\u0447\u0435\u043d\u0438\u0435 - \u044d\u0442\u043e \u0437\u0434\u043e\u0440\u043e\u0432\u043e, \u043d\u0435 \u0442\u0430\u043a \u043b\u0438?\",\n \"de\": \"Maschinelles Lernen ist gro\u00dfartig, nicht wahr?\",\n }\n\n # BLUE scores as follows:\n # \"pair\": [fairseq, transformers]\n scores = {\n \"wmt19-de-en-6-6-base\": [0, 38.37],\n \"wmt19-de-en-6-6-big\": [0, 39.90],\n }\n pair = f\"{src_lang}-{tgt_lang}\"\n\n readme = f\"\"\"\n---\n\nlanguage: {src_lang}, {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- allenai\nlicense: Apache 2.0\ndatasets:\n- http://www.statmt.org/wmt19/ ([test-set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561))\nmetrics:\n- http://www.statmt.org/wmt19/metrics-task.html\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based wmt19 transformer created by [jungokasai]](https://github.com/jungokasai/) @ allenai for {src_lang}-{tgt_lang}.\n\n2 models are available:\n\n* [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big)\n* [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers.tokenization_fsmt import FSMTTokenizer\nfrom transformers.modeling_fsmt import FSMTForConditionalGeneration\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by the researcher.\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][1]}\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n\"\"\"\n model_card_dir.mkdir(parents=True, exist_ok=True)\n path = os.path.join(model_card_dir, \"README.md\")\n print(f\"Generating {path}\")\n with open(path, \"w\", encoding=\"utf-8\") as f:\n f.write(readme)\n\n# make sure we are under the root of the project\nrepo_dir = Path(__file__).resolve().parent.parent.parent\nmodel_cards_dir = repo_dir / \"model_cards\"\n\nfor model_name in [\"wmt19-de-en-6-6-base\", \"wmt19-de-en-6-6-big\"]:\n model_card_dir = 
model_cards_dir / \"allenai\" / model_name\n write_model_card(model_card_dir, src_lang=\"de\", tgt_lang=\"en\", model_name=model_name)\n", "path": "scripts/fsmt/gen-card-allenai-wmt19.py"}]} | 2,110 | 508 |
gh_patches_debug_57931 | rasdani/github-patches | git_diff | scrapy__scrapy-1786 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PY3: error decoding Content-Disposition header
This request
```
scrapy shell 'http://npe.com.cn/plus/save_to_doc.php?id=1666'
```
raises this error:
```
Traceback (most recent call last):
File "/Users/kmike/envs/dl/bin/scrapy", line 9, in <module>
load_entry_point('Scrapy', 'console_scripts', 'scrapy')()
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 142, in execute
_run_print_help(parser, _run_command, cmd, args, opts)
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 88, in _run_print_help
func(*a, **kw)
File "/Users/kmike/svn/scrapy/scrapy/cmdline.py", line 149, in _run_command
cmd.run(args, opts)
File "/Users/kmike/svn/scrapy/scrapy/commands/shell.py", line 71, in run
shell.start(url=url)
File "/Users/kmike/svn/scrapy/scrapy/shell.py", line 47, in start
self.fetch(url, spider)
File "/Users/kmike/svn/scrapy/scrapy/shell.py", line 112, in fetch
reactor, self._schedule, request, spider)
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/threads.py", line 122, in blockingCallFromThread
result.raiseException()
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py", line 368, in raiseException
raise self.value.with_traceback(self.tb)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte
```
The error points to a wrong location (similar to #1760); the real traceback is:
```
Traceback (most recent call last):
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py", line 1126, in _inlineCallbacks
result = result.throwExceptionIntoGenerator(g)
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py", line 389, in throwExceptionIntoGenerator
return g.throw(self.type, self.value, self.tb)
File "/Users/kmike/svn/scrapy/scrapy/core/downloader/middleware.py", line 43, in process_request
defer.returnValue((yield download_func(request=request,spider=spider)))
File "/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py", line 588, in _runCallbacks
current.result = callback(current.result, *args, **kw)
File "/Users/kmike/svn/scrapy/scrapy/core/downloader/handlers/http11.py", line 272, in _cb_bodydone
respcls = responsetypes.from_args(headers=headers, url=url)
File "/Users/kmike/svn/scrapy/scrapy/responsetypes.py", line 110, in from_args
cls = self.from_headers(headers)
File "/Users/kmike/svn/scrapy/scrapy/responsetypes.py", line 78, in from_headers
cls = self.from_content_disposition(headers[b'Content-Disposition'])
File "/Users/kmike/svn/scrapy/scrapy/responsetypes.py", line 62, in from_content_disposition
filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
File "/Users/kmike/svn/scrapy/scrapy/utils/python.py", line 129, in to_native_str
return to_unicode(text, encoding, errors)
File "/Users/kmike/svn/scrapy/scrapy/utils/python.py", line 107, in to_unicode
return text.decode(encoding, errors)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte
```
It looks like Content-Disposition is decoded using UTF-8, but the header bytes were not UTF-8-encoded.
</issue>
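To picture the failure: the header bytes are not valid UTF-8, so a strict `.decode('utf-8')` raises before the filename can be extracted. A minimal, self-contained sketch of a more tolerant decode that mirrors the `from_content_disposition` logic shown below (the sample bytes and the encoding list are illustrative assumptions, not Scrapy's actual fix):

```python
def filename_from_content_disposition(raw: bytes) -> str:
    # Try a few plausible encodings; latin-1 always succeeds as a last resort.
    for encoding in ("utf-8", "gb18030", "latin-1"):
        try:
            text = raw.decode(encoding)
            break
        except UnicodeDecodeError:
            continue
    return text.split(";")[1].split("=")[1].strip("\"'")

header = b'attachment; filename="\xb8\xbd\xbc\xfe.doc"'  # not valid UTF-8
print(filename_from_content_disposition(header))
```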
<code>
[start of scrapy/responsetypes.py]
1 """
2 This module implements a class which returns the appropriate Response class
3 based on different criteria.
4 """
5 from __future__ import absolute_import
6 from mimetypes import MimeTypes
7 from pkgutil import get_data
8 from io import StringIO
9 import six
10
11 from scrapy.http import Response
12 from scrapy.utils.misc import load_object
13 from scrapy.utils.python import isbinarytext, to_bytes, to_native_str
14
15
16 class ResponseTypes(object):
17
18 CLASSES = {
19 'text/html': 'scrapy.http.HtmlResponse',
20 'application/atom+xml': 'scrapy.http.XmlResponse',
21 'application/rdf+xml': 'scrapy.http.XmlResponse',
22 'application/rss+xml': 'scrapy.http.XmlResponse',
23 'application/xhtml+xml': 'scrapy.http.HtmlResponse',
24 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
25 'application/xml': 'scrapy.http.XmlResponse',
26 'application/json': 'scrapy.http.TextResponse',
27 'application/x-json': 'scrapy.http.TextResponse',
28 'application/javascript': 'scrapy.http.TextResponse',
29 'application/x-javascript': 'scrapy.http.TextResponse',
30 'text/xml': 'scrapy.http.XmlResponse',
31 'text/*': 'scrapy.http.TextResponse',
32 }
33
34 def __init__(self):
35 self.classes = {}
36 self.mimetypes = MimeTypes()
37 mimedata = get_data('scrapy', 'mime.types').decode('utf8')
38 self.mimetypes.readfp(StringIO(mimedata))
39 for mimetype, cls in six.iteritems(self.CLASSES):
40 self.classes[mimetype] = load_object(cls)
41
42 def from_mimetype(self, mimetype):
43 """Return the most appropriate Response class for the given mimetype"""
44 if mimetype is None:
45 return Response
46 elif mimetype in self.classes:
47 return self.classes[mimetype]
48 else:
49 basetype = "%s/*" % mimetype.split('/')[0]
50 return self.classes.get(basetype, Response)
51
52 def from_content_type(self, content_type, content_encoding=None):
53 """Return the most appropriate Response class from an HTTP Content-Type
54 header """
55 if content_encoding:
56 return Response
57 mimetype = to_native_str(content_type).split(';')[0].strip().lower()
58 return self.from_mimetype(mimetype)
59
60 def from_content_disposition(self, content_disposition):
61 try:
62 filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
63 filename = filename.strip('"\'')
64 return self.from_filename(filename)
65 except IndexError:
66 return Response
67
68 def from_headers(self, headers):
69 """Return the most appropriate Response class by looking at the HTTP
70 headers"""
71 cls = Response
72 if b'Content-Type' in headers:
73 cls = self.from_content_type(
74 content_type=headers[b'Content-type'],
75 content_encoding=headers.get(b'Content-Encoding')
76 )
77 if cls is Response and b'Content-Disposition' in headers:
78 cls = self.from_content_disposition(headers[b'Content-Disposition'])
79 return cls
80
81 def from_filename(self, filename):
82 """Return the most appropriate Response class from a file name"""
83 mimetype, encoding = self.mimetypes.guess_type(filename)
84 if mimetype and not encoding:
85 return self.from_mimetype(mimetype)
86 else:
87 return Response
88
89 def from_body(self, body):
90 """Try to guess the appropriate response based on the body content.
91 This method is a bit magic and could be improved in the future, but
92 it's not meant to be used except for special cases where response types
 93         cannot be guessed using more straightforward methods."""
94 chunk = body[:5000]
95 chunk = to_bytes(chunk)
96 if isbinarytext(chunk):
97 return self.from_mimetype('application/octet-stream')
98 elif b"<html>" in chunk.lower():
99 return self.from_mimetype('text/html')
100 elif b"<?xml" in chunk.lower():
101 return self.from_mimetype('text/xml')
102 else:
103 return self.from_mimetype('text')
104
105 def from_args(self, headers=None, url=None, filename=None, body=None):
106 """Guess the most appropriate Response class based on
107 the given arguments."""
108 cls = Response
109 if headers is not None:
110 cls = self.from_headers(headers)
111 if cls is Response and url is not None:
112 cls = self.from_filename(url)
113 if cls is Response and filename is not None:
114 cls = self.from_filename(filename)
115 if cls is Response and body is not None:
116 cls = self.from_body(body)
117 return cls
118
119 responsetypes = ResponseTypes()
120
[end of scrapy/responsetypes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py
--- a/scrapy/responsetypes.py
+++ b/scrapy/responsetypes.py
@@ -59,7 +59,8 @@
def from_content_disposition(self, content_disposition):
try:
- filename = to_native_str(content_disposition).split(';')[1].split('=')[1]
+ filename = to_native_str(content_disposition,
+ encoding='latin-1', errors='replace').split(';')[1].split('=')[1]
filename = filename.strip('"\'')
return self.from_filename(filename)
except IndexError:
| {"golden_diff": "diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py\n--- a/scrapy/responsetypes.py\n+++ b/scrapy/responsetypes.py\n@@ -59,7 +59,8 @@\n \n def from_content_disposition(self, content_disposition):\n try:\n- filename = to_native_str(content_disposition).split(';')[1].split('=')[1]\n+ filename = to_native_str(content_disposition,\n+ encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n", "issue": "PY3: error decoding Content-Disposition header\nThis request\n\n```\nscrapy shell 'http://npe.com.cn/plus/save_to_doc.php?id=1666'\n```\n\nraises this error:\n\n```\nTraceback (most recent call last):\n File \"/Users/kmike/envs/dl/bin/scrapy\", line 9, in <module>\n load_entry_point('Scrapy', 'console_scripts', 'scrapy')()\n File \"/Users/kmike/svn/scrapy/scrapy/cmdline.py\", line 142, in execute\n _run_print_help(parser, _run_command, cmd, args, opts)\n File \"/Users/kmike/svn/scrapy/scrapy/cmdline.py\", line 88, in _run_print_help\n func(*a, **kw)\n File \"/Users/kmike/svn/scrapy/scrapy/cmdline.py\", line 149, in _run_command\n cmd.run(args, opts)\n File \"/Users/kmike/svn/scrapy/scrapy/commands/shell.py\", line 71, in run\n shell.start(url=url)\n File \"/Users/kmike/svn/scrapy/scrapy/shell.py\", line 47, in start\n self.fetch(url, spider)\n File \"/Users/kmike/svn/scrapy/scrapy/shell.py\", line 112, in fetch\n reactor, self._schedule, request, spider)\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/threads.py\", line 122, in blockingCallFromThread\n result.raiseException()\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py\", line 368, in raiseException\n raise self.value.with_traceback(self.tb)\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte\n```\n\nThe error points to a wrong location (similar to #1760); the real traceback is\n\n```\nTraceback (most recent call last):\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py\", line 1126, in _inlineCallbacks\n result = result.throwExceptionIntoGenerator(g)\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/python/failure.py\", line 389, in throwExceptionIntoGenerator\n return g.throw(self.type, self.value, self.tb)\n File \"/Users/kmike/svn/scrapy/scrapy/core/downloader/middleware.py\", line 43, in process_request\n defer.returnValue((yield download_func(request=request,spider=spider)))\n File \"/Users/kmike/envs/dl/lib/python3.5/site-packages/Twisted-15.5.0-py3.5.egg/twisted/internet/defer.py\", line 588, in _runCallbacks\n current.result = callback(current.result, *args, **kw)\n File \"/Users/kmike/svn/scrapy/scrapy/core/downloader/handlers/http11.py\", line 272, in _cb_bodydone\n respcls = responsetypes.from_args(headers=headers, url=url)\n File \"/Users/kmike/svn/scrapy/scrapy/responsetypes.py\", line 110, in from_args\n cls = self.from_headers(headers)\n File \"/Users/kmike/svn/scrapy/scrapy/responsetypes.py\", line 78, in from_headers\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n File \"/Users/kmike/svn/scrapy/scrapy/responsetypes.py\", line 62, in from_content_disposition\n filename = to_native_str(content_disposition).split(';')[1].split('=')[1]\n File \"/Users/kmike/svn/scrapy/scrapy/utils/python.py\", line 129, in to_native_str\n 
return to_unicode(text, encoding, errors)\n File \"/Users/kmike/svn/scrapy/scrapy/utils/python.py\", line 107, in to_unicode\n return text.decode(encoding, errors)\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xb8 in position 25: invalid start byte\n```\n\nIt looks like Content-Disposition is decoded using utf-8, but the encoding was not UTF-8.\n\n", "before_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom __future__ import absolute_import\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\nimport six\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import isbinarytext, to_bytes, to_native_str\n\n\nclass ResponseTypes(object):\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in six.iteritems(self.CLASSES):\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = to_native_str(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_native_str(content_disposition).split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could 
be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if isbinarytext(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}]} | 2,817 | 141 |
gh_patches_debug_64 | rasdani/github-patches | git_diff | cython__cython-5647 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Unable to run `Cythonize.py`
### Describe the bug
I tried to run `Cythonize.py` but it failed because of the relative imports in the file:
```
Traceback (most recent call last):
File "/tmp/cython/Cython/Build/Cythonize.py", line 10, in <module>
from .Dependencies import cythonize, extended_iglob
ImportError: attempted relative import with no known parent package
```
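This is standard Python behaviour for a file that sits inside a package but is executed directly: `__package__` is empty, so `from .Dependencies import ...` has nothing to resolve against. A small sketch of the difference, assuming the `/tmp/cython` checkout from the steps below (shown for context only, not as the fix chosen upstream):

```python
import subprocess
import sys

REPO = "/tmp/cython"  # checkout location used in the reproduction steps

# Direct execution: the script has no parent package, so the relative import fails.
direct = subprocess.run([sys.executable, "Cython/Build/Cythonize.py", "--help"], cwd=REPO)

# Module execution keeps the package context, so the same code imports cleanly.
as_module = subprocess.run([sys.executable, "-m", "Cython.Build.Cythonize", "--help"], cwd=REPO)

print(direct.returncode, as_module.returncode)  # expect: non-zero, then 0
```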
### Code to reproduce the behaviour:
```bash
# download repo
cd /tmp/
git clone https://github.com/cython/cython.git
# make file runnable
cd /tmp/cython/Cython/Build/
chmod +x Cythonize.py
# run file
./Cythonize.py
```
### Expected behaviour
`Cythonize.py` has the shebang so I expected it to be executable.
### OS
Linux
### Python version
3.10.12
### Cython version
main branch from git
### Additional context
_No response_
</issue>
<code>
[start of Cython/Build/Cythonize.py]
1 #!/usr/bin/env python
2
3 from __future__ import absolute_import, print_function
4
5 import os
6 import shutil
7 import tempfile
8 from distutils.core import setup
9
10 from .Dependencies import cythonize, extended_iglob
11 from ..Utils import is_package_dir
12 from ..Compiler import Options
13
14 try:
15 import multiprocessing
16 parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
17 except ImportError:
18 multiprocessing = None
19 parallel_compiles = 0
20
21
22 class _FakePool(object):
23 def map_async(self, func, args):
24 try:
25 from itertools import imap
26 except ImportError:
27 imap=map
28 for _ in imap(func, args):
29 pass
30
31 def close(self):
32 pass
33
34 def terminate(self):
35 pass
36
37 def join(self):
38 pass
39
40
41 def find_package_base(path):
42 base_dir, package_path = os.path.split(path)
43 while is_package_dir(base_dir):
44 base_dir, parent = os.path.split(base_dir)
45 package_path = '%s/%s' % (parent, package_path)
46 return base_dir, package_path
47
48 def cython_compile(path_pattern, options):
49 all_paths = map(os.path.abspath, extended_iglob(path_pattern))
50 _cython_compile_files(all_paths, options)
51
52 def _cython_compile_files(all_paths, options):
53 pool = None
54 try:
55 for path in all_paths:
56 if options.build_inplace:
57 base_dir = path
58 while not os.path.isdir(base_dir) or is_package_dir(base_dir):
59 base_dir = os.path.dirname(base_dir)
60 else:
61 base_dir = None
62
63 if os.path.isdir(path):
64 # recursively compiling a package
65 paths = [os.path.join(path, '**', '*.{py,pyx}')]
66 else:
67 # assume it's a file(-like thing)
68 paths = [path]
69
70 ext_modules = cythonize(
71 paths,
72 nthreads=options.parallel,
73 exclude_failures=options.keep_going,
74 exclude=options.excludes,
75 compiler_directives=options.directives,
76 compile_time_env=options.compile_time_env,
77 force=options.force,
78 quiet=options.quiet,
79 depfile=options.depfile,
80 **options.options)
81
82 if ext_modules and options.build:
83 if len(ext_modules) > 1 and options.parallel > 1:
84 if pool is None:
85 try:
86 pool = multiprocessing.Pool(options.parallel)
87 except OSError:
88 pool = _FakePool()
89 pool.map_async(run_distutils, [
90 (base_dir, [ext]) for ext in ext_modules])
91 else:
92 run_distutils((base_dir, ext_modules))
93 except:
94 if pool is not None:
95 pool.terminate()
96 raise
97 else:
98 if pool is not None:
99 pool.close()
100 pool.join()
101
102
103 def run_distutils(args):
104 base_dir, ext_modules = args
105 script_args = ['build_ext', '-i']
106 cwd = os.getcwd()
107 temp_dir = None
108 try:
109 if base_dir:
110 os.chdir(base_dir)
111 temp_dir = tempfile.mkdtemp(dir=base_dir)
112 script_args.extend(['--build-temp', temp_dir])
113 setup(
114 script_name='setup.py',
115 script_args=script_args,
116 ext_modules=ext_modules,
117 )
118 finally:
119 if base_dir:
120 os.chdir(cwd)
121 if temp_dir and os.path.isdir(temp_dir):
122 shutil.rmtree(temp_dir)
123
124
125 def create_args_parser():
126 from argparse import ArgumentParser, RawDescriptionHelpFormatter
127 from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction
128
129 parser = ArgumentParser(
130 formatter_class=RawDescriptionHelpFormatter,
131 epilog="""\
132 Environment variables:
133 CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
134 of modification times and changes.
135 Environment variables accepted by setuptools are supported to configure the C compiler and build:
136 https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
137 )
138
139 parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
140 dest='directives', default={}, type=str,
141 action=ParseDirectivesAction,
142 help='set a compiler directive')
143 parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',
144 dest='compile_time_env', default={}, type=str,
145 action=ParseCompileTimeEnvAction,
146 help='set a compile time environment variable')
147 parser.add_argument('-s', '--option', metavar='NAME=VALUE',
148 dest='options', default={}, type=str,
149 action=ParseOptionsAction,
150 help='set a cythonize option')
151 parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,
152 help='use Python 2 syntax mode by default')
153 parser.add_argument('-3', dest='language_level', action='store_const', const=3,
154 help='use Python 3 syntax mode by default')
155 parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',
156 help='use Python 3 syntax mode by default')
157 parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',
158 help='Produce a colorized HTML version of the source.')
159 parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',
160 help='Produce a colorized HTML version of the source '
161 'which includes entire generated C/C++-code.')
162 parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',
163 action='append', default=[],
164 help='exclude certain file patterns from the compilation')
165
166 parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,
167 help='build extension modules using distutils')
168 parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,
169 help='build extension modules in place using distutils (implies -b)')
170 parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',
171 type=int, default=parallel_compiles,
172 help=('run builds in N parallel jobs (default: %d)' %
173 parallel_compiles or 1))
174 parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,
175 help='force recompilation')
176 parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,
177 help='be less verbose during compilation')
178
179 parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,
180 help='increase Python compatibility by ignoring some compile time errors')
181 parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,
182 help='compile as much as possible, ignore compilation failures')
183 parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,
184 help='strip docstrings')
185 parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')
186 parser.add_argument('sources', nargs='*')
187 return parser
188
189
190 def parse_args_raw(parser, args):
191 options, unknown = parser.parse_known_args(args)
192 sources = options.sources
193 # if positional arguments were interspersed
194 # some of them are in unknown
195 for option in unknown:
196 if option.startswith('-'):
197 parser.error("unknown option "+option)
198 else:
199 sources.append(option)
200 del options.sources
201 return (options, sources)
202
203
204 def parse_args(args):
205 parser = create_args_parser()
206 options, args = parse_args_raw(parser, args)
207
208 if not args:
209 parser.error("no source files provided")
210 if options.build_inplace:
211 options.build = True
212 if multiprocessing is None:
213 options.parallel = 0
214 if options.language_level:
215 assert options.language_level in (2, 3, '3str')
216 options.options['language_level'] = options.language_level
217
218 if options.lenient:
219 # increase Python compatibility by ignoring compile time errors
220 Options.error_on_unknown_names = False
221 Options.error_on_uninitialized = False
222
223 if options.annotate:
224 Options.annotate = options.annotate
225
226 if options.no_docstrings:
227 Options.docstrings = False
228
229 return options, args
230
231
232 def main(args=None):
233 options, paths = parse_args(args)
234
235 all_paths = []
236 for path in paths:
237 expanded_path = [os.path.abspath(p) for p in extended_iglob(path)]
238 if not expanded_path:
239 import sys
240 print("{}: No such file or directory: '{}'".format(sys.argv[0], path), file=sys.stderr)
241 sys.exit(1)
242 all_paths.extend(expanded_path)
243 _cython_compile_files(all_paths, options)
244
245
246 if __name__ == '__main__':
247 main()
248
[end of Cython/Build/Cythonize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py
--- a/Cython/Build/Cythonize.py
+++ b/Cython/Build/Cythonize.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
from __future__ import absolute_import, print_function
import os
| {"golden_diff": "diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py\n--- a/Cython/Build/Cythonize.py\n+++ b/Cython/Build/Cythonize.py\n@@ -1,5 +1,3 @@\n-#!/usr/bin/env python\n-\n from __future__ import absolute_import, print_function\n \n import os\n", "issue": "[BUG] Unable to run `Cythonize.py`\n### Describe the bug\n\nI tried to run `Cythonize.py` but it failed because of relative imports in file:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/cython/Cython/Build/Cythonize.py\", line 10, in <module>\r\n from .Dependencies import cythonize, extended_iglob\r\nImportError: attempted relative import with no known parent package\r\n```\n\n### Code to reproduce the behaviour:\n\n```bash\r\n# download repo\r\ncd /tmp/\r\ngit clone https://github.com/cython/cython.git\r\n\r\n# make file runable\r\ncd /tmp/cython/Cython/Build/\r\nchmod +x Cythonize.py\r\n\r\n# run file\r\n./Cythonize.py\r\n```\n\n### Expected behaviour\n\n`Cythonize.py` has the shebang so I expected it to be executable.\n\n### OS\n\nLinux\n\n### Python version\n\n3.10.12\n\n### Cython version\n\nmain branch from git\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport shutil\nimport tempfile\nfrom distutils.core import setup\n\nfrom .Dependencies import cythonize, extended_iglob\nfrom ..Utils import is_package_dir\nfrom ..Compiler import Options\n\ntry:\n import multiprocessing\n parallel_compiles = int(multiprocessing.cpu_count() * 1.5)\nexcept ImportError:\n multiprocessing = None\n parallel_compiles = 0\n\n\nclass _FakePool(object):\n def map_async(self, func, args):\n try:\n from itertools import imap\n except ImportError:\n imap=map\n for _ in imap(func, args):\n pass\n\n def close(self):\n pass\n\n def terminate(self):\n pass\n\n def join(self):\n pass\n\n\ndef find_package_base(path):\n base_dir, package_path = os.path.split(path)\n while is_package_dir(base_dir):\n base_dir, parent = os.path.split(base_dir)\n package_path = '%s/%s' % (parent, package_path)\n return base_dir, package_path\n\ndef cython_compile(path_pattern, options):\n all_paths = map(os.path.abspath, extended_iglob(path_pattern))\n _cython_compile_files(all_paths, options)\n\ndef _cython_compile_files(all_paths, options):\n pool = None\n try:\n for path in all_paths:\n if options.build_inplace:\n base_dir = path\n while not os.path.isdir(base_dir) or is_package_dir(base_dir):\n base_dir = os.path.dirname(base_dir)\n else:\n base_dir = None\n\n if os.path.isdir(path):\n # recursively compiling a package\n paths = [os.path.join(path, '**', '*.{py,pyx}')]\n else:\n # assume it's a file(-like thing)\n paths = [path]\n\n ext_modules = cythonize(\n paths,\n nthreads=options.parallel,\n exclude_failures=options.keep_going,\n exclude=options.excludes,\n compiler_directives=options.directives,\n compile_time_env=options.compile_time_env,\n force=options.force,\n quiet=options.quiet,\n depfile=options.depfile,\n **options.options)\n\n if ext_modules and options.build:\n if len(ext_modules) > 1 and options.parallel > 1:\n if pool is None:\n try:\n pool = multiprocessing.Pool(options.parallel)\n except OSError:\n pool = _FakePool()\n pool.map_async(run_distutils, [\n (base_dir, [ext]) for ext in ext_modules])\n else:\n run_distutils((base_dir, ext_modules))\n except:\n if pool is not None:\n pool.terminate()\n raise\n else:\n if pool is not None:\n pool.close()\n pool.join()\n\n\ndef run_distutils(args):\n 
base_dir, ext_modules = args\n script_args = ['build_ext', '-i']\n cwd = os.getcwd()\n temp_dir = None\n try:\n if base_dir:\n os.chdir(base_dir)\n temp_dir = tempfile.mkdtemp(dir=base_dir)\n script_args.extend(['--build-temp', temp_dir])\n setup(\n script_name='setup.py',\n script_args=script_args,\n ext_modules=ext_modules,\n )\n finally:\n if base_dir:\n os.chdir(cwd)\n if temp_dir and os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n\n\ndef create_args_parser():\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n epilog=\"\"\"\\\nEnvironment variables:\n CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless\n of modification times and changes.\n Environment variables accepted by setuptools are supported to configure the C compiler and build:\n https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options\"\"\"\n )\n\n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n action=ParseDirectivesAction,\n help='set a compiler directive')\n parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',\n dest='compile_time_env', default={}, type=str,\n action=ParseCompileTimeEnvAction,\n help='set a compile time environment variable')\n parser.add_argument('-s', '--option', metavar='NAME=VALUE',\n dest='options', default={}, type=str,\n action=ParseOptionsAction,\n help='set a cythonize option')\n parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,\n help='use Python 2 syntax mode by default')\n parser.add_argument('-3', dest='language_level', action='store_const', const=3,\n help='use Python 3 syntax mode by default')\n parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',\n help='use Python 3 syntax mode by default')\n parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',\n help='Produce a colorized HTML version of the source.')\n parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',\n help='Produce a colorized HTML version of the source '\n 'which includes entire generated C/C++-code.')\n parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',\n action='append', default=[],\n help='exclude certain file patterns from the compilation')\n\n parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,\n help='build extension modules using distutils')\n parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,\n help='build extension modules in place using distutils (implies -b)')\n parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',\n type=int, default=parallel_compiles,\n help=('run builds in N parallel jobs (default: %d)' %\n parallel_compiles or 1))\n parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,\n help='force recompilation')\n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,\n help='be less verbose during compilation')\n\n parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,\n help='increase Python compatibility by ignoring some compile time errors')\n parser.add_argument('-k', '--keep-going', 
dest='keep_going', action='store_true', default=None,\n help='compile as much as possible, ignore compilation failures')\n parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,\n help='strip docstrings')\n parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')\n parser.add_argument('sources', nargs='*')\n return parser\n\n\ndef parse_args_raw(parser, args):\n options, unknown = parser.parse_known_args(args)\n sources = options.sources\n # if positional arguments were interspersed\n # some of them are in unknown\n for option in unknown:\n if option.startswith('-'):\n parser.error(\"unknown option \"+option)\n else:\n sources.append(option)\n del options.sources\n return (options, sources)\n\n\ndef parse_args(args):\n parser = create_args_parser()\n options, args = parse_args_raw(parser, args)\n\n if not args:\n parser.error(\"no source files provided\")\n if options.build_inplace:\n options.build = True\n if multiprocessing is None:\n options.parallel = 0\n if options.language_level:\n assert options.language_level in (2, 3, '3str')\n options.options['language_level'] = options.language_level\n\n if options.lenient:\n # increase Python compatibility by ignoring compile time errors\n Options.error_on_unknown_names = False\n Options.error_on_uninitialized = False\n\n if options.annotate:\n Options.annotate = options.annotate\n\n if options.no_docstrings:\n Options.docstrings = False\n\n return options, args\n\n\ndef main(args=None):\n options, paths = parse_args(args)\n\n all_paths = []\n for path in paths:\n expanded_path = [os.path.abspath(p) for p in extended_iglob(path)]\n if not expanded_path:\n import sys\n print(\"{}: No such file or directory: '{}'\".format(sys.argv[0], path), file=sys.stderr)\n sys.exit(1)\n all_paths.extend(expanded_path)\n _cython_compile_files(all_paths, options)\n\n\nif __name__ == '__main__':\n main()\n", "path": "Cython/Build/Cythonize.py"}]} | 3,354 | 76 |
gh_patches_debug_4677 | rasdani/github-patches | git_diff | svthalia__concrexit-2011 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use API v2 for pizza orders
### Is your feature request related to a problem? Please describe.
For some reason (#1931) the payment widget of the order overview page is broken.
### Describe the solution you'd like
Instead of debugging the existing code, I prefer switching to API v2 immediately.
### Motivation
Better
### Describe alternatives you've considered
Debugging
### Additional context
</issue>
<code>
[start of website/pizzas/api/v2/admin/urls.py]
1 """Events app API v2 urls."""
2 from django.urls import path
3
4 from events.api.v2.admin.views import EventRegistrationAdminFieldsView
5 from pizzas.api.v2.admin.views import (
6 FoodEventAdminListView,
7 FoodEventAdminDetailView,
8 FoodOrderAdminDetailView,
9 FoodEventOrdersAdminListView,
10 ProductsAdminListView,
11 ProductAdminDetailAPIView,
12 )
13
14 app_name = "food"
15
16 urlpatterns = [
17 path("food/events/", FoodEventAdminListView.as_view(), name="food-events-index"),
18 path(
19 "food/events/<int:pk>/",
20 FoodEventAdminDetailView.as_view(),
21 name="food-event-detail",
22 ),
23 path(
24 "food/events/<int:pk>/orders/",
25 FoodEventOrdersAdminListView.as_view(),
26 name="food-event-orders",
27 ),
28 path(
29 "food/events/<int:event_id>/orders/<int:pk>/",
30 FoodOrderAdminDetailView.as_view(),
31 name="event-registration-detail",
32 ),
33 path("food/products/", ProductsAdminListView.as_view(), name="food-products-index"),
34 path(
35 "food/products/<int:pk>/",
36 ProductAdminDetailAPIView.as_view(),
37 name="food-product-detail",
38 ),
39 ]
40
[end of website/pizzas/api/v2/admin/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/pizzas/api/v2/admin/urls.py b/website/pizzas/api/v2/admin/urls.py
--- a/website/pizzas/api/v2/admin/urls.py
+++ b/website/pizzas/api/v2/admin/urls.py
@@ -28,7 +28,7 @@
path(
"food/events/<int:event_id>/orders/<int:pk>/",
FoodOrderAdminDetailView.as_view(),
- name="event-registration-detail",
+ name="food-event-order-detail",
),
path("food/products/", ProductsAdminListView.as_view(), name="food-products-index"),
path(
| {"golden_diff": "diff --git a/website/pizzas/api/v2/admin/urls.py b/website/pizzas/api/v2/admin/urls.py\n--- a/website/pizzas/api/v2/admin/urls.py\n+++ b/website/pizzas/api/v2/admin/urls.py\n@@ -28,7 +28,7 @@\n path(\n \"food/events/<int:event_id>/orders/<int:pk>/\",\n FoodOrderAdminDetailView.as_view(),\n- name=\"event-registration-detail\",\n+ name=\"food-event-order-detail\",\n ),\n path(\"food/products/\", ProductsAdminListView.as_view(), name=\"food-products-index\"),\n path(\n", "issue": "Use API v2 for pizza orders\n### Is your feature request related to a problem? Please describe.\r\nFor some reason (#1931) the payment widget of the order overview page is broken. \r\n\r\n### Describe the solution you'd like\r\nInstead of debugging the existing code, I prefer switching API v2 immedaitely.\r\n\r\n### Motivation\r\nBetter\r\n\r\n### Describe alternatives you've considered\r\nDebugging \r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "\"\"\"Events app API v2 urls.\"\"\"\nfrom django.urls import path\n\nfrom events.api.v2.admin.views import EventRegistrationAdminFieldsView\nfrom pizzas.api.v2.admin.views import (\n FoodEventAdminListView,\n FoodEventAdminDetailView,\n FoodOrderAdminDetailView,\n FoodEventOrdersAdminListView,\n ProductsAdminListView,\n ProductAdminDetailAPIView,\n)\n\napp_name = \"food\"\n\nurlpatterns = [\n path(\"food/events/\", FoodEventAdminListView.as_view(), name=\"food-events-index\"),\n path(\n \"food/events/<int:pk>/\",\n FoodEventAdminDetailView.as_view(),\n name=\"food-event-detail\",\n ),\n path(\n \"food/events/<int:pk>/orders/\",\n FoodEventOrdersAdminListView.as_view(),\n name=\"food-event-orders\",\n ),\n path(\n \"food/events/<int:event_id>/orders/<int:pk>/\",\n FoodOrderAdminDetailView.as_view(),\n name=\"event-registration-detail\",\n ),\n path(\"food/products/\", ProductsAdminListView.as_view(), name=\"food-products-index\"),\n path(\n \"food/products/<int:pk>/\",\n ProductAdminDetailAPIView.as_view(),\n name=\"food-product-detail\",\n ),\n]\n", "path": "website/pizzas/api/v2/admin/urls.py"}]} | 953 | 134 |
gh_patches_debug_39976 | rasdani/github-patches | git_diff | carpentries__amy-2250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot view upcoming teaching opportunities
Hi @maneesha
Could you please help me with this?
The blue button at https://amy.carpentries.org/dashboard/instructor/
as shown in the screenshot below

leads to "Server Error (500)" at https://amy.carpentries.org/dashboard/instructor/teaching_opportunities/
as shown in the screenshot below

Tried a few times at different hours.
Thank you in advance.
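
For what it's worth, a crash like this would be consistent with the date-overlap checks in the code below being run against an event that has no start or end date yet. A minimal, self-contained sketch of that failure mode (hypothetical data, not a confirmed root cause):

```python
from datetime import date

scheduled = {"start": date(2022, 9, 1), "end": date(2022, 9, 2)}
unscheduled = {"start": None, "end": None}  # e.g. an event still being planned

try:
    # Same shape of comparison as get_event_conflicts() / get_events_nearby().
    conflict = (scheduled["start"] <= unscheduled["end"]
                and scheduled["end"] >= unscheduled["start"])
except TypeError as exc:
    print("overlap check crashed:", exc)  # '<=' not supported ... 'NoneType'

# Guarding on missing dates first keeps the page rendering instead of erroring out.
if unscheduled["start"] and unscheduled["end"]:
    print("safe to compare")
```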
</issue>
<code>
[start of amy/recruitment/templatetags/instructorrecruitment.py]
1 from datetime import timedelta
2 from typing import Sequence, Union
3
4 from django import template
5 from django.conf import settings
6
7 from recruitment.models import (
8 InstructorRecruitment,
9 InstructorRecruitmentSignup,
10 RecruitmentPriority,
11 )
12 from workshops.models import Event
13
14 register = template.Library()
15
16
17 @register.simple_tag
18 def is_instructor_recruitment_enabled() -> bool:
19 try:
20 return bool(settings.INSTRUCTOR_RECRUITMENT_ENABLED)
21 except AttributeError:
22 return False
23
24
25 @register.simple_tag
26 def get_event_conflicts(events: Sequence[Event], event: Event) -> list[Event]:
27 conflicts: list[Event] = []
28
29 for event_to_check in events:
30 if event == event_to_check:
31 continue
32
33 if event.start <= event_to_check.end and event.end >= event_to_check.start:
34 conflicts.append(event_to_check)
35
36 return conflicts
37
38
39 @register.simple_tag
40 def get_events_nearby(
41 events: Sequence[Event], event: Event, days_before: int = 14, days_after: int = 14
42 ) -> list[Event]:
43 nearby: list[Event] = []
44
45 for event_to_check in events:
46 if event == event_to_check:
47 continue
48
49 if (
50 event.start - timedelta(days=days_before) <= event_to_check.end
51 and event.end + timedelta(days=days_after) >= event_to_check.start
52 ):
53 nearby.append(event_to_check)
54
55 return nearby
56
57
58 @register.simple_tag
59 def get_signup_conflicts(
60 signups: Sequence[InstructorRecruitmentSignup], recruitment: InstructorRecruitment
61 ) -> list[InstructorRecruitmentSignup]:
62 conflicts: list[InstructorRecruitmentSignup] = []
63
64 for signup_to_check in signups:
65 if recruitment == signup_to_check.recruitment:
66 continue
67
68 if (
69 recruitment.event.start <= signup_to_check.recruitment.event.end
70 and recruitment.event.end >= signup_to_check.recruitment.event.start
71 ):
72 conflicts.append(signup_to_check)
73
74 return conflicts
75
76
77 @register.filter
78 def priority_label(value: Union[int, RecruitmentPriority]) -> str:
79 return RecruitmentPriority(value).label
80
[end of amy/recruitment/templatetags/instructorrecruitment.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/amy/recruitment/templatetags/instructorrecruitment.py b/amy/recruitment/templatetags/instructorrecruitment.py
--- a/amy/recruitment/templatetags/instructorrecruitment.py
+++ b/amy/recruitment/templatetags/instructorrecruitment.py
@@ -23,13 +23,21 @@
@register.simple_tag
-def get_event_conflicts(events: Sequence[Event], event: Event) -> list[Event]:
+def get_event_conflicts(events_to_check: Sequence[Event], event: Event) -> list[Event]:
conflicts: list[Event] = []
- for event_to_check in events:
+ # event must have start and end dates, otherwise we can't get conflicts
+ if not (event.start and event.end):
+ return conflicts
+
+ for event_to_check in events_to_check:
if event == event_to_check:
continue
+ # event getting checked must have start and end dates
+ if not (event_to_check.start and event_to_check.end):
+ continue
+
if event.start <= event_to_check.end and event.end >= event_to_check.start:
conflicts.append(event_to_check)
@@ -38,14 +46,26 @@
@register.simple_tag
def get_events_nearby(
- events: Sequence[Event], event: Event, days_before: int = 14, days_after: int = 14
+ events_to_check: Sequence[Event],
+ event: Event,
+ days_before: int = 14,
+ days_after: int = 14,
) -> list[Event]:
+ """Get events nearby another event time-wise."""
nearby: list[Event] = []
- for event_to_check in events:
+ # event must have start and end dates, otherwise we can't get nearby events
+ if not (event.start and event.end):
+ return nearby
+
+ for event_to_check in events_to_check:
if event == event_to_check:
continue
+ # event getting checked must have start and end dates
+ if not (event_to_check.start and event_to_check.end):
+ continue
+
if (
event.start - timedelta(days=days_before) <= event_to_check.end
and event.end + timedelta(days=days_after) >= event_to_check.start
@@ -57,14 +77,26 @@
@register.simple_tag
def get_signup_conflicts(
- signups: Sequence[InstructorRecruitmentSignup], recruitment: InstructorRecruitment
+ signups_to_check: Sequence[InstructorRecruitmentSignup],
+ recruitment: InstructorRecruitment,
) -> list[InstructorRecruitmentSignup]:
conflicts: list[InstructorRecruitmentSignup] = []
- for signup_to_check in signups:
+ # recruitment event must have start and end dates, otherwise we can't get conflicts
+ if not (recruitment.event.start and recruitment.event.end):
+ return conflicts
+
+ for signup_to_check in signups_to_check:
if recruitment == signup_to_check.recruitment:
continue
+ # event getting checked must have start and end dates
+ if not (
+ signup_to_check.recruitment.event.start
+ and signup_to_check.recruitment.event.end
+ ):
+ continue
+
if (
recruitment.event.start <= signup_to_check.recruitment.event.end
and recruitment.event.end >= signup_to_check.recruitment.event.start
| {"golden_diff": "diff --git a/amy/recruitment/templatetags/instructorrecruitment.py b/amy/recruitment/templatetags/instructorrecruitment.py\n--- a/amy/recruitment/templatetags/instructorrecruitment.py\n+++ b/amy/recruitment/templatetags/instructorrecruitment.py\n@@ -23,13 +23,21 @@\n \n \n @register.simple_tag\n-def get_event_conflicts(events: Sequence[Event], event: Event) -> list[Event]:\n+def get_event_conflicts(events_to_check: Sequence[Event], event: Event) -> list[Event]:\n conflicts: list[Event] = []\n \n- for event_to_check in events:\n+ # event must have start and end dates, otherwise we can't get conflicts\n+ if not (event.start and event.end):\n+ return conflicts\n+\n+ for event_to_check in events_to_check:\n if event == event_to_check:\n continue\n \n+ # event getting checked must have start and end dates\n+ if not (event_to_check.start and event_to_check.end):\n+ continue\n+\n if event.start <= event_to_check.end and event.end >= event_to_check.start:\n conflicts.append(event_to_check)\n \n@@ -38,14 +46,26 @@\n \n @register.simple_tag\n def get_events_nearby(\n- events: Sequence[Event], event: Event, days_before: int = 14, days_after: int = 14\n+ events_to_check: Sequence[Event],\n+ event: Event,\n+ days_before: int = 14,\n+ days_after: int = 14,\n ) -> list[Event]:\n+ \"\"\"Get events nearby another event time-wise.\"\"\"\n nearby: list[Event] = []\n \n- for event_to_check in events:\n+ # event must have start and end dates, otherwise we can't get nearby events\n+ if not (event.start and event.end):\n+ return nearby\n+\n+ for event_to_check in events_to_check:\n if event == event_to_check:\n continue\n \n+ # event getting checked must have start and end dates\n+ if not (event_to_check.start and event_to_check.end):\n+ continue\n+\n if (\n event.start - timedelta(days=days_before) <= event_to_check.end\n and event.end + timedelta(days=days_after) >= event_to_check.start\n@@ -57,14 +77,26 @@\n \n @register.simple_tag\n def get_signup_conflicts(\n- signups: Sequence[InstructorRecruitmentSignup], recruitment: InstructorRecruitment\n+ signups_to_check: Sequence[InstructorRecruitmentSignup],\n+ recruitment: InstructorRecruitment,\n ) -> list[InstructorRecruitmentSignup]:\n conflicts: list[InstructorRecruitmentSignup] = []\n \n- for signup_to_check in signups:\n+ # recruitment event must have start and end dates, otherwise we can't get conflicts\n+ if not (recruitment.event.start and recruitment.event.end):\n+ return conflicts\n+\n+ for signup_to_check in signups_to_check:\n if recruitment == signup_to_check.recruitment:\n continue\n \n+ # event getting checked must have start and end dates\n+ if not (\n+ signup_to_check.recruitment.event.start\n+ and signup_to_check.recruitment.event.end\n+ ):\n+ continue\n+\n if (\n recruitment.event.start <= signup_to_check.recruitment.event.end\n and recruitment.event.end >= signup_to_check.recruitment.event.start\n", "issue": "Cannot view upcoming teaching opportunities\nHi @maneesha \r\nCould you please help me with this?\r\n\r\nThe blue button at https://amy.carpentries.org/dashboard/instructor/\r\nas shown in the screenshot below\r\n\r\n\r\nleads to \"Server Error (500)\" at https://amy.carpentries.org/dashboard/instructor/teaching_opportunities/\r\nas shown in the screenshot below\r\n\r\n\r\nTried a few times at different hours.\r\n\r\nThank you in advance.\n", "before_files": [{"content": "from datetime import timedelta\nfrom typing import Sequence, Union\n\nfrom django import template\nfrom django.conf import settings\n\nfrom 
recruitment.models import (\n InstructorRecruitment,\n InstructorRecruitmentSignup,\n RecruitmentPriority,\n)\nfrom workshops.models import Event\n\nregister = template.Library()\n\n\[email protected]_tag\ndef is_instructor_recruitment_enabled() -> bool:\n try:\n return bool(settings.INSTRUCTOR_RECRUITMENT_ENABLED)\n except AttributeError:\n return False\n\n\[email protected]_tag\ndef get_event_conflicts(events: Sequence[Event], event: Event) -> list[Event]:\n conflicts: list[Event] = []\n\n for event_to_check in events:\n if event == event_to_check:\n continue\n\n if event.start <= event_to_check.end and event.end >= event_to_check.start:\n conflicts.append(event_to_check)\n\n return conflicts\n\n\[email protected]_tag\ndef get_events_nearby(\n events: Sequence[Event], event: Event, days_before: int = 14, days_after: int = 14\n) -> list[Event]:\n nearby: list[Event] = []\n\n for event_to_check in events:\n if event == event_to_check:\n continue\n\n if (\n event.start - timedelta(days=days_before) <= event_to_check.end\n and event.end + timedelta(days=days_after) >= event_to_check.start\n ):\n nearby.append(event_to_check)\n\n return nearby\n\n\[email protected]_tag\ndef get_signup_conflicts(\n signups: Sequence[InstructorRecruitmentSignup], recruitment: InstructorRecruitment\n) -> list[InstructorRecruitmentSignup]:\n conflicts: list[InstructorRecruitmentSignup] = []\n\n for signup_to_check in signups:\n if recruitment == signup_to_check.recruitment:\n continue\n\n if (\n recruitment.event.start <= signup_to_check.recruitment.event.end\n and recruitment.event.end >= signup_to_check.recruitment.event.start\n ):\n conflicts.append(signup_to_check)\n\n return conflicts\n\n\[email protected]\ndef priority_label(value: Union[int, RecruitmentPriority]) -> str:\n return RecruitmentPriority(value).label\n", "path": "amy/recruitment/templatetags/instructorrecruitment.py"}]} | 1,383 | 766 |
gh_patches_debug_38989 | rasdani/github-patches | git_diff | google__mobly-370 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Socket errors thrown from device obj are not marked with device tag
We should wrap `socket.error` with a child class of `DeviceError`.
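A rough sketch of the wrapping idea (class and function names below are illustrative only, not the final API):

```python
import socket


class Error(Exception):
    """Stand-in for errors.DeviceError: prefixes messages with the device tag."""

    def __init__(self, ad, msg):
        super().__init__('%s %s' % (ad, msg))


class AppConnectionError(Error):
    """Hypothetical subclass raised when the socket to the app cannot be used."""


def connect(ad, host_port, timeout=60):
    try:
        return socket.create_connection(('localhost', host_port), timeout)
    except socket.error as e:
        # Re-raise with the device tag so logs show which device failed.
        raise AppConnectionError(ad, 'failed to connect to port %s: %s' % (host_port, e))
```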
</issue>
<code>
[start of mobly/controllers/android_device_lib/jsonrpc_client_base.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Base class for clients that communicate with apps over a JSON RPC interface.
15
16 The JSON protocol expected by this module is:
17
18 .. code-block:: json
19
20 Request:
21 {
22 "id": <monotonically increasing integer containing the ID of
23 this request>
24 "method": <string containing the name of the method to execute>
25 "params": <JSON array containing the arguments to the method>
26 }
27
28 Response:
29 {
30 "id": <int id of request that this response maps to>,
31 "result": <Arbitrary JSON object containing the result of
32 executing the method. If the method could not be
33 executed or returned void, contains 'null'.>,
34 "error": <String containing the error thrown by executing the
35 method. If no error occurred, contains 'null'.>
36 "callback": <String that represents a callback ID used to
37 identify events associated with a particular
38 CallbackHandler object.>
39 }
40 """
41
42 from builtins import str
43
44 import json
45 import logging
46 import socket
47 import threading
48
49 from mobly.controllers.android_device_lib import callback_handler
50 from mobly.controllers.android_device_lib import errors
51
52 # UID of the 'unknown' jsonrpc session. Will cause creation of a new session.
53 UNKNOWN_UID = -1
54
55 # Maximum time to wait for the socket to open on the device.
56 _SOCKET_CONNECTION_TIMEOUT = 60
57
58 # Maximum time to wait for a response message on the socket.
59 _SOCKET_READ_TIMEOUT = callback_handler.MAX_TIMEOUT
60
61
62 class Error(errors.DeviceError):
63 pass
64
65
66 class AppStartError(Error):
67 """Raised when the app is not able to be started."""
68
69
70 class AppRestoreConnectionError(Error):
71 """Raised when failed to restore app from disconnection."""
72
73
74 class ApiError(Error):
75 """Raised when remote API reports an error."""
76
77
78 class ProtocolError(Error):
79 """Raised when there is some error in exchanging data with server."""
80 NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'
81 NO_RESPONSE_FROM_SERVER = 'No response from server.'
82 MISMATCHED_API_ID = 'Mismatched API id.'
83
84
85 class JsonRpcCommand(object):
86 """Commands that can be invoked on all jsonrpc clients.
87
88 INIT: Initializes a new session.
89 CONTINUE: Creates a connection.
90 """
91 INIT = 'initiate'
92 CONTINUE = 'continue'
93
94
95 class JsonRpcClientBase(object):
96 """Base class for jsonrpc clients that connect to remote servers.
97
98 Connects to a remote device running a jsonrpc-compatible app. Before opening
99 a connection a port forward must be setup to go over usb. This be done using
100 adb.forward([local, remote]). Once the port has been forwarded it can be
101 used in this object as the port of communication.
102
103 Attributes:
104 host_port: (int) The host port of this RPC client.
105 device_port: (int) The device port of this RPC client.
106 app_name: (str) The user-visible name of the app being communicated
107 with.
108 uid: (int) The uid of this session.
109 """
110
111 def __init__(self, app_name, ad):
112 """
113 Args:
114 app_name: (str) The user-visible name of the app being communicated
115 with.
116 ad: (AndroidDevice) The device object associated with a client.
117 """
118 self.host_port = None
119 self.device_port = None
120 self.app_name = app_name
121 self._ad = ad
122 self.log = self._ad.log
123 self.uid = None
124 self._client = None # prevent close errors on connect failure
125 self._conn = None
126 self._counter = None
127 self._lock = threading.Lock()
128 self._event_client = None
129
130 def __del__(self):
131 self.disconnect()
132
133 # Methods to be implemented by subclasses.
134
135 def start_app_and_connect(self):
136 """Starts the server app on the android device and connects to it.
137
138 After this, the self.host_port and self.device_port attributes must be
139 set.
140
141 Must be implemented by subclasses.
142
143 Raises:
144 AppStartError: When the app was not able to be started.
145 """
146 raise NotImplementedError()
147
148 def stop_app(self):
149 """Kills any running instance of the app.
150
151 Must be implemented by subclasses.
152 """
153 raise NotImplementedError()
154
155 def restore_app_connection(self, port=None):
156 """Reconnects to the app after device USB was disconnected.
157
158 Instead of creating new instance of the client:
159 - Uses the given port (or finds a new available host_port if none is
160 given).
161 - Tries to connect to remote server with selected port.
162
163 Must be implemented by subclasses.
164
165 Args:
166 port: If given, this is the host port from which to connect to remote
167 device port. If not provided, find a new available port as host
168 port.
169
170 Raises:
171 AppRestoreConnectionError: When the app was not able to be
172 reconnected.
173 """
174 raise NotImplementedError()
175
176 def _start_event_client(self):
177 """Starts a separate JsonRpc client to the same session for propagating
178 events.
179
180 This is an optional function that should only implement if the client
181 utilizes the snippet event mechanism.
182
183 Returns:
184 A JsonRpc Client object that connects to the same session as the
185 one on which this function is called.
186 """
187 raise NotImplementedError()
188
189 # Rest of the client methods.
190
191 def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):
192 """Opens a connection to a JSON RPC server.
193
194 Opens a connection to a remote client. The connection attempt will time
195 out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. Each
196 subsequent operation over this socket will time out after
197 _SOCKET_READ_TIMEOUT seconds as well.
198
199 Args:
200 uid: int, The uid of the session to join, or UNKNOWN_UID to start a
201 new session.
202 cmd: JsonRpcCommand, The command to use for creating the connection.
203
204 Raises:
205 IOError: Raised when the socket times out from io error
206             socket.timeout: Raised when the socket waits too long for connection.
207 ProtocolError: Raised when there is an error in the protocol.
208 """
209 self._counter = self._id_counter()
210 self._conn = socket.create_connection(('localhost', self.host_port),
211 _SOCKET_CONNECTION_TIMEOUT)
212 self._conn.settimeout(_SOCKET_READ_TIMEOUT)
213 self._client = self._conn.makefile(mode='brw')
214
215 resp = self._cmd(cmd, uid)
216 if not resp:
217 raise ProtocolError(
218 self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
219 result = json.loads(str(resp, encoding='utf8'))
220 if result['status']:
221 self.uid = result['uid']
222 else:
223 self.uid = UNKNOWN_UID
224
225 def disconnect(self):
226 """Close the connection to the remote client."""
227 if self._conn:
228 self._conn.close()
229 self._conn = None
230
231 def _cmd(self, command, uid=None):
232 """Send a command to the server.
233
234 Args:
235 command: str, The name of the command to execute.
236 uid: int, the uid of the session to send the command to.
237
238 Returns:
239 The line that was written back.
240 """
241 if not uid:
242 uid = self.uid
243 self._client.write(
244 json.dumps({
245 'cmd': command,
246 'uid': uid
247 }).encode("utf8") + b'\n')
248 self._client.flush()
249 return self._client.readline()
250
251 def _rpc(self, method, *args):
252 """Sends an rpc to the app.
253
254 Args:
255 method: str, The name of the method to execute.
256 args: any, The args of the method.
257
258 Returns:
259 The result of the rpc.
260
261 Raises:
262 ProtocolError: Something went wrong with the protocol.
263 ApiError: The rpc went through, however executed with errors.
264 """
265 with self._lock:
266 apiid = next(self._counter)
267 data = {'id': apiid, 'method': method, 'params': args}
268 request = json.dumps(data)
269 self._client.write(request.encode("utf8") + b'\n')
270 self._client.flush()
271 response = self._client.readline()
272 if not response:
273 raise ProtocolError(self._ad,
274 ProtocolError.NO_RESPONSE_FROM_SERVER)
275 result = json.loads(str(response, encoding="utf8"))
276 if result['error']:
277 raise ApiError(self._ad, result['error'])
278 if result['id'] != apiid:
279 raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)
280 if result.get('callback') is not None:
281 if self._event_client is None:
282 self._event_client = self._start_event_client()
283 return callback_handler.CallbackHandler(
284 callback_id=result['callback'],
285 event_client=self._event_client,
286 ret_value=result['result'],
287 method_name=method,
288 ad=self._ad)
289 return result['result']
290
291 def __getattr__(self, name):
292 """Wrapper for python magic to turn method calls into RPC calls."""
293
294 def rpc_call(*args):
295 return self._rpc(name, *args)
296
297 return rpc_call
298
299 def _id_counter(self):
300 i = 0
301 while True:
302 yield i
303 i += 1
304
[end of mobly/controllers/android_device_lib/jsonrpc_client_base.py]
</code>
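For orientation, the JSON exchanged by `connect()`/`_cmd()` and `_rpc()` above looks roughly like the sketch below; the method name `makeToast` and the server replies are invented for illustration and are not part of the API shown here.

```python
# Rough sketch of the wire traffic produced by the client above (illustrative values).
import json

# Handshake sent by connect() via _cmd(); UNKNOWN_UID is -1.
handshake = json.dumps({'cmd': 'initiate', 'uid': -1}) + '\n'
# Example server reply: {"status": true, "uid": 1}

# client.makeToast('hello') goes through __getattr__ -> _rpc('makeToast', 'hello').
request = json.dumps({'id': 0, 'method': 'makeToast', 'params': ['hello']}) + '\n'
# Example server reply: {"id": 0, "result": null, "error": null, "callback": null}
print(handshake, request)
```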
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py
+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py
@@ -214,8 +214,8 @@
resp = self._cmd(cmd, uid)
if not resp:
- raise ProtocolError(
- self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
+ raise ProtocolError(self._ad,
+ ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)
result = json.loads(str(resp, encoding='utf8'))
if result['status']:
self.uid = result['uid']
@@ -228,6 +228,40 @@
self._conn.close()
self._conn = None
+ def _client_send(self, msg):
+ """Sends an Rpc message through the connection.
+
+ Args:
+ msg: string, the message to send.
+
+ Raises:
+ Error: a socket error occurred during the send.
+ """
+ try:
+ self._client.write(msg.encode("utf8") + b'\n')
+ self._client.flush()
+ except socket.error as e:
+ raise Error(
+ self._ad,
+ 'Encountered socket error "%s" sending RPC message "%s"' %
+ (e, msg))
+
+ def _client_receive(self):
+ """Receives the server's response of an Rpc message.
+
+ Returns:
+ Raw byte string of the response.
+
+ Raises:
+ Error: a socket error occurred during the read.
+ """
+ try:
+ return self._client.readline()
+ except socket.error as e:
+ raise Error(
+ self._ad,
+ 'Encountered socket error reading RPC response "%s"' % e)
+
def _cmd(self, command, uid=None):
"""Send a command to the server.
@@ -240,13 +274,8 @@
"""
if not uid:
uid = self.uid
- self._client.write(
- json.dumps({
- 'cmd': command,
- 'uid': uid
- }).encode("utf8") + b'\n')
- self._client.flush()
- return self._client.readline()
+ self._client_send(json.dumps({'cmd': command, 'uid': uid}))
+ return self._client_receive()
def _rpc(self, method, *args):
"""Sends an rpc to the app.
@@ -266,9 +295,8 @@
apiid = next(self._counter)
data = {'id': apiid, 'method': method, 'params': args}
request = json.dumps(data)
- self._client.write(request.encode("utf8") + b'\n')
- self._client.flush()
- response = self._client.readline()
+ self._client_send(request)
+ response = self._client_receive()
if not response:
raise ProtocolError(self._ad,
ProtocolError.NO_RESPONSE_FROM_SERVER)
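The practical effect of the new `_client_send`/`_client_receive` wrappers is that transport failures surface as device-tagged `Error`s instead of bare `socket.error`s. A minimal sketch with a mocked transport, assuming the patch above has been applied (Python 3 `unittest.mock`; the device object is faked):

```python
import socket
from unittest import mock

from mobly.controllers.android_device_lib import jsonrpc_client_base

client = jsonrpc_client_base.JsonRpcClientBase(app_name='demo', ad=mock.MagicMock())
client._client = mock.MagicMock()
client._client.write.side_effect = socket.error('Broken pipe')

try:
    client._client_send('{"cmd": "initiate", "uid": -1}')
except jsonrpc_client_base.Error as e:
    print(e)  # the message now carries the device tag plus the original socket error
```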
| {"golden_diff": "diff --git a/mobly/controllers/android_device_lib/jsonrpc_client_base.py b/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n--- a/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n+++ b/mobly/controllers/android_device_lib/jsonrpc_client_base.py\n@@ -214,8 +214,8 @@\n \n resp = self._cmd(cmd, uid)\n if not resp:\n- raise ProtocolError(\n- self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n+ raise ProtocolError(self._ad,\n+ ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n result = json.loads(str(resp, encoding='utf8'))\n if result['status']:\n self.uid = result['uid']\n@@ -228,6 +228,40 @@\n self._conn.close()\n self._conn = None\n \n+ def _client_send(self, msg):\n+ \"\"\"Sends an Rpc message through the connection.\n+\n+ Args:\n+ msg: string, the message to send.\n+\n+ Raises:\n+ Error: a socket error occurred during the send.\n+ \"\"\"\n+ try:\n+ self._client.write(msg.encode(\"utf8\") + b'\\n')\n+ self._client.flush()\n+ except socket.error as e:\n+ raise Error(\n+ self._ad,\n+ 'Encountered socket error \"%s\" sending RPC message \"%s\"' %\n+ (e, msg))\n+\n+ def _client_receive(self):\n+ \"\"\"Receives the server's response of an Rpc message.\n+\n+ Returns:\n+ Raw byte string of the response.\n+\n+ Raises:\n+ Error: a socket error occurred during the read.\n+ \"\"\"\n+ try:\n+ return self._client.readline()\n+ except socket.error as e:\n+ raise Error(\n+ self._ad,\n+ 'Encountered socket error reading RPC response \"%s\"' % e)\n+\n def _cmd(self, command, uid=None):\n \"\"\"Send a command to the server.\n \n@@ -240,13 +274,8 @@\n \"\"\"\n if not uid:\n uid = self.uid\n- self._client.write(\n- json.dumps({\n- 'cmd': command,\n- 'uid': uid\n- }).encode(\"utf8\") + b'\\n')\n- self._client.flush()\n- return self._client.readline()\n+ self._client_send(json.dumps({'cmd': command, 'uid': uid}))\n+ return self._client_receive()\n \n def _rpc(self, method, *args):\n \"\"\"Sends an rpc to the app.\n@@ -266,9 +295,8 @@\n apiid = next(self._counter)\n data = {'id': apiid, 'method': method, 'params': args}\n request = json.dumps(data)\n- self._client.write(request.encode(\"utf8\") + b'\\n')\n- self._client.flush()\n- response = self._client.readline()\n+ self._client_send(request)\n+ response = self._client_receive()\n if not response:\n raise ProtocolError(self._ad,\n ProtocolError.NO_RESPONSE_FROM_SERVER)\n", "issue": "Socket errors thrown from device obj are not marked with device tag\nWe should wrap `socket.error` with a child class of `DeviceError`.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Base class for clients that communicate with apps over a JSON RPC interface.\n\nThe JSON protocol expected by this module is:\n\n.. 
code-block:: json\n\n Request:\n {\n \"id\": <monotonically increasing integer containing the ID of \n this request>\n \"method\": <string containing the name of the method to execute>\n \"params\": <JSON array containing the arguments to the method>\n }\n\n Response:\n {\n \"id\": <int id of request that this response maps to>,\n \"result\": <Arbitrary JSON object containing the result of\n executing the method. If the method could not be\n executed or returned void, contains 'null'.>,\n \"error\": <String containing the error thrown by executing the\n method. If no error occurred, contains 'null'.>\n \"callback\": <String that represents a callback ID used to\n identify events associated with a particular\n CallbackHandler object.>\n }\n\"\"\"\n\nfrom builtins import str\n\nimport json\nimport logging\nimport socket\nimport threading\n\nfrom mobly.controllers.android_device_lib import callback_handler\nfrom mobly.controllers.android_device_lib import errors\n\n# UID of the 'unknown' jsonrpc session. Will cause creation of a new session.\nUNKNOWN_UID = -1\n\n# Maximum time to wait for the socket to open on the device.\n_SOCKET_CONNECTION_TIMEOUT = 60\n\n# Maximum time to wait for a response message on the socket.\n_SOCKET_READ_TIMEOUT = callback_handler.MAX_TIMEOUT\n\n\nclass Error(errors.DeviceError):\n pass\n\n\nclass AppStartError(Error):\n \"\"\"Raised when the app is not able to be started.\"\"\"\n\n\nclass AppRestoreConnectionError(Error):\n \"\"\"Raised when failed to restore app from disconnection.\"\"\"\n\n\nclass ApiError(Error):\n \"\"\"Raised when remote API reports an error.\"\"\"\n\n\nclass ProtocolError(Error):\n \"\"\"Raised when there is some error in exchanging data with server.\"\"\"\n NO_RESPONSE_FROM_HANDSHAKE = 'No response from handshake.'\n NO_RESPONSE_FROM_SERVER = 'No response from server.'\n MISMATCHED_API_ID = 'Mismatched API id.'\n\n\nclass JsonRpcCommand(object):\n \"\"\"Commands that can be invoked on all jsonrpc clients.\n\n INIT: Initializes a new session.\n CONTINUE: Creates a connection.\n \"\"\"\n INIT = 'initiate'\n CONTINUE = 'continue'\n\n\nclass JsonRpcClientBase(object):\n \"\"\"Base class for jsonrpc clients that connect to remote servers.\n\n Connects to a remote device running a jsonrpc-compatible app. Before opening\n a connection a port forward must be setup to go over usb. This be done using\n adb.forward([local, remote]). 
Once the port has been forwarded it can be\n used in this object as the port of communication.\n\n Attributes:\n host_port: (int) The host port of this RPC client.\n device_port: (int) The device port of this RPC client.\n app_name: (str) The user-visible name of the app being communicated\n with.\n uid: (int) The uid of this session.\n \"\"\"\n\n def __init__(self, app_name, ad):\n \"\"\"\n Args:\n app_name: (str) The user-visible name of the app being communicated\n with.\n ad: (AndroidDevice) The device object associated with a client.\n \"\"\"\n self.host_port = None\n self.device_port = None\n self.app_name = app_name\n self._ad = ad\n self.log = self._ad.log\n self.uid = None\n self._client = None # prevent close errors on connect failure\n self._conn = None\n self._counter = None\n self._lock = threading.Lock()\n self._event_client = None\n\n def __del__(self):\n self.disconnect()\n\n # Methods to be implemented by subclasses.\n\n def start_app_and_connect(self):\n \"\"\"Starts the server app on the android device and connects to it.\n\n After this, the self.host_port and self.device_port attributes must be\n set.\n\n Must be implemented by subclasses.\n\n Raises:\n AppStartError: When the app was not able to be started.\n \"\"\"\n raise NotImplementedError()\n\n def stop_app(self):\n \"\"\"Kills any running instance of the app.\n\n Must be implemented by subclasses.\n \"\"\"\n raise NotImplementedError()\n\n def restore_app_connection(self, port=None):\n \"\"\"Reconnects to the app after device USB was disconnected.\n\n Instead of creating new instance of the client:\n - Uses the given port (or finds a new available host_port if none is\n given).\n - Tries to connect to remote server with selected port.\n\n Must be implemented by subclasses.\n\n Args:\n port: If given, this is the host port from which to connect to remote\n device port. If not provided, find a new available port as host\n port.\n\n Raises:\n AppRestoreConnectionError: When the app was not able to be\n reconnected.\n \"\"\"\n raise NotImplementedError()\n\n def _start_event_client(self):\n \"\"\"Starts a separate JsonRpc client to the same session for propagating\n events.\n\n This is an optional function that should only implement if the client\n utilizes the snippet event mechanism.\n\n Returns:\n A JsonRpc Client object that connects to the same session as the\n one on which this function is called.\n \"\"\"\n raise NotImplementedError()\n\n # Rest of the client methods.\n\n def connect(self, uid=UNKNOWN_UID, cmd=JsonRpcCommand.INIT):\n \"\"\"Opens a connection to a JSON RPC server.\n\n Opens a connection to a remote client. The connection attempt will time\n out if it takes longer than _SOCKET_CONNECTION_TIMEOUT seconds. 
Each\n subsequent operation over this socket will time out after\n _SOCKET_READ_TIMEOUT seconds as well.\n\n Args:\n uid: int, The uid of the session to join, or UNKNOWN_UID to start a\n new session.\n cmd: JsonRpcCommand, The command to use for creating the connection.\n\n Raises:\n IOError: Raised when the socket times out from io error\n socket.timeout: Raised when the socket waits to long for connection.\n ProtocolError: Raised when there is an error in the protocol.\n \"\"\"\n self._counter = self._id_counter()\n self._conn = socket.create_connection(('localhost', self.host_port),\n _SOCKET_CONNECTION_TIMEOUT)\n self._conn.settimeout(_SOCKET_READ_TIMEOUT)\n self._client = self._conn.makefile(mode='brw')\n\n resp = self._cmd(cmd, uid)\n if not resp:\n raise ProtocolError(\n self._ad, ProtocolError.NO_RESPONSE_FROM_HANDSHAKE)\n result = json.loads(str(resp, encoding='utf8'))\n if result['status']:\n self.uid = result['uid']\n else:\n self.uid = UNKNOWN_UID\n\n def disconnect(self):\n \"\"\"Close the connection to the remote client.\"\"\"\n if self._conn:\n self._conn.close()\n self._conn = None\n\n def _cmd(self, command, uid=None):\n \"\"\"Send a command to the server.\n\n Args:\n command: str, The name of the command to execute.\n uid: int, the uid of the session to send the command to.\n\n Returns:\n The line that was written back.\n \"\"\"\n if not uid:\n uid = self.uid\n self._client.write(\n json.dumps({\n 'cmd': command,\n 'uid': uid\n }).encode(\"utf8\") + b'\\n')\n self._client.flush()\n return self._client.readline()\n\n def _rpc(self, method, *args):\n \"\"\"Sends an rpc to the app.\n\n Args:\n method: str, The name of the method to execute.\n args: any, The args of the method.\n\n Returns:\n The result of the rpc.\n\n Raises:\n ProtocolError: Something went wrong with the protocol.\n ApiError: The rpc went through, however executed with errors.\n \"\"\"\n with self._lock:\n apiid = next(self._counter)\n data = {'id': apiid, 'method': method, 'params': args}\n request = json.dumps(data)\n self._client.write(request.encode(\"utf8\") + b'\\n')\n self._client.flush()\n response = self._client.readline()\n if not response:\n raise ProtocolError(self._ad,\n ProtocolError.NO_RESPONSE_FROM_SERVER)\n result = json.loads(str(response, encoding=\"utf8\"))\n if result['error']:\n raise ApiError(self._ad, result['error'])\n if result['id'] != apiid:\n raise ProtocolError(self._ad, ProtocolError.MISMATCHED_API_ID)\n if result.get('callback') is not None:\n if self._event_client is None:\n self._event_client = self._start_event_client()\n return callback_handler.CallbackHandler(\n callback_id=result['callback'],\n event_client=self._event_client,\n ret_value=result['result'],\n method_name=method,\n ad=self._ad)\n return result['result']\n\n def __getattr__(self, name):\n \"\"\"Wrapper for python magic to turn method calls into RPC calls.\"\"\"\n\n def rpc_call(*args):\n return self._rpc(name, *args)\n\n return rpc_call\n\n def _id_counter(self):\n i = 0\n while True:\n yield i\n i += 1\n", "path": "mobly/controllers/android_device_lib/jsonrpc_client_base.py"}]} | 3,553 | 694 |
gh_patches_debug_31895 | rasdani/github-patches | git_diff | ipython__ipython-2755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fails to send class definitions from an interactive session to the engines' namespaces
Here is a [sample notebook](http://nbviewer.ipython.org/4471180/) to demonstrate the issue.
Please note that I don't get any unpickling error message when calling:
```
rc[:]['AClass'] = AClass
```
While @minrk is reporting a `DummyModule` `AttributeError` in this [SO question](http://stackoverflow.com/questions/14184621/how-to-work-with-interactively-defined-classes-in-ipython-parallel).
I will see if I can come up with a pull request for a `can_class` / `uncan_class` along the lines of [this gist](https://gist.github.com/4470212).
</issue>
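The gist referenced above boils down to rebuilding the class on the receiving side from its name, bases, and attribute dict. A minimal, hypothetical sketch of that idea follows (function names are illustrative; the actual change, shown in the patch further below, wires this into the existing can/uncan machinery):

```python
# Illustrative only: send an interactively defined class "by value".
def can_class(cls):
    state = {k: v for k, v in cls.__dict__.items()
             if k not in ('__dict__', '__weakref__')}
    return cls.__name__, cls.__bases__, state

def uncan_class(name, bases, state):
    # type() rebuilds an equivalent class in the receiving namespace.
    return type(name, bases, state)

class AClass(object):
    def hello(self):
        return "hello"

name, bases, state = can_class(AClass)
Rebuilt = uncan_class(name, bases, state)
print(Rebuilt().hello())  # -> hello
```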
<code>
[start of IPython/utils/pickleutil.py]
1 # encoding: utf-8
2
3 """Pickle related utilities. Perhaps this should be called 'can'."""
4
5 __docformat__ = "restructuredtext en"
6
7 #-------------------------------------------------------------------------------
8 # Copyright (C) 2008-2011 The IPython Development Team
9 #
10 # Distributed under the terms of the BSD License. The full license is in
11 # the file COPYING, distributed as part of this software.
12 #-------------------------------------------------------------------------------
13
14 #-------------------------------------------------------------------------------
15 # Imports
16 #-------------------------------------------------------------------------------
17
18 import copy
19 import logging
20 import sys
21 from types import FunctionType
22
23 try:
24 import cPickle as pickle
25 except ImportError:
26 import pickle
27
28 try:
29 import numpy
30 except:
31 numpy = None
32
33 import codeutil
34 import py3compat
35 from importstring import import_item
36
37 from IPython.config import Application
38
39 if py3compat.PY3:
40 buffer = memoryview
41
42 #-------------------------------------------------------------------------------
43 # Classes
44 #-------------------------------------------------------------------------------
45
46
47 class CannedObject(object):
48 def __init__(self, obj, keys=[]):
49 self.keys = keys
50 self.obj = copy.copy(obj)
51 for key in keys:
52 setattr(self.obj, key, can(getattr(obj, key)))
53
54 self.buffers = []
55
56 def get_object(self, g=None):
57 if g is None:
58 g = {}
59 for key in self.keys:
60 setattr(self.obj, key, uncan(getattr(self.obj, key), g))
61 return self.obj
62
63
64 class Reference(CannedObject):
65 """object for wrapping a remote reference by name."""
66 def __init__(self, name):
67 if not isinstance(name, basestring):
68 raise TypeError("illegal name: %r"%name)
69 self.name = name
70 self.buffers = []
71
72 def __repr__(self):
73 return "<Reference: %r>"%self.name
74
75 def get_object(self, g=None):
76 if g is None:
77 g = {}
78
79 return eval(self.name, g)
80
81
82 class CannedFunction(CannedObject):
83
84 def __init__(self, f):
85 self._check_type(f)
86 self.code = f.func_code
87 if f.func_defaults:
88 self.defaults = [ can(fd) for fd in f.func_defaults ]
89 else:
90 self.defaults = None
91 self.module = f.__module__ or '__main__'
92 self.__name__ = f.__name__
93 self.buffers = []
94
95 def _check_type(self, obj):
96 assert isinstance(obj, FunctionType), "Not a function type"
97
98 def get_object(self, g=None):
99 # try to load function back into its module:
100 if not self.module.startswith('__'):
101 __import__(self.module)
102 g = sys.modules[self.module].__dict__
103
104 if g is None:
105 g = {}
106 if self.defaults:
107 defaults = tuple(uncan(cfd, g) for cfd in self.defaults)
108 else:
109 defaults = None
110 newFunc = FunctionType(self.code, g, self.__name__, defaults)
111 return newFunc
112
113
114 class CannedArray(CannedObject):
115 def __init__(self, obj):
116 self.shape = obj.shape
117 self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str
118 if sum(obj.shape) == 0:
119 # just pickle it
120 self.buffers = [pickle.dumps(obj, -1)]
121 else:
122 # ensure contiguous
123 obj = numpy.ascontiguousarray(obj, dtype=None)
124 self.buffers = [buffer(obj)]
125
126 def get_object(self, g=None):
127 data = self.buffers[0]
128 if sum(self.shape) == 0:
129 # no shape, we just pickled it
130 return pickle.loads(data)
131 else:
132 return numpy.frombuffer(data, dtype=self.dtype).reshape(self.shape)
133
134
135 class CannedBytes(CannedObject):
136 wrap = bytes
137 def __init__(self, obj):
138 self.buffers = [obj]
139
140 def get_object(self, g=None):
141 data = self.buffers[0]
142 return self.wrap(data)
143
144 def CannedBuffer(CannedBytes):
145 wrap = buffer
146
147 #-------------------------------------------------------------------------------
148 # Functions
149 #-------------------------------------------------------------------------------
150
151 def _logger():
152 """get the logger for the current Application
153
154 the root logger will be used if no Application is running
155 """
156 if Application.initialized():
157 logger = Application.instance().log
158 else:
159 logger = logging.getLogger()
160 if not logger.handlers:
161 logging.basicConfig()
162
163 return logger
164
165 def _import_mapping(mapping, original=None):
166 """import any string-keys in a type mapping
167
168 """
169 log = _logger()
170 log.debug("Importing canning map")
171 for key,value in mapping.items():
172 if isinstance(key, basestring):
173 try:
174 cls = import_item(key)
175 except Exception:
176 if original and key not in original:
177 # only message on user-added classes
178                 log.error("canning class not importable: %r", key, exc_info=True)
179 mapping.pop(key)
180 else:
181 mapping[cls] = mapping.pop(key)
182
183 def can(obj):
184 """prepare an object for pickling"""
185
186 import_needed = False
187
188 for cls,canner in can_map.iteritems():
189 if isinstance(cls, basestring):
190 import_needed = True
191 break
192 elif isinstance(obj, cls):
193 return canner(obj)
194
195 if import_needed:
196 # perform can_map imports, then try again
197 # this will usually only happen once
198 _import_mapping(can_map, _original_can_map)
199 return can(obj)
200
201 return obj
202
203 def can_dict(obj):
204 """can the *values* of a dict"""
205 if isinstance(obj, dict):
206 newobj = {}
207 for k, v in obj.iteritems():
208 newobj[k] = can(v)
209 return newobj
210 else:
211 return obj
212
213 def can_sequence(obj):
214 """can the elements of a sequence"""
215 if isinstance(obj, (list, tuple)):
216 t = type(obj)
217 return t([can(i) for i in obj])
218 else:
219 return obj
220
221 def uncan(obj, g=None):
222 """invert canning"""
223
224 import_needed = False
225 for cls,uncanner in uncan_map.iteritems():
226 if isinstance(cls, basestring):
227 import_needed = True
228 break
229 elif isinstance(obj, cls):
230 return uncanner(obj, g)
231
232 if import_needed:
233 # perform uncan_map imports, then try again
234 # this will usually only happen once
235 _import_mapping(uncan_map, _original_uncan_map)
236 return uncan(obj, g)
237
238 return obj
239
240 def uncan_dict(obj, g=None):
241 if isinstance(obj, dict):
242 newobj = {}
243 for k, v in obj.iteritems():
244 newobj[k] = uncan(v,g)
245 return newobj
246 else:
247 return obj
248
249 def uncan_sequence(obj, g=None):
250 if isinstance(obj, (list, tuple)):
251 t = type(obj)
252 return t([uncan(i,g) for i in obj])
253 else:
254 return obj
255
256
257 #-------------------------------------------------------------------------------
258 # API dictionaries
259 #-------------------------------------------------------------------------------
260
261 # These dicts can be extended for custom serialization of new objects
262
263 can_map = {
264 'IPython.parallel.dependent' : lambda obj: CannedObject(obj, keys=('f','df')),
265 'numpy.ndarray' : CannedArray,
266 FunctionType : CannedFunction,
267 bytes : CannedBytes,
268 buffer : CannedBuffer,
269 }
270
271 uncan_map = {
272 CannedObject : lambda obj, g: obj.get_object(g),
273 }
274
275 # for use in _import_mapping:
276 _original_can_map = can_map.copy()
277 _original_uncan_map = uncan_map.copy()
278
[end of IPython/utils/pickleutil.py]
</code>
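The `can_map`/`uncan_map` dictionaries at the bottom of the module are the extension points the rest of the code relies on. A hypothetical custom canner registered the same way (the `Point`/`CannedPoint` names are invented; this targets the Python 2-era IPython listed above):

```python
from IPython.utils import pickleutil

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class CannedPoint(pickleutil.CannedObject):
    def __init__(self, p):
        self.buffers = []
        self.xy = (p.x, p.y)

    def get_object(self, g=None):
        return Point(*self.xy)

pickleutil.can_map[Point] = CannedPoint
# uncan_map already resolves any CannedObject through get_object().
p = pickleutil.uncan(pickleutil.can(Point(1, 2)))
print(p.x, p.y)
```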
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/utils/pickleutil.py b/IPython/utils/pickleutil.py
--- a/IPython/utils/pickleutil.py
+++ b/IPython/utils/pickleutil.py
@@ -38,6 +38,10 @@
if py3compat.PY3:
buffer = memoryview
+ class_type = type
+else:
+ from types import ClassType
+ class_type = (type, ClassType)
#-------------------------------------------------------------------------------
# Classes
@@ -110,6 +114,30 @@
newFunc = FunctionType(self.code, g, self.__name__, defaults)
return newFunc
+class CannedClass(CannedObject):
+
+ def __init__(self, cls):
+ self._check_type(cls)
+ self.name = cls.__name__
+ self.old_style = not isinstance(cls, type)
+ self._canned_dict = {}
+ for k,v in cls.__dict__.items():
+ if k not in ('__weakref__', '__dict__'):
+ self._canned_dict[k] = can(v)
+ if self.old_style:
+ mro = []
+ else:
+ mro = cls.mro()
+
+ self.parents = [ can(c) for c in mro[1:] ]
+ self.buffers = []
+
+ def _check_type(self, obj):
+ assert isinstance(obj, class_type), "Not a class type"
+
+ def get_object(self, g=None):
+ parents = tuple(uncan(p, g) for p in self.parents)
+ return type(self.name, parents, uncan_dict(self._canned_dict, g=g))
class CannedArray(CannedObject):
def __init__(self, obj):
@@ -200,6 +228,12 @@
return obj
+def can_class(obj):
+ if isinstance(obj, class_type) and obj.__module__ == '__main__':
+ return CannedClass(obj)
+ else:
+ return obj
+
def can_dict(obj):
"""can the *values* of a dict"""
if isinstance(obj, dict):
@@ -266,6 +300,7 @@
FunctionType : CannedFunction,
bytes : CannedBytes,
buffer : CannedBuffer,
+ class_type : can_class,
}
uncan_map = {
| {"golden_diff": "diff --git a/IPython/utils/pickleutil.py b/IPython/utils/pickleutil.py\n--- a/IPython/utils/pickleutil.py\n+++ b/IPython/utils/pickleutil.py\n@@ -38,6 +38,10 @@\n \n if py3compat.PY3:\n buffer = memoryview\n+ class_type = type\n+else:\n+ from types import ClassType\n+ class_type = (type, ClassType)\n \n #-------------------------------------------------------------------------------\n # Classes\n@@ -110,6 +114,30 @@\n newFunc = FunctionType(self.code, g, self.__name__, defaults)\n return newFunc\n \n+class CannedClass(CannedObject):\n+\n+ def __init__(self, cls):\n+ self._check_type(cls)\n+ self.name = cls.__name__\n+ self.old_style = not isinstance(cls, type)\n+ self._canned_dict = {}\n+ for k,v in cls.__dict__.items():\n+ if k not in ('__weakref__', '__dict__'):\n+ self._canned_dict[k] = can(v)\n+ if self.old_style:\n+ mro = []\n+ else:\n+ mro = cls.mro()\n+ \n+ self.parents = [ can(c) for c in mro[1:] ]\n+ self.buffers = []\n+\n+ def _check_type(self, obj):\n+ assert isinstance(obj, class_type), \"Not a class type\"\n+\n+ def get_object(self, g=None):\n+ parents = tuple(uncan(p, g) for p in self.parents)\n+ return type(self.name, parents, uncan_dict(self._canned_dict, g=g))\n \n class CannedArray(CannedObject):\n def __init__(self, obj):\n@@ -200,6 +228,12 @@\n \n return obj\n \n+def can_class(obj):\n+ if isinstance(obj, class_type) and obj.__module__ == '__main__':\n+ return CannedClass(obj)\n+ else:\n+ return obj\n+\n def can_dict(obj):\n \"\"\"can the *values* of a dict\"\"\"\n if isinstance(obj, dict):\n@@ -266,6 +300,7 @@\n FunctionType : CannedFunction,\n bytes : CannedBytes,\n buffer : CannedBuffer,\n+ class_type : can_class,\n }\n \n uncan_map = {\n", "issue": "Fail to send class definitions from interactive session to engines namespaces\nHere is a [sample notebook](http://nbviewer.ipython.org/4471180/) to demonstrate the issue.\n\nPlease note that I don't get any unpickling error message when calling:\n\n```\nrc[:]['AClass'] = AClass\n```\n\nWhile @minrk is reporting a `DummyModule` `AttributeError` in this [SO question](http://stackoverflow.com/questions/14184621/how-to-work-with-interactively-defined-classes-in-ipython-parallel).\n\nI will see if I can come up with a pull request for a `can_class` / `uncan_class` along the lines of [this gist](https://gist.github.com/4470212).\n\n", "before_files": [{"content": "# encoding: utf-8\n\n\"\"\"Pickle related utilities. Perhaps this should be called 'can'.\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\n#-------------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-------------------------------------------------------------------------------\n\n#-------------------------------------------------------------------------------\n# Imports\n#-------------------------------------------------------------------------------\n\nimport copy\nimport logging\nimport sys\nfrom types import FunctionType\n\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\ntry:\n import numpy\nexcept:\n numpy = None\n\nimport codeutil\nimport py3compat\nfrom importstring import import_item\n\nfrom IPython.config import Application\n\nif py3compat.PY3:\n buffer = memoryview\n\n#-------------------------------------------------------------------------------\n# Classes\n#-------------------------------------------------------------------------------\n\n\nclass CannedObject(object):\n def __init__(self, obj, keys=[]):\n self.keys = keys\n self.obj = copy.copy(obj)\n for key in keys:\n setattr(self.obj, key, can(getattr(obj, key)))\n \n self.buffers = []\n\n def get_object(self, g=None):\n if g is None:\n g = {}\n for key in self.keys:\n setattr(self.obj, key, uncan(getattr(self.obj, key), g))\n return self.obj\n \n\nclass Reference(CannedObject):\n \"\"\"object for wrapping a remote reference by name.\"\"\"\n def __init__(self, name):\n if not isinstance(name, basestring):\n raise TypeError(\"illegal name: %r\"%name)\n self.name = name\n self.buffers = []\n\n def __repr__(self):\n return \"<Reference: %r>\"%self.name\n\n def get_object(self, g=None):\n if g is None:\n g = {}\n \n return eval(self.name, g)\n\n\nclass CannedFunction(CannedObject):\n\n def __init__(self, f):\n self._check_type(f)\n self.code = f.func_code\n if f.func_defaults:\n self.defaults = [ can(fd) for fd in f.func_defaults ]\n else:\n self.defaults = None\n self.module = f.__module__ or '__main__'\n self.__name__ = f.__name__\n self.buffers = []\n\n def _check_type(self, obj):\n assert isinstance(obj, FunctionType), \"Not a function type\"\n\n def get_object(self, g=None):\n # try to load function back into its module:\n if not self.module.startswith('__'):\n __import__(self.module)\n g = sys.modules[self.module].__dict__\n\n if g is None:\n g = {}\n if self.defaults:\n defaults = tuple(uncan(cfd, g) for cfd in self.defaults)\n else:\n defaults = None\n newFunc = FunctionType(self.code, g, self.__name__, defaults)\n return newFunc\n\n\nclass CannedArray(CannedObject):\n def __init__(self, obj):\n self.shape = obj.shape\n self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str\n if sum(obj.shape) == 0:\n # just pickle it\n self.buffers = [pickle.dumps(obj, -1)]\n else:\n # ensure contiguous\n obj = numpy.ascontiguousarray(obj, dtype=None)\n self.buffers = [buffer(obj)]\n \n def get_object(self, g=None):\n data = self.buffers[0]\n if sum(self.shape) == 0:\n # no shape, we just pickled it\n return pickle.loads(data)\n else:\n return numpy.frombuffer(data, dtype=self.dtype).reshape(self.shape)\n\n\nclass CannedBytes(CannedObject):\n wrap = bytes\n def __init__(self, obj):\n self.buffers = [obj]\n \n def get_object(self, g=None):\n data = self.buffers[0]\n return self.wrap(data)\n\ndef CannedBuffer(CannedBytes):\n wrap = buffer\n\n#-------------------------------------------------------------------------------\n# Functions\n#-------------------------------------------------------------------------------\n\ndef _logger():\n \"\"\"get the logger for the current Application\n \n the root logger will be 
used if no Application is running\n \"\"\"\n if Application.initialized():\n logger = Application.instance().log\n else:\n logger = logging.getLogger()\n if not logger.handlers:\n logging.basicConfig()\n \n return logger\n\ndef _import_mapping(mapping, original=None):\n \"\"\"import any string-keys in a type mapping\n \n \"\"\"\n log = _logger()\n log.debug(\"Importing canning map\")\n for key,value in mapping.items():\n if isinstance(key, basestring):\n try:\n cls = import_item(key)\n except Exception:\n if original and key not in original:\n # only message on user-added classes\n log.error(\"cannning class not importable: %r\", key, exc_info=True)\n mapping.pop(key)\n else:\n mapping[cls] = mapping.pop(key)\n\ndef can(obj):\n \"\"\"prepare an object for pickling\"\"\"\n \n import_needed = False\n \n for cls,canner in can_map.iteritems():\n if isinstance(cls, basestring):\n import_needed = True\n break\n elif isinstance(obj, cls):\n return canner(obj)\n \n if import_needed:\n # perform can_map imports, then try again\n # this will usually only happen once\n _import_mapping(can_map, _original_can_map)\n return can(obj)\n \n return obj\n\ndef can_dict(obj):\n \"\"\"can the *values* of a dict\"\"\"\n if isinstance(obj, dict):\n newobj = {}\n for k, v in obj.iteritems():\n newobj[k] = can(v)\n return newobj\n else:\n return obj\n\ndef can_sequence(obj):\n \"\"\"can the elements of a sequence\"\"\"\n if isinstance(obj, (list, tuple)):\n t = type(obj)\n return t([can(i) for i in obj])\n else:\n return obj\n\ndef uncan(obj, g=None):\n \"\"\"invert canning\"\"\"\n \n import_needed = False\n for cls,uncanner in uncan_map.iteritems():\n if isinstance(cls, basestring):\n import_needed = True\n break\n elif isinstance(obj, cls):\n return uncanner(obj, g)\n \n if import_needed:\n # perform uncan_map imports, then try again\n # this will usually only happen once\n _import_mapping(uncan_map, _original_uncan_map)\n return uncan(obj, g)\n \n return obj\n\ndef uncan_dict(obj, g=None):\n if isinstance(obj, dict):\n newobj = {}\n for k, v in obj.iteritems():\n newobj[k] = uncan(v,g)\n return newobj\n else:\n return obj\n\ndef uncan_sequence(obj, g=None):\n if isinstance(obj, (list, tuple)):\n t = type(obj)\n return t([uncan(i,g) for i in obj])\n else:\n return obj\n\n\n#-------------------------------------------------------------------------------\n# API dictionaries\n#-------------------------------------------------------------------------------\n\n# These dicts can be extended for custom serialization of new objects\n\ncan_map = {\n 'IPython.parallel.dependent' : lambda obj: CannedObject(obj, keys=('f','df')),\n 'numpy.ndarray' : CannedArray,\n FunctionType : CannedFunction,\n bytes : CannedBytes,\n buffer : CannedBuffer,\n}\n\nuncan_map = {\n CannedObject : lambda obj, g: obj.get_object(g),\n}\n\n# for use in _import_mapping:\n_original_can_map = can_map.copy()\n_original_uncan_map = uncan_map.copy()\n", "path": "IPython/utils/pickleutil.py"}]} | 3,107 | 521 |
gh_patches_debug_2472 | rasdani/github-patches | git_diff | ietf-tools__datatracker-4294 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CSV export of group documents is dumping b'' strings
From Tim Wicinski:
>FYI
>
>So I went and dumped the WG documents via the CSV link:
>
>https://datatracker.ietf.org/group/dnsop/documents/csv/
>
>Then I imported them into the googles sheets.
>The CSV file has all strings as "b'<string>'" which
>makes the import needing a bit of cleanup.
>
>tim
cc @moonshiner
</issue>
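The stray `b'...'` wrappers are what Python 3's `csv` module produces when handed `bytes`: every cell is stringified, so a bytes value is written via its `repr`. A minimal reproduction (illustrative):

```python
import csv
import io

out = io.StringIO()
writer = csv.writer(out)
writer.writerow(["draft-ietf-dnsop-example".encode("utf-8")])  # bytes cell -> b'...'
writer.writerow(["draft-ietf-dnsop-example"])                  # str cell -> clean output
print(out.getvalue())
# b'draft-ietf-dnsop-example'
# draft-ietf-dnsop-example
```

The fix in the patch further below is simply to stop encoding each cell and let `csv` write `str` values.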
<code>
[start of ietf/community/views.py]
1 # Copyright The IETF Trust 2012-2020, All Rights Reserved
2 # -*- coding: utf-8 -*-
3
4
5 import csv
6 import datetime
7 import json
8 import uuid
9
10 from django.http import HttpResponse, HttpResponseRedirect, Http404
11 from django.shortcuts import get_object_or_404, render
12 from django.contrib.auth.decorators import login_required
13 from django.utils.html import strip_tags
14
15 import debug # pyflakes:ignore
16
17 from ietf.community.models import SearchRule, EmailSubscription
18 from ietf.community.forms import SearchRuleTypeForm, SearchRuleForm, AddDocumentsForm, SubscriptionForm
19 from ietf.community.utils import lookup_community_list, can_manage_community_list
20 from ietf.community.utils import docs_tracked_by_community_list, docs_matching_community_list_rule
21 from ietf.community.utils import states_of_significant_change, reset_name_contains_index_for_rule
22 from ietf.doc.models import DocEvent, Document
23 from ietf.doc.utils_search import prepare_document_table
24 from ietf.utils.response import permission_denied
25
26 def view_list(request, username=None):
27 clist = lookup_community_list(username)
28
29 docs = docs_tracked_by_community_list(clist)
30 docs, meta = prepare_document_table(request, docs, request.GET)
31
32 subscribed = request.user.is_authenticated and EmailSubscription.objects.filter(community_list=clist, email__person__user=request.user)
33
34 return render(request, 'community/view_list.html', {
35 'clist': clist,
36 'docs': docs,
37 'meta': meta,
38 'can_manage_list': can_manage_community_list(request.user, clist),
39 'subscribed': subscribed,
40 })
41
42 @login_required
43 def manage_list(request, username=None, acronym=None, group_type=None):
44 # we need to be a bit careful because clist may not exist in the
45 # database so we can't call related stuff on it yet
46 clist = lookup_community_list(username, acronym)
47
48 if not can_manage_community_list(request.user, clist):
49 permission_denied(request, "You do not have permission to access this view")
50
51 action = request.POST.get('action')
52
53 if request.method == 'POST' and action == 'add_documents':
54 add_doc_form = AddDocumentsForm(request.POST)
55 if add_doc_form.is_valid():
56 if clist.pk is None:
57 clist.save()
58
59 for d in add_doc_form.cleaned_data['documents']:
60 if not d in clist.added_docs.all():
61 clist.added_docs.add(d)
62
63 return HttpResponseRedirect("")
64 else:
65 add_doc_form = AddDocumentsForm()
66
67 if request.method == 'POST' and action == 'remove_document':
68 document_id = request.POST.get('document')
69 if clist.pk is not None and document_id:
70 document = get_object_or_404(clist.added_docs, id=document_id)
71 clist.added_docs.remove(document)
72
73 return HttpResponseRedirect("")
74
75 rule_form = None
76 if request.method == 'POST' and action == 'add_rule':
77 rule_type_form = SearchRuleTypeForm(request.POST)
78 if rule_type_form.is_valid():
79 rule_type = rule_type_form.cleaned_data['rule_type']
80
81 if rule_type:
82 rule_form = SearchRuleForm(clist, rule_type, request.POST)
83 if rule_form.is_valid():
84 if clist.pk is None:
85 clist.save()
86
87 rule = rule_form.save(commit=False)
88 rule.community_list = clist
89 rule.rule_type = rule_type
90 rule.save()
91 if rule.rule_type == "name_contains":
92 reset_name_contains_index_for_rule(rule)
93
94 return HttpResponseRedirect("")
95 else:
96 rule_type_form = SearchRuleTypeForm()
97
98 if request.method == 'POST' and action == 'remove_rule':
99 rule_pk = request.POST.get('rule')
100 if clist.pk is not None and rule_pk:
101 rule = get_object_or_404(SearchRule, pk=rule_pk, community_list=clist)
102 rule.delete()
103
104 return HttpResponseRedirect("")
105
106 rules = clist.searchrule_set.all() if clist.pk is not None else []
107 for r in rules:
108 r.matching_documents_count = docs_matching_community_list_rule(r).count()
109
110 empty_rule_forms = { rule_type: SearchRuleForm(clist, rule_type) for rule_type, _ in SearchRule.RULE_TYPES }
111
112 total_count = docs_tracked_by_community_list(clist).count()
113
114 all_forms = [f for f in [rule_type_form, rule_form, add_doc_form, *empty_rule_forms.values()]
115 if f is not None]
116 return render(request, 'community/manage_list.html', {
117 'clist': clist,
118 'rules': rules,
119 'individually_added': clist.added_docs.all() if clist.pk is not None else [],
120 'rule_type_form': rule_type_form,
121 'rule_form': rule_form,
122 'empty_rule_forms': empty_rule_forms,
123 'total_count': total_count,
124 'add_doc_form': add_doc_form,
125 'all_forms': all_forms,
126 })
127
128
129 @login_required
130 def track_document(request, name, username=None, acronym=None):
131 doc = get_object_or_404(Document, docalias__name=name)
132
133 if request.method == "POST":
134 clist = lookup_community_list(username, acronym)
135 if not can_manage_community_list(request.user, clist):
136 permission_denied(request, "You do not have permission to access this view")
137
138 if clist.pk is None:
139 clist.save()
140
141 if not doc in clist.added_docs.all():
142 clist.added_docs.add(doc)
143
144 if request.is_ajax():
145 return HttpResponse(json.dumps({ 'success': True }), content_type='application/json')
146 else:
147 return HttpResponseRedirect(clist.get_absolute_url())
148
149 return render(request, "community/track_document.html", {
150 "name": doc.name,
151 })
152
153 @login_required
154 def untrack_document(request, name, username=None, acronym=None):
155 doc = get_object_or_404(Document, docalias__name=name)
156 clist = lookup_community_list(username, acronym)
157 if not can_manage_community_list(request.user, clist):
158 permission_denied(request, "You do not have permission to access this view")
159
160 if request.method == "POST":
161 if clist.pk is not None:
162 clist.added_docs.remove(doc)
163
164 if request.is_ajax():
165 return HttpResponse(json.dumps({ 'success': True }), content_type='application/json')
166 else:
167 return HttpResponseRedirect(clist.get_absolute_url())
168
169 return render(request, "community/untrack_document.html", {
170 "name": doc.name,
171 })
172
173
174 def export_to_csv(request, username=None, acronym=None, group_type=None):
175 clist = lookup_community_list(username, acronym)
176
177 response = HttpResponse(content_type='text/csv')
178
179 if clist.group:
180 filename = "%s-draft-list.csv" % clist.group.acronym
181 else:
182 filename = "draft-list.csv"
183
184 response['Content-Disposition'] = 'attachment; filename=%s' % filename
185
186 writer = csv.writer(response, dialect=csv.excel, delimiter=str(','))
187
188 header = [
189 "Name",
190 "Title",
191 "Date of latest revision",
192 "Status in the IETF process",
193 "Associated group",
194 "Associated AD",
195 "Date of latest change",
196 ]
197 writer.writerow(header)
198
199 docs = docs_tracked_by_community_list(clist).select_related('type', 'group', 'ad')
200 for doc in docs.prefetch_related("states", "tags"):
201 row = []
202 row.append(doc.name)
203 row.append(doc.title)
204 e = doc.latest_event(type='new_revision')
205 row.append(e.time.strftime("%Y-%m-%d") if e else "")
206 row.append(strip_tags(doc.friendly_state()))
207 row.append(doc.group.acronym if doc.group else "")
208 row.append(str(doc.ad) if doc.ad else "")
209 e = doc.latest_event()
210 row.append(e.time.strftime("%Y-%m-%d") if e else "")
211 writer.writerow([v.encode("utf-8") for v in row])
212
213 return response
214
215 def feed(request, username=None, acronym=None, group_type=None):
216 clist = lookup_community_list(username, acronym)
217
218 significant = request.GET.get('significant', '') == '1'
219
220 documents = docs_tracked_by_community_list(clist).values_list('pk', flat=True)
221 since = datetime.datetime.now() - datetime.timedelta(days=14)
222
223 events = DocEvent.objects.filter(
224 doc__id__in=documents,
225 time__gte=since,
226 ).distinct().order_by('-time', '-id').select_related("doc")
227
228 if significant:
229 events = events.filter(type="changed_state", statedocevent__state__in=list(states_of_significant_change()))
230
231 host = request.get_host()
232 feed_url = 'https://%s%s' % (host, request.get_full_path())
233 feed_id = uuid.uuid5(uuid.NAMESPACE_URL, str(feed_url))
234 title = '%s RSS Feed' % clist.long_name()
235 if significant:
236 subtitle = 'Significant document changes'
237 else:
238 subtitle = 'Document changes'
239
240 return render(request, 'community/atom.xml', {
241 'clist': clist,
242 'entries': events[:50],
243 'title': title,
244 'subtitle': subtitle,
245 'id': feed_id.urn,
246 'updated': datetime.datetime.now(),
247 }, content_type='text/xml')
248
249
250 @login_required
251 def subscription(request, username=None, acronym=None, group_type=None):
252 clist = lookup_community_list(username, acronym)
253 if clist.pk is None:
254 raise Http404
255
256 existing_subscriptions = EmailSubscription.objects.filter(community_list=clist, email__person__user=request.user)
257
258 if request.method == 'POST':
259 action = request.POST.get("action")
260 if action == "subscribe":
261 form = SubscriptionForm(request.user, clist, request.POST)
262 if form.is_valid():
263 subscription = form.save(commit=False)
264 subscription.community_list = clist
265 subscription.save()
266
267 return HttpResponseRedirect("")
268
269 elif action == "unsubscribe":
270 existing_subscriptions.filter(pk=request.POST.get("subscription_id")).delete()
271
272 return HttpResponseRedirect("")
273 else:
274 form = SubscriptionForm(request.user, clist)
275
276 return render(request, 'community/subscription.html', {
277 'clist': clist,
278 'form': form,
279 'existing_subscriptions': existing_subscriptions,
280 })
281
[end of ietf/community/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ietf/community/views.py b/ietf/community/views.py
--- a/ietf/community/views.py
+++ b/ietf/community/views.py
@@ -208,7 +208,7 @@
row.append(str(doc.ad) if doc.ad else "")
e = doc.latest_event()
row.append(e.time.strftime("%Y-%m-%d") if e else "")
- writer.writerow([v.encode("utf-8") for v in row])
+ writer.writerow(row)
return response
| {"golden_diff": "diff --git a/ietf/community/views.py b/ietf/community/views.py\n--- a/ietf/community/views.py\n+++ b/ietf/community/views.py\n@@ -208,7 +208,7 @@\n row.append(str(doc.ad) if doc.ad else \"\")\n e = doc.latest_event()\n row.append(e.time.strftime(\"%Y-%m-%d\") if e else \"\")\n- writer.writerow([v.encode(\"utf-8\") for v in row])\n+ writer.writerow(row)\n \n return response\n", "issue": "cvs export of group documents is dumping b'' strings\nFrom Tim Wicinski:\r\n>FYI\r\n>\r\n>So I went and dumped the WG documents via the CSV link:\r\n>\r\n>https://datatracker.ietf.org/group/dnsop/documents/csv/\r\n>\r\n>Then I imported them into the googles sheets.\r\n>The CSV file has all strings as \"b'<string>'\" which\r\n>makes the import needing a bit of cleanup. \r\n>\r\n>tim\r\n\r\ncc @moonshiner\r\n\n", "before_files": [{"content": "# Copyright The IETF Trust 2012-2020, All Rights Reserved\n# -*- coding: utf-8 -*-\n\n\nimport csv\nimport datetime\nimport json\nimport uuid\n\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.html import strip_tags\n\nimport debug # pyflakes:ignore\n\nfrom ietf.community.models import SearchRule, EmailSubscription\nfrom ietf.community.forms import SearchRuleTypeForm, SearchRuleForm, AddDocumentsForm, SubscriptionForm\nfrom ietf.community.utils import lookup_community_list, can_manage_community_list\nfrom ietf.community.utils import docs_tracked_by_community_list, docs_matching_community_list_rule\nfrom ietf.community.utils import states_of_significant_change, reset_name_contains_index_for_rule\nfrom ietf.doc.models import DocEvent, Document\nfrom ietf.doc.utils_search import prepare_document_table\nfrom ietf.utils.response import permission_denied\n\ndef view_list(request, username=None):\n clist = lookup_community_list(username)\n\n docs = docs_tracked_by_community_list(clist)\n docs, meta = prepare_document_table(request, docs, request.GET)\n\n subscribed = request.user.is_authenticated and EmailSubscription.objects.filter(community_list=clist, email__person__user=request.user)\n\n return render(request, 'community/view_list.html', {\n 'clist': clist,\n 'docs': docs,\n 'meta': meta,\n 'can_manage_list': can_manage_community_list(request.user, clist),\n 'subscribed': subscribed,\n })\n\n@login_required\ndef manage_list(request, username=None, acronym=None, group_type=None):\n # we need to be a bit careful because clist may not exist in the\n # database so we can't call related stuff on it yet\n clist = lookup_community_list(username, acronym)\n\n if not can_manage_community_list(request.user, clist):\n permission_denied(request, \"You do not have permission to access this view\")\n\n action = request.POST.get('action')\n\n if request.method == 'POST' and action == 'add_documents':\n add_doc_form = AddDocumentsForm(request.POST)\n if add_doc_form.is_valid():\n if clist.pk is None:\n clist.save()\n\n for d in add_doc_form.cleaned_data['documents']:\n if not d in clist.added_docs.all():\n clist.added_docs.add(d)\n\n return HttpResponseRedirect(\"\")\n else:\n add_doc_form = AddDocumentsForm()\n\n if request.method == 'POST' and action == 'remove_document':\n document_id = request.POST.get('document')\n if clist.pk is not None and document_id:\n document = get_object_or_404(clist.added_docs, id=document_id)\n clist.added_docs.remove(document)\n\n return HttpResponseRedirect(\"\")\n\n rule_form = None\n 
if request.method == 'POST' and action == 'add_rule':\n rule_type_form = SearchRuleTypeForm(request.POST)\n if rule_type_form.is_valid():\n rule_type = rule_type_form.cleaned_data['rule_type']\n\n if rule_type:\n rule_form = SearchRuleForm(clist, rule_type, request.POST)\n if rule_form.is_valid():\n if clist.pk is None:\n clist.save()\n\n rule = rule_form.save(commit=False)\n rule.community_list = clist\n rule.rule_type = rule_type\n rule.save()\n if rule.rule_type == \"name_contains\":\n reset_name_contains_index_for_rule(rule)\n\n return HttpResponseRedirect(\"\")\n else:\n rule_type_form = SearchRuleTypeForm()\n\n if request.method == 'POST' and action == 'remove_rule':\n rule_pk = request.POST.get('rule')\n if clist.pk is not None and rule_pk:\n rule = get_object_or_404(SearchRule, pk=rule_pk, community_list=clist)\n rule.delete()\n\n return HttpResponseRedirect(\"\")\n\n rules = clist.searchrule_set.all() if clist.pk is not None else []\n for r in rules:\n r.matching_documents_count = docs_matching_community_list_rule(r).count()\n\n empty_rule_forms = { rule_type: SearchRuleForm(clist, rule_type) for rule_type, _ in SearchRule.RULE_TYPES }\n\n total_count = docs_tracked_by_community_list(clist).count()\n\n all_forms = [f for f in [rule_type_form, rule_form, add_doc_form, *empty_rule_forms.values()]\n if f is not None]\n return render(request, 'community/manage_list.html', {\n 'clist': clist,\n 'rules': rules,\n 'individually_added': clist.added_docs.all() if clist.pk is not None else [],\n 'rule_type_form': rule_type_form,\n 'rule_form': rule_form,\n 'empty_rule_forms': empty_rule_forms,\n 'total_count': total_count,\n 'add_doc_form': add_doc_form,\n 'all_forms': all_forms,\n })\n\n\n@login_required\ndef track_document(request, name, username=None, acronym=None):\n doc = get_object_or_404(Document, docalias__name=name)\n\n if request.method == \"POST\":\n clist = lookup_community_list(username, acronym)\n if not can_manage_community_list(request.user, clist):\n permission_denied(request, \"You do not have permission to access this view\")\n\n if clist.pk is None:\n clist.save()\n\n if not doc in clist.added_docs.all():\n clist.added_docs.add(doc)\n\n if request.is_ajax():\n return HttpResponse(json.dumps({ 'success': True }), content_type='application/json')\n else:\n return HttpResponseRedirect(clist.get_absolute_url())\n\n return render(request, \"community/track_document.html\", {\n \"name\": doc.name,\n })\n\n@login_required\ndef untrack_document(request, name, username=None, acronym=None):\n doc = get_object_or_404(Document, docalias__name=name)\n clist = lookup_community_list(username, acronym)\n if not can_manage_community_list(request.user, clist):\n permission_denied(request, \"You do not have permission to access this view\")\n\n if request.method == \"POST\":\n if clist.pk is not None:\n clist.added_docs.remove(doc)\n\n if request.is_ajax():\n return HttpResponse(json.dumps({ 'success': True }), content_type='application/json')\n else:\n return HttpResponseRedirect(clist.get_absolute_url())\n\n return render(request, \"community/untrack_document.html\", {\n \"name\": doc.name,\n })\n\n\ndef export_to_csv(request, username=None, acronym=None, group_type=None):\n clist = lookup_community_list(username, acronym)\n\n response = HttpResponse(content_type='text/csv')\n\n if clist.group:\n filename = \"%s-draft-list.csv\" % clist.group.acronym\n else:\n filename = \"draft-list.csv\"\n\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n\n writer = 
csv.writer(response, dialect=csv.excel, delimiter=str(','))\n\n header = [\n \"Name\",\n \"Title\",\n \"Date of latest revision\",\n \"Status in the IETF process\",\n \"Associated group\",\n \"Associated AD\",\n \"Date of latest change\",\n ]\n writer.writerow(header)\n\n docs = docs_tracked_by_community_list(clist).select_related('type', 'group', 'ad')\n for doc in docs.prefetch_related(\"states\", \"tags\"):\n row = []\n row.append(doc.name)\n row.append(doc.title)\n e = doc.latest_event(type='new_revision')\n row.append(e.time.strftime(\"%Y-%m-%d\") if e else \"\")\n row.append(strip_tags(doc.friendly_state()))\n row.append(doc.group.acronym if doc.group else \"\")\n row.append(str(doc.ad) if doc.ad else \"\")\n e = doc.latest_event()\n row.append(e.time.strftime(\"%Y-%m-%d\") if e else \"\")\n writer.writerow([v.encode(\"utf-8\") for v in row])\n\n return response\n\ndef feed(request, username=None, acronym=None, group_type=None):\n clist = lookup_community_list(username, acronym)\n\n significant = request.GET.get('significant', '') == '1'\n\n documents = docs_tracked_by_community_list(clist).values_list('pk', flat=True)\n since = datetime.datetime.now() - datetime.timedelta(days=14)\n\n events = DocEvent.objects.filter(\n doc__id__in=documents,\n time__gte=since,\n ).distinct().order_by('-time', '-id').select_related(\"doc\")\n\n if significant:\n events = events.filter(type=\"changed_state\", statedocevent__state__in=list(states_of_significant_change()))\n\n host = request.get_host()\n feed_url = 'https://%s%s' % (host, request.get_full_path())\n feed_id = uuid.uuid5(uuid.NAMESPACE_URL, str(feed_url))\n title = '%s RSS Feed' % clist.long_name()\n if significant:\n subtitle = 'Significant document changes'\n else:\n subtitle = 'Document changes'\n\n return render(request, 'community/atom.xml', {\n 'clist': clist,\n 'entries': events[:50],\n 'title': title,\n 'subtitle': subtitle,\n 'id': feed_id.urn,\n 'updated': datetime.datetime.now(),\n }, content_type='text/xml')\n\n\n@login_required\ndef subscription(request, username=None, acronym=None, group_type=None):\n clist = lookup_community_list(username, acronym)\n if clist.pk is None:\n raise Http404\n\n existing_subscriptions = EmailSubscription.objects.filter(community_list=clist, email__person__user=request.user)\n\n if request.method == 'POST':\n action = request.POST.get(\"action\")\n if action == \"subscribe\":\n form = SubscriptionForm(request.user, clist, request.POST)\n if form.is_valid():\n subscription = form.save(commit=False)\n subscription.community_list = clist\n subscription.save()\n\n return HttpResponseRedirect(\"\")\n\n elif action == \"unsubscribe\":\n existing_subscriptions.filter(pk=request.POST.get(\"subscription_id\")).delete()\n\n return HttpResponseRedirect(\"\")\n else:\n form = SubscriptionForm(request.user, clist)\n\n return render(request, 'community/subscription.html', {\n 'clist': clist,\n 'form': form,\n 'existing_subscriptions': existing_subscriptions,\n })\n", "path": "ietf/community/views.py"}]} | 3,656 | 110 |
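The golden diff in the record above simply passes `row` straight to `writer.writerow()`: Python 3's `csv` module writes text, and encoding each value to `bytes` first makes the writer fall back to `str(bytes)`, which is exactly where the `b'...'` strings in the exported CSV come from. A minimal standalone sketch of the difference (the row values here are made up for illustration and are not taken from the repository):

```python
import csv
import io

buf = io.StringIO()
writer = csv.writer(buf)

# Old behaviour: encoding every value first makes csv stringify the bytes
# objects, producing b'...'-wrapped cells in the output.
writer.writerow([v.encode("utf-8") for v in ["draft-example", "A title"]])

# Fixed behaviour: pass str values straight through.
writer.writerow(["draft-example", "A title"])

print(buf.getvalue())
# b'draft-example',b'A title'
# draft-example,A title
```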
gh_patches_debug_10303 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-41 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Invalid argument: '--cookie-name=jupyter-hub-token-MYUSERNAME'
I can't run cells in Python notebooks; I get a popup saying `The kernel appears to have died`.
Here's the log from running jupyterhub on the master branch:
```
[IPKernelApp] CRITICAL | Bad config encountered during initialization:
[IPKernelApp] CRITICAL | Invalid argument: '--cookie-name=jupyter-hub-token-ben'
2014-09-14 20:53:59.865 [SingleUserNotebookApp] WARNING | KernelRestarter: restart failed
2014-09-14 20:53:59.866 [SingleUserNotebookApp] WARNING | Kernel ec8afdad-2a1e-47f2-8202-d3644832c13e died, removing from map.
ERROR:root:kernel ec8afdad-2a1e-47f2-8202-d3644832c13e restarted failed!
```
</issue>
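For context on the traceback quoted above: the single-user app registers hub-specific command-line aliases such as `--cookie-name` (see the `aliases` dict in the code below), and in the IPython 2.x notebook server those leftover arguments appear to be forwarded to every kernel it launches, so `IPKernelApp` aborts on a flag it does not define. A rough way to see that the flag alone is enough to kill a kernel — this is an illustrative sketch that assumes an IPython 2.x-era `ipython` executable on the PATH; the expected output is copied from the log above, while the command itself is an assumption:

```python
# Hypothetical reproduction sketch: launch a bare kernel with the offending flag.
import subprocess

proc = subprocess.run(
    ["ipython", "kernel", "--cookie-name=jupyter-hub-token-ben"],
    capture_output=True,
    text=True,
)
print(proc.stderr)
# [IPKernelApp] CRITICAL | Bad config encountered during initialization:
# [IPKernelApp] CRITICAL | Invalid argument: '--cookie-name=jupyter-hub-token-ben'
```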
<code>
[start of jupyterhub/singleuserapp.py]
1 #!/usr/bin/env python
2 """Extend regular notebook server to be aware of multiuser things."""
3
4 # Copyright (c) Jupyter Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 import os
8
9 import requests
10
11 from tornado import ioloop
12 from tornado import web
13
14 from IPython.utils.traitlets import Unicode
15
16 from IPython.html.notebookapp import NotebookApp
17
18 from IPython.html.utils import url_path_join
19
20
21 from distutils.version import LooseVersion as V
22
23 import IPython
24 if V(IPython.__version__) < V('2.2'):
25 raise ImportError("JupyterHub Requires IPython >= 2.2, found %s" % IPython.__version__)
26
27 # Define two methods to attach to AuthenticatedHandler,
28 # which authenticate via the central auth server.
29
30
31 def verify_token(self, token):
32 """monkeypatch method for token verification"""
33 token_cache = self.settings['token_cache']
34 if token in token_cache:
35 # we've seen this token before, don't ask upstream again
36 return token_cache[token]
37
38 hub_api_url = self.settings['hub_api_url']
39 hub_api_key = self.settings['hub_api_key']
40 r = requests.get(url_path_join(
41 hub_api_url, "authorizations", token,
42 ),
43 headers = {'Authorization' : 'token %s' % hub_api_key}
44 )
45 if r.status_code == 404:
46 data = {'user' : ''}
47 else:
48 r.raise_for_status()
49 data = r.json()
50 token_cache[token] = data
51 return data
52
53
54 def get_current_user(self):
55 """alternative get_current_user to query the central server"""
56 my_user = self.settings['user']
57 token = self.get_cookie(self.cookie_name, '')
58 if token:
59 auth_data = self.verify_token(token)
60 if not auth_data:
61 # treat invalid token the same as no token
62 return None
63 user = auth_data['user']
64 if user == my_user:
65 return user
66 else:
67 return None
68 else:
69 self.log.debug("No token cookie")
70 return None
71
72
73 # register new hub related command-line aliases
74 aliases = NotebookApp.aliases.get_default_value()
75 aliases.update({
76 'user' : 'SingleUserNotebookApp.user',
77 'cookie-name': 'SingleUserNotebookApp.cookie_name',
78 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
79 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
80 'base-url': 'SingleUserNotebookApp.base_url',
81 })
82
83
84 class SingleUserNotebookApp(NotebookApp):
85 """A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
86 user = Unicode(config=True)
87 cookie_name = Unicode(config=True)
88 hub_prefix = Unicode(config=True)
89 hub_api_url = Unicode(config=True)
90 aliases = aliases
91 open_browser = False
92
93 def _confirm_exit(self):
94 # disable the exit confirmation for background notebook processes
95 ioloop.IOLoop.instance().stop()
96
97 def init_webapp(self):
98 # monkeypatch authentication to use the hub
99 from IPython.html.base.handlers import AuthenticatedHandler
100 AuthenticatedHandler.verify_token = verify_token
101 AuthenticatedHandler.get_current_user = get_current_user
102
103 # load the hub related settings into the tornado settings dict
104 env = os.environ
105 s = self.webapp_settings
106 s['token_cache'] = {}
107 s['user'] = self.user
108 s['hub_api_key'] = env.pop('JPY_API_TOKEN', '')
109 s['cookie_secret'] = env.pop('JPY_COOKIE_SECRET', '')
110 s['cookie_name'] = self.cookie_name
111 s['login_url'] = url_path_join(self.hub_prefix, 'login')
112 s['hub_api_url'] = self.hub_api_url
113 super(SingleUserNotebookApp, self).init_webapp()
114
115
116 def main():
117 return SingleUserNotebookApp.launch_instance()
118
119
120 if __name__ == "__main__":
121 main()
122
[end of jupyterhub/singleuserapp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jupyterhub/singleuserapp.py b/jupyterhub/singleuserapp.py
--- a/jupyterhub/singleuserapp.py
+++ b/jupyterhub/singleuserapp.py
@@ -94,6 +94,11 @@
# disable the exit confirmation for background notebook processes
ioloop.IOLoop.instance().stop()
+ def init_kernel_argv(self):
+ """construct the kernel arguments"""
+ # FIXME: This is 2.x-compat, remove when 3.x is requred
+ self.kernel_argv = ["--profile-dir", self.profile_dir.location]
+
def init_webapp(self):
# monkeypatch authentication to use the hub
from IPython.html.base.handlers import AuthenticatedHandler
| {"golden_diff": "diff --git a/jupyterhub/singleuserapp.py b/jupyterhub/singleuserapp.py\n--- a/jupyterhub/singleuserapp.py\n+++ b/jupyterhub/singleuserapp.py\n@@ -94,6 +94,11 @@\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n \n+ def init_kernel_argv(self):\n+ \"\"\"construct the kernel arguments\"\"\"\n+ # FIXME: This is 2.x-compat, remove when 3.x is requred\n+ self.kernel_argv = [\"--profile-dir\", self.profile_dir.location]\n+ \n def init_webapp(self):\n # monkeypatch authentication to use the hub\n from IPython.html.base.handlers import AuthenticatedHandler\n", "issue": "Invalid argument: '--cookie-name=jupyter-hub-token-MYUSERNAME'\nI can't run cells in py notebooks, I got a popup `The kernel appears to have died`\n\nHere's the log running jupyterhub from master branch\n\n```\n[IPKernelApp] CRITICAL | Bad config encountered during initialization:\n[IPKernelApp] CRITICAL | Invalid argument: '--cookie-name=jupyter-hub-token-ben'\n2014-09-14 20:53:59.865 [SingleUserNotebookApp] WARNING | KernelRestarter: restart failed\n2014-09-14 20:53:59.866 [SingleUserNotebookApp] WARNING | Kernel ec8afdad-2a1e-47f2-8202-d3644832c13e died, removing from map.\nERROR:root:kernel ec8afdad-2a1e-47f2-8202-d3644832c13e restarted failed!\n\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\n\nimport requests\n\nfrom tornado import ioloop\nfrom tornado import web\n\nfrom IPython.utils.traitlets import Unicode\n\nfrom IPython.html.notebookapp import NotebookApp\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('2.2'):\n raise ImportError(\"JupyterHub Requires IPython >= 2.2, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\n\ndef verify_token(self, token):\n \"\"\"monkeypatch method for token verification\"\"\"\n token_cache = self.settings['token_cache']\n if token in token_cache:\n # we've seen this token before, don't ask upstream again\n return token_cache[token]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations\", token,\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key}\n )\n if r.status_code == 404:\n data = {'user' : ''}\n else:\n r.raise_for_status()\n data = r.json()\n token_cache[token] = data\n return data\n\n\ndef get_current_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n my_user = self.settings['user']\n token = self.get_cookie(self.cookie_name, '')\n if token:\n auth_data = self.verify_token(token)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['user']\n if user == my_user:\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\n# register new hub related command-line aliases\naliases = NotebookApp.aliases.get_default_value()\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 
'SingleUserNotebookApp.base_url',\n})\n\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = Unicode(config=True)\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n \n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n \n def init_webapp(self):\n # monkeypatch authentication to use the hub\n from IPython.html.base.handlers import AuthenticatedHandler\n AuthenticatedHandler.verify_token = verify_token\n AuthenticatedHandler.get_current_user = get_current_user\n \n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.webapp_settings\n s['token_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN', '')\n s['cookie_secret'] = env.pop('JPY_COOKIE_SECRET', '')\n s['cookie_name'] = self.cookie_name\n s['login_url'] = url_path_join(self.hub_prefix, 'login')\n s['hub_api_url'] = self.hub_api_url\n super(SingleUserNotebookApp, self).init_webapp()\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuserapp.py"}]} | 1,912 | 161 |
gh_patches_debug_12386 | rasdani/github-patches | git_diff | tensorflow__addons-1245 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
poincare_test.py is not run by bazel
**Describe the bug**
This time it's just that the test is not declared in the BUILD file.
**Code to reproduce the issue**
Provide a reproducible test case that is the bare minimum necessary to generate the problem.
```
bazel test //tensorflow_addons/layers:poincare_test.py
```
```
INFO: Writing tracer profile to '/home/gdemarmi/.cache/bazel/_bazel_gdemarmi/c0c8233ef0c2491af30539fc61a9ec68/command.profile.gz'
ERROR: Skipping '//tensorflow_addons/layers:poincare_test.py': no such target '//tensorflow_addons/layers:poincare_test.py': target 'poincare_test.py' not declared in package 'tensorflow_addons/layers'; however, a source file of this name exists. (Perhaps add 'exports_files(["poincare_test.py"])' to tensorflow_addons/layers/BUILD?) defined by /mnt/c/Users/gdemarmi/Desktop/projects/addons/tensorflow_addons/layers/BUILD
ERROR: no such target '//tensorflow_addons/layers:poincare_test.py': target 'poincare_test.py' not declared in package 'tensorflow_addons/layers'; however, a source file of this name exists. (Perhaps add 'exports_files(["poincare_test.py"])' to tensorflow_addons/layers/BUILD?) defined by /mnt/c/Users/gdemarmi/Desktop/projects/addons/tensorflow_addons/layers/BUILD
INFO: Elapsed time: 0.372s
INFO: 0 processes.
FAILED: Build did NOT complete successfully (0 packages loaded)
FAILED: Build did NOT complete successfully (0 packages loaded)
```
**Other info / logs**
When actually running the tests in the file, we get this error:
```
(proof_of_concept_pytest) /mnt/c/Users/gdemarmi/Desktop/projects/addons $ pytest -v tensorflow_addons/layers/poincare_test.py
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/pep8.py:110: FutureWarning: Possible nested set at position 1
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
================================================================================== test session starts ==================================================================================
platform linux -- Python 3.7.5, pytest-5.0.1, py-1.8.0, pluggy-0.13.0 -- /home/gdemarmi/softwares/python/anaconda/bin/python
cachedir: .pytest_cache
rootdir: /mnt/c/Users/gdemarmi/Desktop/projects/addons
plugins: arraydiff-0.3, cov-2.7.1, doctestplus-0.2.0, flake8-1.0.4, forked-1.0.2, openfiles-0.4.0, pep8-1.0.6, remotedata-0.3.2, xdist-1.27.0, typeguard-2.7.1
collected 3 items
tensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::testPoincareNormalize PASSED [ 33%]
tensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::testPoincareNormalizeDimArray FAILED [ 66%]
tensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::test_session SKIPPED [100%]
======================================================================================= FAILURES ========================================================================================
__________________________________________________________________ PoincareNormalizeTest.testPoincareNormalizeDimArray __________________________________________________________________
self = <tensorflow_addons.layers.poincare_test.PoincareNormalizeTest testMethod=testPoincareNormalizeDimArray>, args = (), kwargs = {}
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
> f(self, *args, **kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/test_util.py:1111:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tensorflow_addons/layers/poincare_test.py:71: in testPoincareNormalizeDimArray
expected_output=outputs_expected,
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/test_util.py:1666: in decorated
result = f(self, *args, **kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/keras/testing_utils.py:126: in layer_test
layer = layer_cls(**kwargs)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:809: in wrapper
check_argument_types(memo)
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:670: in check_argument_types
raise exc from None
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:668: in check_argument_types
check_type(description, value, expected_type, memo)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
argname = 'argument "axis"', value = [1, 2], expected_type = <class 'int'>, memo = <typeguard._CallMemo object at 0x7f59a4229440>
def check_type(argname: str, value, expected_type, memo: Optional[_CallMemo] = None) -> None:
"""
Ensure that ``value`` matches ``expected_type``.
The types from the :mod:`typing` module do not support :func:`isinstance` or :func:`issubclass`
so a number of type specific checks are required. This function knows which checker to call
for which type.
:param argname: name of the argument to check; used for error messages
:param value: value to be checked against ``expected_type``
:param expected_type: a class or generic type instance
"""
if expected_type is Any:
return
if expected_type is None:
# Only happens on < 3.6
expected_type = type(None)
origin_type = getattr(expected_type, '__origin__', None)
if origin_type is not None:
checker_func = origin_type_checkers.get(origin_type)
if checker_func:
checker_func(argname, value, expected_type, memo)
else:
check_type(argname, value, origin_type, memo)
elif isclass(expected_type):
if issubclass(expected_type, Tuple):
check_tuple(argname, value, expected_type, memo)
elif issubclass(expected_type, Callable) and hasattr(expected_type, '__args__'):
# Needed on Python 3.5.0 to 3.5.2
check_callable(argname, value, expected_type, memo)
elif issubclass(expected_type, (float, complex)):
check_number(argname, value, expected_type)
elif _subclass_check_unions and issubclass(expected_type, Union):
check_union(argname, value, expected_type, memo)
elif isinstance(expected_type, TypeVar):
check_typevar(argname, value, expected_type, memo)
elif issubclass(expected_type, IO):
check_io(argname, value, expected_type)
elif issubclass(expected_type, dict) and hasattr(expected_type, '__annotations__'):
check_typed_dict(argname, value, expected_type, memo)
elif getattr(expected_type, '_is_protocol', False):
check_protocol(argname, value, expected_type)
else:
expected_type = (getattr(expected_type, '__extra__', None) or origin_type or
expected_type)
if expected_type is bytes:
# As per https://github.com/python/typing/issues/552
expected_type = (bytearray, bytes)
if not isinstance(value, expected_type):
raise TypeError(
'type of {} must be {}; got {} instead'.
> format(argname, qualified_name(expected_type), qualified_name(value)))
E TypeError: type of argument "axis" must be int; got list instead
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:598: TypeError
=================================================================================== warnings summary ====================================================================================
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/google/protobuf/descriptor.py:47
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/google/protobuf/descriptor.py:47: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working
from google.protobuf.pyext import _message
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working
from collections import Mapping, MutableMapping
tensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::testPoincareNormalize
/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/indexed_slices.py:348: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working
if not isinstance(values, collections.Sequence):
-- Docs: https://docs.pytest.org/en/latest/warnings.html
============================================================== 1 failed, 1 passed, 1 skipped, 4 warnings in 18.87 seconds ===============================================================
```
@rahulunair could you take a look? Thanks! :)
</issue>
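Two separate problems show up in this report: the test file is missing from the Bazel target list (fixing that would mean adding a `py_test` entry for `poincare_test.py` to `tensorflow_addons/layers/BUILD`, presumably alongside the existing layer test targets), and once the test does run, `typeguard`'s `@typechecked` decorator rejects the list-valued `axis` used by `testPoincareNormalizeDimArray` because the layer's annotation only allows `int`. The second failure can be reproduced without TensorFlow at all; the sketch below uses made-up function names and only assumes the `typeguard` package:

```python
from typing import List, Union

from typeguard import typechecked


@typechecked
def normalize_axis(axis: int = 1):
    # An `int` annotation makes typeguard raise at call time for a list value.
    return axis


@typechecked
def normalize_axis_relaxed(axis: Union[None, int, List[int]] = 1):
    # Widening the annotation, as the fix to the layer does, accepts both forms.
    return axis


print(normalize_axis_relaxed(axis=[1, 2]))  # [1, 2] -- accepted

try:
    normalize_axis(axis=[1, 2])
except TypeError as err:
    print(err)  # type of argument "axis" must be int; got list instead
```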
<code>
[start of tensorflow_addons/layers/poincare.py]
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implementing PoincareNormalize layer."""
16
17 import tensorflow as tf
18 from typeguard import typechecked
19
20
21 @tf.keras.utils.register_keras_serializable(package="Addons")
22 class PoincareNormalize(tf.keras.layers.Layer):
23 """Project into the Poincare ball with norm <= 1.0 - epsilon.
24
25 https://en.wikipedia.org/wiki/Poincare_ball_model
26
27 Used in Poincare Embeddings for Learning Hierarchical Representations
28 Maximilian Nickel, Douwe Kiela https://arxiv.org/pdf/1705.08039.pdf
29
30 For a 1-D tensor with `axis = 0`, computes
31
32 (x * (1 - epsilon)) / ||x|| if ||x|| > 1 - epsilon
33 output =
34 x otherwise
35
36 For `x` with more dimensions, independently normalizes each 1-D slice along
37 dimension `axis`.
38
39 Arguments:
40 axis: Axis along which to normalize. A scalar or a vector of integers.
41 epsilon: A small deviation from the edge of the unit sphere for
42 numerical stability.
43 """
44
45 @typechecked
46 def __init__(self, axis: int = 1, epsilon: float = 1e-5, **kwargs):
47 super().__init__(**kwargs)
48 self.axis = axis
49 self.epsilon = epsilon
50
51 def call(self, inputs):
52 x = tf.convert_to_tensor(inputs)
53 square_sum = tf.math.reduce_sum(tf.math.square(x), self.axis, keepdims=True)
54 x_inv_norm = tf.math.rsqrt(square_sum)
55 x_inv_norm = tf.math.minimum((1.0 - self.epsilon) * x_inv_norm, 1.0)
56 outputs = tf.math.multiply(x, x_inv_norm)
57 return outputs
58
59 def compute_output_shape(self, input_shape):
60 return input_shape
61
62 def get_config(self):
63 config = {"axis": self.axis, "epsilon": self.epsilon}
64 base_config = super().get_config()
65 return {**base_config, **config}
66
[end of tensorflow_addons/layers/poincare.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tensorflow_addons/layers/poincare.py b/tensorflow_addons/layers/poincare.py
--- a/tensorflow_addons/layers/poincare.py
+++ b/tensorflow_addons/layers/poincare.py
@@ -16,6 +16,7 @@
import tensorflow as tf
from typeguard import typechecked
+from typing import Union, List
@tf.keras.utils.register_keras_serializable(package="Addons")
@@ -43,7 +44,9 @@
"""
@typechecked
- def __init__(self, axis: int = 1, epsilon: float = 1e-5, **kwargs):
+ def __init__(
+ self, axis: Union[None, int, List[int]] = 1, epsilon: float = 1e-5, **kwargs
+ ):
super().__init__(**kwargs)
self.axis = axis
self.epsilon = epsilon
| {"golden_diff": "diff --git a/tensorflow_addons/layers/poincare.py b/tensorflow_addons/layers/poincare.py\n--- a/tensorflow_addons/layers/poincare.py\n+++ b/tensorflow_addons/layers/poincare.py\n@@ -16,6 +16,7 @@\n \n import tensorflow as tf\n from typeguard import typechecked\n+from typing import Union, List\n \n \n @tf.keras.utils.register_keras_serializable(package=\"Addons\")\n@@ -43,7 +44,9 @@\n \"\"\"\n \n @typechecked\n- def __init__(self, axis: int = 1, epsilon: float = 1e-5, **kwargs):\n+ def __init__(\n+ self, axis: Union[None, int, List[int]] = 1, epsilon: float = 1e-5, **kwargs\n+ ):\n super().__init__(**kwargs)\n self.axis = axis\n self.epsilon = epsilon\n", "issue": "poincare_test.py is not run by bazel\n**Describe the bug**\r\n\r\nThis time it's just that it's not declared in the BUILD.\r\n\r\n**Code to reproduce the issue**\r\n\r\nProvide a reproducible test case that is the bare minimum necessary to generate the problem.\r\n\r\n```\r\nbazel test //tensorflow_addons/layers:poincare_test.py\r\n```\r\n\r\n```\r\nINFO: Writing tracer profile to '/home/gdemarmi/.cache/bazel/_bazel_gdemarmi/c0c8233ef0c2491af30539fc61a9ec68/command.profile.gz'\r\nERROR: Skipping '//tensorflow_addons/layers:poincare_test.py': no such target '//tensorflow_addons/layers:poincare_test.py': target 'poincare_test.py' not declared in package 'tensorflow_addons/layers'; however, a source file of this name exists. (Perhaps add 'exports_files([\"poincare_test.py\"])' to tensorflow_addons/layers/BUILD?) defined by /mnt/c/Users/gdemarmi/Desktop/projects/addons/tensorflow_addons/layers/BUILD\r\nERROR: no such target '//tensorflow_addons/layers:poincare_test.py': target 'poincare_test.py' not declared in package 'tensorflow_addons/layers'; however, a source file of this name exists. (Perhaps add 'exports_files([\"poincare_test.py\"])' to tensorflow_addons/layers/BUILD?) 
defined by /mnt/c/Users/gdemarmi/Desktop/projects/addons/tensorflow_addons/layers/BUILD\r\nINFO: Elapsed time: 0.372s\r\nINFO: 0 processes.\r\nFAILED: Build did NOT complete successfully (0 packages loaded)\r\nFAILED: Build did NOT complete successfully (0 packages loaded)\r\n```\r\n\r\n**Other info / logs**\r\n\r\nWhen actually running the tests in the file, we get this error:\r\n\r\n```\r\n(proof_of_concept_pytest) /mnt/c/Users/gdemarmi/Desktop/projects/addons $ pytest -v tensorflow_addons/layers/poincare_test.py\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/pep8.py:110: FutureWarning: Possible nested set at position 1\r\n EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')\r\n================================================================================== test session starts ==================================================================================\r\nplatform linux -- Python 3.7.5, pytest-5.0.1, py-1.8.0, pluggy-0.13.0 -- /home/gdemarmi/softwares/python/anaconda/bin/python\r\ncachedir: .pytest_cache\r\nrootdir: /mnt/c/Users/gdemarmi/Desktop/projects/addons\r\nplugins: arraydiff-0.3, cov-2.7.1, doctestplus-0.2.0, flake8-1.0.4, forked-1.0.2, openfiles-0.4.0, pep8-1.0.6, remotedata-0.3.2, xdist-1.27.0, typeguard-2.7.1\r\ncollected 3 items\r\n\r\ntensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::testPoincareNormalize PASSED [ 33%]\r\ntensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::testPoincareNormalizeDimArray FAILED [ 66%]\r\ntensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::test_session SKIPPED [100%]\r\n\r\n======================================================================================= FAILURES ========================================================================================\r\n__________________________________________________________________ PoincareNormalizeTest.testPoincareNormalizeDimArray __________________________________________________________________\r\n\r\nself = <tensorflow_addons.layers.poincare_test.PoincareNormalizeTest testMethod=testPoincareNormalizeDimArray>, args = (), kwargs = {}\r\n\r\n def decorated(self, *args, **kwargs):\r\n try:\r\n with context.graph_mode():\r\n with self.test_session(use_gpu=use_gpu, config=config):\r\n> f(self, *args, **kwargs)\r\n\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/test_util.py:1111:\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\ntensorflow_addons/layers/poincare_test.py:71: in testPoincareNormalizeDimArray\r\n expected_output=outputs_expected,\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/test_util.py:1666: in decorated\r\n result = f(self, *args, **kwargs)\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/keras/testing_utils.py:126: in layer_test\r\n layer = layer_cls(**kwargs)\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:809: in wrapper\r\n check_argument_types(memo)\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:670: in check_argument_types\r\n raise exc from None\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:668: in check_argument_types\r\n check_type(description, value, 
expected_type, memo)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\nargname = 'argument \"axis\"', value = [1, 2], expected_type = <class 'int'>, memo = <typeguard._CallMemo object at 0x7f59a4229440>\r\n\r\n def check_type(argname: str, value, expected_type, memo: Optional[_CallMemo] = None) -> None:\r\n \"\"\"\r\n Ensure that ``value`` matches ``expected_type``.\r\n\r\n The types from the :mod:`typing` module do not support :func:`isinstance` or :func:`issubclass`\r\n so a number of type specific checks are required. This function knows which checker to call\r\n for which type.\r\n\r\n :param argname: name of the argument to check; used for error messages\r\n :param value: value to be checked against ``expected_type``\r\n :param expected_type: a class or generic type instance\r\n\r\n \"\"\"\r\n if expected_type is Any:\r\n return\r\n\r\n if expected_type is None:\r\n # Only happens on < 3.6\r\n expected_type = type(None)\r\n\r\n origin_type = getattr(expected_type, '__origin__', None)\r\n if origin_type is not None:\r\n checker_func = origin_type_checkers.get(origin_type)\r\n if checker_func:\r\n checker_func(argname, value, expected_type, memo)\r\n else:\r\n check_type(argname, value, origin_type, memo)\r\n elif isclass(expected_type):\r\n if issubclass(expected_type, Tuple):\r\n check_tuple(argname, value, expected_type, memo)\r\n elif issubclass(expected_type, Callable) and hasattr(expected_type, '__args__'):\r\n # Needed on Python 3.5.0 to 3.5.2\r\n check_callable(argname, value, expected_type, memo)\r\n elif issubclass(expected_type, (float, complex)):\r\n check_number(argname, value, expected_type)\r\n elif _subclass_check_unions and issubclass(expected_type, Union):\r\n check_union(argname, value, expected_type, memo)\r\n elif isinstance(expected_type, TypeVar):\r\n check_typevar(argname, value, expected_type, memo)\r\n elif issubclass(expected_type, IO):\r\n check_io(argname, value, expected_type)\r\n elif issubclass(expected_type, dict) and hasattr(expected_type, '__annotations__'):\r\n check_typed_dict(argname, value, expected_type, memo)\r\n elif getattr(expected_type, '_is_protocol', False):\r\n check_protocol(argname, value, expected_type)\r\n else:\r\n expected_type = (getattr(expected_type, '__extra__', None) or origin_type or\r\n expected_type)\r\n\r\n if expected_type is bytes:\r\n # As per https://github.com/python/typing/issues/552\r\n expected_type = (bytearray, bytes)\r\n\r\n if not isinstance(value, expected_type):\r\n raise TypeError(\r\n 'type of {} must be {}; got {} instead'.\r\n> format(argname, qualified_name(expected_type), qualified_name(value)))\r\nE TypeError: type of argument \"axis\" must be int; got list instead\r\n\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/typeguard/__init__.py:598: TypeError\r\n=================================================================================== warnings summary ====================================================================================\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/google/protobuf/descriptor.py:47\r\n /home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/google/protobuf/descriptor.py:47: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\r\n from google.protobuf.pyext 
import _message\r\n\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1\r\n/home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1\r\n /home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/botocore/vendored/requests/packages/urllib3/_collections.py:1: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\r\n from collections import Mapping, MutableMapping\r\n\r\ntensorflow_addons/layers/poincare_test.py::PoincareNormalizeTest::testPoincareNormalize\r\n /home/gdemarmi/softwares/python/anaconda/lib/python3.7/site-packages/tensorflow_core/python/framework/indexed_slices.py:348: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\r\n if not isinstance(values, collections.Sequence):\r\n\r\n-- Docs: https://docs.pytest.org/en/latest/warnings.html\r\n============================================================== 1 failed, 1 passed, 1 skipped, 4 warnings in 18.87 seconds ===============================================================\r\n```\r\n\r\n @rahulunair could you take a look? Thanks! :)\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implementing PoincareNormalize layer.\"\"\"\n\nimport tensorflow as tf\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass PoincareNormalize(tf.keras.layers.Layer):\n \"\"\"Project into the Poincare ball with norm <= 1.0 - epsilon.\n\n https://en.wikipedia.org/wiki/Poincare_ball_model\n\n Used in Poincare Embeddings for Learning Hierarchical Representations\n Maximilian Nickel, Douwe Kiela https://arxiv.org/pdf/1705.08039.pdf\n\n For a 1-D tensor with `axis = 0`, computes\n\n (x * (1 - epsilon)) / ||x|| if ||x|| > 1 - epsilon\n output =\n x otherwise\n\n For `x` with more dimensions, independently normalizes each 1-D slice along\n dimension `axis`.\n\n Arguments:\n axis: Axis along which to normalize. 
A scalar or a vector of integers.\n epsilon: A small deviation from the edge of the unit sphere for\n numerical stability.\n \"\"\"\n\n @typechecked\n def __init__(self, axis: int = 1, epsilon: float = 1e-5, **kwargs):\n super().__init__(**kwargs)\n self.axis = axis\n self.epsilon = epsilon\n\n def call(self, inputs):\n x = tf.convert_to_tensor(inputs)\n square_sum = tf.math.reduce_sum(tf.math.square(x), self.axis, keepdims=True)\n x_inv_norm = tf.math.rsqrt(square_sum)\n x_inv_norm = tf.math.minimum((1.0 - self.epsilon) * x_inv_norm, 1.0)\n outputs = tf.math.multiply(x, x_inv_norm)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return input_shape\n\n def get_config(self):\n config = {\"axis\": self.axis, \"epsilon\": self.epsilon}\n base_config = super().get_config()\n return {**base_config, **config}\n", "path": "tensorflow_addons/layers/poincare.py"}]} | 3,734 | 213 |
gh_patches_debug_65582 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-1425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not auto add DistributedSampler for DDP training
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
In 0.72, even if we don't set a sampler, pytorch_lightning will not add a DistributedSampler for us.
### To Reproduce
The reason is that in pytorch, if we don't set a sampler, pytorch will add a default sampler for us.
In pytorch's dataloader.py:
```
if sampler is None: # give default samplers
if self._dataset_kind == _DatasetKind.Iterable:
# See NOTE [ Custom Samplers and IterableDataset ]
sampler = _InfiniteConstantSampler()
else: # map-style
if shuffle:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
```
But in pytorch_lightning we check whether the sampler is None to decide whether to add a sampler,
in data_loading.py, function auto_add_sampler:
```
no_sampler_added = dataloader.sampler is None
```
Because pytorch gives us a default sampler, which is not None, pytorch_lightning will not automatically add the DistributedSampler.
</issue>
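The premise above is easy to verify in isolation: a map-style `DataLoader` built without an explicit sampler already carries a `SequentialSampler` (or a `RandomSampler` when `shuffle=True`), so a `dataloader.sampler is None` guard can never fire. A small standalone sketch using plain PyTorch, no Lightning required:

```python
import torch
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset

dataset = TensorDataset(torch.arange(10))
loader = DataLoader(dataset)  # no sampler passed explicitly

print(loader.sampler)                                 # a SequentialSampler instance, not None
print(loader.sampler is None)                         # False -> the old guard never triggers
print(isinstance(loader.sampler, SequentialSampler))  # True
```

This is why the golden diff for this record drops the `no_sampler_added` check and keys the replacement sampler on `need_dist_sampler` alone.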
<code>
[start of pytorch_lightning/trainer/data_loading.py]
1 import warnings
2 from abc import ABC, abstractmethod
3 from typing import Union, List, Tuple, Callable
4
5 import torch.distributed as torch_distrib
6 from torch.utils.data import DataLoader
7 from torch.utils.data.distributed import DistributedSampler
8
9 from pytorch_lightning.core import LightningModule
10 from pytorch_lightning.utilities.exceptions import MisconfigurationException
11
12 try:
13 from apex import amp
14 except ImportError:
15 APEX_AVAILABLE = False
16 else:
17 APEX_AVAILABLE = True
18
19 try:
20 import torch_xla
21 import torch_xla.core.xla_model as xm
22 import torch_xla.distributed.xla_multiprocessing as xmp
23 except ImportError:
24 XLA_AVAILABLE = False
25 else:
26 XLA_AVAILABLE = True
27
28
29 def _has_len(dataloader: DataLoader) -> bool:
30 """ Checks if a given Dataloader has __len__ method implemented i.e. if
31 it is a finite dataloader or infinite dataloader """
32 try:
33 # try getting the length
34 if len(dataloader) == 0:
35 raise ValueError('Dataloader returned 0 length. Please make sure'
36 ' that your Dataloader atleast returns 1 batch')
37 return True
38 except TypeError:
39 return False
40
41
42 class TrainerDataLoadingMixin(ABC):
43
44 # this is just a summary on variables used in this abstract class,
45 # the proper values/initialisation should be done in child class
46 proc_rank: int
47 use_ddp: bool
48 use_ddp2: bool
49 shown_warnings: ...
50 val_check_interval: float
51 use_tpu: bool
52 tpu_local_core_rank: int
53 train_dataloader: DataLoader
54 num_training_batches: Union[int, float]
55 val_check_batch: ...
56 val_dataloaders: List[DataLoader]
57 num_val_batches: Union[int, float]
58 test_dataloaders: List[DataLoader]
59 num_test_batches: Union[int, float]
60 train_percent_check: float
61 val_percent_check: float
62 test_percent_check: float
63
64 @abstractmethod
65 def is_overriden(self, *args):
66 """Warning: this is just empty shell for code implemented in other class."""
67
68 def _percent_range_check(self, name: str) -> None:
69 value = getattr(self, name)
70 msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'
71 if name == 'val_check_interval':
72 msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'
73
74 if not 0. <= value <= 1.:
75 raise ValueError(msg)
76
77 def _worker_check(self, dataloader: DataLoader, name: str) -> None:
78 if isinstance(dataloader, DataLoader) and dataloader.num_workers <= 2:
79 warnings.warn(f'The dataloader, {name}, does not have many workers which may be a bottleneck.'
80 ' Consider increasing the value of the `num_workers` argument`'
81 ' in the `DataLoader` init to improve performance.')
82
83 def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:
84
85 # don't do anything if it's not a dataloader
86 if not isinstance(dataloader, DataLoader):
87 return dataloader
88
89 need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu
90 no_sampler_added = dataloader.sampler is None
91
92 if need_dist_sampler and no_sampler_added:
93
94 skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']
95
96 dl_args = {
97 k: v for k, v in dataloader.__dict__.items() if not k.startswith('_') and k not in skip_keys
98 }
99
100 if self.use_tpu:
101 sampler = DistributedSampler(
102 dataloader.dataset,
103 num_replicas=xm.xrt_world_size(),
104 rank=xm.get_ordinal()
105 )
106 else:
107 sampler = DistributedSampler(dataloader.dataset)
108
109 dl_args['sampler'] = sampler
110 dataloader = type(dataloader)(**dl_args)
111
112 return dataloader
113
114 def reset_train_dataloader(self, model: LightningModule) -> None:
115 """Resets the train dataloader and initialises required variables
116 (number of batches, when to validate, etc.).
117
118 Args:
119 model: The current `LightningModule`
120 """
121 self.train_dataloader = self.request_dataloader(model.train_dataloader)
122
123 self.num_training_batches = 0
124
125 # automatically add samplers
126 self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)
127
128 self._worker_check(self.train_dataloader, 'train dataloader')
129 self._percent_range_check('train_percent_check')
130
131 if not _has_len(self.train_dataloader):
132 self.num_training_batches = float('inf')
133 else:
134 # try getting the length
135 self.num_training_batches = len(self.train_dataloader)
136 self.num_training_batches = int(self.num_training_batches * self.train_percent_check)
137
138 # determine when to check validation
139 # if int passed in, val checks that often
140 # otherwise, it checks in [0, 1.0] % range of a training epoch
141 if isinstance(self.val_check_interval, int):
142 self.val_check_batch = self.val_check_interval
143 if self.val_check_batch > self.num_training_batches:
144 raise ValueError(
145 f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '
146 f'to the number of the training batches ({self.num_training_batches}). '
147 'If you want to disable validation set `val_percent_check` to 0.0 instead.')
148 else:
149 if not _has_len(self.train_dataloader):
150 if self.val_check_interval == 1.0:
151 self.val_check_batch = float('inf')
152 else:
153 raise MisconfigurationException(
154 'When using an infinite DataLoader (e.g. with an IterableDataset or when '
155 'DataLoader does not implement `__len__`) for `train_dataloader`, '
156 '`Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies '
157 'checking validation every k training batches.')
158 else:
159 self._percent_range_check('val_check_interval')
160
161 self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
162 self.val_check_batch = max(1, self.val_check_batch)
163
164 def _reset_eval_dataloader(self, model: LightningModule,
165 mode: str) -> Tuple[int, List[DataLoader]]:
166 """Generic method to reset a dataloader for evaluation.
167
168 Args:
169 model: The current `LightningModule`
170 mode: Either `'val'` or `'test'`
171
172 Returns:
173 Tuple (num_batches, dataloaders)
174 """
175 dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))
176
177 if not isinstance(dataloaders, list):
178 dataloaders = [dataloaders]
179
180 # add samplers
181 dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]
182
183 num_batches = 0
184
185 # determine number of batches
186 # datasets could be none, 1 or 2+
187 if len(dataloaders) != 0:
188 for i, dataloader in enumerate(dataloaders):
189 self._worker_check(dataloader, f'{mode} dataloader {i}')
190 if not _has_len(dataloader):
191 num_batches = float('inf')
192
193 percent_check = getattr(self, f'{mode}_percent_check')
194
195 if num_batches != float('inf'):
196 self._percent_range_check(f'{mode}_percent_check')
197
198 num_batches = sum(len(dataloader) for dataloader in dataloaders)
199 num_batches = int(num_batches * percent_check)
200 elif percent_check not in (0.0, 1.0):
201 raise MisconfigurationException(
202 'When using an infinite DataLoader (e.g. with an IterableDataset or when '
203 f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '
204 f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')
205 return num_batches, dataloaders
206
207 def reset_val_dataloader(self, model: LightningModule) -> None:
208 """Resets the validation dataloader and determines the number of batches.
209
210 Args:
211 model: The current `LightningModule`
212 """
213 if self.is_overriden('validation_step'):
214 self.num_val_batches, self.val_dataloaders =\
215 self._reset_eval_dataloader(model, 'val')
216
217 def reset_test_dataloader(self, model) -> None:
218 """Resets the validation dataloader and determines the number of batches.
219
220 Args:
221 model: The current `LightningModule`
222 """
223 if self.is_overriden('test_step'):
224 self.num_test_batches, self.test_dataloaders =\
225 self._reset_eval_dataloader(model, 'test')
226
227 def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:
228 """Handles downloading data in the GPU or TPU case.
229
230 Args:
231 dataloader_fx: The bound dataloader getter
232
233 Returns:
234 The dataloader
235 """
236 dataloader = dataloader_fx()
237
238 # get the function we'll use to get data
239 if self.use_ddp or self.use_ddp2:
240 # all processes wait until data download has happened
241 torch_distrib.barrier()
242
243 # data download/load on TPU
244 elif self.use_tpu and XLA_AVAILABLE:
245 # all processes wait until data download has happened
246 torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')
247
248 return dataloader
249
250 def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,
251 test_percent_check: float, overfit_pct: float) -> None:
252 """Use less data for debugging purposes
253 """
254 self.train_percent_check = train_percent_check
255 self.val_percent_check = val_percent_check
256 self.test_percent_check = test_percent_check
257 if overfit_pct > 0:
258 if overfit_pct > 1:
259 raise ValueError(
260 f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')
261
262 self.train_percent_check = overfit_pct
263 self.val_percent_check = overfit_pct
264 self.test_percent_check = overfit_pct
265
[end of pytorch_lightning/trainer/data_loading.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -87,9 +87,8 @@
return dataloader
need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu
- no_sampler_added = dataloader.sampler is None
- if need_dist_sampler and no_sampler_added:
+ if need_dist_sampler:
skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py\n--- a/pytorch_lightning/trainer/data_loading.py\n+++ b/pytorch_lightning/trainer/data_loading.py\n@@ -87,9 +87,8 @@\n return dataloader\n \n need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu\n- no_sampler_added = dataloader.sampler is None\n \n- if need_dist_sampler and no_sampler_added:\n+ if need_dist_sampler:\n \n skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']\n", "issue": "Not auto add DistributedSampler for DDP training\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\nin 0.72, even if we don't set sampler, pytorch_lightning will not add DistributedSampler for us.\r\n### To Reproduce\r\nthe reason is in pytorch, if we don't set sampler, pytorch will add a sampler for us.\r\nin pytorch's dataloader.py:\r\n```\r\n if sampler is None: # give default samplers\r\n if self._dataset_kind == _DatasetKind.Iterable:\r\n # See NOTE [ Custom Samplers and IterableDataset ]\r\n sampler = _InfiniteConstantSampler()\r\n else: # map-style\r\n if shuffle:\r\n sampler = RandomSampler(dataset)\r\n else:\r\n sampler = SequentialSampler(dataset)\r\n```\r\n\r\nbut in pytorch_lightning we check whether sampler is None to decide to add sampler \r\nin data_loading.py funciton auto_add_sampler:\r\n```\r\n no_sampler_added = dataloader.sampler is None\r\n```\r\n\r\nbecause pytorch have default sampler for us, which is not None, pytorch_lighting will not automatically add sampler.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "import warnings\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\ndef _has_len(dataloader: DataLoader) -> bool:\n \"\"\" Checks if a given Dataloader has __len__ method implemented i.e. if\n it is a finite dataloader or infinite dataloader \"\"\"\n try:\n # try getting the length\n if len(dataloader) == 0:\n raise ValueError('Dataloader returned 0 length. 
Please make sure'\n ' that your Dataloader atleast returns 1 batch')\n return True\n except TypeError:\n return False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n proc_rank: int\n use_ddp: bool\n use_ddp2: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: Union[int, float]\n test_dataloaders: List[DataLoader]\n num_test_batches: Union[int, float]\n train_percent_check: float\n val_percent_check: float\n test_percent_check: float\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def _percent_range_check(self, name: str) -> None:\n value = getattr(self, name)\n msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'\n if name == 'val_check_interval':\n msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'\n\n if not 0. <= value <= 1.:\n raise ValueError(msg)\n\n def _worker_check(self, dataloader: DataLoader, name: str) -> None:\n if isinstance(dataloader, DataLoader) and dataloader.num_workers <= 2:\n warnings.warn(f'The dataloader, {name}, does not have many workers which may be a bottleneck.'\n ' Consider increasing the value of the `num_workers` argument`'\n ' in the `DataLoader` init to improve performance.')\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n\n # don't do anything if it's not a dataloader\n if not isinstance(dataloader, DataLoader):\n return dataloader\n\n need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu\n no_sampler_added = dataloader.sampler is None\n\n if need_dist_sampler and no_sampler_added:\n\n skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']\n\n dl_args = {\n k: v for k, v in dataloader.__dict__.items() if not k.startswith('_') and k not in skip_keys\n }\n\n if self.use_tpu:\n sampler = DistributedSampler(\n dataloader.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal()\n )\n else:\n sampler = DistributedSampler(dataloader.dataset)\n\n dl_args['sampler'] = sampler\n dataloader = type(dataloader)(**dl_args)\n\n return dataloader\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self._worker_check(self.train_dataloader, 'train dataloader')\n self._percent_range_check('train_percent_check')\n\n if not _has_len(self.train_dataloader):\n self.num_training_batches = float('inf')\n else:\n # try getting the length\n self.num_training_batches = len(self.train_dataloader)\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if 
self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `val_percent_check` to 0.0 instead.')\n else:\n if not _has_len(self.train_dataloader):\n if self.val_check_interval == 1.0:\n self.val_check_batch = float('inf')\n else:\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n 'DataLoader does not implement `__len__`) for `train_dataloader`, '\n '`Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies '\n 'checking validation every k training batches.')\n else:\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(self, model: LightningModule,\n mode: str) -> Tuple[int, List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]\n\n num_batches = 0\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for i, dataloader in enumerate(dataloaders):\n self._worker_check(dataloader, f'{mode} dataloader {i}')\n if not _has_len(dataloader):\n num_batches = float('inf')\n\n percent_check = getattr(self, f'{mode}_percent_check')\n\n if num_batches != float('inf'):\n self._percent_range_check(f'{mode}_percent_check')\n\n num_batches = sum(len(dataloader) for dataloader in dataloaders)\n num_batches = int(num_batches * percent_check)\n elif percent_check not in (0.0, 1.0):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. 
with an IterableDataset or when '\n f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '\n f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')\n return num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('validation_step'):\n self.num_val_batches, self.val_dataloaders =\\\n self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('test_step'):\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n return dataloader\n\n def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,\n test_percent_check: float, overfit_pct: float) -> None:\n \"\"\"Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(\n f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}]} | 3,810 | 136 |
gh_patches_debug_60363 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-2598 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: intervention display depends on the zoom level
Hello, on 2.49.0, depending on the selected zoom level, some interventions appear and disappear. Dragging the map also triggers this behaviour:
https://user-images.githubusercontent.com/45095227/108825316-2719b200-75c3-11eb-8406-11d57bcd2531.mov
While I'm at it: I was not able to change the opacity of the linear features to make them more visible (see: https://github.com/GeotrekCE/Geotrek-admin/issues/2554)
</issue>
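A plausible reading of the golden diff shown later in this entry: `PolygonInterventionFilterMixin.filter()` receives a real list when driven by the city/district filters, but the bbox filter hands it a single `Polygon`, and the loop `for value in values` then iterates the polygon itself. In GeoDjango, iterating a polygon yields its rings, so the `intersects` lookup ends up testing the viewport boundary instead of its interior, which would explain interventions flickering as the map is zoomed or dragged. The sketch below is illustrative only; the bbox coordinates are invented and this is not repository code.

```
# Illustrative sketch (not repository code): why a bare Polygon must be
# wrapped in a list before "for value in values" is safe.
from django.contrib.gis.geos import Polygon

bbox = Polygon.from_bbox((1.0, 43.5, 1.5, 44.0))  # assumed example extent

for value in bbox:                # iterating the polygon itself...
    print(type(value).__name__)   # ...yields LinearRing objects (the boundary)

values = bbox if isinstance(bbox, list) else [bbox]  # the guard added by the fix
for value in values:
    print(type(value).__name__)   # Polygon
```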
<code>
[start of geotrek/maintenance/filters.py]
1 from django.db.models import Q
2 from django.conf import settings
3 from django.contrib.contenttypes.models import ContentType
4 from django.utils.translation import gettext_lazy as _
5 from django_filters import ChoiceFilter, MultipleChoiceFilter
6
7 from mapentity.filters import PolygonFilter, PythonPolygonFilter
8
9 from geotrek.core.models import Topology
10 from geotrek.authent.filters import StructureRelatedFilterSet
11 from geotrek.common.filters import RightFilter
12 from geotrek.zoning.filters import ZoningFilterSet
13 from geotrek.zoning.models import City, District
14
15 from .models import Intervention, Project
16
17 if 'geotrek.signage' in settings.INSTALLED_APPS:
18 from geotrek.signage.models import Blade
19
20
21 class PolygonInterventionFilterMixin(object):
22 def get_geom(self, value):
23 return value
24
25 def filter(self, qs, values):
26 if not values:
27 return qs
28 lookup = self.lookup_expr
29
30 blade_content_type = ContentType.objects.get_for_model(Blade)
31 topologies = []
32 for value in values:
33 topologies += Topology.objects.filter(**{'geom__%s' % lookup: self.get_geom(value)}).values_list('id', flat=True)
34 topologies_intervention = Intervention.objects.existing().filter(target_id__in=topologies).exclude(
35 target_type=blade_content_type).distinct('pk').values_list('id', flat=True)
36
37 interventions = list(topologies_intervention)
38 if 'geotrek.signage' in settings.INSTALLED_APPS:
39 blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))
40 blades_intervention = Intervention.objects.existing().filter(target_id__in=blades,
41 target_type=blade_content_type).values_list('id',
42 flat=True)
43 interventions.extend(blades_intervention)
44 if hasattr(self, 'lookup_queryset_in'):
45 lookup_queryset = self.lookup_queryset_in
46 else:
47 lookup_queryset = 'pk__in'
48 qs = qs.filter(**{'%s' % lookup_queryset: interventions})
49 return qs
50
51
52 class PolygonTopologyFilter(PolygonInterventionFilterMixin, PolygonFilter):
53 pass
54
55
56 class ProjectIntersectionFilterCity(PolygonInterventionFilterMixin, RightFilter):
57 model = City
58
59 def __init__(self, *args, **kwargs):
60 super(ProjectIntersectionFilterCity, self).__init__(*args, **kwargs)
61 self.lookup_expr = 'intersects'
62 self.lookup_queryset_in = 'interventions__in'
63
64 def get_geom(self, value):
65 return value.geom
66
67
68 class ProjectIntersectionFilterDistrict(PolygonInterventionFilterMixin, RightFilter):
69 model = District
70
71 def __init__(self, *args, **kwargs):
72 super(ProjectIntersectionFilterDistrict, self).__init__(*args, **kwargs)
73 self.lookup_expr = 'intersects'
74 self.lookup_queryset_in = 'interventions__in'
75
76 def get_geom(self, value):
77 return value.geom
78
79
80 class InterventionFilterSet(ZoningFilterSet, StructureRelatedFilterSet):
81 ON_CHOICES = (('infrastructure', _("Infrastructure")), ('signage', _("Signage")), ('blade', _("Blade")),
82 ('topology', _("Path")), ('trek', _("Trek")), ('poi', _("POI")), ('service', _("Service")),
83 ('trail', _("Trail")))
84 bbox = PolygonTopologyFilter(lookup_expr='intersects')
85 year = MultipleChoiceFilter(choices=Intervention.objects.year_choices(),
86 field_name='date', lookup_expr='year', label=_("Year"))
87 on = ChoiceFilter(field_name='target_type__model', choices=ON_CHOICES, label=_("On"), empty_label=_("On"))
88
89 class Meta(StructureRelatedFilterSet.Meta):
90 model = Intervention
91 fields = StructureRelatedFilterSet.Meta.fields + [
92 'status', 'type', 'stake', 'subcontracting', 'project', 'on',
93 ]
94
95
96 class ProjectFilterSet(StructureRelatedFilterSet):
97 bbox = PythonPolygonFilter(field_name='geom')
98 year = MultipleChoiceFilter(
99 label=_("Year of activity"), method='filter_year',
100 choices=lambda: Project.objects.year_choices() # Could change over time
101 )
102 city = ProjectIntersectionFilterCity(label=_('City'), required=False)
103 district = ProjectIntersectionFilterDistrict(label=_('District'), required=False)
104
105 class Meta(StructureRelatedFilterSet.Meta):
106 model = Project
107 fields = StructureRelatedFilterSet.Meta.fields + [
108 'year', 'type', 'domain', 'contractors', 'project_owner',
109 'project_manager', 'founders'
110 ]
111
112 def filter_year(self, qs, name, values):
113 q = Q()
114 for value in values:
115 q |= Q(begin_year__lte=value, end_year__gte=value)
116 return qs.filter(q)
117
[end of geotrek/maintenance/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geotrek/maintenance/filters.py b/geotrek/maintenance/filters.py
--- a/geotrek/maintenance/filters.py
+++ b/geotrek/maintenance/filters.py
@@ -25,6 +25,9 @@
def filter(self, qs, values):
if not values:
return qs
+ if not isinstance(values, list):
+ values = [values]
+
lookup = self.lookup_expr
blade_content_type = ContentType.objects.get_for_model(Blade)
| {"golden_diff": "diff --git a/geotrek/maintenance/filters.py b/geotrek/maintenance/filters.py\n--- a/geotrek/maintenance/filters.py\n+++ b/geotrek/maintenance/filters.py\n@@ -25,6 +25,9 @@\n def filter(self, qs, values):\n if not values:\n return qs\n+ if not isinstance(values, list):\n+ values = [values]\n+\n lookup = self.lookup_expr\n \n blade_content_type = ContentType.objects.get_for_model(Blade)\n", "issue": "Bug affichage intervention selon niveau de zoom\nBonjour, sur la 2.49.0 selon le niveau de zoom s\u00e9lectionn\u00e9, certaines interventions apparaissent et disparaissent. Faire glisser la carte provoque aussi ce comportement :\r\n\r\n\r\nhttps://user-images.githubusercontent.com/45095227/108825316-2719b200-75c3-11eb-8406-11d57bcd2531.mov\r\n\r\nJ'en profite pour dire que je n'ai pas pu changer l'opacit\u00e9 des lin\u00e9aires pour qu'ils soient plus visibles ( voir : https://github.com/GeotrekCE/Geotrek-admin/issues/2554)\n", "before_files": [{"content": "from django.db.models import Q\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import gettext_lazy as _\nfrom django_filters import ChoiceFilter, MultipleChoiceFilter\n\nfrom mapentity.filters import PolygonFilter, PythonPolygonFilter\n\nfrom geotrek.core.models import Topology\nfrom geotrek.authent.filters import StructureRelatedFilterSet\nfrom geotrek.common.filters import RightFilter\nfrom geotrek.zoning.filters import ZoningFilterSet\nfrom geotrek.zoning.models import City, District\n\nfrom .models import Intervention, Project\n\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n from geotrek.signage.models import Blade\n\n\nclass PolygonInterventionFilterMixin(object):\n def get_geom(self, value):\n return value\n\n def filter(self, qs, values):\n if not values:\n return qs\n lookup = self.lookup_expr\n\n blade_content_type = ContentType.objects.get_for_model(Blade)\n topologies = []\n for value in values:\n topologies += Topology.objects.filter(**{'geom__%s' % lookup: self.get_geom(value)}).values_list('id', flat=True)\n topologies_intervention = Intervention.objects.existing().filter(target_id__in=topologies).exclude(\n target_type=blade_content_type).distinct('pk').values_list('id', flat=True)\n\n interventions = list(topologies_intervention)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n blades_intervention = Intervention.objects.existing().filter(target_id__in=blades,\n target_type=blade_content_type).values_list('id',\n flat=True)\n interventions.extend(blades_intervention)\n if hasattr(self, 'lookup_queryset_in'):\n lookup_queryset = self.lookup_queryset_in\n else:\n lookup_queryset = 'pk__in'\n qs = qs.filter(**{'%s' % lookup_queryset: interventions})\n return qs\n\n\nclass PolygonTopologyFilter(PolygonInterventionFilterMixin, PolygonFilter):\n pass\n\n\nclass ProjectIntersectionFilterCity(PolygonInterventionFilterMixin, RightFilter):\n model = City\n\n def __init__(self, *args, **kwargs):\n super(ProjectIntersectionFilterCity, self).__init__(*args, **kwargs)\n self.lookup_expr = 'intersects'\n self.lookup_queryset_in = 'interventions__in'\n\n def get_geom(self, value):\n return value.geom\n\n\nclass ProjectIntersectionFilterDistrict(PolygonInterventionFilterMixin, RightFilter):\n model = District\n\n def __init__(self, *args, **kwargs):\n super(ProjectIntersectionFilterDistrict, self).__init__(*args, **kwargs)\n self.lookup_expr = 
'intersects'\n self.lookup_queryset_in = 'interventions__in'\n\n def get_geom(self, value):\n return value.geom\n\n\nclass InterventionFilterSet(ZoningFilterSet, StructureRelatedFilterSet):\n ON_CHOICES = (('infrastructure', _(\"Infrastructure\")), ('signage', _(\"Signage\")), ('blade', _(\"Blade\")),\n ('topology', _(\"Path\")), ('trek', _(\"Trek\")), ('poi', _(\"POI\")), ('service', _(\"Service\")),\n ('trail', _(\"Trail\")))\n bbox = PolygonTopologyFilter(lookup_expr='intersects')\n year = MultipleChoiceFilter(choices=Intervention.objects.year_choices(),\n field_name='date', lookup_expr='year', label=_(\"Year\"))\n on = ChoiceFilter(field_name='target_type__model', choices=ON_CHOICES, label=_(\"On\"), empty_label=_(\"On\"))\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Intervention\n fields = StructureRelatedFilterSet.Meta.fields + [\n 'status', 'type', 'stake', 'subcontracting', 'project', 'on',\n ]\n\n\nclass ProjectFilterSet(StructureRelatedFilterSet):\n bbox = PythonPolygonFilter(field_name='geom')\n year = MultipleChoiceFilter(\n label=_(\"Year of activity\"), method='filter_year',\n choices=lambda: Project.objects.year_choices() # Could change over time\n )\n city = ProjectIntersectionFilterCity(label=_('City'), required=False)\n district = ProjectIntersectionFilterDistrict(label=_('District'), required=False)\n\n class Meta(StructureRelatedFilterSet.Meta):\n model = Project\n fields = StructureRelatedFilterSet.Meta.fields + [\n 'year', 'type', 'domain', 'contractors', 'project_owner',\n 'project_manager', 'founders'\n ]\n\n def filter_year(self, qs, name, values):\n q = Q()\n for value in values:\n q |= Q(begin_year__lte=value, end_year__gte=value)\n return qs.filter(q)\n", "path": "geotrek/maintenance/filters.py"}]} | 1,978 | 112 |
gh_patches_debug_7612 | rasdani/github-patches | git_diff | SCons__scons-4013 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError
Why do I get this error when I type the help command?
Python 3.8.10
Scons 4.2.0

</issue>
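The screenshot is not reproduced here, but the golden diff later in this entry points at `detect_version()` in `SCons/Tool/gcc.py`: the drain loop decodes every remaining line of the compiler's `--version` output through `SCons.Util.to_str()`, and a localized banner that is not valid UTF-8 makes that decode raise `UnicodeDecodeError` (running almost any SCons command, help included, triggers tool detection). A stand-alone illustration of the failure mode follows; the banner bytes are invented, not taken from the report.

```
# Stand-alone illustration (invented bytes): decoding non-UTF-8 compiler
# output is what fails inside detect_version().
line = b"Copyright \xa9 2019 Free Software Foundation"  # latin-1 style bytes
line.decode("utf-8")  # raises UnicodeDecodeError: invalid start byte
```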
<code>
[start of SCons/Tool/gcc.py]
1 # MIT License
2 #
3 # Copyright The SCons Foundation
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
17 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
18 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23
24 """SCons.Tool.gcc
25
26 Tool-specific initialization for gcc.
27
28 There normally shouldn't be any need to import this module directly.
29 It will usually be imported through the generic SCons.Tool.Tool()
30 selection method.
31
32 """
33
34 from . import cc
35 import re
36 import subprocess
37
38 import SCons.Util
39
40 compilers = ['gcc', 'cc']
41
42
43 def generate(env):
44 """Add Builders and construction variables for gcc to an Environment."""
45
46 if 'CC' not in env:
47 env['CC'] = env.Detect(compilers) or compilers[0]
48
49 cc.generate(env)
50
51 if env['PLATFORM'] in ['cygwin', 'win32']:
52 env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
53 else:
54 env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -fPIC')
55 # determine compiler version
56 version = detect_version(env, env['CC'])
57 if version:
58 env['CCVERSION'] = version
59
60
61 def exists(env):
62 # is executable, and is a GNU compiler (or accepts '--version' at least)
63 return detect_version(env, env.Detect(env.get('CC', compilers)))
64
65
66 def detect_version(env, cc):
67 """Return the version of the GNU compiler, or None if it is not a GNU compiler."""
68 version = None
69 cc = env.subst(cc)
70 if not cc:
71 return version
72
73 # -dumpversion was added in GCC 3.0. As long as we're supporting
74 # GCC versions older than that, we should use --version and a
75 # regular expression.
76 # pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'],
77 pipe=SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['--version'],
78 stdin='devnull',
79 stderr='devnull',
80 stdout=subprocess.PIPE)
81 if pipe.wait() != 0:
82 return version
83
84 with pipe.stdout:
85 # -dumpversion variant:
86 # line = pipe.stdout.read().strip()
87 # --version variant:
88 line = SCons.Util.to_str(pipe.stdout.readline())
89 # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:
90 # So continue with reading to let the child process actually terminate.
91 while SCons.Util.to_str(pipe.stdout.readline()):
92 pass
93
94
95 # -dumpversion variant:
96 # if line:
97 # version = line
98 # --version variant:
99 match = re.search(r'[0-9]+(\.[0-9]+)+', line)
100 if match:
101 version = match.group(0)
102
103 return version
104
105 # Local Variables:
106 # tab-width:4
107 # indent-tabs-mode:nil
108 # End:
109 # vim: set expandtab tabstop=4 shiftwidth=4:
110
[end of SCons/Tool/gcc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/SCons/Tool/gcc.py b/SCons/Tool/gcc.py
--- a/SCons/Tool/gcc.py
+++ b/SCons/Tool/gcc.py
@@ -88,7 +88,8 @@
line = SCons.Util.to_str(pipe.stdout.readline())
# Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:
# So continue with reading to let the child process actually terminate.
- while SCons.Util.to_str(pipe.stdout.readline()):
+ # We don't need to know the rest of the data, so don't bother decoding.
+ while pipe.stdout.readline():
pass
| {"golden_diff": "diff --git a/SCons/Tool/gcc.py b/SCons/Tool/gcc.py\n--- a/SCons/Tool/gcc.py\n+++ b/SCons/Tool/gcc.py\n@@ -88,7 +88,8 @@\n line = SCons.Util.to_str(pipe.stdout.readline())\n # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:\n # So continue with reading to let the child process actually terminate.\n- while SCons.Util.to_str(pipe.stdout.readline()):\n+ # We don't need to know the rest of the data, so don't bother decoding.\n+ while pipe.stdout.readline():\n pass\n", "issue": "UnicodeDecodeError\nWhy i get error if type help command?\r\nPython 3.8.10\r\nScons 4.2.0\r\n\r\n\nUnicodeDecodeError\nWhy i get error if type help command?\r\nPython 3.8.10\r\nScons 4.2.0\r\n\r\n\n", "before_files": [{"content": "# MIT License\n#\n# Copyright The SCons Foundation\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"SCons.Tool.gcc\n\nTool-specific initialization for gcc.\n\nThere normally shouldn't be any need to import this module directly.\nIt will usually be imported through the generic SCons.Tool.Tool()\nselection method.\n\n\"\"\"\n\nfrom . import cc\nimport re\nimport subprocess\n\nimport SCons.Util\n\ncompilers = ['gcc', 'cc']\n\n\ndef generate(env):\n \"\"\"Add Builders and construction variables for gcc to an Environment.\"\"\"\n\n if 'CC' not in env:\n env['CC'] = env.Detect(compilers) or compilers[0]\n\n cc.generate(env)\n\n if env['PLATFORM'] in ['cygwin', 'win32']:\n env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')\n else:\n env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -fPIC')\n # determine compiler version\n version = detect_version(env, env['CC'])\n if version:\n env['CCVERSION'] = version\n\n\ndef exists(env):\n # is executable, and is a GNU compiler (or accepts '--version' at least)\n return detect_version(env, env.Detect(env.get('CC', compilers)))\n\n\ndef detect_version(env, cc):\n \"\"\"Return the version of the GNU compiler, or None if it is not a GNU compiler.\"\"\"\n version = None\n cc = env.subst(cc)\n if not cc:\n return version\n\n # -dumpversion was added in GCC 3.0. 
As long as we're supporting\n # GCC versions older than that, we should use --version and a\n # regular expression.\n # pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'],\n pipe=SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['--version'],\n stdin='devnull',\n stderr='devnull',\n stdout=subprocess.PIPE)\n if pipe.wait() != 0:\n return version\n\n with pipe.stdout:\n # -dumpversion variant:\n # line = pipe.stdout.read().strip()\n # --version variant:\n line = SCons.Util.to_str(pipe.stdout.readline())\n # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer:\n # So continue with reading to let the child process actually terminate.\n while SCons.Util.to_str(pipe.stdout.readline()):\n pass\n\n\n # -dumpversion variant:\n # if line:\n # version = line\n # --version variant:\n match = re.search(r'[0-9]+(\\.[0-9]+)+', line)\n if match:\n version = match.group(0)\n\n return version\n\n# Local Variables:\n# tab-width:4\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=4 shiftwidth=4:\n", "path": "SCons/Tool/gcc.py"}]} | 1,814 | 139 |
gh_patches_debug_919 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1603 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transaction admin creates internal server error
</issue>
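The report is a single line, but the model code below suggests the likely culprit: `Transaction.__unicode__()` returns `self.value`, which is a `Decimal` (or `None` for a blank transaction), and the Django admin coerces every object to text when rendering the change list, so a non-text return value raises a `TypeError` and the page answers with a 500. The golden diff later in this entry simply casts the value. A minimal illustration with Python 2 semantics, not repository code:

```
# Minimal illustration (Python 2 semantics): unicode(obj) calls __unicode__
# and expects a text return value.
from decimal import Decimal

class Broken(object):
    value = Decimal("12.50")
    def __unicode__(self):
        return self.value           # unicode(Broken()) raises TypeError

class Fixed(object):
    value = Decimal("12.50")
    def __unicode__(self):
        return unicode(self.value)  # u'12.50'
```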
<code>
[start of akvo/rsr/models/transaction.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from django.db import models
9 from django.utils.translation import ugettext_lazy as _
10
11 from ..fields import ValidXMLCharField
12
13 from akvo.codelists.models import (Currency, DisbursementChannel,TransactionType, Country, Region,
14 RegionVocabulary, Sector, SectorCategory, SectorVocabulary)
15 from akvo.codelists.store.codelists_v201 import (AID_TYPE, CURRENCY, DISBURSEMENT_CHANNEL,
16 FINANCE_TYPE, FLOW_TYPE, TIED_STATUS,
17 TRANSACTION_TYPE, COUNTRY, REGION,
18 REGION_VOCABULARY, SECTOR_VOCABULARY)
19 from akvo.utils import codelist_choices, codelist_value
20
21
22 class Transaction(models.Model):
23 project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='transactions')
24 reference = ValidXMLCharField(
25 _(u'reference'), blank=True, max_length=25,
26 help_text=_(u'Enter a reference for the transaction. (25 characters)')
27 )
28 aid_type = ValidXMLCharField(
29 _(u'aid type'), blank=True, max_length=3, choices=codelist_choices(AID_TYPE)
30 )
31 description = ValidXMLCharField(
32 _(u'description'), max_length=255, blank=True,
33 help_text=_(u'Enter a description for the transaction. (255 characters)')
34 )
35 disbursement_channel = ValidXMLCharField(
36 _(u'disbursement channel'), blank=True, max_length=1,
37 choices=codelist_choices(DISBURSEMENT_CHANNEL)
38 )
39 finance_type = ValidXMLCharField(
40 _(u'finance type'), max_length=3, blank=True, choices=codelist_choices(FINANCE_TYPE)
41 )
42 flow_type = ValidXMLCharField(
43 _(u'flow type'), max_length=2, blank=True, choices=codelist_choices(FLOW_TYPE)
44 )
45 tied_status = ValidXMLCharField(
46 _(u'tied status'), blank=True, max_length=1, choices=codelist_choices(TIED_STATUS)
47 )
48 transaction_date = models.DateField(
49 _(u'transaction date'), blank=True, null=True,
50 help_text=_(u'Enter the financial reporting date that '
51 u'the transaction was/will be undertaken.')
52 )
53 transaction_type = ValidXMLCharField(
54 _(u'transaction type'), blank=True, max_length=2,
55 choices=codelist_choices(TRANSACTION_TYPE),
56 help_text=_(u'Select the type of transaction from the list.')
57 )
58 value = models.DecimalField(
59 _(u'value'), blank=True, null=True, max_digits=11, decimal_places=2,
60 help_text=_(u'Enter the transaction amount.')
61 )
62 value_date = models.DateField(_(u'value date'), blank=True, null=True)
63 currency = ValidXMLCharField(
64 _(u'currency'), blank=True, max_length=3, choices=codelist_choices(CURRENCY)
65 )
66 provider_organisation = models.ForeignKey(
67 'Organisation', verbose_name=_(u'provider organisation'),
68 related_name='providing_transactions', blank=True, null=True, on_delete=models.SET_NULL
69 )
70 provider_organisation_activity = ValidXMLCharField(
71 _(u'provider organisation activity id'), blank=True, max_length=50
72 )
73 receiver_organisation = models.ForeignKey(
74 'Organisation', verbose_name=_(u'receiver organisation'),
75 related_name='receiving_transactions', blank=True, null=True, on_delete=models.SET_NULL
76 )
77 receiver_organisation_activity = ValidXMLCharField(
78 _(u'receiver organisation activity id'), blank=True, max_length=50
79 )
80 recipient_country = ValidXMLCharField(
81 _(u'recipient country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY)
82 )
83 recipient_region = ValidXMLCharField(
84 _(u'recipient region'), blank=True, max_length=3, choices=codelist_choices(REGION)
85 )
86 recipient_region_vocabulary = ValidXMLCharField(
87 _(u'recipient region vocabulary'), blank=True, max_length=1,
88 choices=codelist_choices(REGION_VOCABULARY)
89 )
90
91 def __unicode__(self):
92 return self.value
93
94 def iati_currency(self):
95 return codelist_value(Currency, self, 'currency')
96
97 def iati_transaction_type(self):
98 return codelist_value(TransactionType, self, 'transaction_type')
99
100 def iati_disbursement_channel(self):
101 return codelist_value(DisbursementChannel, self, 'disbursement_channel')
102
103 def iati_recipient_country(self):
104 return codelist_value(Country, self, 'recipient_country')
105
106 def iati_recipient_region(self):
107 return codelist_value(Region, self, 'recipient_region')
108
109 def iati_recipient_region_vocabulary(self):
110 return codelist_value(RegionVocabulary, self, 'recipient_region_vocabulary')
111
112 class Meta:
113 app_label = 'rsr'
114 verbose_name = _(u'transaction')
115 verbose_name_plural = _(u'transactions')
116
117
118 class TransactionSector(models.Model):
119 project = models.ForeignKey(
120 'Transaction', verbose_name=_(u'transaction'), related_name='sectors'
121 )
122 code = ValidXMLCharField(_(u'sector'), blank=True, max_length=5)
123 text = ValidXMLCharField(
124 _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')
125 )
126 vocabulary = ValidXMLCharField(
127 _(u'vocabulary'), blank=True, max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)
128 )
129
130 def iati_sector(self):
131 if self.code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):
132 return codelist_value(Sector, self, 'code')
133 elif self.code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):
134 return codelist_value(SectorCategory, self, 'code')
135 else:
136 return self.code
137
138 def iati_vocabulary(self):
139 return codelist_value(SectorVocabulary, self, 'vocabulary')
140
141 class Meta:
142 app_label = 'rsr'
143 verbose_name = _(u'transaction sector')
144 verbose_name_plural = _(u'transaction sectors')
145 unique_together = ('project', 'vocabulary')
146
[end of akvo/rsr/models/transaction.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/models/transaction.py b/akvo/rsr/models/transaction.py
--- a/akvo/rsr/models/transaction.py
+++ b/akvo/rsr/models/transaction.py
@@ -89,7 +89,7 @@
)
def __unicode__(self):
- return self.value
+ return unicode(self.value)
def iati_currency(self):
return codelist_value(Currency, self, 'currency')
| {"golden_diff": "diff --git a/akvo/rsr/models/transaction.py b/akvo/rsr/models/transaction.py\n--- a/akvo/rsr/models/transaction.py\n+++ b/akvo/rsr/models/transaction.py\n@@ -89,7 +89,7 @@\n )\n \n def __unicode__(self):\n- return self.value\n+ return unicode(self.value)\n \n def iati_currency(self):\n return codelist_value(Currency, self, 'currency')\n", "issue": "Transaction admin creates internal server error\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import ValidXMLCharField\n\nfrom akvo.codelists.models import (Currency, DisbursementChannel,TransactionType, Country, Region,\n RegionVocabulary, Sector, SectorCategory, SectorVocabulary)\nfrom akvo.codelists.store.codelists_v201 import (AID_TYPE, CURRENCY, DISBURSEMENT_CHANNEL,\n FINANCE_TYPE, FLOW_TYPE, TIED_STATUS,\n TRANSACTION_TYPE, COUNTRY, REGION,\n REGION_VOCABULARY, SECTOR_VOCABULARY)\nfrom akvo.utils import codelist_choices, codelist_value\n\n\nclass Transaction(models.Model):\n project = models.ForeignKey('Project', verbose_name=_(u'project'), related_name='transactions')\n reference = ValidXMLCharField(\n _(u'reference'), blank=True, max_length=25,\n help_text=_(u'Enter a reference for the transaction. (25 characters)')\n )\n aid_type = ValidXMLCharField(\n _(u'aid type'), blank=True, max_length=3, choices=codelist_choices(AID_TYPE)\n )\n description = ValidXMLCharField(\n _(u'description'), max_length=255, blank=True,\n help_text=_(u'Enter a description for the transaction. 
(255 characters)')\n )\n disbursement_channel = ValidXMLCharField(\n _(u'disbursement channel'), blank=True, max_length=1,\n choices=codelist_choices(DISBURSEMENT_CHANNEL)\n )\n finance_type = ValidXMLCharField(\n _(u'finance type'), max_length=3, blank=True, choices=codelist_choices(FINANCE_TYPE)\n )\n flow_type = ValidXMLCharField(\n _(u'flow type'), max_length=2, blank=True, choices=codelist_choices(FLOW_TYPE)\n )\n tied_status = ValidXMLCharField(\n _(u'tied status'), blank=True, max_length=1, choices=codelist_choices(TIED_STATUS)\n )\n transaction_date = models.DateField(\n _(u'transaction date'), blank=True, null=True,\n help_text=_(u'Enter the financial reporting date that '\n u'the transaction was/will be undertaken.')\n )\n transaction_type = ValidXMLCharField(\n _(u'transaction type'), blank=True, max_length=2,\n choices=codelist_choices(TRANSACTION_TYPE),\n help_text=_(u'Select the type of transaction from the list.')\n )\n value = models.DecimalField(\n _(u'value'), blank=True, null=True, max_digits=11, decimal_places=2,\n help_text=_(u'Enter the transaction amount.')\n )\n value_date = models.DateField(_(u'value date'), blank=True, null=True)\n currency = ValidXMLCharField(\n _(u'currency'), blank=True, max_length=3, choices=codelist_choices(CURRENCY)\n )\n provider_organisation = models.ForeignKey(\n 'Organisation', verbose_name=_(u'provider organisation'),\n related_name='providing_transactions', blank=True, null=True, on_delete=models.SET_NULL\n )\n provider_organisation_activity = ValidXMLCharField(\n _(u'provider organisation activity id'), blank=True, max_length=50\n )\n receiver_organisation = models.ForeignKey(\n 'Organisation', verbose_name=_(u'receiver organisation'),\n related_name='receiving_transactions', blank=True, null=True, on_delete=models.SET_NULL\n )\n receiver_organisation_activity = ValidXMLCharField(\n _(u'receiver organisation activity id'), blank=True, max_length=50\n )\n recipient_country = ValidXMLCharField(\n _(u'recipient country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY)\n )\n recipient_region = ValidXMLCharField(\n _(u'recipient region'), blank=True, max_length=3, choices=codelist_choices(REGION)\n )\n recipient_region_vocabulary = ValidXMLCharField(\n _(u'recipient region vocabulary'), blank=True, max_length=1,\n choices=codelist_choices(REGION_VOCABULARY)\n )\n\n def __unicode__(self):\n return self.value\n\n def iati_currency(self):\n return codelist_value(Currency, self, 'currency')\n\n def iati_transaction_type(self):\n return codelist_value(TransactionType, self, 'transaction_type')\n\n def iati_disbursement_channel(self):\n return codelist_value(DisbursementChannel, self, 'disbursement_channel')\n\n def iati_recipient_country(self):\n return codelist_value(Country, self, 'recipient_country')\n\n def iati_recipient_region(self):\n return codelist_value(Region, self, 'recipient_region')\n\n def iati_recipient_region_vocabulary(self):\n return codelist_value(RegionVocabulary, self, 'recipient_region_vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'transaction')\n verbose_name_plural = _(u'transactions')\n\n\nclass TransactionSector(models.Model):\n project = models.ForeignKey(\n 'Transaction', verbose_name=_(u'transaction'), related_name='sectors'\n )\n code = ValidXMLCharField(_(u'sector'), blank=True, max_length=5)\n text = ValidXMLCharField(\n _(u'description'), blank=True, max_length=100, help_text=_(u'(max 100 characters)')\n )\n vocabulary = ValidXMLCharField(\n _(u'vocabulary'), blank=True, 
max_length=5, choices=codelist_choices(SECTOR_VOCABULARY)\n )\n\n def iati_sector(self):\n if self.code and (self.vocabulary == '1' or self.vocabulary == 'DAC'):\n return codelist_value(Sector, self, 'code')\n elif self.code and (self.vocabulary == '2' or self.vocabulary == 'DAC-3'):\n return codelist_value(SectorCategory, self, 'code')\n else:\n return self.code\n\n def iati_vocabulary(self):\n return codelist_value(SectorVocabulary, self, 'vocabulary')\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'transaction sector')\n verbose_name_plural = _(u'transaction sectors')\n unique_together = ('project', 'vocabulary')\n", "path": "akvo/rsr/models/transaction.py"}]} | 2,327 | 104 |
gh_patches_debug_27338 | rasdani/github-patches | git_diff | freedomofpress__securedrop-1306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use upstream version of python-gnupg
See https://github.com/freedomofpress/securedrop/issues/250#issuecomment-86237456 for context. We have worked with the upstream maintainer to fix the issues that were preventing us from using the upstream version with SecureDrop. Now that they are fixed, we should start using the upstream and discontinue our use of the outdated `gnupg-securedrop` [fork](https://pypi.python.org/pypi/gnupg-securedrop/1.2.5-9-g6f9d63a-dirty).
</issue>
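Some context on what switching to upstream means in practice here: upstream python-gnupg decides whether an argument is a file or a string by looking at its type, and SecureDrop's `SecureTemporaryFile` (a `_TemporaryFileWrapper` subclass) used to be treated as a string, which is part of why the patched `gnupg-securedrop` fork existed. The golden diff later in this entry resolves that by registering the wrapper as a stream-like type and dropping the separate string/file encrypt paths. Condensed from that diff (no new behaviour is introduced here):

```
# Condensed from the golden diff in this entry: tell upstream python-gnupg to
# treat the tempfile wrapper as a stream rather than a string-like object.
from tempfile import _TemporaryFileWrapper
from gnupg._util import _STREAMLIKE_TYPES

_STREAMLIKE_TYPES.append(_TemporaryFileWrapper)
```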
<code>
[start of securedrop/crypto_util.py]
1 # -*- coding: utf-8 -*-
2 import os
3 import subprocess
4 from base64 import b32encode
5
6 from Crypto.Random import random
7 import gnupg
8 import scrypt
9
10 import config
11 import store
12
13 # to fix gpg error #78 on production
14 os.environ['USERNAME'] = 'www-data'
15
16 GPG_KEY_TYPE = "RSA"
17 if os.environ.get('SECUREDROP_ENV') == 'test':
18 # Optimize crypto to speed up tests (at the expense of security - DO NOT
19 # use these settings in production)
20 GPG_KEY_LENGTH = 1024
21 SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)
22 else:
23 GPG_KEY_LENGTH = 4096
24 SCRYPT_PARAMS = config.SCRYPT_PARAMS
25
26 SCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER
27 SCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER
28
29 DEFAULT_WORDS_IN_RANDOM_ID = 8
30
31
32 # Make sure these pass before the app can run
33 # TODO: Add more tests
34 def do_runtime_tests():
35 assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)
36 # crash if we don't have srm:
37 try:
38 subprocess.check_call(['srm'], stdout=subprocess.PIPE)
39 except subprocess.CalledProcessError:
40 pass
41
42 do_runtime_tests()
43
44 # HACK: use_agent=True is used to avoid logging noise.
45 #
46 # --use-agent is a dummy option in gpg2, which is the only version of
47 # gpg used by SecureDrop. If use_agent=False, gpg2 prints a warning
48 # message every time it runs because the option is deprecated and has
49 # no effect. This message cannot be silenced even if you change the
50 # --debug-level (controlled via the verbose= keyword argument to the
51 # gnupg.GPG constructor), and creates a lot of logging noise.
52 #
53 # The best solution here would be to avoid passing either --use-agent
54 # or --no-use-agent to gpg2, and I have filed an issue upstream to
55 # address this: https://github.com/isislovecruft/python-gnupg/issues/96
56 gpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR, use_agent=True)
57
58 words = file(config.WORD_LIST).read().split('\n')
59 nouns = file(config.NOUNS).read().split('\n')
60 adjectives = file(config.ADJECTIVES).read().split('\n')
61
62
63 class CryptoException(Exception):
64 pass
65
66
67 def clean(s, also=''):
68 """
69 >>> clean("Hello, world!")
70 Traceback (most recent call last):
71 ...
72 CryptoException: invalid input
73 >>> clean("Helloworld")
74 'Helloworld'
75 """
76 # safe characters for every possible word in the wordlist includes capital
77 # letters because codename hashes are base32-encoded with capital letters
78 ok = ' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ'
79 for c in s:
80 if c not in ok and c not in also:
81 raise CryptoException("invalid input: %s" % s)
82 # scrypt.hash requires input of type str. Since the wordlist is all ASCII
83 # characters, this conversion is not problematic
84 return str(s)
85
86
87 def genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):
88 return ' '.join(random.choice(words) for x in range(words_in_random_id))
89
90
91 def display_id():
92 return ' '.join([random.choice(adjectives), random.choice(nouns)])
93
94
95 def hash_codename(codename, salt=SCRYPT_ID_PEPPER):
96 """
97 >>> hash_codename('Hello, world!')
98 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'
99 """
100 return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))
101
102
103 def genkeypair(name, secret):
104 """
105 >>> if not gpg.list_keys(hash_codename('randomid')):
106 ... genkeypair(hash_codename('randomid'), 'randomid').type
107 ... else:
108 ... u'P'
109 u'P'
110 """
111 name = clean(name)
112 secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
113 return gpg.gen_key(gpg.gen_key_input(
114 key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,
115 passphrase=secret,
116 name_email=name
117 ))
118
119
120 def delete_reply_keypair(source_id):
121 key = getkey(source_id)
122 # If this source was never flagged for review, they won't have a reply keypair
123 if not key:
124 return
125 # The private key needs to be deleted before the public key can be deleted
126 # http://pythonhosted.org/python-gnupg/#deleting-keys
127 gpg.delete_keys(key, True) # private key
128 gpg.delete_keys(key) # public key
129 # TODO: srm?
130
131
132 def getkey(name):
133 for key in gpg.list_keys():
134 for uid in key['uids']:
135 if name in uid:
136 return key['fingerprint']
137 return None
138
139
140 def get_key_by_fingerprint(fingerprint):
141 matches = filter(lambda k: k['fingerprint'] == fingerprint, gpg.list_keys())
142 return matches[0] if matches else None
143
144
145 def encrypt(plaintext, fingerprints, output=None):
146 # Verify the output path
147 if output:
148 store.verify(output)
149
150 # Remove any spaces from provided fingerprints
151 # GPG outputs fingerprints with spaces for readability, but requires the
152 # spaces to be removed when using fingerprints to specify recipients.
153 if not isinstance(fingerprints, (list, tuple)):
154 fingerprints = [fingerprints, ]
155 fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]
156
157 if isinstance(plaintext, unicode):
158 plaintext = plaintext.encode('utf8')
159
160 encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file
161 out = encrypt_fn(plaintext,
162 *fingerprints,
163 output=output,
164 always_trust=True,
165 armor=False)
166 if out.ok:
167 return out.data
168 else:
169 raise CryptoException(out.stderr)
170
171
172 def decrypt(secret, plain_text):
173 """
174 >>> key = genkeypair('randomid', 'randomid')
175 >>> decrypt('randomid', 'randomid',
176 ... encrypt('randomid', 'Goodbye, cruel world!')
177 ... )
178 'Goodbye, cruel world!'
179 """
180 hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)
181 return gpg.decrypt(plain_text, passphrase=hashed_codename).data
182
183
184 if __name__ == "__main__":
185 import doctest
186 doctest.testmod()
187
[end of securedrop/crypto_util.py]
[start of securedrop/secure_tempfile.py]
1 import base64
2 import os
3 from tempfile import _TemporaryFileWrapper
4
5 from Crypto.Cipher import AES
6 from Crypto.Random import random
7 from Crypto.Util import Counter
8
9
10 class SecureTemporaryFile(_TemporaryFileWrapper):
11 """Temporary file that is ephemerally encrypted on the fly.
12
13 Since only encrypted data is ever written to disk, using this
14 classes minimizes the chances of plaintext recovery through
15 forensic disk analysis.
16
17 Adapted from Globaleaks' GLSecureTemporaryFile: https://github.com/globaleaks/GlobaLeaks/blob/master/backend/globaleaks/security.py#L35
18
19 WARNING: you can't use this like a normal file object. It supports
20 being written to exactly once, then read from exactly once.
21 """
22
23 AES_key_size = 256
24 AES_block_size = 128
25
26 def __init__(self, store_dir):
27 self.last_action = 'init'
28 self.create_key()
29
30 self.tmp_file_id = base64.urlsafe_b64encode(os.urandom(32)).strip('=')
31 self.filepath = os.path.join(store_dir, "{}.aes".format(self.tmp_file_id))
32 self.file = open(self.filepath, 'w+b')
33
34 _TemporaryFileWrapper.__init__(self, self.file, self.filepath, delete=True)
35
36 def create_key(self):
37 """
38 Randomly generate an AES key to encrypt the file
39 """
40 self.key = os.urandom(self.AES_key_size / 8)
41 self.iv = random.getrandbits(self.AES_block_size)
42 self.initialize_cipher()
43
44 def initialize_cipher(self):
45 self.ctr_e = Counter.new(self.AES_block_size, initial_value=self.iv)
46 self.ctr_d = Counter.new(self.AES_block_size, initial_value=self.iv)
47 self.encryptor = AES.new(self.key, AES.MODE_CTR, counter=self.ctr_e)
48 self.decryptor = AES.new(self.key, AES.MODE_CTR, counter=self.ctr_d)
49
50 def write(self, data):
51 """
52 We track the internal status and don't allow writing after reading.
53 It might be possible to be smarter about this.
54 """
55 assert self.last_action != 'read', "You cannot write after read!"
56 self.last_action = 'write'
57
58 try:
59 if isinstance(data, unicode):
60 data = data.encode('utf-8')
61 self.file.write(self.encryptor.encrypt(data))
62 except Exception as err:
63 raise err
64
65 def read(self, count=None):
66 """
67 The first time 'read' is called after a write, automatically seek(0).
68 """
69 if self.last_action == 'write':
70 self.seek(0, 0)
71 self.last_action = 'read'
72
73 if count is None:
74 return self.decryptor.decrypt(self.file.read())
75 else:
76 return self.decryptor.decrypt(self.file.read(count))
77
78 def close(self):
79 return _TemporaryFileWrapper.close(self)
80
[end of securedrop/secure_tempfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py
--- a/securedrop/crypto_util.py
+++ b/securedrop/crypto_util.py
@@ -154,15 +154,11 @@
fingerprints = [fingerprints, ]
fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]
- if isinstance(plaintext, unicode):
- plaintext = plaintext.encode('utf8')
-
- encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file
- out = encrypt_fn(plaintext,
- *fingerprints,
- output=output,
- always_trust=True,
- armor=False)
+ out = gpg.encrypt(plaintext,
+ *fingerprints,
+ output=output,
+ always_trust=True,
+ armor=False)
if out.ok:
return out.data
else:
diff --git a/securedrop/secure_tempfile.py b/securedrop/secure_tempfile.py
--- a/securedrop/secure_tempfile.py
+++ b/securedrop/secure_tempfile.py
@@ -6,6 +6,7 @@
from Crypto.Random import random
from Crypto.Util import Counter
+from gnupg._util import _STREAMLIKE_TYPES
class SecureTemporaryFile(_TemporaryFileWrapper):
"""Temporary file that is ephemerally encrypted on the fly.
@@ -77,3 +78,8 @@
def close(self):
return _TemporaryFileWrapper.close(self)
+
+# python-gnupg will not recognize our SecureTemporaryFile as a stream-like type
+# and will attempt to call encode on it, thinking it's a string-like type. To
+# avoid this we add it the list of stream-like types.
+_STREAMLIKE_TYPES.append(_TemporaryFileWrapper)
| {"golden_diff": "diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py\n--- a/securedrop/crypto_util.py\n+++ b/securedrop/crypto_util.py\n@@ -154,15 +154,11 @@\n fingerprints = [fingerprints, ]\n fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]\n \n- if isinstance(plaintext, unicode):\n- plaintext = plaintext.encode('utf8')\n-\n- encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file\n- out = encrypt_fn(plaintext,\n- *fingerprints,\n- output=output,\n- always_trust=True,\n- armor=False)\n+ out = gpg.encrypt(plaintext,\n+ *fingerprints,\n+ output=output,\n+ always_trust=True,\n+ armor=False)\n if out.ok:\n return out.data\n else:\ndiff --git a/securedrop/secure_tempfile.py b/securedrop/secure_tempfile.py\n--- a/securedrop/secure_tempfile.py\n+++ b/securedrop/secure_tempfile.py\n@@ -6,6 +6,7 @@\n from Crypto.Random import random\n from Crypto.Util import Counter\n \n+from gnupg._util import _STREAMLIKE_TYPES\n \n class SecureTemporaryFile(_TemporaryFileWrapper):\n \"\"\"Temporary file that is ephemerally encrypted on the fly.\n@@ -77,3 +78,8 @@\n \n def close(self):\n return _TemporaryFileWrapper.close(self)\n+\n+# python-gnupg will not recognize our SecureTemporaryFile as a stream-like type\n+# and will attempt to call encode on it, thinking it's a string-like type. To\n+# avoid this we add it the list of stream-like types.\n+_STREAMLIKE_TYPES.append(_TemporaryFileWrapper)\n", "issue": "Use upstream version of python-gnupg\nSee https://github.com/freedomofpress/securedrop/issues/250#issuecomment-86237456 for context. We have worked with the upstream maintainer to fix the issues that were preventing us from using the upstream version with SecureDrop. Now that they are fixed, we should start using the upstream and discontinue our use of the outdated `gnupg-securedrop` [fork](https://pypi.python.org/pypi/gnupg-securedrop/1.2.5-9-g6f9d63a-dirty).\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nfrom base64 import b32encode\n\nfrom Crypto.Random import random\nimport gnupg\nimport scrypt\n\nimport config\nimport store\n\n# to fix gpg error #78 on production\nos.environ['USERNAME'] = 'www-data'\n\nGPG_KEY_TYPE = \"RSA\"\nif os.environ.get('SECUREDROP_ENV') == 'test':\n # Optimize crypto to speed up tests (at the expense of security - DO NOT\n # use these settings in production)\n GPG_KEY_LENGTH = 1024\n SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)\nelse:\n GPG_KEY_LENGTH = 4096\n SCRYPT_PARAMS = config.SCRYPT_PARAMS\n\nSCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER\nSCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER\n\nDEFAULT_WORDS_IN_RANDOM_ID = 8\n\n\n# Make sure these pass before the app can run\n# TODO: Add more tests\ndef do_runtime_tests():\n assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)\n # crash if we don't have srm:\n try:\n subprocess.check_call(['srm'], stdout=subprocess.PIPE)\n except subprocess.CalledProcessError:\n pass\n\ndo_runtime_tests()\n\n# HACK: use_agent=True is used to avoid logging noise.\n#\n# --use-agent is a dummy option in gpg2, which is the only version of\n# gpg used by SecureDrop. If use_agent=False, gpg2 prints a warning\n# message every time it runs because the option is deprecated and has\n# no effect. 
This message cannot be silenced even if you change the\n# --debug-level (controlled via the verbose= keyword argument to the\n# gnupg.GPG constructor), and creates a lot of logging noise.\n#\n# The best solution here would be to avoid passing either --use-agent\n# or --no-use-agent to gpg2, and I have filed an issue upstream to\n# address this: https://github.com/isislovecruft/python-gnupg/issues/96\ngpg = gnupg.GPG(binary='gpg2', homedir=config.GPG_KEY_DIR, use_agent=True)\n\nwords = file(config.WORD_LIST).read().split('\\n')\nnouns = file(config.NOUNS).read().split('\\n')\nadjectives = file(config.ADJECTIVES).read().split('\\n')\n\n\nclass CryptoException(Exception):\n pass\n\n\ndef clean(s, also=''):\n \"\"\"\n >>> clean(\"Hello, world!\")\n Traceback (most recent call last):\n ...\n CryptoException: invalid input\n >>> clean(\"Helloworld\")\n 'Helloworld'\n \"\"\"\n # safe characters for every possible word in the wordlist includes capital\n # letters because codename hashes are base32-encoded with capital letters\n ok = ' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n for c in s:\n if c not in ok and c not in also:\n raise CryptoException(\"invalid input: %s\" % s)\n # scrypt.hash requires input of type str. Since the wordlist is all ASCII\n # characters, this conversion is not problematic\n return str(s)\n\n\ndef genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):\n return ' '.join(random.choice(words) for x in range(words_in_random_id))\n\n\ndef display_id():\n return ' '.join([random.choice(adjectives), random.choice(nouns)])\n\n\ndef hash_codename(codename, salt=SCRYPT_ID_PEPPER):\n \"\"\"\n >>> hash_codename('Hello, world!')\n 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'\n \"\"\"\n return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))\n\n\ndef genkeypair(name, secret):\n \"\"\"\n >>> if not gpg.list_keys(hash_codename('randomid')):\n ... genkeypair(hash_codename('randomid'), 'randomid').type\n ... else:\n ... 
u'P'\n u'P'\n \"\"\"\n name = clean(name)\n secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.gen_key(gpg.gen_key_input(\n key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,\n passphrase=secret,\n name_email=name\n ))\n\n\ndef delete_reply_keypair(source_id):\n key = getkey(source_id)\n # If this source was never flagged for review, they won't have a reply keypair\n if not key:\n return\n # The private key needs to be deleted before the public key can be deleted\n # http://pythonhosted.org/python-gnupg/#deleting-keys\n gpg.delete_keys(key, True) # private key\n gpg.delete_keys(key) # public key\n # TODO: srm?\n\n\ndef getkey(name):\n for key in gpg.list_keys():\n for uid in key['uids']:\n if name in uid:\n return key['fingerprint']\n return None\n\n\ndef get_key_by_fingerprint(fingerprint):\n matches = filter(lambda k: k['fingerprint'] == fingerprint, gpg.list_keys())\n return matches[0] if matches else None\n\n\ndef encrypt(plaintext, fingerprints, output=None):\n # Verify the output path\n if output:\n store.verify(output)\n\n # Remove any spaces from provided fingerprints\n # GPG outputs fingerprints with spaces for readability, but requires the\n # spaces to be removed when using fingerprints to specify recipients.\n if not isinstance(fingerprints, (list, tuple)):\n fingerprints = [fingerprints, ]\n fingerprints = [fpr.replace(' ', '') for fpr in fingerprints]\n\n if isinstance(plaintext, unicode):\n plaintext = plaintext.encode('utf8')\n\n encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file\n out = encrypt_fn(plaintext,\n *fingerprints,\n output=output,\n always_trust=True,\n armor=False)\n if out.ok:\n return out.data\n else:\n raise CryptoException(out.stderr)\n\n\ndef decrypt(secret, plain_text):\n \"\"\"\n >>> key = genkeypair('randomid', 'randomid')\n >>> decrypt('randomid', 'randomid',\n ... encrypt('randomid', 'Goodbye, cruel world!')\n ... )\n 'Goodbye, cruel world!'\n \"\"\"\n hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.decrypt(plain_text, passphrase=hashed_codename).data\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "securedrop/crypto_util.py"}, {"content": "import base64\nimport os\nfrom tempfile import _TemporaryFileWrapper\n\nfrom Crypto.Cipher import AES\nfrom Crypto.Random import random\nfrom Crypto.Util import Counter\n\n\nclass SecureTemporaryFile(_TemporaryFileWrapper):\n \"\"\"Temporary file that is ephemerally encrypted on the fly.\n\n Since only encrypted data is ever written to disk, using this\n classes minimizes the chances of plaintext recovery through\n forensic disk analysis.\n\n Adapted from Globaleaks' GLSecureTemporaryFile: https://github.com/globaleaks/GlobaLeaks/blob/master/backend/globaleaks/security.py#L35\n\n WARNING: you can't use this like a normal file object. 
It supports\n being written to exactly once, then read from exactly once.\n \"\"\"\n\n AES_key_size = 256\n AES_block_size = 128\n\n def __init__(self, store_dir):\n self.last_action = 'init'\n self.create_key()\n\n self.tmp_file_id = base64.urlsafe_b64encode(os.urandom(32)).strip('=')\n self.filepath = os.path.join(store_dir, \"{}.aes\".format(self.tmp_file_id))\n self.file = open(self.filepath, 'w+b')\n\n _TemporaryFileWrapper.__init__(self, self.file, self.filepath, delete=True)\n\n def create_key(self):\n \"\"\"\n Randomly generate an AES key to encrypt the file\n \"\"\"\n self.key = os.urandom(self.AES_key_size / 8)\n self.iv = random.getrandbits(self.AES_block_size)\n self.initialize_cipher()\n\n def initialize_cipher(self):\n self.ctr_e = Counter.new(self.AES_block_size, initial_value=self.iv)\n self.ctr_d = Counter.new(self.AES_block_size, initial_value=self.iv)\n self.encryptor = AES.new(self.key, AES.MODE_CTR, counter=self.ctr_e)\n self.decryptor = AES.new(self.key, AES.MODE_CTR, counter=self.ctr_d)\n\n def write(self, data):\n \"\"\"\n We track the internal status and don't allow writing after reading.\n It might be possible to be smarter about this.\n \"\"\"\n assert self.last_action != 'read', \"You cannot write after read!\"\n self.last_action = 'write'\n\n try:\n if isinstance(data, unicode):\n data = data.encode('utf-8')\n self.file.write(self.encryptor.encrypt(data))\n except Exception as err:\n raise err\n\n def read(self, count=None):\n \"\"\"\n The first time 'read' is called after a write, automatically seek(0).\n \"\"\"\n if self.last_action == 'write':\n self.seek(0, 0)\n self.last_action = 'read'\n\n if count is None:\n return self.decryptor.decrypt(self.file.read())\n else:\n return self.decryptor.decrypt(self.file.read(count))\n\n def close(self):\n return _TemporaryFileWrapper.close(self)\n", "path": "securedrop/secure_tempfile.py"}]} | 3,489 | 389 |
gh_patches_debug_23911 | rasdani/github-patches | git_diff | pymedusa__Medusa-10131 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update PMS library not working
Hi,
All boxes from Notifications -> Plex Media Server are filled in with the correct information (token, user, password, server ip:port), and I'm receiving the blue box with (error trying to update plex).
What is surprising is that, when using the Test Plex Media Server button, the test is OK even if the Token field is empty.
Error received:
Thread_2 :: [] PLEX: Unauthorized. Please set TOKEN or USERNAME and PASSWORD in Plex settings
</issue>
<code>
[start of medusa/server/api/v2/notifications.py]
1 # coding=utf-8
2 """Request handler for notifications data."""
3 from __future__ import unicode_literals
4
5
6 import logging
7 import re
8
9 from medusa import app, notifiers, ui
10 from medusa.logger.adapters.style import BraceAdapter
11 from medusa.server.api.v2.base import BaseRequestHandler
12 from medusa.tv.series import Series, SeriesIdentifier
13
14 log = BraceAdapter(logging.getLogger(__name__))
15 log.logger.addHandler(logging.NullHandler())
16
17
18 class NotificationsHandler(BaseRequestHandler):
19 """Notifications data request handler."""
20
21 #: resource name
22 name = 'notifications'
23 #: identifier
24 identifier = ('resource', r'\w+')
25 #: path param
26 path_param = ('path_param', r'\w+')
27 #: allowed HTTP methods
28 allowed_methods = ('GET', 'POST')
29
30 def post(self, resource, path_param=None):
31 """Post Notifications actions for a specific external source.
32
33 :param resource: a resource name
34 :param path_param:
35 :type path_param: str
36 """
37 if resource is None:
38 return self._bad_request('You must provide a notifications resource name')
39
40 available_resources = (
41 'kodi', 'plexserver', 'plexhome', 'emby', 'nmj', 'nmjv2', 'trakt'
42 )
43
44 if resource not in available_resources:
45 return self._bad_request(f"Resource must be one of {', '.join(available_resources)}")
46
47 # Convert 'camelCase' to 'resource_snake_case'
48 resource_function_name = resource + '_' + re.sub('([A-Z]+)', r'_\1', path_param).lower()
49 resource_function = getattr(self, resource_function_name, None)
50
51 if resource_function is None:
52 log.error('Unable to get function "{func}" for resource "{resource}"',
53 {'func': resource_function_name, 'resource': path_param})
54 return self._bad_request('{key} is a invalid resource'.format(key=path_param))
55
56 return resource_function()
57
58 def kodi_update(self):
59 """Update kodi's show library."""
60 if app.KODI_UPDATE_ONLYFIRST:
61 host = app.KODI_HOST[0].strip()
62 else:
63 host = ', '.join(app.KODI_HOST)
64
65 if notifiers.kodi_notifier.update_library():
66 ui.notifications.message(f'Library update command sent to KODI host(s): {host}')
67 else:
68 ui.notifications.error(f'Unable to contact one or more KODI host(s): {host}')
69
70 return self._created()
71
72 def emby_update(self):
73 """Update emby's show library."""
74 show_slug = self.get_argument('showslug', '')
75 show = None
76
77 if show_slug:
78 show_identifier = SeriesIdentifier.from_slug(show_slug)
79 if not show_identifier:
80 return self._bad_request('Invalid show slug')
81
82 show = Series.find_by_identifier(show_identifier)
83 if not show:
84 return self._not_found('Series not found')
85
86 if notifiers.emby_notifier.update_library(show):
87 ui.notifications.message(f'Library update command sent to Emby host: {app.EMBY_HOST}')
88 else:
89 ui.notifications.error(f'Unable to contact Emby host: {app.EMBY_HOST}')
90
91 return self._created()
92
93 def plex_update(self):
94 """Update plex's show library."""
95 if None is notifiers.plex_notifier.update_library():
96 ui.notifications.message(
97 'Library update command sent to Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))
98 else:
99 ui.notifications.error('Unable to contact Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))
100
101 return self._created()
102
[end of medusa/server/api/v2/notifications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/medusa/server/api/v2/notifications.py b/medusa/server/api/v2/notifications.py
--- a/medusa/server/api/v2/notifications.py
+++ b/medusa/server/api/v2/notifications.py
@@ -38,7 +38,7 @@
return self._bad_request('You must provide a notifications resource name')
available_resources = (
- 'kodi', 'plexserver', 'plexhome', 'emby', 'nmj', 'nmjv2', 'trakt'
+ 'kodi', 'plexserver', 'plexhome', 'emby', 'nmj', 'nmjv2', 'trakt', 'plex'
)
if resource not in available_resources:
@@ -92,10 +92,10 @@
def plex_update(self):
"""Update plex's show library."""
- if None is notifiers.plex_notifier.update_library():
+ if not notifiers.plex_notifier.update_library():
ui.notifications.message(
- 'Library update command sent to Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))
+ f"Library update command sent to Plex Media Server host: {', '.join(app.PLEX_SERVER_HOST)}")
else:
- ui.notifications.error('Unable to contact Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))
+ ui.notifications.error(f"Unable to contact Plex Media Server host: {', '.join(app.PLEX_SERVER_HOST)}")
return self._created()
| {"golden_diff": "diff --git a/medusa/server/api/v2/notifications.py b/medusa/server/api/v2/notifications.py\n--- a/medusa/server/api/v2/notifications.py\n+++ b/medusa/server/api/v2/notifications.py\n@@ -38,7 +38,7 @@\n return self._bad_request('You must provide a notifications resource name')\n \n available_resources = (\n- 'kodi', 'plexserver', 'plexhome', 'emby', 'nmj', 'nmjv2', 'trakt'\n+ 'kodi', 'plexserver', 'plexhome', 'emby', 'nmj', 'nmjv2', 'trakt', 'plex'\n )\n \n if resource not in available_resources:\n@@ -92,10 +92,10 @@\n \n def plex_update(self):\n \"\"\"Update plex's show library.\"\"\"\n- if None is notifiers.plex_notifier.update_library():\n+ if not notifiers.plex_notifier.update_library():\n ui.notifications.message(\n- 'Library update command sent to Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))\n+ f\"Library update command sent to Plex Media Server host: {', '.join(app.PLEX_SERVER_HOST)}\")\n else:\n- ui.notifications.error('Unable to contact Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))\n+ ui.notifications.error(f\"Unable to contact Plex Media Server host: {', '.join(app.PLEX_SERVER_HOST)}\")\n \n return self._created()\n", "issue": "Update PMS library not working\nHi,\r\n\r\nAll boxes from Notifications -> Plex Media server are completed with the correct information (token, user, password, server ip:port) and I`m receiving the blue box with (error trying to update plex).\r\nWhat is surprisingly, when using the Test Plex Media Server, the test is ok even if the Token field is empty.\r\n\r\nError received:\r\nThread_2 :: [] PLEX: Unauthorized. Please set TOKEN or USERNAME and PASSWORD in Plex settings\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Request handler for notifications data.\"\"\"\nfrom __future__ import unicode_literals\n\n\nimport logging\nimport re\n\nfrom medusa import app, notifiers, ui\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.server.api.v2.base import BaseRequestHandler\nfrom medusa.tv.series import Series, SeriesIdentifier\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass NotificationsHandler(BaseRequestHandler):\n \"\"\"Notifications data request handler.\"\"\"\n\n #: resource name\n name = 'notifications'\n #: identifier\n identifier = ('resource', r'\\w+')\n #: path param\n path_param = ('path_param', r'\\w+')\n #: allowed HTTP methods\n allowed_methods = ('GET', 'POST')\n\n def post(self, resource, path_param=None):\n \"\"\"Post Notifications actions for a specific external source.\n\n :param resource: a resource name\n :param path_param:\n :type path_param: str\n \"\"\"\n if resource is None:\n return self._bad_request('You must provide a notifications resource name')\n\n available_resources = (\n 'kodi', 'plexserver', 'plexhome', 'emby', 'nmj', 'nmjv2', 'trakt'\n )\n\n if resource not in available_resources:\n return self._bad_request(f\"Resource must be one of {', '.join(available_resources)}\")\n\n # Convert 'camelCase' to 'resource_snake_case'\n resource_function_name = resource + '_' + re.sub('([A-Z]+)', r'_\\1', path_param).lower()\n resource_function = getattr(self, resource_function_name, None)\n\n if resource_function is None:\n log.error('Unable to get function \"{func}\" for resource \"{resource}\"',\n {'func': resource_function_name, 'resource': path_param})\n return self._bad_request('{key} is a invalid resource'.format(key=path_param))\n\n return resource_function()\n\n def 
kodi_update(self):\n \"\"\"Update kodi's show library.\"\"\"\n if app.KODI_UPDATE_ONLYFIRST:\n host = app.KODI_HOST[0].strip()\n else:\n host = ', '.join(app.KODI_HOST)\n\n if notifiers.kodi_notifier.update_library():\n ui.notifications.message(f'Library update command sent to KODI host(s): {host}')\n else:\n ui.notifications.error(f'Unable to contact one or more KODI host(s): {host}')\n\n return self._created()\n\n def emby_update(self):\n \"\"\"Update emby's show library.\"\"\"\n show_slug = self.get_argument('showslug', '')\n show = None\n\n if show_slug:\n show_identifier = SeriesIdentifier.from_slug(show_slug)\n if not show_identifier:\n return self._bad_request('Invalid show slug')\n\n show = Series.find_by_identifier(show_identifier)\n if not show:\n return self._not_found('Series not found')\n\n if notifiers.emby_notifier.update_library(show):\n ui.notifications.message(f'Library update command sent to Emby host: {app.EMBY_HOST}')\n else:\n ui.notifications.error(f'Unable to contact Emby host: {app.EMBY_HOST}')\n\n return self._created()\n\n def plex_update(self):\n \"\"\"Update plex's show library.\"\"\"\n if None is notifiers.plex_notifier.update_library():\n ui.notifications.message(\n 'Library update command sent to Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))\n else:\n ui.notifications.error('Unable to contact Plex Media Server host: {host}'.format(host=', '.join(app.PLEX_SERVER_HOST)))\n\n return self._created()\n", "path": "medusa/server/api/v2/notifications.py"}]} | 1,642 | 340 |
gh_patches_debug_529 | rasdani/github-patches | git_diff | pyscript__pyscript-1941 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Py Terminal issues aggregation
### Checklist
- [X] I added a descriptive title
- [X] I searched for other issues and couldn't find a solution or duplication
- [X] I already searched in Google and didn't find any good information or help
### What happened?
We have at least 3 issues in the current PyTerminal:
- [x] errors (on main?) are actually not shown
- [x] escape chars, **only on main**, are not recognized
- [x] the config relative URL is resolved differently between main and worker ... this is probably not strictly related to PyTerminal, but it's surely worth fixing too, because otherwise terminals can't easily switch from main to worker and/or vice versa (see https://github.com/pyscript/polyscript/issues/77)
### What browsers are you seeing the problem on? (if applicable)
_No response_
### Console info
_No response_
### Additional Context
_No response_
</issue>
<code>
[start of pyscript.core/src/stdlib/pyscript/magic_js.py]
1 import sys
2
3 import js as globalThis
4 from polyscript import js_modules
5 from pyscript.util import NotSupported
6
7 RUNNING_IN_WORKER = not hasattr(globalThis, "document")
8
9
10 # allow `from pyscript.js_modules.xxx import yyy`
11 class JSModule(object):
12 def __init__(self, name):
13 self.name = name
14
15 def __getattr__(self, field):
16 # avoid pyodide looking for non existent fields
17 if not field.startswith("_"):
18 return getattr(getattr(js_modules, self.name), field)
19
20
21 # generate N modules in the system that will proxy the real value
22 for name in globalThis.Reflect.ownKeys(js_modules):
23 sys.modules[f"pyscript.js_modules.{name}"] = JSModule(name)
24 sys.modules["pyscript.js_modules"] = js_modules
25
26 if RUNNING_IN_WORKER:
27 import js
28 import polyscript
29
30 PyWorker = NotSupported(
31 "pyscript.PyWorker",
32 "pyscript.PyWorker works only when running in the main thread",
33 )
34 window = polyscript.xworker.window
35 document = window.document
36 js.document = document
37 sync = polyscript.xworker.sync
38
39 # in workers the display does not have a default ID
40 # but there is a sync utility from xworker
41 def current_target():
42 return polyscript.target
43
44 else:
45 import _pyscript
46 from _pyscript import PyWorker
47
48 window = globalThis
49 document = globalThis.document
50 sync = NotSupported(
51 "pyscript.sync", "pyscript.sync works only when running in a worker"
52 )
53
54 # in MAIN the current element target exist, just use it
55 def current_target():
56 return _pyscript.target
57
[end of pyscript.core/src/stdlib/pyscript/magic_js.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyscript.core/src/stdlib/pyscript/magic_js.py b/pyscript.core/src/stdlib/pyscript/magic_js.py
--- a/pyscript.core/src/stdlib/pyscript/magic_js.py
+++ b/pyscript.core/src/stdlib/pyscript/magic_js.py
@@ -8,7 +8,7 @@
# allow `from pyscript.js_modules.xxx import yyy`
-class JSModule(object):
+class JSModule:
def __init__(self, name):
self.name = name
| {"golden_diff": "diff --git a/pyscript.core/src/stdlib/pyscript/magic_js.py b/pyscript.core/src/stdlib/pyscript/magic_js.py\n--- a/pyscript.core/src/stdlib/pyscript/magic_js.py\n+++ b/pyscript.core/src/stdlib/pyscript/magic_js.py\n@@ -8,7 +8,7 @@\n \n \n # allow `from pyscript.js_modules.xxx import yyy`\n-class JSModule(object):\n+class JSModule:\n def __init__(self, name):\n self.name = name\n", "issue": "Py Terminal issues aggregation\n### Checklist\r\n\r\n- [X] I added a descriptive title\r\n- [X] I searched for other issues and couldn't find a solution or duplication\r\n- [X] I already searched in Google and didn't find any good information or help\r\n\r\n### What happened?\r\n\r\nWe have at least 3 issues in the current PyTerminal:\r\n\r\n- [x] errors (on main?) are actually not shown\r\n- [x] escape chars, **only on main**, are not recognized\r\n- [x] the config relative URL is resolved differently between main and worker ... this is probably not strictly related to PyTerminal but it's surely worth fixing this too because otherwise terminals can't easily switch from main to worker and/or vice-versa (see https://github.com/pyscript/polyscript/issues/77)\r\n\r\n### What browsers are you seeing the problem on? (if applicable)\r\n\r\n_No response_\r\n\r\n### Console info\r\n\r\n_No response_\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "import sys\n\nimport js as globalThis\nfrom polyscript import js_modules\nfrom pyscript.util import NotSupported\n\nRUNNING_IN_WORKER = not hasattr(globalThis, \"document\")\n\n\n# allow `from pyscript.js_modules.xxx import yyy`\nclass JSModule(object):\n def __init__(self, name):\n self.name = name\n\n def __getattr__(self, field):\n # avoid pyodide looking for non existent fields\n if not field.startswith(\"_\"):\n return getattr(getattr(js_modules, self.name), field)\n\n\n# generate N modules in the system that will proxy the real value\nfor name in globalThis.Reflect.ownKeys(js_modules):\n sys.modules[f\"pyscript.js_modules.{name}\"] = JSModule(name)\nsys.modules[\"pyscript.js_modules\"] = js_modules\n\nif RUNNING_IN_WORKER:\n import js\n import polyscript\n\n PyWorker = NotSupported(\n \"pyscript.PyWorker\",\n \"pyscript.PyWorker works only when running in the main thread\",\n )\n window = polyscript.xworker.window\n document = window.document\n js.document = document\n sync = polyscript.xworker.sync\n\n # in workers the display does not have a default ID\n # but there is a sync utility from xworker\n def current_target():\n return polyscript.target\n\nelse:\n import _pyscript\n from _pyscript import PyWorker\n\n window = globalThis\n document = globalThis.document\n sync = NotSupported(\n \"pyscript.sync\", \"pyscript.sync works only when running in a worker\"\n )\n\n # in MAIN the current element target exist, just use it\n def current_target():\n return _pyscript.target\n", "path": "pyscript.core/src/stdlib/pyscript/magic_js.py"}]} | 1,243 | 117 |
gh_patches_debug_36418 | rasdani/github-patches | git_diff | conan-io__conan-center-index-5322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[request] <pkgconf>/<1.7.4>
The above-mentioned version has been newly released by the upstream project and is not yet available as a recipe. Please add this version.
The pkgconf recipe retrieves the sources from [https://distfiles.dereferenced.org/pkgconf](https://distfiles.dereferenced.org/pkgconf), but a git repo is available: https://github.com/pkgconf/pkgconf/tree/pkgconf-1.7.4
The distfiles website reports an expired SSL certificate when I try to install pkgconf/1.7.3.
</issue>
<code>
[start of recipes/pkgconf/all/conanfile.py]
1 from conans import ConanFile, Meson, tools
2 import os
3
4 required_conan_version = ">= 1.29.1"
5
6
7 class PkgConfConan(ConanFile):
8 name = "pkgconf"
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://git.sr.ht/~kaniini/pkgconf"
11 topics = ("conan", "pkgconf")
12 settings = "os", "arch", "compiler", "build_type"
13 license = "ISC"
14 description = "package compiler and linker metadata toolkit"
15 exports_sources = "patches/**"
16 options = {
17 "shared": [True, False],
18 "fPIC": [True, False],
19 }
20 default_options = {
21 "shared": False,
22 "fPIC": True,
23 }
24
25 _meson = None
26
27 @property
28 def _source_subfolder(self):
29 return "source_subfolder"
30
31 @property
32 def _build_subfolder(self):
33 return "build_subfolder"
34
35 def config_options(self):
36 if self.settings.os == "Windows":
37 del self.options.fPIC
38
39 def configure(self):
40 if self.options.shared:
41 del self.options.fPIC
42 del self.settings.compiler.libcxx
43 del self.settings.compiler.cppstd
44
45 def source(self):
46 tools.get(**self.conan_data["sources"][self.version])
47 os.rename("pkgconf-{}".format(self.version), self._source_subfolder)
48
49 def build_requirements(self):
50 self.build_requires("meson/0.56.2")
51
52 @property
53 def _sharedstatedir(self):
54 return os.path.join(self.package_folder, "bin", "share")
55
56 def _configure_meson(self):
57 if self._meson:
58 return self._meson
59 self._meson = Meson(self)
60 self._meson.options["tests"] = False
61 self._meson.options["sharedstatedir"] = self._sharedstatedir
62 self._meson.configure(source_folder=self._source_subfolder, build_folder=self._build_subfolder)
63 return self._meson
64
65 def _patch_sources(self):
66 for patch in self.conan_data["patches"][self.version]:
67 tools.patch(**patch)
68 tools.replace_in_file(os.path.join(self._source_subfolder, "meson.build"),
69 "shared_library(", "library(")
70 if not self.options.shared:
71 tools.replace_in_file(os.path.join(self._source_subfolder, "meson.build"),
72 "'-DLIBPKGCONF_EXPORT'",
73 "'-DPKGCONFIG_IS_STATIC'")
74
75 def build(self):
76 self._patch_sources()
77 meson = self._configure_meson()
78 meson.build()
79
80 def package(self):
81 self.copy("COPYING", src=self._source_subfolder, dst="licenses")
82 meson = self._meson
83 meson.install()
84
85 if self.settings.compiler == "Visual Studio":
86 tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.pdb")
87 if not self.options.shared:
88 os.rename(os.path.join(self.package_folder, "lib", "libpkgconf.a"),
89 os.path.join(self.package_folder, "lib", "pkgconf.lib"),)
90
91 tools.rmdir(os.path.join(self.package_folder, "share", "man"))
92 os.rename(os.path.join(self.package_folder, "share", "aclocal"),
93 os.path.join(self.package_folder, "bin", "aclocal"))
94 tools.rmdir(os.path.join(self.package_folder, "share"))
95
96 def package_info(self):
97 self.cpp_info.includedirs.append(os.path.join("include", "libpkgconf"))
98 self.cpp_info.libs = ["pkgconf"]
99 if not self.options.shared:
100 self.cpp_info.defines = ["PKGCONFIG_IS_STATIC"]
101
102 bindir = os.path.join(self.package_folder, "bin")
103 self.output.info("Appending PATH env var: {}".format(bindir))
104 self.env_info.PATH.append(bindir)
105
106 exesuffix = ".exe" if self.settings.os == "Windows" else ""
107 pkg_config = os.path.join(bindir, "pkgconf" + exesuffix).replace("\\", "/")
108 self.output.info("Setting PKG_CONFIG env var: {}".format(pkg_config))
109 self.env_info.PKG_CONFIG = pkg_config
110
111 automake_extra_includes = tools.unix_path(os.path.join(self.package_folder , "bin", "aclocal").replace("\\", "/"))
112 self.output.info("Appending AUTOMAKE_CONAN_INCLUDES env var: {}".format(automake_extra_includes))
113 self.env_info.AUTOMAKE_CONAN_INCLUDES.append(automake_extra_includes)
114
[end of recipes/pkgconf/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/pkgconf/all/conanfile.py b/recipes/pkgconf/all/conanfile.py
--- a/recipes/pkgconf/all/conanfile.py
+++ b/recipes/pkgconf/all/conanfile.py
@@ -47,7 +47,7 @@
os.rename("pkgconf-{}".format(self.version), self._source_subfolder)
def build_requirements(self):
- self.build_requires("meson/0.56.2")
+ self.build_requires("meson/0.57.2")
@property
def _sharedstatedir(self):
@@ -65,12 +65,16 @@
def _patch_sources(self):
for patch in self.conan_data["patches"][self.version]:
tools.patch(**patch)
- tools.replace_in_file(os.path.join(self._source_subfolder, "meson.build"),
- "shared_library(", "library(")
+ if tools.Version(self.version) < "1.7.4":
+ tools.replace_in_file(os.path.join(self._source_subfolder, "meson.build"),
+ "shared_library(", "library(")
if not self.options.shared:
tools.replace_in_file(os.path.join(self._source_subfolder, "meson.build"),
"'-DLIBPKGCONF_EXPORT'",
"'-DPKGCONFIG_IS_STATIC'")
+ tools.replace_in_file(os.path.join(self._source_subfolder, "meson.build"),
+ "project('pkgconf', 'c',",
+ "project('pkgconf', 'c',\ndefault_options : ['c_std=gnu99'],")
def build(self):
self._patch_sources()
@@ -92,9 +96,14 @@
os.rename(os.path.join(self.package_folder, "share", "aclocal"),
os.path.join(self.package_folder, "bin", "aclocal"))
tools.rmdir(os.path.join(self.package_folder, "share"))
+ tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
def package_info(self):
- self.cpp_info.includedirs.append(os.path.join("include", "libpkgconf"))
+ if tools.Version(self.version) < "1.7.4":
+ self.cpp_info.includedirs.append("include")
+ else:
+ self.cpp_info.includedirs.append(os.path.join("include", "pkgconf"))
+ self.cpp_info.names["pkg_config"] = "libpkgconf"
self.cpp_info.libs = ["pkgconf"]
if not self.options.shared:
self.cpp_info.defines = ["PKGCONFIG_IS_STATIC"]
| {"golden_diff": "diff --git a/recipes/pkgconf/all/conanfile.py b/recipes/pkgconf/all/conanfile.py\n--- a/recipes/pkgconf/all/conanfile.py\n+++ b/recipes/pkgconf/all/conanfile.py\n@@ -47,7 +47,7 @@\n os.rename(\"pkgconf-{}\".format(self.version), self._source_subfolder)\n \n def build_requirements(self):\n- self.build_requires(\"meson/0.56.2\")\n+ self.build_requires(\"meson/0.57.2\")\n \n @property\n def _sharedstatedir(self):\n@@ -65,12 +65,16 @@\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n- tools.replace_in_file(os.path.join(self._source_subfolder, \"meson.build\"),\n- \"shared_library(\", \"library(\")\n+ if tools.Version(self.version) < \"1.7.4\":\n+ tools.replace_in_file(os.path.join(self._source_subfolder, \"meson.build\"),\n+ \"shared_library(\", \"library(\")\n if not self.options.shared:\n tools.replace_in_file(os.path.join(self._source_subfolder, \"meson.build\"),\n \"'-DLIBPKGCONF_EXPORT'\",\n \"'-DPKGCONFIG_IS_STATIC'\")\n+ tools.replace_in_file(os.path.join(self._source_subfolder, \"meson.build\"),\n+ \"project('pkgconf', 'c',\",\n+ \"project('pkgconf', 'c',\\ndefault_options : ['c_std=gnu99'],\")\n \n def build(self):\n self._patch_sources()\n@@ -92,9 +96,14 @@\n os.rename(os.path.join(self.package_folder, \"share\", \"aclocal\"),\n os.path.join(self.package_folder, \"bin\", \"aclocal\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n+ tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n \n def package_info(self):\n- self.cpp_info.includedirs.append(os.path.join(\"include\", \"libpkgconf\"))\n+ if tools.Version(self.version) < \"1.7.4\":\n+ self.cpp_info.includedirs.append(\"include\")\n+ else:\n+ self.cpp_info.includedirs.append(os.path.join(\"include\", \"pkgconf\"))\n+ self.cpp_info.names[\"pkg_config\"] = \"libpkgconf\"\n self.cpp_info.libs = [\"pkgconf\"]\n if not self.options.shared:\n self.cpp_info.defines = [\"PKGCONFIG_IS_STATIC\"]\n", "issue": "[request] <pkgconf>/<1.7.4>\nThe above mentioned version is newly released by the upstream project and not yet available as a recipe. 
Please add this version.\r\n\r\nThe pkgconf recipe retrieves the sources from [https://distfiles.dereferenced.org/pkgconf](https://distfiles.dereferenced.org/pkgconf) but a git repo is available: https://github.com/pkgconf/pkgconf/tree/pkgconf-1.7.4 \r\n\r\nThe distfiles website is reporting an expired SSL cerrtificate if I try to install pkgconf/1.7.3\r\n\n", "before_files": [{"content": "from conans import ConanFile, Meson, tools\nimport os\n\nrequired_conan_version = \">= 1.29.1\"\n\n\nclass PkgConfConan(ConanFile):\n name = \"pkgconf\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://git.sr.ht/~kaniini/pkgconf\"\n topics = (\"conan\", \"pkgconf\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n license = \"ISC\"\n description = \"package compiler and linker metadata toolkit\"\n exports_sources = \"patches/**\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n }\n\n _meson = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"pkgconf-{}\".format(self.version), self._source_subfolder)\n\n def build_requirements(self):\n self.build_requires(\"meson/0.56.2\")\n\n @property\n def _sharedstatedir(self):\n return os.path.join(self.package_folder, \"bin\", \"share\")\n\n def _configure_meson(self):\n if self._meson:\n return self._meson\n self._meson = Meson(self)\n self._meson.options[\"tests\"] = False\n self._meson.options[\"sharedstatedir\"] = self._sharedstatedir\n self._meson.configure(source_folder=self._source_subfolder, build_folder=self._build_subfolder)\n return self._meson\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n tools.replace_in_file(os.path.join(self._source_subfolder, \"meson.build\"),\n \"shared_library(\", \"library(\")\n if not self.options.shared:\n tools.replace_in_file(os.path.join(self._source_subfolder, \"meson.build\"),\n \"'-DLIBPKGCONF_EXPORT'\",\n \"'-DPKGCONFIG_IS_STATIC'\")\n\n def build(self):\n self._patch_sources()\n meson = self._configure_meson()\n meson.build()\n\n def package(self):\n self.copy(\"COPYING\", src=self._source_subfolder, dst=\"licenses\")\n meson = self._meson\n meson.install()\n\n if self.settings.compiler == \"Visual Studio\":\n tools.remove_files_by_mask(os.path.join(self.package_folder, \"bin\"), \"*.pdb\")\n if not self.options.shared:\n os.rename(os.path.join(self.package_folder, \"lib\", \"libpkgconf.a\"),\n os.path.join(self.package_folder, \"lib\", \"pkgconf.lib\"),)\n\n tools.rmdir(os.path.join(self.package_folder, \"share\", \"man\"))\n os.rename(os.path.join(self.package_folder, \"share\", \"aclocal\"),\n os.path.join(self.package_folder, \"bin\", \"aclocal\"))\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"libpkgconf\"))\n self.cpp_info.libs = [\"pkgconf\"]\n if not self.options.shared:\n self.cpp_info.defines = [\"PKGCONFIG_IS_STATIC\"]\n\n bindir = os.path.join(self.package_folder, \"bin\")\n 
self.output.info(\"Appending PATH env var: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n\n exesuffix = \".exe\" if self.settings.os == \"Windows\" else \"\"\n pkg_config = os.path.join(bindir, \"pkgconf\" + exesuffix).replace(\"\\\\\", \"/\")\n self.output.info(\"Setting PKG_CONFIG env var: {}\".format(pkg_config))\n self.env_info.PKG_CONFIG = pkg_config\n\n automake_extra_includes = tools.unix_path(os.path.join(self.package_folder , \"bin\", \"aclocal\").replace(\"\\\\\", \"/\"))\n self.output.info(\"Appending AUTOMAKE_CONAN_INCLUDES env var: {}\".format(automake_extra_includes))\n self.env_info.AUTOMAKE_CONAN_INCLUDES.append(automake_extra_includes)\n", "path": "recipes/pkgconf/all/conanfile.py"}]} | 1,912 | 563 |
gh_patches_debug_181 | rasdani/github-patches | git_diff | unionai-oss__pandera-1419 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Date type not exported
**Describe the bug**
In the `__all__` list [here](https://github.com/unionai-oss/pandera/blob/37c24d94ae719dcf4cdc36d1f204478539fce74a/pandera/__init__.py#L104-L106), the type `Date` is missing, causing complaints from mypy if you refer to the type as `pa.Date` -- you have to fully qualify it as `pa.typing.common.Date`.
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of pandera.
- [x] (optional) I have confirmed this bug exists on the master branch of pandera.
**Note**: Please read [this guide](https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) detailing how to provide the necessary information for us to reproduce your bug.
#### Code Sample, a copy-pastable example
```python
import pandera as pa
# Mypy errors [name-defined]
class ErrorSchema(pa.DataFrameModel):
date_col: pa.Date
# Mypy is happy
class NoErrorSchema(pa.DataFrameModel):
date_col: pa.typing.common.Date
```
#### Expected behavior
No errors from mypy in both cases
#### Desktop (please complete the following information):
- OS: [Manjaro Linux kernel 6.1.60 - 1]
- Browser [Firefox 119.0]
- Version [pandera 0.17.2]
</issue>
<code>
[start of pandera/__init__.py]
1 """A flexible and expressive pandas validation library."""
2 import platform
3
4 import pandera.backends
5 from pandera import errors, external_config, typing
6 from pandera.accessors import pandas_accessor
7 from pandera.api import extensions
8 from pandera.api.checks import Check
9 from pandera.api.hypotheses import Hypothesis
10 from pandera.api.pandas.array import SeriesSchema
11 from pandera.api.pandas.container import DataFrameSchema
12 from pandera.api.pandas.components import Column, Index, MultiIndex
13 from pandera.api.pandas.model import DataFrameModel, SchemaModel
14 from pandera.api.pandas.model_components import Field, check, dataframe_check
15 from pandera.decorators import check_input, check_io, check_output, check_types
16 from pandera.dtypes import (
17 Bool,
18 Category,
19 Complex,
20 Complex64,
21 Complex128,
22 DataType,
23 Date,
24 DateTime,
25 Decimal,
26 Float,
27 Float16,
28 Float32,
29 Float64,
30 Int,
31 Int8,
32 Int16,
33 Int32,
34 Int64,
35 String,
36 Timedelta,
37 Timestamp,
38 UInt,
39 UInt8,
40 UInt16,
41 UInt32,
42 UInt64,
43 )
44 from pandera.engines.numpy_engine import Object
45 from pandera.engines.pandas_engine import (
46 BOOL,
47 INT8,
48 INT16,
49 INT32,
50 INT64,
51 PANDAS_1_2_0_PLUS,
52 PANDAS_1_3_0_PLUS,
53 STRING,
54 UINT8,
55 UINT16,
56 UINT32,
57 UINT64,
58 pandas_version,
59 )
60
61 import pandera.backends.base.builtin_checks
62 import pandera.backends.base.builtin_hypotheses
63 import pandera.backends.pandas
64
65 from pandera.schema_inference.pandas import infer_schema
66 from pandera.version import __version__
67
68
69 if platform.system() != "Windows":
70 # pylint: disable=ungrouped-imports
71 from pandera.dtypes import Complex256, Float128
72
73
74 try:
75 import dask.dataframe
76
77 from pandera.accessors import dask_accessor
78 except ImportError:
79 pass
80
81
82 try:
83 import pyspark.pandas
84
85 from pandera.accessors import pyspark_accessor
86 except ImportError:
87 pass
88
89 try:
90 import modin.pandas
91
92 from pandera.accessors import modin_accessor
93 except ImportError:
94 pass
95
96 __all__ = [
97 # dtypes
98 "Bool",
99 "Category",
100 "Complex",
101 "Complex64",
102 "Complex128",
103 "Complex256",
104 "DataType",
105 "DateTime",
106 "Float",
107 "Float16",
108 "Float32",
109 "Float64",
110 "Float128",
111 "Int",
112 "Int8",
113 "Int16",
114 "Int32",
115 "Int64",
116 "String",
117 "Timedelta",
118 "Timestamp",
119 "UInt",
120 "UInt8",
121 "UInt16",
122 "UInt32",
123 "UInt64",
124 # numpy_engine
125 "Object",
126 # pandas_engine
127 "BOOL",
128 "INT8",
129 "INT16",
130 "INT32",
131 "INT64",
132 "PANDAS_1_3_0_PLUS",
133 "STRING",
134 "UINT8",
135 "UINT16",
136 "UINT32",
137 "UINT64",
138 # pandera.engines.pandas_engine
139 "pandas_version",
140 # checks
141 "Check",
142 # decorators
143 "check_input",
144 "check_io",
145 "check_output",
146 "check_types",
147 # hypotheses
148 "Hypothesis",
149 # model
150 "DataFrameModel",
151 "SchemaModel",
152 # model_components
153 "Field",
154 "check",
155 "dataframe_check",
156 # schema_components
157 "Column",
158 "Index",
159 "MultiIndex",
160 # schema_inference
161 "infer_schema",
162 # schemas
163 "DataFrameSchema",
164 "SeriesSchema",
165 # version
166 "__version__",
167 ]
168
[end of pandera/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pandera/__init__.py b/pandera/__init__.py
--- a/pandera/__init__.py
+++ b/pandera/__init__.py
@@ -101,6 +101,7 @@
"Complex64",
"Complex128",
"Complex256",
+ "Date",
"DataType",
"DateTime",
"Float",
| {"golden_diff": "diff --git a/pandera/__init__.py b/pandera/__init__.py\n--- a/pandera/__init__.py\n+++ b/pandera/__init__.py\n@@ -101,6 +101,7 @@\n \"Complex64\",\n \"Complex128\",\n \"Complex256\",\n+ \"Date\",\n \"DataType\",\n \"DateTime\",\n \"Float\",\n", "issue": "Date type not exported\n**Describe the bug**\r\nIn the `__all__` list [here](https://github.com/unionai-oss/pandera/blob/37c24d94ae719dcf4cdc36d1f204478539fce74a/pandera/__init__.py#L104-L106), the type `Date` is missing, causing complaints from mypy if you refer to the type as `pa.Date` -- you have to fully qualify it as `pa.typing.common.Date`.\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n- [x] (optional) I have confirmed this bug exists on the master branch of pandera.\r\n\r\n**Note**: Please read [this guide](https://matthewrocklin.com/blog/work/2018/02/28/minimal-bug-reports) detailing how to provide the necessary information for us to reproduce your bug.\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\n```python\r\nimport pandera as pa\r\n\r\n# Mypy errors [name-defined]\r\nclass ErrorSchema(pa.DataFrameModel):\r\n date_col: pa.Date\r\n\r\n# Mypy is happy\r\nclass NoErrorSchema(pa.DataFrameModel):\r\n date_col: pa.typing.common.Date\r\n```\r\n\r\n#### Expected behavior\r\nNo errors from mypy in both cases\r\n\r\n#### Desktop (please complete the following information):\r\n\r\n - OS: [Manjaro Linux kernel 6.1.60 - 1]\r\n - Browser [Firefox 119.0]\r\n - Version [pandera 0.17.2]\n", "before_files": [{"content": "\"\"\"A flexible and expressive pandas validation library.\"\"\"\nimport platform\n\nimport pandera.backends\nfrom pandera import errors, external_config, typing\nfrom pandera.accessors import pandas_accessor\nfrom pandera.api import extensions\nfrom pandera.api.checks import Check\nfrom pandera.api.hypotheses import Hypothesis\nfrom pandera.api.pandas.array import SeriesSchema\nfrom pandera.api.pandas.container import DataFrameSchema\nfrom pandera.api.pandas.components import Column, Index, MultiIndex\nfrom pandera.api.pandas.model import DataFrameModel, SchemaModel\nfrom pandera.api.pandas.model_components import Field, check, dataframe_check\nfrom pandera.decorators import check_input, check_io, check_output, check_types\nfrom pandera.dtypes import (\n Bool,\n Category,\n Complex,\n Complex64,\n Complex128,\n DataType,\n Date,\n DateTime,\n Decimal,\n Float,\n Float16,\n Float32,\n Float64,\n Int,\n Int8,\n Int16,\n Int32,\n Int64,\n String,\n Timedelta,\n Timestamp,\n UInt,\n UInt8,\n UInt16,\n UInt32,\n UInt64,\n)\nfrom pandera.engines.numpy_engine import Object\nfrom pandera.engines.pandas_engine import (\n BOOL,\n INT8,\n INT16,\n INT32,\n INT64,\n PANDAS_1_2_0_PLUS,\n PANDAS_1_3_0_PLUS,\n STRING,\n UINT8,\n UINT16,\n UINT32,\n UINT64,\n pandas_version,\n)\n\nimport pandera.backends.base.builtin_checks\nimport pandera.backends.base.builtin_hypotheses\nimport pandera.backends.pandas\n\nfrom pandera.schema_inference.pandas import infer_schema\nfrom pandera.version import __version__\n\n\nif platform.system() != \"Windows\":\n # pylint: disable=ungrouped-imports\n from pandera.dtypes import Complex256, Float128\n\n\ntry:\n import dask.dataframe\n\n from pandera.accessors import dask_accessor\nexcept ImportError:\n pass\n\n\ntry:\n import pyspark.pandas\n\n from pandera.accessors import pyspark_accessor\nexcept ImportError:\n pass\n\ntry:\n import modin.pandas\n\n from pandera.accessors import 
modin_accessor\nexcept ImportError:\n pass\n\n__all__ = [\n # dtypes\n \"Bool\",\n \"Category\",\n \"Complex\",\n \"Complex64\",\n \"Complex128\",\n \"Complex256\",\n \"DataType\",\n \"DateTime\",\n \"Float\",\n \"Float16\",\n \"Float32\",\n \"Float64\",\n \"Float128\",\n \"Int\",\n \"Int8\",\n \"Int16\",\n \"Int32\",\n \"Int64\",\n \"String\",\n \"Timedelta\",\n \"Timestamp\",\n \"UInt\",\n \"UInt8\",\n \"UInt16\",\n \"UInt32\",\n \"UInt64\",\n # numpy_engine\n \"Object\",\n # pandas_engine\n \"BOOL\",\n \"INT8\",\n \"INT16\",\n \"INT32\",\n \"INT64\",\n \"PANDAS_1_3_0_PLUS\",\n \"STRING\",\n \"UINT8\",\n \"UINT16\",\n \"UINT32\",\n \"UINT64\",\n # pandera.engines.pandas_engine\n \"pandas_version\",\n # checks\n \"Check\",\n # decorators\n \"check_input\",\n \"check_io\",\n \"check_output\",\n \"check_types\",\n # hypotheses\n \"Hypothesis\",\n # model\n \"DataFrameModel\",\n \"SchemaModel\",\n # model_components\n \"Field\",\n \"check\",\n \"dataframe_check\",\n # schema_components\n \"Column\",\n \"Index\",\n \"MultiIndex\",\n # schema_inference\n \"infer_schema\",\n # schemas\n \"DataFrameSchema\",\n \"SeriesSchema\",\n # version\n \"__version__\",\n]\n", "path": "pandera/__init__.py"}]} | 2,200 | 91 |
gh_patches_debug_18825 | rasdani/github-patches | git_diff | mesonbuild__meson-5303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scanbuild.py doesn't handle scan-build-{7,8}
Here: https://github.com/mesonbuild/meson/blob/1e7aea65e68a43b0319a4a28908daddfec621548/mesonbuild/scripts/scanbuild.py#L39
As with other LLVM/Clang tools (e.g. `llvm-config`), the version number suffix is just the major component since LLVM 7. The LLVM dependency support handles this correctly here: https://github.com/mesonbuild/meson/blob/54db2c9babe6391bba525f92573ceeadb8303e78/mesonbuild/dependencies/dev.py#L208
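To make the naming concrete, here is a hypothetical sketch of the candidate names implied by the major-only scheme (an illustration only, not the actual patch; the exact spellings packagers use may vary):

```python
# Hypothetical sketch: candidate scan-build names under the major-only
# versioning scheme used since LLVM 7, alongside the unversioned binary.
new_majors = [8, 7]
candidates = ['scan-build'] + ['scan-build-{}'.format(m) for m in new_majors]
print(candidates)  # ['scan-build', 'scan-build-8', 'scan-build-7']
```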
</issue>
<code>
[start of mesonbuild/scripts/scanbuild.py]
1 # Copyright 2016 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shlex
17 import subprocess
18 import shutil
19 import tempfile
20 from ..environment import detect_ninja
21 from ..mesonlib import Popen_safe
22
23 def scanbuild(exelist, srcdir, blddir, privdir, logdir, args):
24 with tempfile.TemporaryDirectory(dir=privdir) as scandir:
25 meson_cmd = exelist + args
26 build_cmd = exelist + ['-o', logdir, detect_ninja(), '-C', scandir]
27 rc = subprocess.call(meson_cmd + [srcdir, scandir])
28 if rc != 0:
29 return rc
30 return subprocess.call(build_cmd)
31
32 def run(args):
33 srcdir = args[0]
34 blddir = args[1]
35 meson_cmd = args[2:]
36 privdir = os.path.join(blddir, 'meson-private')
37 logdir = os.path.join(blddir, 'meson-logs/scanbuild')
38 shutil.rmtree(logdir, ignore_errors=True)
39 tools = [
40 'scan-build', # base
41 'scan-build-5.0', 'scan-build50', # latest stable release
42 'scan-build-4.0', 'scan-build40', # old stable releases
43 'scan-build-3.9', 'scan-build39',
44 'scan-build-3.8', 'scan-build38',
45 'scan-build-3.7', 'scan-build37',
46 'scan-build-3.6', 'scan-build36',
47 'scan-build-3.5', 'scan-build35',
48 'scan-build-6.0', 'scan-build-devel', # development snapshot
49 ]
50 toolname = 'scan-build'
51 for tool in tools:
52 try:
53 p, out = Popen_safe([tool, '--help'])[:2]
54 except (FileNotFoundError, PermissionError):
55 continue
56 if p.returncode != 0:
57 continue
58 else:
59 toolname = tool
60 break
61
62 if 'SCANBUILD' in os.environ:
63 exelist = shlex.split(os.environ['SCANBUILD'])
64 else:
65 exelist = [toolname]
66
67 try:
68 Popen_safe(exelist + ['--help'])
69 except OSError:
70 print('Could not execute scan-build "%s"' % ' '.join(exelist))
71 return 1
72 return scanbuild(exelist, srcdir, blddir, privdir, logdir, meson_cmd)
73
[end of mesonbuild/scripts/scanbuild.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mesonbuild/scripts/scanbuild.py b/mesonbuild/scripts/scanbuild.py
--- a/mesonbuild/scripts/scanbuild.py
+++ b/mesonbuild/scripts/scanbuild.py
@@ -38,14 +38,17 @@
shutil.rmtree(logdir, ignore_errors=True)
tools = [
'scan-build', # base
- 'scan-build-5.0', 'scan-build50', # latest stable release
- 'scan-build-4.0', 'scan-build40', # old stable releases
+ 'scan-build-8.0', 'scan-build80',
+ 'scan-build-7.0', 'scan-build70',
+ 'scan-build-6.0', 'scan-build60',
+ 'scan-build-5.0', 'scan-build50',
+ 'scan-build-4.0', 'scan-build40',
'scan-build-3.9', 'scan-build39',
'scan-build-3.8', 'scan-build38',
'scan-build-3.7', 'scan-build37',
'scan-build-3.6', 'scan-build36',
'scan-build-3.5', 'scan-build35',
- 'scan-build-6.0', 'scan-build-devel', # development snapshot
+ 'scan-build-9.0', 'scan-build-devel', # development snapshot
]
toolname = 'scan-build'
for tool in tools:
| {"golden_diff": "diff --git a/mesonbuild/scripts/scanbuild.py b/mesonbuild/scripts/scanbuild.py\n--- a/mesonbuild/scripts/scanbuild.py\n+++ b/mesonbuild/scripts/scanbuild.py\n@@ -38,14 +38,17 @@\n shutil.rmtree(logdir, ignore_errors=True)\n tools = [\n 'scan-build', # base\n- 'scan-build-5.0', 'scan-build50', # latest stable release\n- 'scan-build-4.0', 'scan-build40', # old stable releases\n+ 'scan-build-8.0', 'scan-build80',\n+ 'scan-build-7.0', 'scan-build70',\n+ 'scan-build-6.0', 'scan-build60',\n+ 'scan-build-5.0', 'scan-build50',\n+ 'scan-build-4.0', 'scan-build40',\n 'scan-build-3.9', 'scan-build39',\n 'scan-build-3.8', 'scan-build38',\n 'scan-build-3.7', 'scan-build37',\n 'scan-build-3.6', 'scan-build36',\n 'scan-build-3.5', 'scan-build35',\n- 'scan-build-6.0', 'scan-build-devel', # development snapshot\n+ 'scan-build-9.0', 'scan-build-devel', # development snapshot\n ]\n toolname = 'scan-build'\n for tool in tools:\n", "issue": "scanbuild.py doesn't handle scan-build-{7,8}\nHere: https://github.com/mesonbuild/meson/blob/1e7aea65e68a43b0319a4a28908daddfec621548/mesonbuild/scripts/scanbuild.py#L39\r\n\r\nAs with other LLVM/Clang tools (e.g. `llvm-config`), the version number suffix is just the major component since LLVM 7. The LLVM dependency support handles this correctly here: https://github.com/mesonbuild/meson/blob/54db2c9babe6391bba525f92573ceeadb8303e78/mesonbuild/dependencies/dev.py#L208\n", "before_files": [{"content": "# Copyright 2016 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shlex\nimport subprocess\nimport shutil\nimport tempfile\nfrom ..environment import detect_ninja\nfrom ..mesonlib import Popen_safe\n\ndef scanbuild(exelist, srcdir, blddir, privdir, logdir, args):\n with tempfile.TemporaryDirectory(dir=privdir) as scandir:\n meson_cmd = exelist + args\n build_cmd = exelist + ['-o', logdir, detect_ninja(), '-C', scandir]\n rc = subprocess.call(meson_cmd + [srcdir, scandir])\n if rc != 0:\n return rc\n return subprocess.call(build_cmd)\n\ndef run(args):\n srcdir = args[0]\n blddir = args[1]\n meson_cmd = args[2:]\n privdir = os.path.join(blddir, 'meson-private')\n logdir = os.path.join(blddir, 'meson-logs/scanbuild')\n shutil.rmtree(logdir, ignore_errors=True)\n tools = [\n 'scan-build', # base\n 'scan-build-5.0', 'scan-build50', # latest stable release\n 'scan-build-4.0', 'scan-build40', # old stable releases\n 'scan-build-3.9', 'scan-build39',\n 'scan-build-3.8', 'scan-build38',\n 'scan-build-3.7', 'scan-build37',\n 'scan-build-3.6', 'scan-build36',\n 'scan-build-3.5', 'scan-build35',\n 'scan-build-6.0', 'scan-build-devel', # development snapshot\n ]\n toolname = 'scan-build'\n for tool in tools:\n try:\n p, out = Popen_safe([tool, '--help'])[:2]\n except (FileNotFoundError, PermissionError):\n continue\n if p.returncode != 0:\n continue\n else:\n toolname = tool\n break\n\n if 'SCANBUILD' in os.environ:\n exelist = shlex.split(os.environ['SCANBUILD'])\n else:\n exelist = [toolname]\n\n try:\n Popen_safe(exelist + 
['--help'])\n except OSError:\n print('Could not execute scan-build \"%s\"' % ' '.join(exelist))\n return 1\n return scanbuild(exelist, srcdir, blddir, privdir, logdir, meson_cmd)\n", "path": "mesonbuild/scripts/scanbuild.py"}]} | 1,519 | 336 |
gh_patches_debug_1260 | rasdani/github-patches | git_diff | bridgecrewio__checkov-4012 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dependent Package "packaging" upgrade halts invocation
**Describe the issue**
Currently we are running Checkov in a CI environment in Azure DevOps over our Terraform configurations. Earlier today Checkov started failing to run; at first this was believed to be linked to the Checkov release published earlier today.
Investigation, however, has shown that the dependency `packaging` has also had a new release, in which it dropped `LegacyVersion` from its codebase (see the stack trace below).
The quick solution is to pin `packaging==21.3` to ensure the needed functionality is in place.
This seems to apply only to environments that install everything fresh, as it went unnoticed in local development until the CI pipeline triggered the issue.
**Examples**
In the ADO CI this simple version should recreate the behavior:
```
- script: |
python -m pip install --upgrade pip setuptools wheel
pip install checkov
displayName: "Install Checkov"
- task: Bash@3
displayName: Run Checkov tests
inputs:
targetType: "inline"
script: |
checkov -d . -o cli
```
**Exception Trace**
```sh
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/3.8.15/x64/bin/checkov", line 2, in <module>
from checkov.main import run
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/main.py", line 20, in <module>
from checkov.argo_workflows.runner import Runner as argo_workflows_runner
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/argo_workflows/runner.py", line 7, in <module>
from checkov.common.images.image_referencer import ImageReferencer, Image
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/images/image_referencer.py", line 12, in <module>
from checkov.common.bridgecrew.vulnerability_scanning.image_scanner import image_scanner
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/image_scanner.py", line 15, in <module>
from checkov.common.bridgecrew.vulnerability_scanning.integrations.docker_image_scanning import \
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/integrations/docker_image_scanning.py", line 8, in <module>
from checkov.common.bridgecrew.vulnerability_scanning.integrations.twistcli import TwistcliIntegration
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/integrations/twistcli.py", line 11, in <module>
from checkov.common.bridgecrew.platform_integration import bc_integration
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/platform_integration.py", line 31, in <module>
from checkov.common.bridgecrew.wrapper import reduce_scan_reports, persist_checks_results, \
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/wrapper.py", line 14, in <module>
from checkov.common.util.json_utils import CustomJSONEncoder
File "/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/util/json_utils.py", line 6, in <module>
from packaging.version import LegacyVersion, Version
ImportError: cannot import name 'LegacyVersion' from 'packaging.version' (/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/packaging/version.py)
```
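
For reference, the failing import can be demonstrated in isolation. The snippet below is only an illustration of the API change, not the fix being proposed (the quick fix is pinning `packaging==21.3`); the version noted in the comments is what we observed in CI and may differ elsewhere:

```python
# Illustration only: packaging 22.0 removed LegacyVersion but kept Version.
import packaging

print(packaging.__version__)  # 22.0 in the failing CI environments

from packaging.version import Version  # importable on both 21.3 and 22.0

try:
    from packaging.version import LegacyVersion  # gone as of packaging 22.0
except ImportError:
    LegacyVersion = None  # fallback a library could use if it cannot pin packaging
```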
**Desktop (please complete the following information):**
- OS: Ubuntu 20.04 ADO Pipeline Container
- Checkov Version: tested 2.2.124 and 2.2.116, likely applies to others if they have the dependency
**Additional context**
The `packaging` release that causes this issue is `22.0`; `21.3` appears to function as expected.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2==0.3.47",
37 "bc-detect-secrets==1.4.5",
38 "deep-merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath<2,>=1.5.0",
44 "pyyaml>=5.4.1",
45 "boto3>=1.17",
46 "gitpython",
47 "jmespath",
48 "tqdm",
49 "update-checker",
50 "semantic-version",
51 "packaging",
52 "cloudsplaining>=0.4.3",
53 "networkx<2.7",
54 "dockerfile-parse",
55 "docker",
56 "configargparse",
57 "argcomplete",
58 "policyuniverse",
59 "typing-extensions>=4.1.0",
60 "importlib-metadata>=0.12",
61 "cachetools",
62 "cyclonedx-python-lib>=2.4.0,<4.0.0",
63 "packageurl-python",
64 "click>=8.0.0",
65 "aiohttp",
66 "aiodns",
67 "aiomultiprocess",
68 "jsonpath-ng",
69 "jsonschema>=3.0.2,<4.0.0",
70 "prettytable>=3.0.0",
71 "pycep-parser==0.3.9",
72 "charset-normalizer",
73 "pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'",
74 "pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'",
75 "schema",
76 "requests>=2.26.0",
77 ],
78 dependency_links=[], # keep it empty, needed for pipenv-setup
79 license="Apache License 2.0",
80 name="checkov",
81 version=version,
82 python_requires=">=3.7",
83 description="Infrastructure as code static analysis",
84 author="bridgecrew",
85 author_email="[email protected]",
86 url="https://github.com/bridgecrewio/checkov",
87 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
88 include_package_data=True,
89 package_dir={
90 "checkov.bicep.checks.graph_checks": "checkov/bicep/checks/graph_checks",
91 "checkov.cloudformation.checks.graph_checks": "checkov/cloudformation/checks/graph_checks",
92 "checkov.dockerfile.checks.graph_checks": "checkov/dockerfile/checks/graph_checks",
93 "checkov.github_actions.checks.graph_checks": "checkov/github_actions/checks/graph_checks",
94 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks",
95 },
96 package_data={
97 "checkov": ["py.typed"],
98 "checkov.bicep.checks.graph_checks": ["*.yaml"],
99 "checkov.common.util.templates": ["*.jinja2"],
100 "checkov.dockerfile.checks.graph_checks": ["*.yaml"],
101 "checkov.github_actions.checks.graph_checks": ["*.yaml"],
102 "checkov.terraform.checks.graph_checks": [
103 "aws/*.yaml",
104 "gcp/*.yaml",
105 "azure/*.yaml",
106 ],
107 },
108 scripts=["bin/checkov", "bin/checkov.cmd"],
109 long_description=long_description,
110 long_description_content_type="text/markdown",
111 classifiers=[
112 "Environment :: Console",
113 "Intended Audience :: Developers",
114 "Intended Audience :: System Administrators",
115 "License :: OSI Approved :: Apache Software License",
116 "Programming Language :: Python :: 3 :: Only",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Programming Language :: Python :: 3.10",
121 "Programming Language :: Python :: 3.11",
122 "Topic :: Security",
123 "Topic :: Software Development :: Build Tools",
124 "Typing :: Typed",
125 ],
126 )
127
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -48,7 +48,7 @@
"tqdm",
"update-checker",
"semantic-version",
- "packaging",
+ "packaging==21.3",
"cloudsplaining>=0.4.3",
"networkx<2.7",
"dockerfile-parse",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -48,7 +48,7 @@\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n- \"packaging\",\n+ \"packaging==21.3\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"dockerfile-parse\",\n", "issue": "Dependent Package \"packaging\" upgrade halts invocation\n**Describe the issue**\r\nCurrently we are running checkov in a CI environment in Azure DevOps over our Terraform configurations. Earlier today Checkov started failing to run, at first it was believed to link to the release that occurred earlier.\r\nInvestigation though has shown that the dependency `packaging` has also had a release, wherein it has dropped `LegacyVersion` from its codebase (see stack trace).\r\nThe quick solution is to pin `packaging==21.3` to ensure the needed codebase functionality is in place.\r\nThis seems to only apply to environments that fresh install everything, as this was innoticed in local development until the CI pipeline triggered the issue.\r\n\r\n**Examples**\r\nIn the ADO CI this simple version should recreate the behavior:\r\n```\r\n - script: |\r\n python -m pip install --upgrade pip setuptools wheel\r\n pip install checkov\r\n displayName: \"Install Checkov\"\r\n\r\n - task: Bash@3\r\n displayName: Run Checkov tests\r\n inputs:\r\n targetType: \"inline\"\r\n script: |\r\n checkov -d . -o cli\r\n```\r\n\r\n**Exception Trace**\r\n```sh\r\nTraceback (most recent call last):\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/bin/checkov\", line 2, in <module>\r\n from checkov.main import run\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/main.py\", line 20, in <module>\r\n from checkov.argo_workflows.runner import Runner as argo_workflows_runner\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/argo_workflows/runner.py\", line 7, in <module>\r\n from checkov.common.images.image_referencer import ImageReferencer, Image\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/images/image_referencer.py\", line 12, in <module>\r\n from checkov.common.bridgecrew.vulnerability_scanning.image_scanner import image_scanner\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/image_scanner.py\", line 15, in <module>\r\n from checkov.common.bridgecrew.vulnerability_scanning.integrations.docker_image_scanning import \\\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/integrations/docker_image_scanning.py\", line 8, in <module>\r\n from checkov.common.bridgecrew.vulnerability_scanning.integrations.twistcli import TwistcliIntegration\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/vulnerability_scanning/integrations/twistcli.py\", line 11, in <module>\r\n from checkov.common.bridgecrew.platform_integration import bc_integration\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/platform_integration.py\", line 31, in <module>\r\n from checkov.common.bridgecrew.wrapper import reduce_scan_reports, persist_checks_results, \\\r\n File \"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/bridgecrew/wrapper.py\", line 14, in <module>\r\n from checkov.common.util.json_utils import CustomJSONEncoder\r\n File 
\"/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/checkov/common/util/json_utils.py\", line 6, in <module>\r\n from packaging.version import LegacyVersion, Version\r\nImportError: cannot import name 'LegacyVersion' from 'packaging.version' (/opt/hostedtoolcache/Python/3.8.15/x64/lib/python3.8/site-packages/packaging/version.py)\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu 20.04 ADO Pipeline Container\r\n - Checkov Version: tested 2.2.124 and 2.2.116, likely applies to others if they have the dependency\r\n\r\n**Additional context**\r\nRelease in packaging that causes this issue is `22.0`, `21.3` appears to function as expected.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2==0.3.47\",\n \"bc-detect-secrets==1.4.5\",\n \"deep-merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath<2,>=1.5.0\",\n \"pyyaml>=5.4.1\",\n \"boto3>=1.17\",\n \"gitpython\",\n \"jmespath\",\n \"tqdm\",\n \"update-checker\",\n \"semantic-version\",\n \"packaging\",\n \"cloudsplaining>=0.4.3\",\n \"networkx<2.7\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"argcomplete\",\n \"policyuniverse\",\n \"typing-extensions>=4.1.0\",\n \"importlib-metadata>=0.12\",\n \"cachetools\",\n \"cyclonedx-python-lib>=2.4.0,<4.0.0\",\n \"packageurl-python\",\n \"click>=8.0.0\",\n \"aiohttp\",\n \"aiodns\",\n \"aiomultiprocess\",\n \"jsonpath-ng\",\n \"jsonschema>=3.0.2,<4.0.0\",\n \"prettytable>=3.0.0\",\n \"pycep-parser==0.3.9\",\n \"charset-normalizer\",\n \"pyston-autoload==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'\",\n \"pyston==2.3.5; python_version < '3.11' and (sys_platform == 'linux' or sys_platform == 'darwin') and platform_machine == 'x86_64'\",\n \"schema\",\n \"requests>=2.26.0\",\n ],\n dependency_links=[], # keep it empty, needed for pipenv-setup\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.bicep.checks.graph_checks\": \"checkov/bicep/checks/graph_checks\",\n \"checkov.cloudformation.checks.graph_checks\": \"checkov/cloudformation/checks/graph_checks\",\n \"checkov.dockerfile.checks.graph_checks\": \"checkov/dockerfile/checks/graph_checks\",\n \"checkov.github_actions.checks.graph_checks\": 
\"checkov/github_actions/checks/graph_checks\",\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\",\n },\n package_data={\n \"checkov\": [\"py.typed\"],\n \"checkov.bicep.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.common.util.templates\": [\"*.jinja2\"],\n \"checkov.dockerfile.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.github_actions.checks.graph_checks\": [\"*.yaml\"],\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ],\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n \"Typing :: Typed\",\n ],\n)\n", "path": "setup.py"}]} | 2,960 | 93 |
gh_patches_debug_44167 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError with kafka-python integration in 6.10.0
**Describe the bug**:
After upgrading the agent to 6.10.0, we immediately started seeing AttributeError failures in the ElasticAPM code when calling through to our kafka-python code paths (we've had to roll back to 6.9 because of this).
**To Reproduce**
Try to send a message using the KafkaProducer class in kafka-python.
Traceback:
```python
AttributeError: 'NoneType' object has no attribute 'id'
[...]
File "elasticapm/instrumentation/packages/kafka.py", line 94, in call_if_sampling
return self.call(module, method, wrapped, instance, args, kwargs)
File "elasticapm/instrumentation/packages/kafka.py", line 106, in call
return self._trace_send(instance, wrapped, destination_info=destination_info, *args, **kwargs)
File "elasticapm/instrumentation/packages/kafka.py", line 71, in _trace_send
tp = transaction.trace_parent.copy_from(span_id=span.id)
```
I have had to remove proprietary code from the stack trace. However, the snipped lines of code before it hits the elasticapm code paths are basically just doing:
```python
from kafka import KafkaProducer
producer = KafkaProducer(**kafka_config) # where kafka_config is a dict of config options.
producer.send(topic, message).get()
```
**Environment (please complete the following information)**
- OS: Linux
- Python version: 3.9.6-slim-bullseye (Docker container)
- Framework and version [e.g. Django 2.1]: Django 3.2.13, kafka-python 2.0.2
- Agent version: 6.10.0
</issue>
<code>
[start of elasticapm/instrumentation/packages/kafka.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import time
32 from typing import Optional
33
34 import elasticapm
35 from elasticapm import get_client
36 from elasticapm.conf import constants
37 from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
38 from elasticapm.traces import DroppedSpan, capture_span, execution_context
39 from elasticapm.utils.disttracing import TraceParent
40
41
42 class KafkaInstrumentation(AbstractInstrumentedModule):
43
44 instrument_list = [
45 ("kafka", "KafkaProducer.send"),
46 ("kafka", "KafkaConsumer.poll"),
47 ("kafka", "KafkaConsumer.__next__"),
48 ]
49 provider_name = "kafka"
50 name = "kafka"
51
52 def _trace_send(self, instance, wrapped, *args, destination_info=None, **kwargs):
53 topic = args[0] if args else kwargs["topic"]
54 headers = args[4] if len(args) > 4 else kwargs.get("headers", None)
55
56 span_name = f"Kafka SEND to {topic}"
57 destination_info["service"]["resource"] += topic
58 with capture_span(
59 name=span_name,
60 span_type="messaging",
61 span_subtype=self.provider_name,
62 span_action="send",
63 leaf=True,
64 extra={
65 "message": {"queue": {"name": topic}},
66 "destination": destination_info,
67 },
68 ) as span:
69 transaction = execution_context.get_transaction()
70 if transaction:
71 tp = transaction.trace_parent.copy_from(span_id=span.id)
72 if headers:
73 headers.append((constants.TRACEPARENT_BINARY_HEADER_NAME, tp.to_binary()))
74 else:
75 headers = [(constants.TRACEPARENT_BINARY_HEADER_NAME, tp.to_binary())]
76 if len(args) > 4:
77 args = list(args)
78 args[4] = headers
79 else:
80 kwargs["headers"] = headers
81 result = wrapped(*args, **kwargs)
82 if instance and instance._metadata.controller and not isinstance(span, DroppedSpan):
83 address = instance._metadata.controller[1]
84 port = instance._metadata.controller[2]
85 span.context["destination"]["address"] = address
86 span.context["destination"]["port"] = port
87 return result
88
89 def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):
90 # Contrasting to the superclass implementation, we *always* want to
91 # return a proxied connection, even if there is no ongoing elasticapm
92 # transaction yet. This ensures that we instrument the cursor once
93 # the transaction started.
94 return self.call(module, method, wrapped, instance, args, kwargs)
95
96 def call(self, module, method, wrapped, instance, args, kwargs):
97 client = get_client()
98 destination_info = {
99 "service": {"name": "kafka", "resource": "kafka/", "type": "messaging"},
100 }
101
102 if method == "KafkaProducer.send":
103 topic = args[0] if args else kwargs["topic"]
104 if client.should_ignore_topic(topic) or not execution_context.get_transaction():
105 return wrapped(*args, **kwargs)
106 return self._trace_send(instance, wrapped, destination_info=destination_info, *args, **kwargs)
107
108 elif method == "KafkaConsumer.poll":
109 transaction = execution_context.get_transaction()
110 if transaction:
111 with capture_span(
112 name="Kafka POLL",
113 span_type="messaging",
114 span_subtype=self.provider_name,
115 span_action="poll",
116 leaf=True,
117 extra={
118 "destination": destination_info,
119 },
120 ) as span:
121 if not isinstance(span, DroppedSpan) and instance._subscription.subscription:
122 span.name += " from " + ", ".join(sorted(instance._subscription.subscription))
123 results = wrapped(*args, **kwargs)
124 return results
125 else:
126 return wrapped(*args, **kwargs)
127
128 elif method == "KafkaConsumer.__next__":
129 transaction = execution_context.get_transaction()
130 if transaction and transaction.transaction_type != "messaging":
131 # somebody started a transaction outside of the consumer,
132 # so we capture it as a span, and record the causal trace as a link
133 with capture_span(
134 name="consumer",
135 span_type="messaging",
136 span_subtype=self.provider_name,
137 span_action="receive",
138 leaf=True,
139 extra={
140 "message": {"queue": {"name": ""}},
141 "destination": destination_info,
142 },
143 ) as span:
144 try:
145 result = wrapped(*args, **kwargs)
146 except StopIteration:
147 span.cancel()
148 raise
149 if not isinstance(span, DroppedSpan):
150 topic = result[0]
151 if client.should_ignore_topic(topic):
152 span.cancel()
153 return result
154 trace_parent = self.get_traceparent_from_result(result)
155 if trace_parent:
156 span.add_link(trace_parent)
157 destination_info["service"]["resource"] += topic
158 span.context["message"]["queue"]["name"] = topic
159 span.name = "Kafka RECEIVE from " + topic
160 return result
161 else:
162 # No transaction running, or this is a transaction started by us,
163 # so let's end it and start the next,
164 # unless a StopIteration is raised, at which point we do nothing.
165 if transaction:
166 client.end_transaction()
167 result = wrapped(*args, **kwargs)
168 topic = result[0]
169 if client.should_ignore_topic(topic):
170 return result
171 trace_parent = self.get_traceparent_from_result(result)
172 transaction = client.begin_transaction("messaging", trace_parent=trace_parent)
173 if result.timestamp_type == 0:
174 current_time_millis = int(round(time.time() * 1000))
175 age = current_time_millis - result.timestamp
176 transaction.context = {
177 "message": {"age": {"ms": age}, "queue": {"name": topic}},
178 "service": {"framework": {"name": "Kafka"}},
179 }
180 transaction_name = "Kafka RECEIVE from " + topic
181 elasticapm.set_transaction_name(transaction_name, override=True)
182 res = constants.OUTCOME.SUCCESS
183 elasticapm.set_transaction_result(res, override=False)
184 return result
185
186 def get_traceparent_from_result(self, result) -> Optional[TraceParent]:
187 for k, v in result.headers:
188 if k == constants.TRACEPARENT_BINARY_HEADER_NAME:
189 return TraceParent.from_binary(v)
190
[end of elasticapm/instrumentation/packages/kafka.py]
</code>
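Before writing the patch, it may help to see the failure mode in isolation. The sketch below is illustrative only; it reuses names from `_trace_send` above but is not the exact upstream patch. The point it demonstrates is that `capture_span` can hand back `None` (or a `DroppedSpan`) when no span is recorded, so `span.id` has to be guarded.

```python
# Minimal sketch of the guard the traceback points at; not the exact upstream patch.
from elasticapm.traces import capture_span, execution_context


def trace_send_sketch(topic: str) -> None:
    with capture_span(name=f"Kafka SEND to {topic}", span_type="messaging") as span:
        transaction = execution_context.get_transaction()
        if transaction:
            # `span` may be None here, so fall back to the transaction id.
            tp = transaction.trace_parent.copy_from(
                span_id=span.id if span else transaction.id,
            )
            # ... the real instrumentation then propagates `tp` via Kafka headers ...
```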
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/kafka.py b/elasticapm/instrumentation/packages/kafka.py
--- a/elasticapm/instrumentation/packages/kafka.py
+++ b/elasticapm/instrumentation/packages/kafka.py
@@ -36,7 +36,7 @@
from elasticapm.conf import constants
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import DroppedSpan, capture_span, execution_context
-from elasticapm.utils.disttracing import TraceParent
+from elasticapm.utils.disttracing import TraceParent, TracingOptions
class KafkaInstrumentation(AbstractInstrumentedModule):
@@ -48,6 +48,7 @@
]
provider_name = "kafka"
name = "kafka"
+ creates_transactions = True
def _trace_send(self, instance, wrapped, *args, destination_info=None, **kwargs):
topic = args[0] if args else kwargs["topic"]
@@ -68,7 +69,10 @@
) as span:
transaction = execution_context.get_transaction()
if transaction:
- tp = transaction.trace_parent.copy_from(span_id=span.id)
+ tp = transaction.trace_parent.copy_from(
+ span_id=span.id if span else transaction.id,
+ trace_options=None if span else TracingOptions(recorded=False),
+ )
if headers:
headers.append((constants.TRACEPARENT_BINARY_HEADER_NAME, tp.to_binary()))
else:
@@ -79,22 +83,17 @@
else:
kwargs["headers"] = headers
result = wrapped(*args, **kwargs)
- if instance and instance._metadata.controller and not isinstance(span, DroppedSpan):
+ if span and instance and instance._metadata.controller and not isinstance(span, DroppedSpan):
address = instance._metadata.controller[1]
port = instance._metadata.controller[2]
span.context["destination"]["address"] = address
span.context["destination"]["port"] = port
return result
- def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):
- # Contrasting to the superclass implementation, we *always* want to
- # return a proxied connection, even if there is no ongoing elasticapm
- # transaction yet. This ensures that we instrument the cursor once
- # the transaction started.
- return self.call(module, method, wrapped, instance, args, kwargs)
-
def call(self, module, method, wrapped, instance, args, kwargs):
client = get_client()
+ if client is None:
+ return wrapped(*args, **kwargs)
destination_info = {
"service": {"name": "kafka", "resource": "kafka/", "type": "messaging"},
}
@@ -118,7 +117,7 @@
"destination": destination_info,
},
) as span:
- if not isinstance(span, DroppedSpan) and instance._subscription.subscription:
+ if span and not isinstance(span, DroppedSpan) and instance._subscription.subscription:
span.name += " from " + ", ".join(sorted(instance._subscription.subscription))
results = wrapped(*args, **kwargs)
return results
@@ -146,7 +145,7 @@
except StopIteration:
span.cancel()
raise
- if not isinstance(span, DroppedSpan):
+ if span and not isinstance(span, DroppedSpan):
topic = result[0]
if client.should_ignore_topic(topic):
span.cancel()
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/kafka.py b/elasticapm/instrumentation/packages/kafka.py\n--- a/elasticapm/instrumentation/packages/kafka.py\n+++ b/elasticapm/instrumentation/packages/kafka.py\n@@ -36,7 +36,7 @@\n from elasticapm.conf import constants\n from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\n from elasticapm.traces import DroppedSpan, capture_span, execution_context\n-from elasticapm.utils.disttracing import TraceParent\n+from elasticapm.utils.disttracing import TraceParent, TracingOptions\n \n \n class KafkaInstrumentation(AbstractInstrumentedModule):\n@@ -48,6 +48,7 @@\n ]\n provider_name = \"kafka\"\n name = \"kafka\"\n+ creates_transactions = True\n \n def _trace_send(self, instance, wrapped, *args, destination_info=None, **kwargs):\n topic = args[0] if args else kwargs[\"topic\"]\n@@ -68,7 +69,10 @@\n ) as span:\n transaction = execution_context.get_transaction()\n if transaction:\n- tp = transaction.trace_parent.copy_from(span_id=span.id)\n+ tp = transaction.trace_parent.copy_from(\n+ span_id=span.id if span else transaction.id,\n+ trace_options=None if span else TracingOptions(recorded=False),\n+ )\n if headers:\n headers.append((constants.TRACEPARENT_BINARY_HEADER_NAME, tp.to_binary()))\n else:\n@@ -79,22 +83,17 @@\n else:\n kwargs[\"headers\"] = headers\n result = wrapped(*args, **kwargs)\n- if instance and instance._metadata.controller and not isinstance(span, DroppedSpan):\n+ if span and instance and instance._metadata.controller and not isinstance(span, DroppedSpan):\n address = instance._metadata.controller[1]\n port = instance._metadata.controller[2]\n span.context[\"destination\"][\"address\"] = address\n span.context[\"destination\"][\"port\"] = port\n return result\n \n- def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):\n- # Contrasting to the superclass implementation, we *always* want to\n- # return a proxied connection, even if there is no ongoing elasticapm\n- # transaction yet. 
This ensures that we instrument the cursor once\n- # the transaction started.\n- return self.call(module, method, wrapped, instance, args, kwargs)\n-\n def call(self, module, method, wrapped, instance, args, kwargs):\n client = get_client()\n+ if client is None:\n+ return wrapped(*args, **kwargs)\n destination_info = {\n \"service\": {\"name\": \"kafka\", \"resource\": \"kafka/\", \"type\": \"messaging\"},\n }\n@@ -118,7 +117,7 @@\n \"destination\": destination_info,\n },\n ) as span:\n- if not isinstance(span, DroppedSpan) and instance._subscription.subscription:\n+ if span and not isinstance(span, DroppedSpan) and instance._subscription.subscription:\n span.name += \" from \" + \", \".join(sorted(instance._subscription.subscription))\n results = wrapped(*args, **kwargs)\n return results\n@@ -146,7 +145,7 @@\n except StopIteration:\n span.cancel()\n raise\n- if not isinstance(span, DroppedSpan):\n+ if span and not isinstance(span, DroppedSpan):\n topic = result[0]\n if client.should_ignore_topic(topic):\n span.cancel()\n", "issue": "AttributeError with kafka-python integration in 6.10.0\n**Describe the bug**:\r\n\r\nAfter upgrading the agent to 6.10.0, we immediately started seeing AttributeError failues in the ElasticAPM code when calling through to our kafka-python code paths (we've had to rollback to 6.9 because of this).\r\n\r\n**To Reproduce**\r\n\r\nTry to send a message using the KafkaProducer class in kafka-python.\r\n\r\nTraceback:\r\n\r\n```python\r\nAttributeError: 'NoneType' object has no attribute 'id'\r\n[...]\r\n File \"elasticapm/instrumentation/packages/kafka.py\", line 94, in call_if_sampling\r\n return self.call(module, method, wrapped, instance, args, kwargs)\r\n File \"elasticapm/instrumentation/packages/kafka.py\", line 106, in call\r\n return self._trace_send(instance, wrapped, destination_info=destination_info, *args, **kwargs)\r\n File \"elasticapm/instrumentation/packages/kafka.py\", line 71, in _trace_send\r\n tp = transaction.trace_parent.copy_from(span_id=span.id)\r\n```\r\n\r\nI have had to remove proprietary code from the stack trace. However, the two snipped lines of code before it hits the elasticapm code paths is basically just doing:\r\n\r\n```python\r\nfrom kafka import KafkaProducer\r\nproducer = KafkaProducer(**kafka_config) # where kafka_config is a dict of config options.\r\nproducer.send(topic, message).get()\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.9.6-slim-bullseye (Docker container)\r\n- Framework and version [e.g. 
Django 2.1]: Django 3.2.13, kafka-python 2.0.2\r\n- Agent version: 6.10.0\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport time\nfrom typing import Optional\n\nimport elasticapm\nfrom elasticapm import get_client\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.base import AbstractInstrumentedModule\nfrom elasticapm.traces import DroppedSpan, capture_span, execution_context\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass KafkaInstrumentation(AbstractInstrumentedModule):\n\n instrument_list = [\n (\"kafka\", \"KafkaProducer.send\"),\n (\"kafka\", \"KafkaConsumer.poll\"),\n (\"kafka\", \"KafkaConsumer.__next__\"),\n ]\n provider_name = \"kafka\"\n name = \"kafka\"\n\n def _trace_send(self, instance, wrapped, *args, destination_info=None, **kwargs):\n topic = args[0] if args else kwargs[\"topic\"]\n headers = args[4] if len(args) > 4 else kwargs.get(\"headers\", None)\n\n span_name = f\"Kafka SEND to {topic}\"\n destination_info[\"service\"][\"resource\"] += topic\n with capture_span(\n name=span_name,\n span_type=\"messaging\",\n span_subtype=self.provider_name,\n span_action=\"send\",\n leaf=True,\n extra={\n \"message\": {\"queue\": {\"name\": topic}},\n \"destination\": destination_info,\n },\n ) as span:\n transaction = execution_context.get_transaction()\n if transaction:\n tp = transaction.trace_parent.copy_from(span_id=span.id)\n if headers:\n headers.append((constants.TRACEPARENT_BINARY_HEADER_NAME, tp.to_binary()))\n else:\n headers = [(constants.TRACEPARENT_BINARY_HEADER_NAME, tp.to_binary())]\n if len(args) > 4:\n args = list(args)\n args[4] = headers\n else:\n kwargs[\"headers\"] = headers\n result = wrapped(*args, **kwargs)\n if instance and instance._metadata.controller and not isinstance(span, DroppedSpan):\n address = instance._metadata.controller[1]\n port = instance._metadata.controller[2]\n span.context[\"destination\"][\"address\"] = address\n 
span.context[\"destination\"][\"port\"] = port\n return result\n\n def call_if_sampling(self, module, method, wrapped, instance, args, kwargs):\n # Contrasting to the superclass implementation, we *always* want to\n # return a proxied connection, even if there is no ongoing elasticapm\n # transaction yet. This ensures that we instrument the cursor once\n # the transaction started.\n return self.call(module, method, wrapped, instance, args, kwargs)\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n client = get_client()\n destination_info = {\n \"service\": {\"name\": \"kafka\", \"resource\": \"kafka/\", \"type\": \"messaging\"},\n }\n\n if method == \"KafkaProducer.send\":\n topic = args[0] if args else kwargs[\"topic\"]\n if client.should_ignore_topic(topic) or not execution_context.get_transaction():\n return wrapped(*args, **kwargs)\n return self._trace_send(instance, wrapped, destination_info=destination_info, *args, **kwargs)\n\n elif method == \"KafkaConsumer.poll\":\n transaction = execution_context.get_transaction()\n if transaction:\n with capture_span(\n name=\"Kafka POLL\",\n span_type=\"messaging\",\n span_subtype=self.provider_name,\n span_action=\"poll\",\n leaf=True,\n extra={\n \"destination\": destination_info,\n },\n ) as span:\n if not isinstance(span, DroppedSpan) and instance._subscription.subscription:\n span.name += \" from \" + \", \".join(sorted(instance._subscription.subscription))\n results = wrapped(*args, **kwargs)\n return results\n else:\n return wrapped(*args, **kwargs)\n\n elif method == \"KafkaConsumer.__next__\":\n transaction = execution_context.get_transaction()\n if transaction and transaction.transaction_type != \"messaging\":\n # somebody started a transaction outside of the consumer,\n # so we capture it as a span, and record the causal trace as a link\n with capture_span(\n name=\"consumer\",\n span_type=\"messaging\",\n span_subtype=self.provider_name,\n span_action=\"receive\",\n leaf=True,\n extra={\n \"message\": {\"queue\": {\"name\": \"\"}},\n \"destination\": destination_info,\n },\n ) as span:\n try:\n result = wrapped(*args, **kwargs)\n except StopIteration:\n span.cancel()\n raise\n if not isinstance(span, DroppedSpan):\n topic = result[0]\n if client.should_ignore_topic(topic):\n span.cancel()\n return result\n trace_parent = self.get_traceparent_from_result(result)\n if trace_parent:\n span.add_link(trace_parent)\n destination_info[\"service\"][\"resource\"] += topic\n span.context[\"message\"][\"queue\"][\"name\"] = topic\n span.name = \"Kafka RECEIVE from \" + topic\n return result\n else:\n # No transaction running, or this is a transaction started by us,\n # so let's end it and start the next,\n # unless a StopIteration is raised, at which point we do nothing.\n if transaction:\n client.end_transaction()\n result = wrapped(*args, **kwargs)\n topic = result[0]\n if client.should_ignore_topic(topic):\n return result\n trace_parent = self.get_traceparent_from_result(result)\n transaction = client.begin_transaction(\"messaging\", trace_parent=trace_parent)\n if result.timestamp_type == 0:\n current_time_millis = int(round(time.time() * 1000))\n age = current_time_millis - result.timestamp\n transaction.context = {\n \"message\": {\"age\": {\"ms\": age}, \"queue\": {\"name\": topic}},\n \"service\": {\"framework\": {\"name\": \"Kafka\"}},\n }\n transaction_name = \"Kafka RECEIVE from \" + topic\n elasticapm.set_transaction_name(transaction_name, override=True)\n res = constants.OUTCOME.SUCCESS\n 
elasticapm.set_transaction_result(res, override=False)\n return result\n\n def get_traceparent_from_result(self, result) -> Optional[TraceParent]:\n for k, v in result.headers:\n if k == constants.TRACEPARENT_BINARY_HEADER_NAME:\n return TraceParent.from_binary(v)\n", "path": "elasticapm/instrumentation/packages/kafka.py"}]} | 3,095 | 779 |
gh_patches_debug_43154 | rasdani/github-patches | git_diff | pypa__pip-10615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Package left un-upgraded with `pip install --upgrade`
### Description
When upgrading both `datasets` and `fsspec` in an environment, only `datasets` was upgraded to the latest version even though there was nothing preventing `fsspec` from being updated. `fsspec` is a dependency of `datasets` (`fsspec[http]>=2021.05.0` is specified in its [`setup.py`](https://github.com/huggingface/datasets/blob/1.14.0/setup.py)), and from looking at the resolver's debug output, it seems that pip prefers to use the pre-installed version of `fsspec` instead of the newer one.
The documentation for `--upgrade` says, "Upgrade all specified packages to the newest available version." Unless I'm misunderstanding, all packages explicitly listed on the command line should have been updated if possible.
### Expected behavior
`fsspec` should be upgraded to `2021.10.1`.
### pip version
21.3.1
### Python version
3.7.12
### OS
macOS 10.15.7
### How to Reproduce
1. Create a file named `pip_fsspec_test.sh` with the following contents:
```bash
#!/usr/bin/env bash
set -euxo pipefail
# Prepare environment
python --version
pip install -U pip setuptools wheel
pip list
# Install outdated packages
pip install datasets==1.13.3 fsspec==2021.10.0
pip list --outdated
# Attempt to upgrade both packages
pip install -U 'datasets<=1.14.0' 'fsspec<=2021.10.1'
pip list --outdated
```
2. Run `bash pip_fsspec_test.sh &> pip_fsspec_log.txt` in a new environment to create [pip_fsspec_log.txt](https://github.com/pypa/pip/files/7402239/pip_fsspec_log.txt).
3. Change the penultimate line of the script to `PIP_RESOLVER_DEBUG=1 pip -vvv install -U 'datasets<=1.14.0' 'fsspec<=2021.10.1'`.
4. Run `bash pip_fsspec_test.sh &> pip_fsspec_verbose_log.txt` in a new environment to create [pip_fsspec_verbose_log.txt](https://github.com/pypa/pip/files/7402240/pip_fsspec_verbose_log.txt).
### Output
_See files under "How to Reproduce"._
### Code of Conduct
- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).
</issue>
<code>
[start of src/pip/_internal/resolution/resolvelib/provider.py]
1 import collections
2 import math
3 from typing import TYPE_CHECKING, Dict, Iterable, Iterator, Mapping, Sequence, Union
4
5 from pip._vendor.resolvelib.providers import AbstractProvider
6
7 from .base import Candidate, Constraint, Requirement
8 from .candidates import REQUIRES_PYTHON_IDENTIFIER
9 from .factory import Factory
10
11 if TYPE_CHECKING:
12 from pip._vendor.resolvelib.providers import Preference
13 from pip._vendor.resolvelib.resolvers import RequirementInformation
14
15 PreferenceInformation = RequirementInformation[Requirement, Candidate]
16
17 _ProviderBase = AbstractProvider[Requirement, Candidate, str]
18 else:
19 _ProviderBase = AbstractProvider
20
21 # Notes on the relationship between the provider, the factory, and the
22 # candidate and requirement classes.
23 #
24 # The provider is a direct implementation of the resolvelib class. Its role
25 # is to deliver the API that resolvelib expects.
26 #
27 # Rather than work with completely abstract "requirement" and "candidate"
28 # concepts as resolvelib does, pip has concrete classes implementing these two
29 # ideas. The API of Requirement and Candidate objects are defined in the base
30 # classes, but essentially map fairly directly to the equivalent provider
31 # methods. In particular, `find_matches` and `is_satisfied_by` are
32 # requirement methods, and `get_dependencies` is a candidate method.
33 #
34 # The factory is the interface to pip's internal mechanisms. It is stateless,
35 # and is created by the resolver and held as a property of the provider. It is
36 # responsible for creating Requirement and Candidate objects, and provides
37 # services to those objects (access to pip's finder and preparer).
38
39
40 class PipProvider(_ProviderBase):
41 """Pip's provider implementation for resolvelib.
42
43 :params constraints: A mapping of constraints specified by the user. Keys
44 are canonicalized project names.
45 :params ignore_dependencies: Whether the user specified ``--no-deps``.
46 :params upgrade_strategy: The user-specified upgrade strategy.
47 :params user_requested: A set of canonicalized package names that the user
48 supplied for pip to install/upgrade.
49 """
50
51 def __init__(
52 self,
53 factory: Factory,
54 constraints: Dict[str, Constraint],
55 ignore_dependencies: bool,
56 upgrade_strategy: str,
57 user_requested: Dict[str, int],
58 ) -> None:
59 self._factory = factory
60 self._constraints = constraints
61 self._ignore_dependencies = ignore_dependencies
62 self._upgrade_strategy = upgrade_strategy
63 self._user_requested = user_requested
64 self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf)
65
66 def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:
67 return requirement_or_candidate.name
68
69 def get_preference( # type: ignore
70 self,
71 identifier: str,
72 resolutions: Mapping[str, Candidate],
73 candidates: Mapping[str, Iterator[Candidate]],
74 information: Mapping[str, Iterable["PreferenceInformation"]],
75 backtrack_causes: Sequence["PreferenceInformation"],
76 ) -> "Preference":
77 """Produce a sort key for given requirement based on preference.
78
79 The lower the return value is, the more preferred this group of
80 arguments is.
81
82 Currently pip considers the followings in order:
83
84 * Prefer if any of the known requirements is "direct", e.g. points to an
85 explicit URL.
86 * If equal, prefer if any requirement is "pinned", i.e. contains
87 operator ``===`` or ``==``.
88 * If equal, calculate an approximate "depth" and resolve requirements
89 closer to the user-specified requirements first.
90 * Order user-specified requirements by the order they are specified.
91 * If equal, prefers "non-free" requirements, i.e. contains at least one
92 operator, such as ``>=`` or ``<``.
93 * If equal, order alphabetically for consistency (helps debuggability).
94 """
95 lookups = (r.get_candidate_lookup() for r, _ in information[identifier])
96 candidate, ireqs = zip(*lookups)
97 operators = [
98 specifier.operator
99 for specifier_set in (ireq.specifier for ireq in ireqs if ireq)
100 for specifier in specifier_set
101 ]
102
103 direct = candidate is not None
104 pinned = any(op[:2] == "==" for op in operators)
105 unfree = bool(operators)
106
107 try:
108 requested_order: Union[int, float] = self._user_requested[identifier]
109 except KeyError:
110 requested_order = math.inf
111 parent_depths = (
112 self._known_depths[parent.name] if parent is not None else 0.0
113 for _, parent in information[identifier]
114 )
115 inferred_depth = min(d for d in parent_depths) + 1.0
116 else:
117 inferred_depth = 1.0
118 self._known_depths[identifier] = inferred_depth
119
120 requested_order = self._user_requested.get(identifier, math.inf)
121
122 # Requires-Python has only one candidate and the check is basically
123 # free, so we always do it first to avoid needless work if it fails.
124 requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER
125
126 # HACK: Setuptools have a very long and solid backward compatibility
127 # track record, and extremely few projects would request a narrow,
128 # non-recent version range of it since that would break a lot things.
129 # (Most projects specify it only to request for an installer feature,
130 # which does not work, but that's another topic.) Intentionally
131 # delaying Setuptools helps reduce branches the resolver has to check.
132 # This serves as a temporary fix for issues like "apache-airflow[all]"
133 # while we work on "proper" branch pruning techniques.
134 delay_this = identifier == "setuptools"
135
136 # Prefer the causes of backtracking on the assumption that the problem
137 # resolving the dependency tree is related to the failures that caused
138 # the backtracking
139 backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes)
140
141 return (
142 not requires_python,
143 delay_this,
144 not direct,
145 not pinned,
146 not backtrack_cause,
147 inferred_depth,
148 requested_order,
149 not unfree,
150 identifier,
151 )
152
153 def _get_constraint(self, identifier: str) -> Constraint:
154 if identifier in self._constraints:
155 return self._constraints[identifier]
156
157 # HACK: Theoretically we should check whether this identifier is a valid
158 # "NAME[EXTRAS]" format, and parse out the name part with packaging or
159 # some regular expression. But since pip's resolver only spits out
160 # three kinds of identifiers: normalized PEP 503 names, normalized names
161 # plus extras, and Requires-Python, we can cheat a bit here.
162 name, open_bracket, _ = identifier.partition("[")
163 if open_bracket and name in self._constraints:
164 return self._constraints[name]
165
166 return Constraint.empty()
167
168 def find_matches(
169 self,
170 identifier: str,
171 requirements: Mapping[str, Iterator[Requirement]],
172 incompatibilities: Mapping[str, Iterator[Candidate]],
173 ) -> Iterable[Candidate]:
174 def _eligible_for_upgrade(name: str) -> bool:
175 """Are upgrades allowed for this project?
176
177 This checks the upgrade strategy, and whether the project was one
178 that the user specified in the command line, in order to decide
179 whether we should upgrade if there's a newer version available.
180
181 (Note that we don't need access to the `--upgrade` flag, because
182 an upgrade strategy of "to-satisfy-only" means that `--upgrade`
183 was not specified).
184 """
185 if self._upgrade_strategy == "eager":
186 return True
187 elif self._upgrade_strategy == "only-if-needed":
188 return name in self._user_requested
189 return False
190
191 return self._factory.find_candidates(
192 identifier=identifier,
193 requirements=requirements,
194 constraint=self._get_constraint(identifier),
195 prefers_installed=(not _eligible_for_upgrade(identifier)),
196 incompatibilities=incompatibilities,
197 )
198
199 def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
200 return requirement.is_satisfied_by(candidate)
201
202 def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]:
203 with_requires = not self._ignore_dependencies
204 return [r for r in candidate.iter_dependencies(with_requires) if r is not None]
205
206 @staticmethod
207 def is_backtrack_cause(
208 identifier: str, backtrack_causes: Sequence["PreferenceInformation"]
209 ) -> bool:
210 for backtrack_cause in backtrack_causes:
211 if identifier == backtrack_cause.requirement.name:
212 return True
213 if backtrack_cause.parent and identifier == backtrack_cause.parent.name:
214 return True
215 return False
216
[end of src/pip/_internal/resolution/resolvelib/provider.py]
</code>
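Before writing the patch, note how the resolver identifier can include extras while `self._user_requested` is keyed by plain project names. The snippet below is a self-contained illustration with assumed values (not actual resolver state) of why `_eligible_for_upgrade` misses a dependency such as `fsspec[http]`:

```python
# Assumed values for illustration; not actual resolver state.
user_requested = {"datasets": 0, "fsspec": 1}  # plain names from the command line
identifier = "fsspec[http]"                    # resolver identifier for datasets' extra dep

print(identifier in user_requested)            # False -> not treated as user-requested,
                                               #          so the installed fsspec is preferred

name, open_bracket, _ = identifier.partition("[")
print(open_bracket and name in user_requested) # True -> the extras-stripped lookup that
                                               #         _get_constraint already performs
```

A fix along these lines would reuse that extras-stripped lookup for both the constraint and the user-requested checks.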
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pip/_internal/resolution/resolvelib/provider.py b/src/pip/_internal/resolution/resolvelib/provider.py
--- a/src/pip/_internal/resolution/resolvelib/provider.py
+++ b/src/pip/_internal/resolution/resolvelib/provider.py
@@ -1,6 +1,15 @@
import collections
import math
-from typing import TYPE_CHECKING, Dict, Iterable, Iterator, Mapping, Sequence, Union
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ Iterator,
+ Mapping,
+ Sequence,
+ TypeVar,
+ Union,
+)
from pip._vendor.resolvelib.providers import AbstractProvider
@@ -37,6 +46,35 @@
# services to those objects (access to pip's finder and preparer).
+D = TypeVar("D")
+V = TypeVar("V")
+
+
+def _get_with_identifier(
+ mapping: Mapping[str, V],
+ identifier: str,
+ default: D,
+) -> Union[D, V]:
+ """Get item from a package name lookup mapping with a resolver identifier.
+
+ This extra logic is needed when the target mapping is keyed by package
+ name, which cannot be directly looked up with an identifier (which may
+ contain requested extras). Additional logic is added to also look up a value
+ by "cleaning up" the extras from the identifier.
+ """
+ if identifier in mapping:
+ return mapping[identifier]
+ # HACK: Theoretically we should check whether this identifier is a valid
+ # "NAME[EXTRAS]" format, and parse out the name part with packaging or
+ # some regular expression. But since pip's resolver only spits out three
+ # kinds of identifiers: normalized PEP 503 names, normalized names plus
+ # extras, and Requires-Python, we can cheat a bit here.
+ name, open_bracket, _ = identifier.partition("[")
+ if open_bracket and name in mapping:
+ return mapping[name]
+ return default
+
+
class PipProvider(_ProviderBase):
"""Pip's provider implementation for resolvelib.
@@ -150,28 +188,13 @@
identifier,
)
- def _get_constraint(self, identifier: str) -> Constraint:
- if identifier in self._constraints:
- return self._constraints[identifier]
-
- # HACK: Theoretically we should check whether this identifier is a valid
- # "NAME[EXTRAS]" format, and parse out the name part with packaging or
- # some regular expression. But since pip's resolver only spits out
- # three kinds of identifiers: normalized PEP 503 names, normalized names
- # plus extras, and Requires-Python, we can cheat a bit here.
- name, open_bracket, _ = identifier.partition("[")
- if open_bracket and name in self._constraints:
- return self._constraints[name]
-
- return Constraint.empty()
-
def find_matches(
self,
identifier: str,
requirements: Mapping[str, Iterator[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
) -> Iterable[Candidate]:
- def _eligible_for_upgrade(name: str) -> bool:
+ def _eligible_for_upgrade(identifier: str) -> bool:
"""Are upgrades allowed for this project?
This checks the upgrade strategy, and whether the project was one
@@ -185,13 +208,23 @@
if self._upgrade_strategy == "eager":
return True
elif self._upgrade_strategy == "only-if-needed":
- return name in self._user_requested
+ user_order = _get_with_identifier(
+ self._user_requested,
+ identifier,
+ default=None,
+ )
+ return user_order is not None
return False
+ constraint = _get_with_identifier(
+ self._constraints,
+ identifier,
+ default=Constraint.empty(),
+ )
return self._factory.find_candidates(
identifier=identifier,
requirements=requirements,
- constraint=self._get_constraint(identifier),
+ constraint=constraint,
prefers_installed=(not _eligible_for_upgrade(identifier)),
incompatibilities=incompatibilities,
)
| {"golden_diff": "diff --git a/src/pip/_internal/resolution/resolvelib/provider.py b/src/pip/_internal/resolution/resolvelib/provider.py\n--- a/src/pip/_internal/resolution/resolvelib/provider.py\n+++ b/src/pip/_internal/resolution/resolvelib/provider.py\n@@ -1,6 +1,15 @@\n import collections\n import math\n-from typing import TYPE_CHECKING, Dict, Iterable, Iterator, Mapping, Sequence, Union\n+from typing import (\n+ TYPE_CHECKING,\n+ Dict,\n+ Iterable,\n+ Iterator,\n+ Mapping,\n+ Sequence,\n+ TypeVar,\n+ Union,\n+)\n \n from pip._vendor.resolvelib.providers import AbstractProvider\n \n@@ -37,6 +46,35 @@\n # services to those objects (access to pip's finder and preparer).\n \n \n+D = TypeVar(\"D\")\n+V = TypeVar(\"V\")\n+\n+\n+def _get_with_identifier(\n+ mapping: Mapping[str, V],\n+ identifier: str,\n+ default: D,\n+) -> Union[D, V]:\n+ \"\"\"Get item from a package name lookup mapping with a resolver identifier.\n+\n+ This extra logic is needed when the target mapping is keyed by package\n+ name, which cannot be directly looked up with an identifier (which may\n+ contain requested extras). Additional logic is added to also look up a value\n+ by \"cleaning up\" the extras from the identifier.\n+ \"\"\"\n+ if identifier in mapping:\n+ return mapping[identifier]\n+ # HACK: Theoretically we should check whether this identifier is a valid\n+ # \"NAME[EXTRAS]\" format, and parse out the name part with packaging or\n+ # some regular expression. But since pip's resolver only spits out three\n+ # kinds of identifiers: normalized PEP 503 names, normalized names plus\n+ # extras, and Requires-Python, we can cheat a bit here.\n+ name, open_bracket, _ = identifier.partition(\"[\")\n+ if open_bracket and name in mapping:\n+ return mapping[name]\n+ return default\n+\n+\n class PipProvider(_ProviderBase):\n \"\"\"Pip's provider implementation for resolvelib.\n \n@@ -150,28 +188,13 @@\n identifier,\n )\n \n- def _get_constraint(self, identifier: str) -> Constraint:\n- if identifier in self._constraints:\n- return self._constraints[identifier]\n-\n- # HACK: Theoretically we should check whether this identifier is a valid\n- # \"NAME[EXTRAS]\" format, and parse out the name part with packaging or\n- # some regular expression. 
But since pip's resolver only spits out\n- # three kinds of identifiers: normalized PEP 503 names, normalized names\n- # plus extras, and Requires-Python, we can cheat a bit here.\n- name, open_bracket, _ = identifier.partition(\"[\")\n- if open_bracket and name in self._constraints:\n- return self._constraints[name]\n-\n- return Constraint.empty()\n-\n def find_matches(\n self,\n identifier: str,\n requirements: Mapping[str, Iterator[Requirement]],\n incompatibilities: Mapping[str, Iterator[Candidate]],\n ) -> Iterable[Candidate]:\n- def _eligible_for_upgrade(name: str) -> bool:\n+ def _eligible_for_upgrade(identifier: str) -> bool:\n \"\"\"Are upgrades allowed for this project?\n \n This checks the upgrade strategy, and whether the project was one\n@@ -185,13 +208,23 @@\n if self._upgrade_strategy == \"eager\":\n return True\n elif self._upgrade_strategy == \"only-if-needed\":\n- return name in self._user_requested\n+ user_order = _get_with_identifier(\n+ self._user_requested,\n+ identifier,\n+ default=None,\n+ )\n+ return user_order is not None\n return False\n \n+ constraint = _get_with_identifier(\n+ self._constraints,\n+ identifier,\n+ default=Constraint.empty(),\n+ )\n return self._factory.find_candidates(\n identifier=identifier,\n requirements=requirements,\n- constraint=self._get_constraint(identifier),\n+ constraint=constraint,\n prefers_installed=(not _eligible_for_upgrade(identifier)),\n incompatibilities=incompatibilities,\n )\n", "issue": "Package left un-upgraded with `pip install --upgrade`\n### Description\r\n\r\nWhen upgrading both `datasets` and `fsspec` in an environment, only `datasets` was upgraded to the latest version even though there was nothing constraining `fsspec` from being updated. `fsspec` is a dependency of `datasets` (`fsspec[http]>=2021.05.0` is specified in its [`setup.py`](https://github.com/huggingface/datasets/blob/1.14.0/setup.py)), and from looking at the resolver's debug output, it seems that pip is preferring to use the pre-installed version of `fsspec` instead of the newer one.\r\n\r\nThe documentation for `--upgrade` says, \"Upgrade all specified packages to the newest available version.\" Unless I'm misunderstanding, all packages explicitly listed on the command line should have been updated if possible.\r\n\r\n### Expected behavior\r\n\r\n`fsspec` should be upgraded to `2021.10.1`.\r\n\r\n### pip version\r\n\r\n21.3.1\r\n\r\n### Python version\r\n\r\n3.7.12\r\n\r\n### OS\r\n\r\nmacOS 10.15.7\r\n\r\n### How to Reproduce\r\n\r\n1. Create a file named `pip_fsspec_test.sh` with the following contents:\r\n\r\n```bash\r\n#!/usr/bin/env bash\r\n\r\nset -euxo pipefail\r\n\r\n# Prepare environment\r\npython --version\r\npip install -U pip setuptools wheel\r\npip list\r\n\r\n# Install outdated packages\r\npip install datasets==1.13.3 fsspec==2021.10.0\r\npip list --outdated\r\n\r\n# Attempt to upgrade both packages\r\npip install -U 'datasets<=1.14.0' 'fsspec<=2021.10.1'\r\npip list --outdated\r\n```\r\n\r\n2. Run `bash pip_fsspec_test.sh &> pip_fsspec_log.txt` in a new environment to create [pip_fsspec_log.txt](https://github.com/pypa/pip/files/7402239/pip_fsspec_log.txt).\r\n3. Change the penultimate line of the script to `PIP_RESOLVER_DEBUG=1 pip -vvv install -U 'datasets<=1.14.0' 'fsspec<=2021.10.1'`.\r\n4. 
Run `bash pip_fsspec_test.sh &> pip_fsspec_verbose_log.txt` in a new environment to create [pip_fsspec_verbose_log.txt](https://github.com/pypa/pip/files/7402240/pip_fsspec_verbose_log.txt).\r\n\r\n### Output\r\n\r\n_See files under \"How to Reproduce\"._\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the [PSF Code of Conduct](https://www.python.org/psf/conduct/).\n", "before_files": [{"content": "import collections\nimport math\nfrom typing import TYPE_CHECKING, Dict, Iterable, Iterator, Mapping, Sequence, Union\n\nfrom pip._vendor.resolvelib.providers import AbstractProvider\n\nfrom .base import Candidate, Constraint, Requirement\nfrom .candidates import REQUIRES_PYTHON_IDENTIFIER\nfrom .factory import Factory\n\nif TYPE_CHECKING:\n from pip._vendor.resolvelib.providers import Preference\n from pip._vendor.resolvelib.resolvers import RequirementInformation\n\n PreferenceInformation = RequirementInformation[Requirement, Candidate]\n\n _ProviderBase = AbstractProvider[Requirement, Candidate, str]\nelse:\n _ProviderBase = AbstractProvider\n\n# Notes on the relationship between the provider, the factory, and the\n# candidate and requirement classes.\n#\n# The provider is a direct implementation of the resolvelib class. Its role\n# is to deliver the API that resolvelib expects.\n#\n# Rather than work with completely abstract \"requirement\" and \"candidate\"\n# concepts as resolvelib does, pip has concrete classes implementing these two\n# ideas. The API of Requirement and Candidate objects are defined in the base\n# classes, but essentially map fairly directly to the equivalent provider\n# methods. In particular, `find_matches` and `is_satisfied_by` are\n# requirement methods, and `get_dependencies` is a candidate method.\n#\n# The factory is the interface to pip's internal mechanisms. It is stateless,\n# and is created by the resolver and held as a property of the provider. It is\n# responsible for creating Requirement and Candidate objects, and provides\n# services to those objects (access to pip's finder and preparer).\n\n\nclass PipProvider(_ProviderBase):\n \"\"\"Pip's provider implementation for resolvelib.\n\n :params constraints: A mapping of constraints specified by the user. 
Keys\n are canonicalized project names.\n :params ignore_dependencies: Whether the user specified ``--no-deps``.\n :params upgrade_strategy: The user-specified upgrade strategy.\n :params user_requested: A set of canonicalized package names that the user\n supplied for pip to install/upgrade.\n \"\"\"\n\n def __init__(\n self,\n factory: Factory,\n constraints: Dict[str, Constraint],\n ignore_dependencies: bool,\n upgrade_strategy: str,\n user_requested: Dict[str, int],\n ) -> None:\n self._factory = factory\n self._constraints = constraints\n self._ignore_dependencies = ignore_dependencies\n self._upgrade_strategy = upgrade_strategy\n self._user_requested = user_requested\n self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf)\n\n def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:\n return requirement_or_candidate.name\n\n def get_preference( # type: ignore\n self,\n identifier: str,\n resolutions: Mapping[str, Candidate],\n candidates: Mapping[str, Iterator[Candidate]],\n information: Mapping[str, Iterable[\"PreferenceInformation\"]],\n backtrack_causes: Sequence[\"PreferenceInformation\"],\n ) -> \"Preference\":\n \"\"\"Produce a sort key for given requirement based on preference.\n\n The lower the return value is, the more preferred this group of\n arguments is.\n\n Currently pip considers the followings in order:\n\n * Prefer if any of the known requirements is \"direct\", e.g. points to an\n explicit URL.\n * If equal, prefer if any requirement is \"pinned\", i.e. contains\n operator ``===`` or ``==``.\n * If equal, calculate an approximate \"depth\" and resolve requirements\n closer to the user-specified requirements first.\n * Order user-specified requirements by the order they are specified.\n * If equal, prefers \"non-free\" requirements, i.e. contains at least one\n operator, such as ``>=`` or ``<``.\n * If equal, order alphabetically for consistency (helps debuggability).\n \"\"\"\n lookups = (r.get_candidate_lookup() for r, _ in information[identifier])\n candidate, ireqs = zip(*lookups)\n operators = [\n specifier.operator\n for specifier_set in (ireq.specifier for ireq in ireqs if ireq)\n for specifier in specifier_set\n ]\n\n direct = candidate is not None\n pinned = any(op[:2] == \"==\" for op in operators)\n unfree = bool(operators)\n\n try:\n requested_order: Union[int, float] = self._user_requested[identifier]\n except KeyError:\n requested_order = math.inf\n parent_depths = (\n self._known_depths[parent.name] if parent is not None else 0.0\n for _, parent in information[identifier]\n )\n inferred_depth = min(d for d in parent_depths) + 1.0\n else:\n inferred_depth = 1.0\n self._known_depths[identifier] = inferred_depth\n\n requested_order = self._user_requested.get(identifier, math.inf)\n\n # Requires-Python has only one candidate and the check is basically\n # free, so we always do it first to avoid needless work if it fails.\n requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER\n\n # HACK: Setuptools have a very long and solid backward compatibility\n # track record, and extremely few projects would request a narrow,\n # non-recent version range of it since that would break a lot things.\n # (Most projects specify it only to request for an installer feature,\n # which does not work, but that's another topic.) 
Intentionally\n # delaying Setuptools helps reduce branches the resolver has to check.\n # This serves as a temporary fix for issues like \"apache-airflow[all]\"\n # while we work on \"proper\" branch pruning techniques.\n delay_this = identifier == \"setuptools\"\n\n # Prefer the causes of backtracking on the assumption that the problem\n # resolving the dependency tree is related to the failures that caused\n # the backtracking\n backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes)\n\n return (\n not requires_python,\n delay_this,\n not direct,\n not pinned,\n not backtrack_cause,\n inferred_depth,\n requested_order,\n not unfree,\n identifier,\n )\n\n def _get_constraint(self, identifier: str) -> Constraint:\n if identifier in self._constraints:\n return self._constraints[identifier]\n\n # HACK: Theoretically we should check whether this identifier is a valid\n # \"NAME[EXTRAS]\" format, and parse out the name part with packaging or\n # some regular expression. But since pip's resolver only spits out\n # three kinds of identifiers: normalized PEP 503 names, normalized names\n # plus extras, and Requires-Python, we can cheat a bit here.\n name, open_bracket, _ = identifier.partition(\"[\")\n if open_bracket and name in self._constraints:\n return self._constraints[name]\n\n return Constraint.empty()\n\n def find_matches(\n self,\n identifier: str,\n requirements: Mapping[str, Iterator[Requirement]],\n incompatibilities: Mapping[str, Iterator[Candidate]],\n ) -> Iterable[Candidate]:\n def _eligible_for_upgrade(name: str) -> bool:\n \"\"\"Are upgrades allowed for this project?\n\n This checks the upgrade strategy, and whether the project was one\n that the user specified in the command line, in order to decide\n whether we should upgrade if there's a newer version available.\n\n (Note that we don't need access to the `--upgrade` flag, because\n an upgrade strategy of \"to-satisfy-only\" means that `--upgrade`\n was not specified).\n \"\"\"\n if self._upgrade_strategy == \"eager\":\n return True\n elif self._upgrade_strategy == \"only-if-needed\":\n return name in self._user_requested\n return False\n\n return self._factory.find_candidates(\n identifier=identifier,\n requirements=requirements,\n constraint=self._get_constraint(identifier),\n prefers_installed=(not _eligible_for_upgrade(identifier)),\n incompatibilities=incompatibilities,\n )\n\n def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:\n return requirement.is_satisfied_by(candidate)\n\n def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]:\n with_requires = not self._ignore_dependencies\n return [r for r in candidate.iter_dependencies(with_requires) if r is not None]\n\n @staticmethod\n def is_backtrack_cause(\n identifier: str, backtrack_causes: Sequence[\"PreferenceInformation\"]\n ) -> bool:\n for backtrack_cause in backtrack_causes:\n if identifier == backtrack_cause.requirement.name:\n return True\n if backtrack_cause.parent and identifier == backtrack_cause.parent.name:\n return True\n return False\n", "path": "src/pip/_internal/resolution/resolvelib/provider.py"}]} | 3,612 | 956 |
gh_patches_debug_22526 | rasdani/github-patches | git_diff | ethereum__web3.py-578 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IPCProvider timeout parameter
* Version: 3.13.5
* Python: 2.7
* OS: Ubuntu 16.04
### What was wrong?
A request has timed out.
Please include any of the following that are applicable:
* The code which produced the error
`web3.manager.request_blocking('parity_pendingTransactions', [])`
* The full output of the error
```
Traceback (most recent call last):
File "/home/user/clone/mempool/eth.py", line 78, in get_mempool
'parity_pendingTransactions', []
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/manager.py", line 112, in request_blocking
response = self._make_request(method, params)
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/manager.py", line 95, in _make_request
return make_request_fn(method, params)
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/middleware/attrdict.py", line 20, in middleware
response = make_request(method, params)
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/middleware/formatting.py", line 25, in middleware
response = make_request(method, params)
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/providers/ipc.py", line 103, in make_request
timeout.sleep(0)
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/utils/compat/compat_stdlib.py", line 74, in sleep
self.check()
File "/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/utils/compat/compat_stdlib.py", line 67, in check
raise self
Timeout: 10 seconds
```
* What type of node you were connecting to
Parity
### How can it be fixed?
Add a timeout parameter to the IPCProvider class so callers can retry sooner when a timeout occurs. Right now the 10 second timeout is hardcoded: https://github.com/pipermerriam/web3.py/blob/master/web3/providers/ipc.py
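For illustration, here is a minimal sketch of the requested change, written against the `ipc.py` module quoted below. Only the lines that would change are spelled out; the keyword name `timeout` and its default of 10 seconds are assumptions for this sketch, not an existing API of this web3.py release.

```python
# Sketch: make the read timeout configurable instead of hardcoding 10 seconds.
# JSONBaseProvider and Timeout come from the module's existing imports.
class IPCProvider(JSONBaseProvider):
    def __init__(self, ipc_path=None, testnet=False, timeout=10, *args, **kwargs):
        # ... existing ipc_path / lock / socket setup stays the same ...
        self.timeout = timeout  # new: remember the caller's choice
        super(IPCProvider, self).__init__(*args, **kwargs)

    def make_request(self, method, params):
        # ... existing send logic stays the same ...
        with Timeout(self.timeout) as timeout:  # previously: with Timeout(10)
            # ... existing receive loop stays the same ...
            pass
```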
</issue>
<code>
[start of web3/providers/ipc.py]
1 import os
2 import socket
3 import sys
4 import threading
5
6 try:
7 from json import JSONDecodeError
8 except ImportError:
9 JSONDecodeError = ValueError
10
11 from web3.utils.threads import (
12 Timeout,
13 )
14
15 from .base import JSONBaseProvider
16
17
18 def get_ipc_socket(ipc_path, timeout=0.1):
19 if sys.platform == 'win32':
20 # On Windows named pipe is used. Simulate socket with it.
21 from web3.utils.windows import NamedPipe
22
23 return NamedPipe(ipc_path)
24 else:
25 sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
26 sock.connect(ipc_path)
27 sock.settimeout(timeout)
28 return sock
29
30
31 class PersistantSocket(object):
32 sock = None
33
34 def __init__(self, ipc_path):
35 self.ipc_path = ipc_path
36
37 def __enter__(self):
38 if not self.ipc_path:
39 raise FileNotFoundError("cannot connect to IPC socket at path: %r" % self.ipc_path)
40
41 if not self.sock:
42 self.sock = self._open()
43 return self.sock
44
45 def __exit__(self, exc_type, exc_value, traceback):
46 # only close the socket if there was an error
47 if exc_value is not None:
48 try:
49 self.sock.close()
50 except Exception:
51 pass
52 self.sock = None
53
54 def _open(self):
55 return get_ipc_socket(self.ipc_path)
56
57 def reset(self):
58 self.sock.close()
59 self.sock = self._open()
60 return self.sock
61
62
63 def get_default_ipc_path(testnet=False):
64 if testnet:
65 testnet = "testnet"
66 else:
67 testnet = ""
68
69 if sys.platform == 'darwin':
70 ipc_path = os.path.expanduser(os.path.join(
71 "~",
72 "Library",
73 "Ethereum",
74 testnet,
75 "geth.ipc"
76 ))
77 if os.path.exists(ipc_path):
78 return ipc_path
79
80 ipc_path = os.path.expanduser(os.path.join(
81 "~",
82 "Library",
83 "Application Support",
84 "io.parity.ethereum",
85 "jsonrpc.ipc"
86 ))
87 if os.path.exists(ipc_path):
88 return ipc_path
89
90 elif sys.platform.startswith('linux'):
91 ipc_path = os.path.expanduser(os.path.join(
92 "~",
93 ".ethereum",
94 testnet,
95 "geth.ipc"
96 ))
97 if os.path.exists(ipc_path):
98 return ipc_path
99
100 ipc_path = os.path.expanduser(os.path.join(
101 "~",
102 ".local",
103 "share",
104 "io.parity.ethereum",
105 "jsonrpc.ipc"
106 ))
107 if os.path.exists(ipc_path):
108 return ipc_path
109
110 elif sys.platform == 'win32':
111 ipc_path = os.path.join(
112 "\\\\",
113 ".",
114 "pipe",
115 "geth.ipc"
116 )
117 if os.path.exists(ipc_path):
118 return ipc_path
119
120 ipc_path = os.path.join(
121 "\\\\",
122 ".",
123 "pipe",
124 "jsonrpc.ipc"
125 )
126 if os.path.exists(ipc_path):
127 return ipc_path
128
129 else:
130 raise ValueError(
131 "Unsupported platform '{0}'. Only darwin/linux2/win32 are "
132 "supported. You must specify the ipc_path".format(sys.platform)
133 )
134
135
136 class IPCProvider(JSONBaseProvider):
137 _socket = None
138
139 def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):
140 if ipc_path is None:
141 self.ipc_path = get_default_ipc_path(testnet)
142 else:
143 self.ipc_path = ipc_path
144
145 self._lock = threading.Lock()
146 self._socket = PersistantSocket(self.ipc_path)
147 super(IPCProvider, self).__init__(*args, **kwargs)
148
149 def make_request(self, method, params):
150 request = self.encode_rpc_request(method, params)
151
152 with self._lock, self._socket as sock:
153 try:
154 sock.sendall(request)
155 except BrokenPipeError:
156 # one extra attempt, then give up
157 sock = self._socket.reset()
158 sock.sendall(request)
159
160 raw_response = b""
161 with Timeout(10) as timeout:
162 while True:
163 try:
164 raw_response += sock.recv(4096)
165 except socket.timeout:
166 timeout.sleep(0)
167 continue
168 if raw_response == b"":
169 timeout.sleep(0)
170 else:
171 try:
172 response = self.decode_rpc_response(raw_response)
173 except JSONDecodeError:
174 timeout.sleep(0)
175 continue
176 else:
177 return response
178
[end of web3/providers/ipc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py
--- a/web3/providers/ipc.py
+++ b/web3/providers/ipc.py
@@ -136,12 +136,13 @@
class IPCProvider(JSONBaseProvider):
_socket = None
- def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):
+ def __init__(self, ipc_path=None, testnet=False, timeout=10, *args, **kwargs):
if ipc_path is None:
self.ipc_path = get_default_ipc_path(testnet)
else:
self.ipc_path = ipc_path
+ self.timeout = timeout
self._lock = threading.Lock()
self._socket = PersistantSocket(self.ipc_path)
super(IPCProvider, self).__init__(*args, **kwargs)
@@ -158,7 +159,7 @@
sock.sendall(request)
raw_response = b""
- with Timeout(10) as timeout:
+ with Timeout(self.timeout) as timeout:
while True:
try:
raw_response += sock.recv(4096)
| {"golden_diff": "diff --git a/web3/providers/ipc.py b/web3/providers/ipc.py\n--- a/web3/providers/ipc.py\n+++ b/web3/providers/ipc.py\n@@ -136,12 +136,13 @@\n class IPCProvider(JSONBaseProvider):\n _socket = None\n \n- def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):\n+ def __init__(self, ipc_path=None, testnet=False, timeout=10, *args, **kwargs):\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path(testnet)\n else:\n self.ipc_path = ipc_path\n \n+ self.timeout = timeout\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super(IPCProvider, self).__init__(*args, **kwargs)\n@@ -158,7 +159,7 @@\n sock.sendall(request)\n \n raw_response = b\"\"\n- with Timeout(10) as timeout:\n+ with Timeout(self.timeout) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n", "issue": "IPCProvider timeout paramater\n* Version: 3.13.5\r\n* Python: 2.7\r\n* OS: Ubuntu 16.04\r\n\r\n### What was wrong?\r\nA request has timed out.\r\n\r\nPlease include any of the following that are applicable:\r\n* The code which produced the error\r\n`web3.manager.request_blocking('parity_pendingTransactions', [])`\r\n* The full output of the error\r\n`\r\nTraceback (most recent call last):\r\n File \"/home/user/clone/mempool/eth.py\", line 78, in get_mempool\r\n 'parity_pendingTransactions', []\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/manager.py\", line 112, in request_blocking\r\n response = self._make_request(method, params)\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/manager.py\", line 95, in _make_request\r\n return make_request_fn(method, params)\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/middleware/attrdict.py\", line 20, in middleware\r\n response = make_request(method, params)\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/middleware/formatting.py\", line 25, in middleware\r\n response = make_request(method, params)\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/providers/ipc.py\", line 103, in make_request\r\n timeout.sleep(0)\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/utils/compat/compat_stdlib.py\", line 74, in sleep\r\n self.check()\r\n File \"/home/user/.virtualenvs/prune/local/lib/python2.7/site-packages/web3/utils/compat/compat_stdlib.py\", line 67, in check\r\n raise self\r\nTimeout: 10 seconds`\r\n* What type of node you were connecting to\r\nParity\r\n\r\n### How can it be fixed?\r\nAdd timeout parameter to IPCProvider class to be able to retry sooner, when a timeout occurs. Right know 10 seconds timeout is hardcoded: https://github.com/pipermerriam/web3.py/blob/master/web3/providers/ipc.py\r\n\n", "before_files": [{"content": "import os\nimport socket\nimport sys\nimport threading\n\ntry:\n from json import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\n\nfrom web3.utils.threads import (\n Timeout,\n)\n\nfrom .base import JSONBaseProvider\n\n\ndef get_ipc_socket(ipc_path, timeout=0.1):\n if sys.platform == 'win32':\n # On Windows named pipe is used. 
Simulate socket with it.\n from web3.utils.windows import NamedPipe\n\n return NamedPipe(ipc_path)\n else:\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(ipc_path)\n sock.settimeout(timeout)\n return sock\n\n\nclass PersistantSocket(object):\n sock = None\n\n def __init__(self, ipc_path):\n self.ipc_path = ipc_path\n\n def __enter__(self):\n if not self.ipc_path:\n raise FileNotFoundError(\"cannot connect to IPC socket at path: %r\" % self.ipc_path)\n\n if not self.sock:\n self.sock = self._open()\n return self.sock\n\n def __exit__(self, exc_type, exc_value, traceback):\n # only close the socket if there was an error\n if exc_value is not None:\n try:\n self.sock.close()\n except Exception:\n pass\n self.sock = None\n\n def _open(self):\n return get_ipc_socket(self.ipc_path)\n\n def reset(self):\n self.sock.close()\n self.sock = self._open()\n return self.sock\n\n\ndef get_default_ipc_path(testnet=False):\n if testnet:\n testnet = \"testnet\"\n else:\n testnet = \"\"\n\n if sys.platform == 'darwin':\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \"Library\",\n \"Ethereum\",\n testnet,\n \"geth.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \"Library\",\n \"Application Support\",\n \"io.parity.ethereum\",\n \"jsonrpc.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform.startswith('linux'):\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \".ethereum\",\n testnet,\n \"geth.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.expanduser(os.path.join(\n \"~\",\n \".local\",\n \"share\",\n \"io.parity.ethereum\",\n \"jsonrpc.ipc\"\n ))\n if os.path.exists(ipc_path):\n return ipc_path\n\n elif sys.platform == 'win32':\n ipc_path = os.path.join(\n \"\\\\\\\\\",\n \".\",\n \"pipe\",\n \"geth.ipc\"\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n ipc_path = os.path.join(\n \"\\\\\\\\\",\n \".\",\n \"pipe\",\n \"jsonrpc.ipc\"\n )\n if os.path.exists(ipc_path):\n return ipc_path\n\n else:\n raise ValueError(\n \"Unsupported platform '{0}'. Only darwin/linux2/win32 are \"\n \"supported. You must specify the ipc_path\".format(sys.platform)\n )\n\n\nclass IPCProvider(JSONBaseProvider):\n _socket = None\n\n def __init__(self, ipc_path=None, testnet=False, *args, **kwargs):\n if ipc_path is None:\n self.ipc_path = get_default_ipc_path(testnet)\n else:\n self.ipc_path = ipc_path\n\n self._lock = threading.Lock()\n self._socket = PersistantSocket(self.ipc_path)\n super(IPCProvider, self).__init__(*args, **kwargs)\n\n def make_request(self, method, params):\n request = self.encode_rpc_request(method, params)\n\n with self._lock, self._socket as sock:\n try:\n sock.sendall(request)\n except BrokenPipeError:\n # one extra attempt, then give up\n sock = self._socket.reset()\n sock.sendall(request)\n\n raw_response = b\"\"\n with Timeout(10) as timeout:\n while True:\n try:\n raw_response += sock.recv(4096)\n except socket.timeout:\n timeout.sleep(0)\n continue\n if raw_response == b\"\":\n timeout.sleep(0)\n else:\n try:\n response = self.decode_rpc_response(raw_response)\n except JSONDecodeError:\n timeout.sleep(0)\n continue\n else:\n return response\n", "path": "web3/providers/ipc.py"}]} | 2,491 | 256 |
gh_patches_debug_13190 | rasdani/github-patches | git_diff | tornadoweb__tornado-1821 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TCP connection fails on systems without IPv6 support when connecting to a host that has an IPv6 address
`getaddrinfo` will return IPv6 addresses even when the system doesn't support IPv6, and Tornado will then fail to connect to the IPv6 address with this error (I got [a report for my project here](https://github.com/lilydjwg/nvchecker/issues/41)):
```
OSError: [Errno 97] Address family not supported by protocol
```
I see in #593 that `AI_ADDRCONFIG` was removed from `bind_sockets`, and in #823 Tornado just ignores the unsupported protocol. But this is about `connect`, not `bind`. Which do you think is better: using `AI_ADDRCONFIG`, or just ignoring the error? There's currently no way to pass the flags to resolvers, though.
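For reference, the two options discussed above look roughly like this with the standard library; the hostname and the plumbing needed to pass flags through Tornado's resolver are assumptions for the example.

```python
import socket

# Option 1: ask getaddrinfo for only those address families the local system
# is actually configured for.
addrinfo = socket.getaddrinfo(
    "example.com", 443, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
    socket.AI_ADDRCONFIG,
)

# Option 2: resolve everything, but treat an unsupported family like any other
# per-address connection failure and move on to the next candidate.
for family, socktype, proto, _canonname, sockaddr in addrinfo:
    try:
        sock = socket.socket(family, socktype, proto)
    except socket.error:  # e.g. errno 97, "Address family not supported"
        continue
    sock.connect(sockaddr)
    break
```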
</issue>
<code>
[start of tornado/tcpclient.py]
1 #!/usr/bin/env python
2 #
3 # Copyright 2014 Facebook
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License"); you may
6 # not use this file except in compliance with the License. You may obtain
7 # a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 # License for the specific language governing permissions and limitations
15 # under the License.
16
17 """A non-blocking TCP connection factory.
18 """
19 from __future__ import absolute_import, division, print_function, with_statement
20
21 import functools
22 import socket
23
24 from tornado.concurrent import Future
25 from tornado.ioloop import IOLoop
26 from tornado.iostream import IOStream
27 from tornado import gen
28 from tornado.netutil import Resolver
29
30 _INITIAL_CONNECT_TIMEOUT = 0.3
31
32
33 class _Connector(object):
34 """A stateless implementation of the "Happy Eyeballs" algorithm.
35
36 "Happy Eyeballs" is documented in RFC6555 as the recommended practice
37 for when both IPv4 and IPv6 addresses are available.
38
39 In this implementation, we partition the addresses by family, and
40 make the first connection attempt to whichever address was
41 returned first by ``getaddrinfo``. If that connection fails or
42 times out, we begin a connection in parallel to the first address
43 of the other family. If there are additional failures we retry
44 with other addresses, keeping one connection attempt per family
45 in flight at a time.
46
47 http://tools.ietf.org/html/rfc6555
48
49 """
50 def __init__(self, addrinfo, io_loop, connect):
51 self.io_loop = io_loop
52 self.connect = connect
53
54 self.future = Future()
55 self.timeout = None
56 self.last_error = None
57 self.remaining = len(addrinfo)
58 self.primary_addrs, self.secondary_addrs = self.split(addrinfo)
59
60 @staticmethod
61 def split(addrinfo):
62 """Partition the ``addrinfo`` list by address family.
63
64 Returns two lists. The first list contains the first entry from
65 ``addrinfo`` and all others with the same family, and the
66 second list contains all other addresses (normally one list will
67 be AF_INET and the other AF_INET6, although non-standard resolvers
68 may return additional families).
69 """
70 primary = []
71 secondary = []
72 primary_af = addrinfo[0][0]
73 for af, addr in addrinfo:
74 if af == primary_af:
75 primary.append((af, addr))
76 else:
77 secondary.append((af, addr))
78 return primary, secondary
79
80 def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):
81 self.try_connect(iter(self.primary_addrs))
82 self.set_timout(timeout)
83 return self.future
84
85 def try_connect(self, addrs):
86 try:
87 af, addr = next(addrs)
88 except StopIteration:
89 # We've reached the end of our queue, but the other queue
90 # might still be working. Send a final error on the future
91 # only when both queues are finished.
92 if self.remaining == 0 and not self.future.done():
93 self.future.set_exception(self.last_error or
94 IOError("connection failed"))
95 return
96 future = self.connect(af, addr)
97 future.add_done_callback(functools.partial(self.on_connect_done,
98 addrs, af, addr))
99
100 def on_connect_done(self, addrs, af, addr, future):
101 self.remaining -= 1
102 try:
103 stream = future.result()
104 except Exception as e:
105 if self.future.done():
106 return
107 # Error: try again (but remember what happened so we have an
108 # error to raise in the end)
109 self.last_error = e
110 self.try_connect(addrs)
111 if self.timeout is not None:
112 # If the first attempt failed, don't wait for the
113 # timeout to try an address from the secondary queue.
114 self.io_loop.remove_timeout(self.timeout)
115 self.on_timeout()
116 return
117 self.clear_timeout()
118 if self.future.done():
119 # This is a late arrival; just drop it.
120 stream.close()
121 else:
122 self.future.set_result((af, addr, stream))
123
124 def set_timout(self, timeout):
125 self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,
126 self.on_timeout)
127
128 def on_timeout(self):
129 self.timeout = None
130 self.try_connect(iter(self.secondary_addrs))
131
132 def clear_timeout(self):
133 if self.timeout is not None:
134 self.io_loop.remove_timeout(self.timeout)
135
136
137 class TCPClient(object):
138 """A non-blocking TCP connection factory.
139
140 .. versionchanged:: 4.1
141 The ``io_loop`` argument is deprecated.
142 """
143 def __init__(self, resolver=None, io_loop=None):
144 self.io_loop = io_loop or IOLoop.current()
145 if resolver is not None:
146 self.resolver = resolver
147 self._own_resolver = False
148 else:
149 self.resolver = Resolver(io_loop=io_loop)
150 self._own_resolver = True
151
152 def close(self):
153 if self._own_resolver:
154 self.resolver.close()
155
156 @gen.coroutine
157 def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
158 max_buffer_size=None):
159 """Connect to the given host and port.
160
161 Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
162 ``ssl_options`` is not None).
163 """
164 addrinfo = yield self.resolver.resolve(host, port, af)
165 connector = _Connector(
166 addrinfo, self.io_loop,
167 functools.partial(self._create_stream, max_buffer_size))
168 af, addr, stream = yield connector.start()
169 # TODO: For better performance we could cache the (af, addr)
170 # information here and re-use it on subsequent connections to
171 # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
172 if ssl_options is not None:
173 stream = yield stream.start_tls(False, ssl_options=ssl_options,
174 server_hostname=host)
175 raise gen.Return(stream)
176
177 def _create_stream(self, max_buffer_size, af, addr):
178 # Always connect in plaintext; we'll convert to ssl if necessary
179 # after one connection has completed.
180 stream = IOStream(socket.socket(af),
181 io_loop=self.io_loop,
182 max_buffer_size=max_buffer_size)
183 return stream.connect(addr)
184
[end of tornado/tcpclient.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tornado/tcpclient.py b/tornado/tcpclient.py
--- a/tornado/tcpclient.py
+++ b/tornado/tcpclient.py
@@ -177,7 +177,13 @@
def _create_stream(self, max_buffer_size, af, addr):
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
- stream = IOStream(socket.socket(af),
- io_loop=self.io_loop,
- max_buffer_size=max_buffer_size)
- return stream.connect(addr)
+ try:
+ stream = IOStream(socket.socket(af),
+ io_loop=self.io_loop,
+ max_buffer_size=max_buffer_size)
+ except socket.error as e:
+ fu = Future()
+ fu.set_exception(e)
+ return fu
+ else:
+ return stream.connect(addr)
| {"golden_diff": "diff --git a/tornado/tcpclient.py b/tornado/tcpclient.py\n--- a/tornado/tcpclient.py\n+++ b/tornado/tcpclient.py\n@@ -177,7 +177,13 @@\n def _create_stream(self, max_buffer_size, af, addr):\n # Always connect in plaintext; we'll convert to ssl if necessary\n # after one connection has completed.\n- stream = IOStream(socket.socket(af),\n- io_loop=self.io_loop,\n- max_buffer_size=max_buffer_size)\n- return stream.connect(addr)\n+ try:\n+ stream = IOStream(socket.socket(af),\n+ io_loop=self.io_loop,\n+ max_buffer_size=max_buffer_size)\n+ except socket.error as e:\n+ fu = Future()\n+ fu.set_exception(e)\n+ return fu\n+ else:\n+ return stream.connect(addr)\n", "issue": "TCP connection fails on systems without IPv6 support when connecting to a host that has an IPv6 address\n`getaddrinfo` will return IPv6 addresses when the system doesn't support it, and Tornado will then fail to connect to the IPv6 address with this error (I get [a report for my project here](https://github.com/lilydjwg/nvchecker/issues/41)):\n\n```\nOSError: [Errno 97] Address family not supported by protocol\n```\n\nI see in #593 `AI_ADDRCONFIG` was removed from `bind_sockets`, and in #823 Tornado just ignores the unsupported protocol. But this is about `connect`, not `bind`. What do you think, to use `AI_ADDRCONFIG` or just ignore the error? There's currently no way to pass the flags to resolvers though.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# Copyright 2014 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"A non-blocking TCP connection factory.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, with_statement\n\nimport functools\nimport socket\n\nfrom tornado.concurrent import Future\nfrom tornado.ioloop import IOLoop\nfrom tornado.iostream import IOStream\nfrom tornado import gen\nfrom tornado.netutil import Resolver\n\n_INITIAL_CONNECT_TIMEOUT = 0.3\n\n\nclass _Connector(object):\n \"\"\"A stateless implementation of the \"Happy Eyeballs\" algorithm.\n\n \"Happy Eyeballs\" is documented in RFC6555 as the recommended practice\n for when both IPv4 and IPv6 addresses are available.\n\n In this implementation, we partition the addresses by family, and\n make the first connection attempt to whichever address was\n returned first by ``getaddrinfo``. If that connection fails or\n times out, we begin a connection in parallel to the first address\n of the other family. If there are additional failures we retry\n with other addresses, keeping one connection attempt per family\n in flight at a time.\n\n http://tools.ietf.org/html/rfc6555\n\n \"\"\"\n def __init__(self, addrinfo, io_loop, connect):\n self.io_loop = io_loop\n self.connect = connect\n\n self.future = Future()\n self.timeout = None\n self.last_error = None\n self.remaining = len(addrinfo)\n self.primary_addrs, self.secondary_addrs = self.split(addrinfo)\n\n @staticmethod\n def split(addrinfo):\n \"\"\"Partition the ``addrinfo`` list by address family.\n\n Returns two lists. 
The first list contains the first entry from\n ``addrinfo`` and all others with the same family, and the\n second list contains all other addresses (normally one list will\n be AF_INET and the other AF_INET6, although non-standard resolvers\n may return additional families).\n \"\"\"\n primary = []\n secondary = []\n primary_af = addrinfo[0][0]\n for af, addr in addrinfo:\n if af == primary_af:\n primary.append((af, addr))\n else:\n secondary.append((af, addr))\n return primary, secondary\n\n def start(self, timeout=_INITIAL_CONNECT_TIMEOUT):\n self.try_connect(iter(self.primary_addrs))\n self.set_timout(timeout)\n return self.future\n\n def try_connect(self, addrs):\n try:\n af, addr = next(addrs)\n except StopIteration:\n # We've reached the end of our queue, but the other queue\n # might still be working. Send a final error on the future\n # only when both queues are finished.\n if self.remaining == 0 and not self.future.done():\n self.future.set_exception(self.last_error or\n IOError(\"connection failed\"))\n return\n future = self.connect(af, addr)\n future.add_done_callback(functools.partial(self.on_connect_done,\n addrs, af, addr))\n\n def on_connect_done(self, addrs, af, addr, future):\n self.remaining -= 1\n try:\n stream = future.result()\n except Exception as e:\n if self.future.done():\n return\n # Error: try again (but remember what happened so we have an\n # error to raise in the end)\n self.last_error = e\n self.try_connect(addrs)\n if self.timeout is not None:\n # If the first attempt failed, don't wait for the\n # timeout to try an address from the secondary queue.\n self.io_loop.remove_timeout(self.timeout)\n self.on_timeout()\n return\n self.clear_timeout()\n if self.future.done():\n # This is a late arrival; just drop it.\n stream.close()\n else:\n self.future.set_result((af, addr, stream))\n\n def set_timout(self, timeout):\n self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout,\n self.on_timeout)\n\n def on_timeout(self):\n self.timeout = None\n self.try_connect(iter(self.secondary_addrs))\n\n def clear_timeout(self):\n if self.timeout is not None:\n self.io_loop.remove_timeout(self.timeout)\n\n\nclass TCPClient(object):\n \"\"\"A non-blocking TCP connection factory.\n\n .. versionchanged:: 4.1\n The ``io_loop`` argument is deprecated.\n \"\"\"\n def __init__(self, resolver=None, io_loop=None):\n self.io_loop = io_loop or IOLoop.current()\n if resolver is not None:\n self.resolver = resolver\n self._own_resolver = False\n else:\n self.resolver = Resolver(io_loop=io_loop)\n self._own_resolver = True\n\n def close(self):\n if self._own_resolver:\n self.resolver.close()\n\n @gen.coroutine\n def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,\n max_buffer_size=None):\n \"\"\"Connect to the given host and port.\n\n Asynchronously returns an `.IOStream` (or `.SSLIOStream` if\n ``ssl_options`` is not None).\n \"\"\"\n addrinfo = yield self.resolver.resolve(host, port, af)\n connector = _Connector(\n addrinfo, self.io_loop,\n functools.partial(self._create_stream, max_buffer_size))\n af, addr, stream = yield connector.start()\n # TODO: For better performance we could cache the (af, addr)\n # information here and re-use it on subsequent connections to\n # the same host. 
(http://tools.ietf.org/html/rfc6555#section-4.2)\n if ssl_options is not None:\n stream = yield stream.start_tls(False, ssl_options=ssl_options,\n server_hostname=host)\n raise gen.Return(stream)\n\n def _create_stream(self, max_buffer_size, af, addr):\n # Always connect in plaintext; we'll convert to ssl if necessary\n # after one connection has completed.\n stream = IOStream(socket.socket(af),\n io_loop=self.io_loop,\n max_buffer_size=max_buffer_size)\n return stream.connect(addr)\n", "path": "tornado/tcpclient.py"}]} | 2,616 | 189 |
gh_patches_debug_23463 | rasdani/github-patches | git_diff | elastic__apm-agent-python-312 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HTTP 400: data validation error: Problem validating JSON document against schema
Sometimes when sending an event to the APM instance an HTTP 400 response is returned:
HTTP 400: data validation error: Problem validating JSON document against schema:
I[#] S[#] doesn't validate with "error#" I[#/errors/0/log/message]
S[#/properties/errors/items/properties/log/properties/message/type]
expected string, but got array
I've also seen a similar, but subtly different message:
HTTP 400: data validation error: Problem validating JSON document against schema:
I[#] S[#] doesn't validate with "error#" I[#/errors/0/log/param_message]
S[#/properties/errors/items/properties/log/properties/param_message/type]
expected string or null, but got array
This is then followed by a log message:
Failed to submit message: '<no message value>'
I'm using the latest elastic-apm client available from PyPI (3.0.1) with Python 3.6.5 and the APM server is using version 6.4.1.
(This seems similar to #135, but not quite the same).
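For illustration, the schema violation appears whenever the raw log message is not a string, and one way to guard against it is to coerce the message before building the payload. The helper name below is invented for the sketch; the library's real code would go through its own py2/py3 text type shim.

```python
import logging

logger = logging.getLogger(__name__)

# Legal logging call, but record.msg is then a list rather than a string, so a
# handler that forwards record.msg verbatim produces a JSON array where the
# intake schema expects a string:
logger.error(["connection refused", "will retry"])


# Hypothetical guard inside a handler's _emit():
def build_param_message(record):
    return {"message": str(record.msg), "params": record.args}
```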
</issue>
<code>
[start of elasticapm/handlers/logging.py]
1 """
2 elasticapm.handlers.logging
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2011-2017 Elasticsearch
6
7 Large portions are
8 :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
9 :license: BSD, see LICENSE for more details.
10 """
11
12 from __future__ import absolute_import
13
14 import datetime
15 import logging
16 import sys
17 import traceback
18
19 from elasticapm.base import Client
20 from elasticapm.utils import compat
21 from elasticapm.utils.encoding import to_unicode
22 from elasticapm.utils.stacks import iter_stack_frames
23
24
25 class LoggingHandler(logging.Handler):
26 def __init__(self, *args, **kwargs):
27 client = kwargs.pop("client_cls", Client)
28 if len(args) == 1:
29 arg = args[0]
30 args = args[1:]
31 if isinstance(arg, Client):
32 self.client = arg
33 else:
34 raise ValueError(
35 "The first argument to %s must be a Client instance, "
36 "got %r instead." % (self.__class__.__name__, arg)
37 )
38 elif "client" in kwargs:
39 self.client = kwargs.pop("client")
40 else:
41 self.client = client(*args, **kwargs)
42
43 logging.Handler.__init__(self, level=kwargs.get("level", logging.NOTSET))
44
45 def emit(self, record):
46 self.format(record)
47
48 # Avoid typical config issues by overriding loggers behavior
49 if record.name.startswith("elasticapm.errors"):
50 sys.stderr.write(to_unicode(record.message) + "\n")
51 return
52
53 try:
54 return self._emit(record)
55 except Exception:
56 sys.stderr.write("Top level ElasticAPM exception caught - failed creating log record.\n")
57 sys.stderr.write(to_unicode(record.msg + "\n"))
58 sys.stderr.write(to_unicode(traceback.format_exc() + "\n"))
59
60 try:
61 self.client.capture("Exception")
62 except Exception:
63 pass
64
65 def _emit(self, record, **kwargs):
66 data = {}
67
68 for k, v in compat.iteritems(record.__dict__):
69 if "." not in k and k not in ("culprit",):
70 continue
71 data[k] = v
72
73 stack = getattr(record, "stack", None)
74 if stack is True:
75 stack = iter_stack_frames()
76
77 if stack:
78 frames = []
79 started = False
80 last_mod = ""
81 for item in stack:
82 if isinstance(item, (list, tuple)):
83 frame, lineno = item
84 else:
85 frame, lineno = item, item.f_lineno
86
87 if not started:
88 f_globals = getattr(frame, "f_globals", {})
89 module_name = f_globals.get("__name__", "")
90 if last_mod.startswith("logging") and not module_name.startswith("logging"):
91 started = True
92 else:
93 last_mod = module_name
94 continue
95 frames.append((frame, lineno))
96 stack = frames
97
98 custom = getattr(record, "data", {})
99 # Add in all of the data from the record that we aren't already capturing
100 for k in record.__dict__.keys():
101 if k in (
102 "stack",
103 "name",
104 "args",
105 "msg",
106 "levelno",
107 "exc_text",
108 "exc_info",
109 "data",
110 "created",
111 "levelname",
112 "msecs",
113 "relativeCreated",
114 ):
115 continue
116 if k.startswith("_"):
117 continue
118 custom[k] = record.__dict__[k]
119
120 date = datetime.datetime.utcfromtimestamp(record.created)
121
122 # If there's no exception being processed,
123 # exc_info may be a 3-tuple of None
124 # http://docs.python.org/library/sys.html#sys.exc_info
125 if record.exc_info and all(record.exc_info):
126 handler = self.client.get_handler("elasticapm.events.Exception")
127 exception = handler.capture(self.client, exc_info=record.exc_info)
128 else:
129 exception = None
130
131 return self.client.capture(
132 "Message",
133 param_message={"message": record.msg, "params": record.args},
134 stack=stack,
135 custom=custom,
136 exception=exception,
137 date=date,
138 level=record.levelno,
139 logger_name=record.name,
140 **kwargs
141 )
142
[end of elasticapm/handlers/logging.py]
[start of elasticapm/handlers/logbook.py]
1 """
2 elasticapm.handlers.logbook
3 ~~~~~~~~~~~~~~~~~~~~~~
4
5 :copyright: (c) 2011-2017 Elasticsearch
6
7 Large portions are
8 :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
9 :license: BSD, see LICENSE for more details.
10 """
11 from __future__ import absolute_import
12
13 import sys
14 import traceback
15
16 import logbook
17
18 from elasticapm.base import Client
19 from elasticapm.utils.encoding import to_unicode
20
21 LOOKBOOK_LEVELS = {
22 logbook.DEBUG: "debug",
23 logbook.INFO: "info",
24 logbook.NOTICE: "info",
25 logbook.WARNING: "warning",
26 logbook.ERROR: "error",
27 logbook.CRITICAL: "fatal",
28 }
29
30
31 class LogbookHandler(logbook.Handler):
32 def __init__(self, *args, **kwargs):
33 if len(args) == 1:
34 arg = args[0]
35 # if isinstance(arg, compat.string_types):
36 # self.client = kwargs.pop('client_cls', Client)(dsn=arg)
37 if isinstance(arg, Client):
38 self.client = arg
39 else:
40 raise ValueError(
41 "The first argument to %s must be a Client instance, "
42 "got %r instead." % (self.__class__.__name__, arg)
43 )
44 args = []
45 else:
46 try:
47 self.client = kwargs.pop("client")
48 except KeyError:
49 raise TypeError("Expected keyword argument for LoggingHandler: client")
50 super(LogbookHandler, self).__init__(*args, **kwargs)
51
52 def emit(self, record):
53 self.format(record)
54
55 # Avoid typical config issues by overriding loggers behavior
56 if record.channel.startswith("elasticapm.errors"):
57 sys.stderr.write(to_unicode(record.message + "\n"))
58 return
59
60 try:
61 return self._emit(record)
62 except Exception:
63 sys.stderr.write("Top level ElasticAPM exception caught - failed creating log record.\n")
64 sys.stderr.write(to_unicode(record.msg + "\n"))
65 sys.stderr.write(to_unicode(traceback.format_exc() + "\n"))
66
67 try:
68 self.client.capture("Exception")
69 except Exception:
70 pass
71
72 def _emit(self, record):
73 # If there's no exception being processed,
74 # exc_info may be a 3-tuple of None
75 # http://docs.python.org/library/sys.html#sys.exc_info
76 if record.exc_info is True or (record.exc_info and all(record.exc_info)):
77 handler = self.client.get_handler("elasticapm.events.Exception")
78 exception = handler.capture(self.client, exc_info=record.exc_info)
79 else:
80 exception = None
81
82 return self.client.capture_message(
83 param_message={"message": record.msg, "params": record.args},
84 exception=exception,
85 level=LOOKBOOK_LEVELS[record.level],
86 logger_name=record.channel,
87 custom=record.extra,
88 stack=record.kwargs.get("stack"),
89 )
90
[end of elasticapm/handlers/logbook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/handlers/logbook.py b/elasticapm/handlers/logbook.py
--- a/elasticapm/handlers/logbook.py
+++ b/elasticapm/handlers/logbook.py
@@ -16,6 +16,7 @@
import logbook
from elasticapm.base import Client
+from elasticapm.utils import compat
from elasticapm.utils.encoding import to_unicode
LOOKBOOK_LEVELS = {
@@ -80,7 +81,7 @@
exception = None
return self.client.capture_message(
- param_message={"message": record.msg, "params": record.args},
+ param_message={"message": compat.text_type(record.msg), "params": record.args},
exception=exception,
level=LOOKBOOK_LEVELS[record.level],
logger_name=record.channel,
diff --git a/elasticapm/handlers/logging.py b/elasticapm/handlers/logging.py
--- a/elasticapm/handlers/logging.py
+++ b/elasticapm/handlers/logging.py
@@ -130,7 +130,7 @@
return self.client.capture(
"Message",
- param_message={"message": record.msg, "params": record.args},
+ param_message={"message": compat.text_type(record.msg), "params": record.args},
stack=stack,
custom=custom,
exception=exception,
| {"golden_diff": "diff --git a/elasticapm/handlers/logbook.py b/elasticapm/handlers/logbook.py\n--- a/elasticapm/handlers/logbook.py\n+++ b/elasticapm/handlers/logbook.py\n@@ -16,6 +16,7 @@\n import logbook\n \n from elasticapm.base import Client\n+from elasticapm.utils import compat\n from elasticapm.utils.encoding import to_unicode\n \n LOOKBOOK_LEVELS = {\n@@ -80,7 +81,7 @@\n exception = None\n \n return self.client.capture_message(\n- param_message={\"message\": record.msg, \"params\": record.args},\n+ param_message={\"message\": compat.text_type(record.msg), \"params\": record.args},\n exception=exception,\n level=LOOKBOOK_LEVELS[record.level],\n logger_name=record.channel,\ndiff --git a/elasticapm/handlers/logging.py b/elasticapm/handlers/logging.py\n--- a/elasticapm/handlers/logging.py\n+++ b/elasticapm/handlers/logging.py\n@@ -130,7 +130,7 @@\n \n return self.client.capture(\n \"Message\",\n- param_message={\"message\": record.msg, \"params\": record.args},\n+ param_message={\"message\": compat.text_type(record.msg), \"params\": record.args},\n stack=stack,\n custom=custom,\n exception=exception,\n", "issue": "HTTP 400: data validation error: Problem validating JSON document against schema\nSometimes when sending an event to the APM instance a HTTP 400 response is returned:\r\n\r\n HTTP 400: data validation error: Problem validating JSON document against schema: \r\n I[#] S[#] doesn't validate with \"error#\" I[#/errors/0/log/message] \r\n S[#/properties/errors/items/properties/log/properties/message/type] \r\n expected string, but got array\r\n\r\nI've also seen a similar, but subtly different message:\r\n\r\n HTTP 400: data validation error: Problem validating JSON document against schema: \r\n I[#] S[#] doesn't validate with \"error#\" I[#/errors/0/log/param_message] \r\n S[#/properties/errors/items/properties/log/properties/param_message/type] \r\n expected string or null, but got array\r\n\r\nThis is then followed by a log message:\r\n\r\n Failed to submit message: '<no message value>'\r\n\r\nI'm using the latest elastic-apm client available from PyPI (3.0.1) with Python 3.6.5 and the APM server is using version 6.4.1.\r\n\r\n(This seems similar to #135, but not quite the same).\n", "before_files": [{"content": "\"\"\"\nelasticapm.handlers.logging\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2011-2017 Elasticsearch\n\nLarge portions are\n:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport datetime\nimport logging\nimport sys\nimport traceback\n\nfrom elasticapm.base import Client\nfrom elasticapm.utils import compat\nfrom elasticapm.utils.encoding import to_unicode\nfrom elasticapm.utils.stacks import iter_stack_frames\n\n\nclass LoggingHandler(logging.Handler):\n def __init__(self, *args, **kwargs):\n client = kwargs.pop(\"client_cls\", Client)\n if len(args) == 1:\n arg = args[0]\n args = args[1:]\n if isinstance(arg, Client):\n self.client = arg\n else:\n raise ValueError(\n \"The first argument to %s must be a Client instance, \"\n \"got %r instead.\" % (self.__class__.__name__, arg)\n )\n elif \"client\" in kwargs:\n self.client = kwargs.pop(\"client\")\n else:\n self.client = client(*args, **kwargs)\n\n logging.Handler.__init__(self, level=kwargs.get(\"level\", logging.NOTSET))\n\n def emit(self, record):\n self.format(record)\n\n # Avoid typical config issues by overriding loggers behavior\n if record.name.startswith(\"elasticapm.errors\"):\n 
sys.stderr.write(to_unicode(record.message) + \"\\n\")\n return\n\n try:\n return self._emit(record)\n except Exception:\n sys.stderr.write(\"Top level ElasticAPM exception caught - failed creating log record.\\n\")\n sys.stderr.write(to_unicode(record.msg + \"\\n\"))\n sys.stderr.write(to_unicode(traceback.format_exc() + \"\\n\"))\n\n try:\n self.client.capture(\"Exception\")\n except Exception:\n pass\n\n def _emit(self, record, **kwargs):\n data = {}\n\n for k, v in compat.iteritems(record.__dict__):\n if \".\" not in k and k not in (\"culprit\",):\n continue\n data[k] = v\n\n stack = getattr(record, \"stack\", None)\n if stack is True:\n stack = iter_stack_frames()\n\n if stack:\n frames = []\n started = False\n last_mod = \"\"\n for item in stack:\n if isinstance(item, (list, tuple)):\n frame, lineno = item\n else:\n frame, lineno = item, item.f_lineno\n\n if not started:\n f_globals = getattr(frame, \"f_globals\", {})\n module_name = f_globals.get(\"__name__\", \"\")\n if last_mod.startswith(\"logging\") and not module_name.startswith(\"logging\"):\n started = True\n else:\n last_mod = module_name\n continue\n frames.append((frame, lineno))\n stack = frames\n\n custom = getattr(record, \"data\", {})\n # Add in all of the data from the record that we aren't already capturing\n for k in record.__dict__.keys():\n if k in (\n \"stack\",\n \"name\",\n \"args\",\n \"msg\",\n \"levelno\",\n \"exc_text\",\n \"exc_info\",\n \"data\",\n \"created\",\n \"levelname\",\n \"msecs\",\n \"relativeCreated\",\n ):\n continue\n if k.startswith(\"_\"):\n continue\n custom[k] = record.__dict__[k]\n\n date = datetime.datetime.utcfromtimestamp(record.created)\n\n # If there's no exception being processed,\n # exc_info may be a 3-tuple of None\n # http://docs.python.org/library/sys.html#sys.exc_info\n if record.exc_info and all(record.exc_info):\n handler = self.client.get_handler(\"elasticapm.events.Exception\")\n exception = handler.capture(self.client, exc_info=record.exc_info)\n else:\n exception = None\n\n return self.client.capture(\n \"Message\",\n param_message={\"message\": record.msg, \"params\": record.args},\n stack=stack,\n custom=custom,\n exception=exception,\n date=date,\n level=record.levelno,\n logger_name=record.name,\n **kwargs\n )\n", "path": "elasticapm/handlers/logging.py"}, {"content": "\"\"\"\nelasticapm.handlers.logbook\n~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2011-2017 Elasticsearch\n\nLarge portions are\n:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\nimport traceback\n\nimport logbook\n\nfrom elasticapm.base import Client\nfrom elasticapm.utils.encoding import to_unicode\n\nLOOKBOOK_LEVELS = {\n logbook.DEBUG: \"debug\",\n logbook.INFO: \"info\",\n logbook.NOTICE: \"info\",\n logbook.WARNING: \"warning\",\n logbook.ERROR: \"error\",\n logbook.CRITICAL: \"fatal\",\n}\n\n\nclass LogbookHandler(logbook.Handler):\n def __init__(self, *args, **kwargs):\n if len(args) == 1:\n arg = args[0]\n # if isinstance(arg, compat.string_types):\n # self.client = kwargs.pop('client_cls', Client)(dsn=arg)\n if isinstance(arg, Client):\n self.client = arg\n else:\n raise ValueError(\n \"The first argument to %s must be a Client instance, \"\n \"got %r instead.\" % (self.__class__.__name__, arg)\n )\n args = []\n else:\n try:\n self.client = kwargs.pop(\"client\")\n except KeyError:\n raise TypeError(\"Expected keyword argument for LoggingHandler: client\")\n 
super(LogbookHandler, self).__init__(*args, **kwargs)\n\n def emit(self, record):\n self.format(record)\n\n # Avoid typical config issues by overriding loggers behavior\n if record.channel.startswith(\"elasticapm.errors\"):\n sys.stderr.write(to_unicode(record.message + \"\\n\"))\n return\n\n try:\n return self._emit(record)\n except Exception:\n sys.stderr.write(\"Top level ElasticAPM exception caught - failed creating log record.\\n\")\n sys.stderr.write(to_unicode(record.msg + \"\\n\"))\n sys.stderr.write(to_unicode(traceback.format_exc() + \"\\n\"))\n\n try:\n self.client.capture(\"Exception\")\n except Exception:\n pass\n\n def _emit(self, record):\n # If there's no exception being processed,\n # exc_info may be a 3-tuple of None\n # http://docs.python.org/library/sys.html#sys.exc_info\n if record.exc_info is True or (record.exc_info and all(record.exc_info)):\n handler = self.client.get_handler(\"elasticapm.events.Exception\")\n exception = handler.capture(self.client, exc_info=record.exc_info)\n else:\n exception = None\n\n return self.client.capture_message(\n param_message={\"message\": record.msg, \"params\": record.args},\n exception=exception,\n level=LOOKBOOK_LEVELS[record.level],\n logger_name=record.channel,\n custom=record.extra,\n stack=record.kwargs.get(\"stack\"),\n )\n", "path": "elasticapm/handlers/logbook.py"}]} | 2,882 | 306 |
gh_patches_debug_16496 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-2096 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Do not count invalid attributes as dropped
https://github.com/open-telemetry/opentelemetry-python/blob/653207dd2181db1a766a4a703dcda78fd7703bb2/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L175-L183
An existing attribute gets deleted (or the oldest one evicted and counted as dropped) before the new value is validated, so an invalid attribute still causes data loss and bumps the dropped counter. This should be fixed. We can also use `.popitem()`, which is optimised for removing items at either end of the dict, when we do need to drop an attribute.
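For reference, a quick illustration (not part of the original report) of the `.popitem()` behaviour suggested above — `OrderedDict.popitem(last=False)` removes the oldest entry, which is exactly the eviction needed when the dict is at capacity:

```python
from collections import OrderedDict

# popitem(last=False) pops from the front, i.e. the oldest insertion.
d = OrderedDict([("a", 1), ("b", 2), ("c", 3)])
print(d.popitem(last=False))  # ('a', 1)
print(list(d))                # ['b', 'c']
```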
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/attributes/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # type: ignore
15
16 import logging
17 import threading
18 from collections import OrderedDict
19 from collections.abc import MutableMapping
20 from typing import Optional, Sequence, Union
21
22 from opentelemetry.util import types
23
24 # bytes are accepted as a user supplied value for attributes but
25 # decoded to strings internally.
26 _VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)
27
28
29 _logger = logging.getLogger(__name__)
30
31
32 def _clean_attribute(
33 key: str, value: types.AttributeValue, max_len: Optional[int]
34 ) -> Optional[types.AttributeValue]:
35 """Checks if attribute value is valid and cleans it if required.
36
37 The function returns the cleaned value or None if the value is not valid.
38
39 An attribute value is valid if it is either:
40 - A primitive type: string, boolean, double precision floating
41 point (IEEE 754-1985) or integer.
42 - An array of primitive type values. The array MUST be homogeneous,
43 i.e. it MUST NOT contain values of different types.
44
45 An attribute needs cleansing if:
46 - Its length is greater than the maximum allowed length.
47 - It needs to be encoded/decoded e.g, bytes to strings.
48 """
49
50 if not (key and isinstance(key, str)):
51 _logger.warning("invalid key `%s`. must be non-empty string.", key)
52 return None
53
54 if isinstance(value, _VALID_ATTR_VALUE_TYPES):
55 return _clean_attribute_value(value, max_len)
56
57 if isinstance(value, Sequence):
58 sequence_first_valid_type = None
59 cleaned_seq = []
60
61 for element in value:
62 # None is considered valid in any sequence
63 if element is None:
64 cleaned_seq.append(element)
65
66 element = _clean_attribute_value(element, max_len)
67 # reject invalid elements
68 if element is None:
69 continue
70
71 element_type = type(element)
72 # Reject attribute value if sequence contains a value with an incompatible type.
73 if element_type not in _VALID_ATTR_VALUE_TYPES:
74 _logger.warning(
75 "Invalid type %s in attribute value sequence. Expected one of "
76 "%s or None",
77 element_type.__name__,
78 [
79 valid_type.__name__
80 for valid_type in _VALID_ATTR_VALUE_TYPES
81 ],
82 )
83 return None
84
85 # The type of the sequence must be homogeneous. The first non-None
86 # element determines the type of the sequence
87 if sequence_first_valid_type is None:
88 sequence_first_valid_type = element_type
89 # use equality instead of isinstance as isinstance(True, int) evaluates to True
90 elif element_type != sequence_first_valid_type:
91 _logger.warning(
92 "Mixed types %s and %s in attribute value sequence",
93 sequence_first_valid_type.__name__,
94 type(element).__name__,
95 )
96 return None
97
98 cleaned_seq.append(element)
99
100 # Freeze mutable sequences defensively
101 return tuple(cleaned_seq)
102
103 _logger.warning(
104 "Invalid type %s for attribute value. Expected one of %s or a "
105 "sequence of those types",
106 type(value).__name__,
107 [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
108 )
109 return None
110
111
112 def _clean_attribute_value(
113 value: types.AttributeValue, limit: Optional[int]
114 ) -> Union[types.AttributeValue, None]:
115 if value is None:
116 return None
117
118 if isinstance(value, bytes):
119 try:
120 value = value.decode()
121 except UnicodeDecodeError:
122 _logger.warning("Byte attribute could not be decoded.")
123 return None
124
125 if limit is not None and isinstance(value, str):
126 value = value[:limit]
127 return value
128
129
130 class BoundedAttributes(MutableMapping):
131 """An ordered dict with a fixed max capacity.
132
133 Oldest elements are dropped when the dict is full and a new element is
134 added.
135 """
136
137 def __init__(
138 self,
139 maxlen: Optional[int] = None,
140 attributes: types.Attributes = None,
141 immutable: bool = True,
142 max_value_len: Optional[int] = None,
143 ):
144 if maxlen is not None:
145 if not isinstance(maxlen, int) or maxlen < 0:
146 raise ValueError(
147 "maxlen must be valid int greater or equal to 0"
148 )
149 self.maxlen = maxlen
150 self.dropped = 0
151 self.max_value_len = max_value_len
152 self._dict = OrderedDict() # type: OrderedDict
153 self._lock = threading.Lock() # type: threading.Lock
154 if attributes:
155 for key, value in attributes.items():
156 self[key] = value
157 self._immutable = immutable
158
159 def __repr__(self):
160 return "{}({}, maxlen={})".format(
161 type(self).__name__, dict(self._dict), self.maxlen
162 )
163
164 def __getitem__(self, key):
165 return self._dict[key]
166
167 def __setitem__(self, key, value):
168 if getattr(self, "_immutable", False):
169 raise TypeError
170 with self._lock:
171 if self.maxlen is not None and self.maxlen == 0:
172 self.dropped += 1
173 return
174
175 if key in self._dict:
176 del self._dict[key]
177 elif self.maxlen is not None and len(self._dict) == self.maxlen:
178 del self._dict[next(iter(self._dict.keys()))]
179 self.dropped += 1
180
181 value = _clean_attribute(key, value, self.max_value_len)
182 if value is not None:
183 self._dict[key] = value
184
185 def __delitem__(self, key):
186 if getattr(self, "_immutable", False):
187 raise TypeError
188 with self._lock:
189 del self._dict[key]
190
191 def __iter__(self):
192 with self._lock:
193 return iter(self._dict.copy())
194
195 def __len__(self):
196 return len(self._dict)
197
198 def copy(self):
199 return self._dict.copy()
200
[end of opentelemetry-api/src/opentelemetry/attributes/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py
@@ -172,14 +172,16 @@
self.dropped += 1
return
- if key in self._dict:
- del self._dict[key]
- elif self.maxlen is not None and len(self._dict) == self.maxlen:
- del self._dict[next(iter(self._dict.keys()))]
- self.dropped += 1
-
value = _clean_attribute(key, value, self.max_value_len)
if value is not None:
+ if key in self._dict:
+ del self._dict[key]
+ elif (
+ self.maxlen is not None and len(self._dict) == self.maxlen
+ ):
+ self._dict.popitem(last=False)
+ self.dropped += 1
+
self._dict[key] = value
def __delitem__(self, key):
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/attributes/__init__.py b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/attributes/__init__.py\n@@ -172,14 +172,16 @@\n self.dropped += 1\n return\n \n- if key in self._dict:\n- del self._dict[key]\n- elif self.maxlen is not None and len(self._dict) == self.maxlen:\n- del self._dict[next(iter(self._dict.keys()))]\n- self.dropped += 1\n-\n value = _clean_attribute(key, value, self.max_value_len)\n if value is not None:\n+ if key in self._dict:\n+ del self._dict[key]\n+ elif (\n+ self.maxlen is not None and len(self._dict) == self.maxlen\n+ ):\n+ self._dict.popitem(last=False)\n+ self.dropped += 1\n+\n self._dict[key] = value\n \n def __delitem__(self, key):\n", "issue": "Do not count invalid attributes for dropped\nhttps://github.com/open-telemetry/opentelemetry-python/blob/653207dd2181db1a766a4a703dcda78fd7703bb2/opentelemetry-api/src/opentelemetry/attributes/__init__.py#L175-L183\r\n\r\nExisting attribute is deleted and new attribute is counted for dropped. This should be fixed. Also we can use `.popitem()` which is optimised for deleting item on ends when we need to drop an attribute.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\nimport logging\nimport threading\nfrom collections import OrderedDict\nfrom collections.abc import MutableMapping\nfrom typing import Optional, Sequence, Union\n\nfrom opentelemetry.util import types\n\n# bytes are accepted as a user supplied value for attributes but\n# decoded to strings internally.\n_VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)\n\n\n_logger = logging.getLogger(__name__)\n\n\ndef _clean_attribute(\n key: str, value: types.AttributeValue, max_len: Optional[int]\n) -> Optional[types.AttributeValue]:\n \"\"\"Checks if attribute value is valid and cleans it if required.\n\n The function returns the cleaned value or None if the value is not valid.\n\n An attribute value is valid if it is either:\n - A primitive type: string, boolean, double precision floating\n point (IEEE 754-1985) or integer.\n - An array of primitive type values. The array MUST be homogeneous,\n i.e. it MUST NOT contain values of different types.\n\n An attribute needs cleansing if:\n - Its length is greater than the maximum allowed length.\n - It needs to be encoded/decoded e.g, bytes to strings.\n \"\"\"\n\n if not (key and isinstance(key, str)):\n _logger.warning(\"invalid key `%s`. 
must be non-empty string.\", key)\n return None\n\n if isinstance(value, _VALID_ATTR_VALUE_TYPES):\n return _clean_attribute_value(value, max_len)\n\n if isinstance(value, Sequence):\n sequence_first_valid_type = None\n cleaned_seq = []\n\n for element in value:\n # None is considered valid in any sequence\n if element is None:\n cleaned_seq.append(element)\n\n element = _clean_attribute_value(element, max_len)\n # reject invalid elements\n if element is None:\n continue\n\n element_type = type(element)\n # Reject attribute value if sequence contains a value with an incompatible type.\n if element_type not in _VALID_ATTR_VALUE_TYPES:\n _logger.warning(\n \"Invalid type %s in attribute value sequence. Expected one of \"\n \"%s or None\",\n element_type.__name__,\n [\n valid_type.__name__\n for valid_type in _VALID_ATTR_VALUE_TYPES\n ],\n )\n return None\n\n # The type of the sequence must be homogeneous. The first non-None\n # element determines the type of the sequence\n if sequence_first_valid_type is None:\n sequence_first_valid_type = element_type\n # use equality instead of isinstance as isinstance(True, int) evaluates to True\n elif element_type != sequence_first_valid_type:\n _logger.warning(\n \"Mixed types %s and %s in attribute value sequence\",\n sequence_first_valid_type.__name__,\n type(element).__name__,\n )\n return None\n\n cleaned_seq.append(element)\n\n # Freeze mutable sequences defensively\n return tuple(cleaned_seq)\n\n _logger.warning(\n \"Invalid type %s for attribute value. Expected one of %s or a \"\n \"sequence of those types\",\n type(value).__name__,\n [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],\n )\n return None\n\n\ndef _clean_attribute_value(\n value: types.AttributeValue, limit: Optional[int]\n) -> Union[types.AttributeValue, None]:\n if value is None:\n return None\n\n if isinstance(value, bytes):\n try:\n value = value.decode()\n except UnicodeDecodeError:\n _logger.warning(\"Byte attribute could not be decoded.\")\n return None\n\n if limit is not None and isinstance(value, str):\n value = value[:limit]\n return value\n\n\nclass BoundedAttributes(MutableMapping):\n \"\"\"An ordered dict with a fixed max capacity.\n\n Oldest elements are dropped when the dict is full and a new element is\n added.\n \"\"\"\n\n def __init__(\n self,\n maxlen: Optional[int] = None,\n attributes: types.Attributes = None,\n immutable: bool = True,\n max_value_len: Optional[int] = None,\n ):\n if maxlen is not None:\n if not isinstance(maxlen, int) or maxlen < 0:\n raise ValueError(\n \"maxlen must be valid int greater or equal to 0\"\n )\n self.maxlen = maxlen\n self.dropped = 0\n self.max_value_len = max_value_len\n self._dict = OrderedDict() # type: OrderedDict\n self._lock = threading.Lock() # type: threading.Lock\n if attributes:\n for key, value in attributes.items():\n self[key] = value\n self._immutable = immutable\n\n def __repr__(self):\n return \"{}({}, maxlen={})\".format(\n type(self).__name__, dict(self._dict), self.maxlen\n )\n\n def __getitem__(self, key):\n return self._dict[key]\n\n def __setitem__(self, key, value):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n if self.maxlen is not None and self.maxlen == 0:\n self.dropped += 1\n return\n\n if key in self._dict:\n del self._dict[key]\n elif self.maxlen is not None and len(self._dict) == self.maxlen:\n del self._dict[next(iter(self._dict.keys()))]\n self.dropped += 1\n\n value = _clean_attribute(key, value, self.max_value_len)\n if value is not None:\n 
self._dict[key] = value\n\n def __delitem__(self, key):\n if getattr(self, \"_immutable\", False):\n raise TypeError\n with self._lock:\n del self._dict[key]\n\n def __iter__(self):\n with self._lock:\n return iter(self._dict.copy())\n\n def __len__(self):\n return len(self._dict)\n\n def copy(self):\n return self._dict.copy()\n", "path": "opentelemetry-api/src/opentelemetry/attributes/__init__.py"}]} | 2,584 | 260 |
gh_patches_debug_26021 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-380 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Instrument the Django REST Framework (DRF)
Provide more detailed instrumentation of the Django REST Framework (DRF).
http://www.django-rest-framework.org/
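As background for the eventual instrumentation, here is a small sketch (the viewset name and action are made up for illustration) of the metadata DRF's `ViewSetMixin.as_view()` attaches to the generated view function — an integration can inspect these attributes to build a more descriptive operation name:

```python
from rest_framework import viewsets


class PingViewSet(viewsets.ViewSet):  # hypothetical example viewset
    def list(self, request):
        pass


view_func = PingViewSet.as_view({"get": "list"})
# DRF attaches the viewset class and the HTTP-method -> action mapping:
print(view_func.cls)      # <class '...PingViewSet'>
print(view_func.actions)  # {'get': 'list'}
```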
</issue>
<code>
[start of src/scout_apm/django/middleware.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import sys
5
6 import django
7 from django.conf import settings
8
9 from scout_apm.compat import string_types
10 from scout_apm.core.config import scout_config
11 from scout_apm.core.tracked_request import TrackedRequest
12 from scout_apm.core.web_requests import (
13 create_filtered_path,
14 ignore_path,
15 track_amazon_request_queue_time,
16 track_request_queue_time,
17 )
18
19 if django.VERSION >= (1, 11):
20 from django.urls import get_urlconf
21 else:
22 from django.core.urlresolvers import get_urlconf
23
24
25 def get_operation_name(request):
26 view_func = request.resolver_match.func
27 view_name = request.resolver_match._func_path
28
29 if hasattr(view_func, "model_admin"):
30 # Seems to comes from Django admin (attribute only set on Django 1.9+)
31 admin_class = view_func.model_admin.__class__
32 view_name = (
33 admin_class.__module__
34 + "."
35 + admin_class.__name__
36 + "."
37 + view_func.__name__
38 )
39
40 # Seems to be a Tastypie Resource. Need to resort to some stack inspection
41 # to find a better name since its decorators don't wrap very well
42 if view_name == "tastypie.resources.wrapper":
43 tastypie_name = _get_tastypie_operation_name(request, view_func)
44 if tastypie_name is not None:
45 return tastypie_name
46
47 return "Controller/" + view_name
48
49
50 def _get_tastypie_operation_name(request, view_func):
51 try:
52 from tastypie.resources import Resource
53 except ImportError:
54 return None
55
56 if sys.version_info[0] == 2: # pragma: no cover
57 try:
58 wrapper = view_func.__closure__[0].cell_contents
59 except (AttributeError, IndexError):
60 return None
61 elif sys.version_info[0] == 3:
62 try:
63 wrapper = view_func.__wrapped__
64 except AttributeError:
65 return None
66
67 if not hasattr(wrapper, "__closure__") or len(wrapper.__closure__) != 2:
68 return None
69
70 instance = wrapper.__closure__[0].cell_contents
71 if not isinstance(instance, Resource): # pragma: no cover
72 return None
73
74 method_name = wrapper.__closure__[1].cell_contents
75 if not isinstance(method_name, string_types): # pragma: no cover
76 return None
77
78 if method_name.startswith("dispatch_"): # pragma: no cover
79 method_name = request.method.lower() + method_name.split("dispatch", 1)[1]
80
81 return "Controller/{}.{}.{}".format(
82 instance.__module__, instance.__class__.__name__, method_name
83 )
84
85
86 def track_request_view_data(request, tracked_request):
87 path = request.path
88 tracked_request.tag(
89 "path",
90 create_filtered_path(
91 path, [(k, v) for k, vs in request.GET.lists() for v in vs]
92 ),
93 )
94 if ignore_path(path):
95 tracked_request.tag("ignore_transaction", True)
96
97 try:
98 # Determine a remote IP to associate with the request. The value is
99 # spoofable by the requester so this is not suitable to use in any
100 # security sensitive context.
101 user_ip = (
102 request.META.get("HTTP_X_FORWARDED_FOR", "").split(",")[0]
103 or request.META.get("HTTP_CLIENT_IP", "").split(",")[0]
104 or request.META.get("REMOTE_ADDR", None)
105 )
106 tracked_request.tag("user_ip", user_ip)
107 except Exception:
108 pass
109
110 user = getattr(request, "user", None)
111 if user is not None:
112 try:
113 tracked_request.tag("username", user.get_username())
114 except Exception:
115 pass
116
117 tracked_request.tag("urlconf", get_urlconf(settings.ROOT_URLCONF))
118
119
120 class MiddlewareTimingMiddleware(object):
121 """
122 Insert as early into the Middleware stack as possible (outermost layers),
123 so that other middlewares called after can be timed.
124 """
125
126 def __init__(self, get_response):
127 self.get_response = get_response
128
129 def __call__(self, request):
130 if not scout_config.value("monitor"):
131 return self.get_response(request)
132
133 tracked_request = TrackedRequest.instance()
134
135 tracked_request.start_span(
136 operation="Middleware", should_capture_backtrace=False
137 )
138 queue_time = request.META.get("HTTP_X_QUEUE_START") or request.META.get(
139 "HTTP_X_REQUEST_START", ""
140 )
141 queue_time_tracked = track_request_queue_time(queue_time, tracked_request)
142 if not queue_time_tracked:
143 track_amazon_request_queue_time(
144 request.META.get("HTTP_X_AMZN_TRACE_ID", ""), tracked_request
145 )
146
147 try:
148 return self.get_response(request)
149 finally:
150 tracked_request.stop_span()
151
152
153 class ViewTimingMiddleware(object):
154 """
155 Insert as deep into the middleware stack as possible, ideally wrapping no
156 other middleware. Designed to time the View itself
157 """
158
159 def __init__(self, get_response):
160 self.get_response = get_response
161
162 def __call__(self, request):
163 """
164 Wrap a single incoming request with start and stop calls.
165 This will start timing, but relies on the process_view callback to
166 capture more details about what view was really called, and other
167 similar info.
168
169 If process_view isn't called, then the request will not
170 be recorded. This can happen if a middleware further along the stack
171 doesn't call onward, and instead returns a response directly.
172 """
173 if not scout_config.value("monitor"):
174 return self.get_response(request)
175
176 tracked_request = TrackedRequest.instance()
177
178 # This operation name won't be recorded unless changed later in
179 # process_view
180 tracked_request.start_span(operation="Unknown", should_capture_backtrace=False)
181 try:
182 response = self.get_response(request)
183 if 500 <= response.status_code <= 599:
184 tracked_request.tag("error", "true")
185 return response
186 finally:
187 tracked_request.stop_span()
188
189 def process_view(self, request, view_func, view_args, view_kwargs):
190 """
191 Capture details about the view_func that is about to execute
192 """
193 if not scout_config.value("monitor"):
194 return
195 tracked_request = TrackedRequest.instance()
196 tracked_request.is_real_request = True
197
198 track_request_view_data(request, tracked_request)
199
200 span = tracked_request.current_span()
201 if span is not None:
202 span.operation = get_operation_name(request)
203
204 def process_exception(self, request, exception):
205 """
206 Mark this request as having errored out
207
208 Does not modify or catch or otherwise change the exception thrown
209 """
210 if not scout_config.value("monitor"):
211 return
212 TrackedRequest.instance().tag("error", "true")
213
214
215 class OldStyleMiddlewareTimingMiddleware(object):
216 """
217 Insert as early into the Middleware stack as possible (outermost layers),
218 so that other middlewares called after can be timed.
219 """
220
221 def process_request(self, request):
222 if not scout_config.value("monitor"):
223 return
224 tracked_request = TrackedRequest.instance()
225 request._scout_tracked_request = tracked_request
226
227 queue_time = request.META.get("HTTP_X_QUEUE_START") or request.META.get(
228 "HTTP_X_REQUEST_START", ""
229 )
230 queue_time_tracked = track_request_queue_time(queue_time, tracked_request)
231 if not queue_time_tracked:
232 track_amazon_request_queue_time(
233 request.META.get("HTTP_X_AMZN_TRACE_ID", ""), tracked_request
234 )
235
236 tracked_request.start_span(
237 operation="Middleware", should_capture_backtrace=False
238 )
239
240 def process_response(self, request, response):
241 # Only stop span if there's a request, but presume we are balanced,
242 # i.e. that custom instrumentation within the application is not
243 # causing errors
244 tracked_request = getattr(request, "_scout_tracked_request", None)
245 if 500 <= response.status_code <= 599:
246 tracked_request.tag("error", "true")
247 if tracked_request is not None:
248 tracked_request.stop_span()
249 return response
250
251
252 class OldStyleViewMiddleware(object):
253 def process_view(self, request, view_func, view_func_args, view_func_kwargs):
254 tracked_request = getattr(request, "_scout_tracked_request", None)
255 if tracked_request is None:
256 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
257 # don't do anything
258 return
259
260 tracked_request.is_real_request = True
261
262 track_request_view_data(request, tracked_request)
263
264 span = tracked_request.start_span(
265 operation=get_operation_name(request), should_capture_backtrace=False
266 )
267 # Save the span into the request, so we can check
268 # if we're matched up when stopping
269 request._scout_view_span = span
270
271 def process_response(self, request, response):
272 tracked_request = getattr(request, "_scout_tracked_request", None)
273 if tracked_request is None:
274 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
275 # don't do anything
276 return response
277
278 # Only stop span if we started, but presume we are balanced, i.e. that
279 # custom instrumentation within the application is not causing errors
280 span = getattr(request, "_scout_view_span", None)
281 if span is not None:
282 tracked_request.stop_span()
283 return response
284
285 def process_exception(self, request, exception):
286 tracked_request = getattr(request, "_scout_tracked_request", None)
287 if tracked_request is None:
288 # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so
289 # don't do anything
290 return
291
292 tracked_request.tag("error", "true")
293
[end of src/scout_apm/django/middleware.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/scout_apm/django/middleware.py b/src/scout_apm/django/middleware.py
--- a/src/scout_apm/django/middleware.py
+++ b/src/scout_apm/django/middleware.py
@@ -37,6 +37,12 @@
+ view_func.__name__
)
+ django_rest_framework_name = _get_django_rest_framework_name(
+ request, view_func, view_name
+ )
+ if django_rest_framework_name is not None:
+ return django_rest_framework_name
+
# Seems to be a Tastypie Resource. Need to resort to some stack inspection
# to find a better name since its decorators don't wrap very well
if view_name == "tastypie.resources.wrapper":
@@ -47,6 +53,28 @@
return "Controller/" + view_name
+def _get_django_rest_framework_name(request, view_func, view_name):
+ try:
+ from rest_framework.viewsets import ViewSetMixin
+ except ImportError:
+ return None
+
+ kls = getattr(view_func, "cls", None)
+ if isinstance(kls, type) and not issubclass(kls, ViewSetMixin):
+ return None
+
+ # Get 'actions' set in ViewSetMixin.as_view
+ actions = getattr(view_func, "actions", None)
+ if not actions or not isinstance(actions, dict):
+ return None
+
+ method_lower = request.method.lower()
+ if method_lower not in actions:
+ return None
+
+ return "Controller/{}.{}".format(view_name, actions[method_lower])
+
+
def _get_tastypie_operation_name(request, view_func):
try:
from tastypie.resources import Resource
| {"golden_diff": "diff --git a/src/scout_apm/django/middleware.py b/src/scout_apm/django/middleware.py\n--- a/src/scout_apm/django/middleware.py\n+++ b/src/scout_apm/django/middleware.py\n@@ -37,6 +37,12 @@\n + view_func.__name__\n )\n \n+ django_rest_framework_name = _get_django_rest_framework_name(\n+ request, view_func, view_name\n+ )\n+ if django_rest_framework_name is not None:\n+ return django_rest_framework_name\n+\n # Seems to be a Tastypie Resource. Need to resort to some stack inspection\n # to find a better name since its decorators don't wrap very well\n if view_name == \"tastypie.resources.wrapper\":\n@@ -47,6 +53,28 @@\n return \"Controller/\" + view_name\n \n \n+def _get_django_rest_framework_name(request, view_func, view_name):\n+ try:\n+ from rest_framework.viewsets import ViewSetMixin\n+ except ImportError:\n+ return None\n+\n+ kls = getattr(view_func, \"cls\", None)\n+ if isinstance(kls, type) and not issubclass(kls, ViewSetMixin):\n+ return None\n+\n+ # Get 'actions' set in ViewSetMixin.as_view\n+ actions = getattr(view_func, \"actions\", None)\n+ if not actions or not isinstance(actions, dict):\n+ return None\n+\n+ method_lower = request.method.lower()\n+ if method_lower not in actions:\n+ return None\n+\n+ return \"Controller/{}.{}\".format(view_name, actions[method_lower])\n+\n+\n def _get_tastypie_operation_name(request, view_func):\n try:\n from tastypie.resources import Resource\n", "issue": "Instrument the Django REST Framework (DRF)\nProvide more detailed instrumentation of the Django REST Framework (DRF).\r\n\r\nhttp://www.django-rest-framework.org/\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\n\nimport django\nfrom django.conf import settings\n\nfrom scout_apm.compat import string_types\nfrom scout_apm.core.config import scout_config\nfrom scout_apm.core.tracked_request import TrackedRequest\nfrom scout_apm.core.web_requests import (\n create_filtered_path,\n ignore_path,\n track_amazon_request_queue_time,\n track_request_queue_time,\n)\n\nif django.VERSION >= (1, 11):\n from django.urls import get_urlconf\nelse:\n from django.core.urlresolvers import get_urlconf\n\n\ndef get_operation_name(request):\n view_func = request.resolver_match.func\n view_name = request.resolver_match._func_path\n\n if hasattr(view_func, \"model_admin\"):\n # Seems to comes from Django admin (attribute only set on Django 1.9+)\n admin_class = view_func.model_admin.__class__\n view_name = (\n admin_class.__module__\n + \".\"\n + admin_class.__name__\n + \".\"\n + view_func.__name__\n )\n\n # Seems to be a Tastypie Resource. 
Need to resort to some stack inspection\n # to find a better name since its decorators don't wrap very well\n if view_name == \"tastypie.resources.wrapper\":\n tastypie_name = _get_tastypie_operation_name(request, view_func)\n if tastypie_name is not None:\n return tastypie_name\n\n return \"Controller/\" + view_name\n\n\ndef _get_tastypie_operation_name(request, view_func):\n try:\n from tastypie.resources import Resource\n except ImportError:\n return None\n\n if sys.version_info[0] == 2: # pragma: no cover\n try:\n wrapper = view_func.__closure__[0].cell_contents\n except (AttributeError, IndexError):\n return None\n elif sys.version_info[0] == 3:\n try:\n wrapper = view_func.__wrapped__\n except AttributeError:\n return None\n\n if not hasattr(wrapper, \"__closure__\") or len(wrapper.__closure__) != 2:\n return None\n\n instance = wrapper.__closure__[0].cell_contents\n if not isinstance(instance, Resource): # pragma: no cover\n return None\n\n method_name = wrapper.__closure__[1].cell_contents\n if not isinstance(method_name, string_types): # pragma: no cover\n return None\n\n if method_name.startswith(\"dispatch_\"): # pragma: no cover\n method_name = request.method.lower() + method_name.split(\"dispatch\", 1)[1]\n\n return \"Controller/{}.{}.{}\".format(\n instance.__module__, instance.__class__.__name__, method_name\n )\n\n\ndef track_request_view_data(request, tracked_request):\n path = request.path\n tracked_request.tag(\n \"path\",\n create_filtered_path(\n path, [(k, v) for k, vs in request.GET.lists() for v in vs]\n ),\n )\n if ignore_path(path):\n tracked_request.tag(\"ignore_transaction\", True)\n\n try:\n # Determine a remote IP to associate with the request. The value is\n # spoofable by the requester so this is not suitable to use in any\n # security sensitive context.\n user_ip = (\n request.META.get(\"HTTP_X_FORWARDED_FOR\", \"\").split(\",\")[0]\n or request.META.get(\"HTTP_CLIENT_IP\", \"\").split(\",\")[0]\n or request.META.get(\"REMOTE_ADDR\", None)\n )\n tracked_request.tag(\"user_ip\", user_ip)\n except Exception:\n pass\n\n user = getattr(request, \"user\", None)\n if user is not None:\n try:\n tracked_request.tag(\"username\", user.get_username())\n except Exception:\n pass\n\n tracked_request.tag(\"urlconf\", get_urlconf(settings.ROOT_URLCONF))\n\n\nclass MiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n try:\n return self.get_response(request)\n finally:\n tracked_request.stop_span()\n\n\nclass ViewTimingMiddleware(object):\n \"\"\"\n Insert as deep into the middleware stack as possible, ideally wrapping no\n other middleware. 
Designed to time the View itself\n \"\"\"\n\n def __init__(self, get_response):\n self.get_response = get_response\n\n def __call__(self, request):\n \"\"\"\n Wrap a single incoming request with start and stop calls.\n This will start timing, but relies on the process_view callback to\n capture more details about what view was really called, and other\n similar info.\n\n If process_view isn't called, then the request will not\n be recorded. This can happen if a middleware further along the stack\n doesn't call onward, and instead returns a response directly.\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return self.get_response(request)\n\n tracked_request = TrackedRequest.instance()\n\n # This operation name won't be recorded unless changed later in\n # process_view\n tracked_request.start_span(operation=\"Unknown\", should_capture_backtrace=False)\n try:\n response = self.get_response(request)\n if 500 <= response.status_code <= 599:\n tracked_request.tag(\"error\", \"true\")\n return response\n finally:\n tracked_request.stop_span()\n\n def process_view(self, request, view_func, view_args, view_kwargs):\n \"\"\"\n Capture details about the view_func that is about to execute\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n tracked_request.is_real_request = True\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.current_span()\n if span is not None:\n span.operation = get_operation_name(request)\n\n def process_exception(self, request, exception):\n \"\"\"\n Mark this request as having errored out\n\n Does not modify or catch or otherwise change the exception thrown\n \"\"\"\n if not scout_config.value(\"monitor\"):\n return\n TrackedRequest.instance().tag(\"error\", \"true\")\n\n\nclass OldStyleMiddlewareTimingMiddleware(object):\n \"\"\"\n Insert as early into the Middleware stack as possible (outermost layers),\n so that other middlewares called after can be timed.\n \"\"\"\n\n def process_request(self, request):\n if not scout_config.value(\"monitor\"):\n return\n tracked_request = TrackedRequest.instance()\n request._scout_tracked_request = tracked_request\n\n queue_time = request.META.get(\"HTTP_X_QUEUE_START\") or request.META.get(\n \"HTTP_X_REQUEST_START\", \"\"\n )\n queue_time_tracked = track_request_queue_time(queue_time, tracked_request)\n if not queue_time_tracked:\n track_amazon_request_queue_time(\n request.META.get(\"HTTP_X_AMZN_TRACE_ID\", \"\"), tracked_request\n )\n\n tracked_request.start_span(\n operation=\"Middleware\", should_capture_backtrace=False\n )\n\n def process_response(self, request, response):\n # Only stop span if there's a request, but presume we are balanced,\n # i.e. 
that custom instrumentation within the application is not\n # causing errors\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if 500 <= response.status_code <= 599:\n tracked_request.tag(\"error\", \"true\")\n if tracked_request is not None:\n tracked_request.stop_span()\n return response\n\n\nclass OldStyleViewMiddleware(object):\n def process_view(self, request, view_func, view_func_args, view_func_kwargs):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.is_real_request = True\n\n track_request_view_data(request, tracked_request)\n\n span = tracked_request.start_span(\n operation=get_operation_name(request), should_capture_backtrace=False\n )\n # Save the span into the request, so we can check\n # if we're matched up when stopping\n request._scout_view_span = span\n\n def process_response(self, request, response):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return response\n\n # Only stop span if we started, but presume we are balanced, i.e. that\n # custom instrumentation within the application is not causing errors\n span = getattr(request, \"_scout_view_span\", None)\n if span is not None:\n tracked_request.stop_span()\n return response\n\n def process_exception(self, request, exception):\n tracked_request = getattr(request, \"_scout_tracked_request\", None)\n if tracked_request is None:\n # Looks like OldStyleMiddlewareTimingMiddleware didn't run, so\n # don't do anything\n return\n\n tracked_request.tag(\"error\", \"true\")\n", "path": "src/scout_apm/django/middleware.py"}]} | 3,498 | 393 |
gh_patches_debug_35482 | rasdani/github-patches | git_diff | ephios-dev__ephios-855 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticI18N catalog not loaded on some views
The shift edit view tries to load `/de.js`, while other views like the instance settings correctly load `/static/jsi18n/de/djangojs.js`, despite both pages using the same `<script type="text/javascript" src="{% statici18n LANGUAGE_CODE %}"></script>` in base.html.
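One way to narrow this down (a debugging sketch, not part of the original report; the form instance and arguments are placeholders) is to render the media of the forms used on the affected view, since widget/form media can inject additional `<script>` tags beyond the `statici18n` one in base.html:

```python
# In a Django shell: inspect which JS/CSS a form pulls in via its widgets.
form = ResourceAllocationForm(shift=some_shift)  # placeholder arguments
print(form.media)  # renders the <script>/<link> tags the widgets contribute
```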
</issue>
<code>
[start of ephios/plugins/simpleresource/models.py]
1 from django.db import models
2
3 from ephios.core.models import Shift
4
5
6 class ResourceCategory(models.Model):
7 name = models.CharField(max_length=50)
8
9 def __str__(self):
10 # pylint: disable=invalid-str-returned
11 return self.name
12
13
14 class Resource(models.Model):
15 title = models.CharField(max_length=100)
16 category = models.ForeignKey(ResourceCategory, on_delete=models.CASCADE)
17
18 def __str__(self):
19 # pylint: disable=invalid-str-returned
20 return self.title
21
22
23 class ResourceAllocation(models.Model):
24 shift = models.ForeignKey(Shift, on_delete=models.CASCADE)
25 resources = models.ManyToManyField(Resource, blank=True)
26
27 def __str__(self):
28 return f"Resource allocation for {self.shift}"
29
[end of ephios/plugins/simpleresource/models.py]
[start of ephios/plugins/simpleresource/forms.py]
1 from django.forms import BaseModelFormSet, BooleanField, ModelForm, modelformset_factory
2 from django.forms.formsets import DELETION_FIELD_NAME
3 from django.utils.translation import gettext as _
4 from django_select2.forms import Select2MultipleWidget
5
6 from ephios.core.forms.events import BasePluginFormMixin
7 from ephios.plugins.simpleresource.models import ResourceAllocation, ResourceCategory
8
9
10 class ResourceAllocationForm(BasePluginFormMixin, ModelForm):
11 class Meta:
12 model = ResourceAllocation
13 fields = ["resources"]
14 widgets = {
15 "resources": Select2MultipleWidget,
16 }
17
18 def __init__(self, *args, shift, **kwargs):
19 self.shift = shift
20 try:
21 kwargs.setdefault("instance", ResourceAllocation.objects.get(shift=shift))
22 except ResourceAllocation.DoesNotExist:
23 pass
24 super().__init__(*args, **kwargs)
25
26 def save(self, commit=True):
27 if self.cleaned_data.get("resources"):
28 self.instance.shift = self.shift
29 super().save(commit)
30 elif self.instance.pk:
31 self.instance.delete()
32
33 @property
34 def heading(self):
35 return _("Resource allocation")
36
37 def is_function_active(self):
38 return bool(self.instance.resources.exists())
39
40
41 class BaseResourceCategoryFormset(BaseModelFormSet):
42 def add_fields(self, form, index):
43 super().add_fields(form, index)
44 initial_form_count = self.initial_form_count()
45 if self.can_delete and (self.can_delete_extra or index < initial_form_count):
46 category: ResourceCategory = form.instance
47 form.fields[DELETION_FIELD_NAME] = BooleanField(
48 label=_("Delete"),
49 required=False,
50 disabled=category.pk and category.resource_set.exists(),
51 )
52
53
54 ResourceCategoryFormset = modelformset_factory(
55 ResourceCategory,
56 formset=BaseResourceCategoryFormset,
57 can_delete=True,
58 extra=0,
59 fields=["name"],
60 )
61
[end of ephios/plugins/simpleresource/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ephios/plugins/simpleresource/forms.py b/ephios/plugins/simpleresource/forms.py
--- a/ephios/plugins/simpleresource/forms.py
+++ b/ephios/plugins/simpleresource/forms.py
@@ -1,3 +1,4 @@
+from crispy_forms.helper import FormHelper
from django.forms import BaseModelFormSet, BooleanField, ModelForm, modelformset_factory
from django.forms.formsets import DELETION_FIELD_NAME
from django.utils.translation import gettext as _
@@ -17,6 +18,8 @@
def __init__(self, *args, shift, **kwargs):
self.shift = shift
+ self.helper = FormHelper()
+ self.helper.include_media = False
try:
kwargs.setdefault("instance", ResourceAllocation.objects.get(shift=shift))
except ResourceAllocation.DoesNotExist:
diff --git a/ephios/plugins/simpleresource/models.py b/ephios/plugins/simpleresource/models.py
--- a/ephios/plugins/simpleresource/models.py
+++ b/ephios/plugins/simpleresource/models.py
@@ -1,10 +1,11 @@
from django.db import models
+from django.utils.translation import gettext_lazy as _
from ephios.core.models import Shift
class ResourceCategory(models.Model):
- name = models.CharField(max_length=50)
+ name = models.CharField(max_length=50, verbose_name=_("Name"))
def __str__(self):
# pylint: disable=invalid-str-returned
@@ -12,8 +13,10 @@
class Resource(models.Model):
- title = models.CharField(max_length=100)
- category = models.ForeignKey(ResourceCategory, on_delete=models.CASCADE)
+ title = models.CharField(max_length=100, verbose_name=_("Title"))
+ category = models.ForeignKey(
+ ResourceCategory, on_delete=models.CASCADE, verbose_name=_("Category")
+ )
def __str__(self):
# pylint: disable=invalid-str-returned
@@ -22,7 +25,7 @@
class ResourceAllocation(models.Model):
shift = models.ForeignKey(Shift, on_delete=models.CASCADE)
- resources = models.ManyToManyField(Resource, blank=True)
+ resources = models.ManyToManyField(Resource, blank=True, verbose_name=_("Resources"))
def __str__(self):
return f"Resource allocation for {self.shift}"
| {"golden_diff": "diff --git a/ephios/plugins/simpleresource/forms.py b/ephios/plugins/simpleresource/forms.py\n--- a/ephios/plugins/simpleresource/forms.py\n+++ b/ephios/plugins/simpleresource/forms.py\n@@ -1,3 +1,4 @@\n+from crispy_forms.helper import FormHelper\n from django.forms import BaseModelFormSet, BooleanField, ModelForm, modelformset_factory\n from django.forms.formsets import DELETION_FIELD_NAME\n from django.utils.translation import gettext as _\n@@ -17,6 +18,8 @@\n \n def __init__(self, *args, shift, **kwargs):\n self.shift = shift\n+ self.helper = FormHelper()\n+ self.helper.include_media = False\n try:\n kwargs.setdefault(\"instance\", ResourceAllocation.objects.get(shift=shift))\n except ResourceAllocation.DoesNotExist:\ndiff --git a/ephios/plugins/simpleresource/models.py b/ephios/plugins/simpleresource/models.py\n--- a/ephios/plugins/simpleresource/models.py\n+++ b/ephios/plugins/simpleresource/models.py\n@@ -1,10 +1,11 @@\n from django.db import models\n+from django.utils.translation import gettext_lazy as _\n \n from ephios.core.models import Shift\n \n \n class ResourceCategory(models.Model):\n- name = models.CharField(max_length=50)\n+ name = models.CharField(max_length=50, verbose_name=_(\"Name\"))\n \n def __str__(self):\n # pylint: disable=invalid-str-returned\n@@ -12,8 +13,10 @@\n \n \n class Resource(models.Model):\n- title = models.CharField(max_length=100)\n- category = models.ForeignKey(ResourceCategory, on_delete=models.CASCADE)\n+ title = models.CharField(max_length=100, verbose_name=_(\"Title\"))\n+ category = models.ForeignKey(\n+ ResourceCategory, on_delete=models.CASCADE, verbose_name=_(\"Category\")\n+ )\n \n def __str__(self):\n # pylint: disable=invalid-str-returned\n@@ -22,7 +25,7 @@\n \n class ResourceAllocation(models.Model):\n shift = models.ForeignKey(Shift, on_delete=models.CASCADE)\n- resources = models.ManyToManyField(Resource, blank=True)\n+ resources = models.ManyToManyField(Resource, blank=True, verbose_name=_(\"Resources\"))\n \n def __str__(self):\n return f\"Resource allocation for {self.shift}\"\n", "issue": "StaticI18N catalog not loaded on some views\nThe shift edit views tries to load `/de.js` while other views like the instance settings are loading `/static/jsi18n/de/djangojs.js` correctly, despite originating from the same `<script type=\"text/javascript\" src=\"{% statici18n LANGUAGE_CODE %}\"></script>` in base.html\n", "before_files": [{"content": "from django.db import models\n\nfrom ephios.core.models import Shift\n\n\nclass ResourceCategory(models.Model):\n name = models.CharField(max_length=50)\n\n def __str__(self):\n # pylint: disable=invalid-str-returned\n return self.name\n\n\nclass Resource(models.Model):\n title = models.CharField(max_length=100)\n category = models.ForeignKey(ResourceCategory, on_delete=models.CASCADE)\n\n def __str__(self):\n # pylint: disable=invalid-str-returned\n return self.title\n\n\nclass ResourceAllocation(models.Model):\n shift = models.ForeignKey(Shift, on_delete=models.CASCADE)\n resources = models.ManyToManyField(Resource, blank=True)\n\n def __str__(self):\n return f\"Resource allocation for {self.shift}\"\n", "path": "ephios/plugins/simpleresource/models.py"}, {"content": "from django.forms import BaseModelFormSet, BooleanField, ModelForm, modelformset_factory\nfrom django.forms.formsets import DELETION_FIELD_NAME\nfrom django.utils.translation import gettext as _\nfrom django_select2.forms import Select2MultipleWidget\n\nfrom ephios.core.forms.events import BasePluginFormMixin\nfrom 
ephios.plugins.simpleresource.models import ResourceAllocation, ResourceCategory\n\n\nclass ResourceAllocationForm(BasePluginFormMixin, ModelForm):\n class Meta:\n model = ResourceAllocation\n fields = [\"resources\"]\n widgets = {\n \"resources\": Select2MultipleWidget,\n }\n\n def __init__(self, *args, shift, **kwargs):\n self.shift = shift\n try:\n kwargs.setdefault(\"instance\", ResourceAllocation.objects.get(shift=shift))\n except ResourceAllocation.DoesNotExist:\n pass\n super().__init__(*args, **kwargs)\n\n def save(self, commit=True):\n if self.cleaned_data.get(\"resources\"):\n self.instance.shift = self.shift\n super().save(commit)\n elif self.instance.pk:\n self.instance.delete()\n\n @property\n def heading(self):\n return _(\"Resource allocation\")\n\n def is_function_active(self):\n return bool(self.instance.resources.exists())\n\n\nclass BaseResourceCategoryFormset(BaseModelFormSet):\n def add_fields(self, form, index):\n super().add_fields(form, index)\n initial_form_count = self.initial_form_count()\n if self.can_delete and (self.can_delete_extra or index < initial_form_count):\n category: ResourceCategory = form.instance\n form.fields[DELETION_FIELD_NAME] = BooleanField(\n label=_(\"Delete\"),\n required=False,\n disabled=category.pk and category.resource_set.exists(),\n )\n\n\nResourceCategoryFormset = modelformset_factory(\n ResourceCategory,\n formset=BaseResourceCategoryFormset,\n can_delete=True,\n extra=0,\n fields=[\"name\"],\n)\n", "path": "ephios/plugins/simpleresource/forms.py"}]} | 1,385 | 516 |
gh_patches_debug_20464 | rasdani/github-patches | git_diff | elastic__apm-agent-python-578 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__init__() got an unexpected keyword argument 'assert_hostname'
After upgrading to version 5.1.1 (from the previously used 4.2.1), the Flask app fails with the error below:
```shell
Traceback (most recent call last):
File "\elasticapm\transport\http.py", line 79, in send
"POST", url, body=data, headers=self._headers, timeout=self._timeout, preload_content=False
File "\urllib3\poolmanager.py", line 324, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "\elasticapm\instrumentation\packages\base.py", line 136, in call_if_sampling
return wrapped(*args, **kwargs)
File "\elasticapm\instrumentation\packages\base.py", line 136, in call_if_sampling
return wrapped(*args, **kwargs)
File "\urllib3\connectionpool.py", line 588, in urlopen
conn = self._get_conn(timeout=pool_timeout)
File "\urllib3\connectionpool.py", line 248, in _get_conn
return conn or self._new_conn()
File "\urllib3\connectionpool.py", line 209, in _new_conn
strict=self.strict, **self.conn_kw)
File "\urllib3\connection.py", line 114, in __init__
_HTTPConnection.__init__(self, *args, **kw)
TypeError: __init__() got an unexpected keyword argument 'assert_hostname'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "\elasticapm\transport\base.py", line 208, in _flush
self.send(data)
File "\elasticapm\transport\http.py", line 92, in send
raise TransportException(message, data, print_trace=print_trace)
elasticapm.transport.base.TransportException: Unable to reach APM Server: __init__() got an unexpected keyword argument 'assert_hostname' (url: http://<apm-server url>:8200/intake/v2/events)
```
The NO_PROXY env variable is used.
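For context, the traceback shows urllib3's plain `HTTPConnection` rejecting `assert_hostname`, which is an HTTPS-only option. A minimal sketch that appears to reproduce the same `TypeError` with the pinned urllib3 1.24.1 (the server URL is just a placeholder):

```python
import urllib3

# Roughly the kwargs the agent builds when VERIFY_SERVER_CERT is False.
pool_kwargs = {"cert_reqs": "CERT_NONE", "assert_hostname": False}
http = urllib3.PoolManager(**pool_kwargs)

# For an https:// URL these are consumed by the HTTPS pool, but for a plain
# http:// APM Server URL `assert_hostname` is forwarded to HTTPConnection,
# raising the TypeError shown in the traceback above.
http.urlopen("POST", "http://apm-server.example:8200/intake/v2/events")
```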
**To Reproduce**
1. apm.init_app()
2. The app then runs into the `TransportException` shown above
**Expected behavior**:
The agent should connect to the apm-server host and submit the APM data.
**Environment (please complete the following information)**
- OS: Windows/Linux (Conda environment)
- Python version: 3.6.8
- Framework and version: Flask 1.1.1
- APM Server version: 7.3.0
- Agent version: 5.1.1
**Additional context**
- Agent config options
ELASTIC_APM = {
'SERVICE_NAME': '<name>',
'SERVER_URL': 'http://%s:8200' % ELASTIC_HOST,
'SECRET_TOKEN': '',
'DEBUG': True,
'ENVIRONMENT': 'development',
'VERIFY_SERVER_CERT': False,
'SERVICE_VERSION': app_version
}
- `requirements.txt`:
aniso8601==6.0.0
apispec==2.0.2
asn1crypto==0.24.0
atomicwrites==1.3.0
attrs==19.1.0
blinker==1.4
blueprint==3.4.2
certifi==2019.3.9
cffi==1.12.2
chardet==3.0.4
Click==7.0
colorama==0.4.1
cryptography==2.6.1
ecdsa==0.13
elastic-apm==4.2.1
elasticsearch==7.0.2
Flask==1.1.1
flask-apispec==0.8.1
Flask-JWT==0.3.2
Flask-PyMongo==2.2.0
flask-restplus==0.12.1
future==0.17.1
hvac==0.7.2
idna==2.8
itsdangerous==1.1.0
Jinja2==2.10.1
jsonschema==3.0.1
MarkupSafe==1.1.1
marshmallow==2.19.2
more-itertools==7.0.0
pluggy==0.9.0
psutil==5.6.1
py==1.8.0
pyasn1==0.4.5
pycparser==2.19
PyJWT==1.4.2
pymongo==3.7.2
pyOpenSSL==19.0.0
pyrsistent==0.14.11
pytest==4.4.0
pytest-flask==0.14.0
pytest-mock==1.10.3
python-dateutil==2.8.0
python3-logstash==0.4.80
pytz==2018.9
PyYAML==5.1.1
requests==2.21.0
rsa==4.0
six==1.12.0
urllib3==1.24.1
webargs==5.2.0
Werkzeug==0.15.1
wincertstore==0.2
</issue>
<code>
[start of elasticapm/transport/http.py]
1 # -*- coding: utf-8 -*-
2
3 # BSD 3-Clause License
4 #
5 # Copyright (c) 2019, Elasticsearch BV
6 # All rights reserved.
7 #
8 # Redistribution and use in source and binary forms, with or without
9 # modification, are permitted provided that the following conditions are met:
10 #
11 # * Redistributions of source code must retain the above copyright notice, this
12 # list of conditions and the following disclaimer.
13 #
14 # * Redistributions in binary form must reproduce the above copyright notice,
15 # this list of conditions and the following disclaimer in the documentation
16 # and/or other materials provided with the distribution.
17 #
18 # * Neither the name of the copyright holder nor the names of its
19 # contributors may be used to endorse or promote products derived from
20 # this software without specific prior written permission.
21 #
22 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
26 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 import hashlib
34 import logging
35 import re
36 import ssl
37
38 import certifi
39 import urllib3
40 from urllib3.exceptions import MaxRetryError, TimeoutError
41
42 from elasticapm.transport.base import TransportException
43 from elasticapm.transport.http_base import AsyncHTTPTransportBase, HTTPTransportBase
44 from elasticapm.utils import compat, json_encoder, read_pem_file
45
46 logger = logging.getLogger("elasticapm.transport.http")
47
48
49 class Transport(HTTPTransportBase):
50 def __init__(self, url, **kwargs):
51 super(Transport, self).__init__(url, **kwargs)
52 url_parts = compat.urlparse.urlparse(url)
53 pool_kwargs = {"cert_reqs": "CERT_REQUIRED", "ca_certs": certifi.where(), "block": True}
54 if self._server_cert:
55 pool_kwargs.update(
56 {"assert_fingerprint": self.cert_fingerprint, "assert_hostname": False, "cert_reqs": ssl.CERT_NONE}
57 )
58 del pool_kwargs["ca_certs"]
59 elif not self._verify_server_cert:
60 pool_kwargs["cert_reqs"] = ssl.CERT_NONE
61 pool_kwargs["assert_hostname"] = False
62 proxies = compat.getproxies_environment()
63 proxy_url = proxies.get("https", proxies.get("http", None))
64 if proxy_url and not compat.proxy_bypass_environment(url_parts.netloc):
65 self.http = urllib3.ProxyManager(proxy_url, **pool_kwargs)
66 else:
67 self.http = urllib3.PoolManager(**pool_kwargs)
68
69 def send(self, data):
70 response = None
71
72 if compat.PY2 and isinstance(self._url, compat.text_type):
73 url = self._url.encode("utf-8")
74 else:
75 url = self._url
76 try:
77 try:
78 response = self.http.urlopen(
79 "POST", url, body=data, headers=self._headers, timeout=self._timeout, preload_content=False
80 )
81 logger.debug("Sent request, url=%s size=%.2fkb status=%s", url, len(data) / 1024.0, response.status)
82 except Exception as e:
83 print_trace = True
84 if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):
85 message = "Connection to APM Server timed out " "(url: %s, timeout: %s seconds)" % (
86 self._url,
87 self._timeout,
88 )
89 print_trace = False
90 else:
91 message = "Unable to reach APM Server: %s (url: %s)" % (e, self._url)
92 raise TransportException(message, data, print_trace=print_trace)
93 body = response.read()
94 if response.status >= 400:
95 if response.status == 429: # rate-limited
96 message = "Temporarily rate limited: "
97 print_trace = False
98 else:
99 message = "HTTP %s: " % response.status
100 print_trace = True
101 message += body.decode("utf8", errors="replace")
102 raise TransportException(message, data, print_trace=print_trace)
103 return response.getheader("Location")
104 finally:
105 if response:
106 response.close()
107
108 def get_config(self, current_version=None, keys=None):
109 """
110 Gets configuration from a remote APM Server
111
112 :param current_version: version of the current configuration
113 :param keys: a JSON-serializable dict to identify this instance, e.g.
114 {
115 "service": {
116 "name": "foo",
117 "environment": "bar"
118 }
119 }
120 :return: a three-tuple of new version, config dictionary and validity in seconds.
121 Any element of the tuple can be None.
122 """
123 url = self._config_url
124 data = json_encoder.dumps(keys).encode("utf-8")
125 headers = self._headers.copy()
126 max_age = 300
127 if current_version:
128 headers["If-None-Match"] = current_version
129 try:
130 response = self.http.urlopen(
131 "POST", url, body=data, headers=headers, timeout=self._timeout, preload_content=False
132 )
133 except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:
134 logger.debug("HTTP error while fetching remote config: %s", compat.text_type(e))
135 return current_version, None, max_age
136 body = response.read()
137 if "Cache-Control" in response.headers:
138 try:
139 max_age = int(next(re.finditer(r"max-age=(\d+)", response.headers["Cache-Control"])).groups()[0])
140 except StopIteration:
141 logger.debug("Could not parse Cache-Control header: %s", response.headers["Cache-Control"])
142 if response.status == 304:
143 # config is unchanged, return
144 logger.debug("Configuration unchanged")
145 return current_version, None, max_age
146 elif response.status >= 400:
147 return None, None, max_age
148
149 if not body:
150 logger.debug("APM Server answered with empty body and status code %s", response.status)
151 return current_version, None, max_age
152
153 return response.headers.get("Etag"), json_encoder.loads(body.decode("utf-8")), max_age
154
155 @property
156 def cert_fingerprint(self):
157 if self._server_cert:
158 with open(self._server_cert, "rb") as f:
159 cert_data = read_pem_file(f)
160 digest = hashlib.sha256()
161 digest.update(cert_data)
162 return digest.hexdigest()
163 return None
164
165
166 class AsyncTransport(AsyncHTTPTransportBase, Transport):
167 async_mode = True
168 sync_transport = Transport
169
[end of elasticapm/transport/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py
--- a/elasticapm/transport/http.py
+++ b/elasticapm/transport/http.py
@@ -51,12 +51,12 @@
super(Transport, self).__init__(url, **kwargs)
url_parts = compat.urlparse.urlparse(url)
pool_kwargs = {"cert_reqs": "CERT_REQUIRED", "ca_certs": certifi.where(), "block": True}
- if self._server_cert:
+ if self._server_cert and url_parts.scheme != "http":
pool_kwargs.update(
{"assert_fingerprint": self.cert_fingerprint, "assert_hostname": False, "cert_reqs": ssl.CERT_NONE}
)
del pool_kwargs["ca_certs"]
- elif not self._verify_server_cert:
+ elif not self._verify_server_cert and url_parts.scheme != "http":
pool_kwargs["cert_reqs"] = ssl.CERT_NONE
pool_kwargs["assert_hostname"] = False
proxies = compat.getproxies_environment()
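Read together with the traceback in the issue, the change is straightforward: the TLS-specific pool options are now applied only when the configured URL is not plain `http`, so they can never reach the non-TLS `HTTPConnection` that rejects them. A rough sketch of the patched branch (paraphrasing the diff above, not a verbatim copy of the file):

```python
# Inside Transport.__init__, after parsing the server URL (sketch of the patched logic).
if self._server_cert and url_parts.scheme != "http":
    # Certificate pinning only makes sense over TLS.
    pool_kwargs.update({
        "assert_fingerprint": self.cert_fingerprint,
        "assert_hostname": False,
        "cert_reqs": ssl.CERT_NONE,
    })
    del pool_kwargs["ca_certs"]
elif not self._verify_server_cert and url_parts.scheme != "http":
    # Disabling verification is likewise a TLS-only concern.
    pool_kwargs["cert_reqs"] = ssl.CERT_NONE
    pool_kwargs["assert_hostname"] = False
```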
| {"golden_diff": "diff --git a/elasticapm/transport/http.py b/elasticapm/transport/http.py\n--- a/elasticapm/transport/http.py\n+++ b/elasticapm/transport/http.py\n@@ -51,12 +51,12 @@\n super(Transport, self).__init__(url, **kwargs)\n url_parts = compat.urlparse.urlparse(url)\n pool_kwargs = {\"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": certifi.where(), \"block\": True}\n- if self._server_cert:\n+ if self._server_cert and url_parts.scheme != \"http\":\n pool_kwargs.update(\n {\"assert_fingerprint\": self.cert_fingerprint, \"assert_hostname\": False, \"cert_reqs\": ssl.CERT_NONE}\n )\n del pool_kwargs[\"ca_certs\"]\n- elif not self._verify_server_cert:\n+ elif not self._verify_server_cert and url_parts.scheme != \"http\":\n pool_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n pool_kwargs[\"assert_hostname\"] = False\n proxies = compat.getproxies_environment()\n", "issue": "__init__() got an unexpected keyword argument 'assert_hostname'\nAfter upgrading to Version 5.1.1 (recent version 4.2.1) the flask app running on below error:\r\n```shell\r\nTraceback (most recent call last):\r\n File \"\\elasticapm\\transport\\http.py\", line 79, in send\r\n \"POST\", url, body=data, headers=self._headers, timeout=self._timeout, preload_content=False\r\n File \"\\urllib3\\poolmanager.py\", line 324, in urlopen\r\n response = conn.urlopen(method, u.request_uri, **kw)\r\n File \"\\elasticapm\\instrumentation\\packages\\base.py\", line 136, in call_if_sampling\r\n return wrapped(*args, **kwargs)\r\n File \"\\elasticapm\\instrumentation\\packages\\base.py\", line 136, in call_if_sampling\r\n return wrapped(*args, **kwargs)\r\n File \"\\urllib3\\connectionpool.py\", line 588, in urlopen\r\n conn = self._get_conn(timeout=pool_timeout)\r\n File \"\\urllib3\\connectionpool.py\", line 248, in _get_conn\r\n return conn or self._new_conn()\r\n File \"\\urllib3\\connectionpool.py\", line 209, in _new_conn\r\n strict=self.strict, **self.conn_kw)\r\n File \"\\urllib3\\connection.py\", line 114, in __init__\r\n _HTTPConnection.__init__(self, *args, **kw)\r\nTypeError: __init__() got an unexpected keyword argument 'assert_hostname'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"\\elasticapm\\transport\\base.py\", line 208, in _flush\r\n self.send(data)\r\n File \"\\elasticapm\\transport\\http.py\", line 92, in send\r\n raise TransportException(message, data, print_trace=print_trace)\r\nelasticapm.transport.base.TransportException: Unable to reach APM Server: __init__() got an unexpected keyword argument 'assert_hostname' (url: http://<apm-server url>:8200/intake/v2/events)\r\n```\r\n\r\nThe NO_PROXY env variable is used.\r\n\r\n\r\n**To Reproduce**\r\n\r\n1. apm.init_app()\r\n2. 
Running on TransportException\r\n\r\n**Expected behavior**: \r\nConnect to apm-server host and add apm data.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Windows/Linux (Conda environment)\r\n- Python version: 3.6.8\r\n- Framework and version: Flask 1.1.1\r\n- APM Server version: 7.3.0\r\n- Agent version: 5.1.1\r\n\r\n\r\n**Additional context**\r\n\r\nAdd any other context about the problem here.\r\n\r\n- Agent config options \r\n ELASTIC_APM = {\r\n 'SERVICE_NAME': '<name>',\r\n 'SERVER_URL': 'http://%s:8200' % ELASTIC_HOST,\r\n 'SECRET_TOKEN': '',\r\n 'DEBUG': True,\r\n 'ENVIRONMENT': 'development',\r\n 'VERIFY_SERVER_CERT': False,\r\n 'SERVICE_VERSION': app_version\r\n }\r\n- `requirements.txt`:\r\naniso8601==6.0.0\r\napispec==2.0.2\r\nasn1crypto==0.24.0\r\natomicwrites==1.3.0\r\nattrs==19.1.0\r\nblinker==1.4\r\nblueprint==3.4.2\r\ncertifi==2019.3.9\r\ncffi==1.12.2\r\nchardet==3.0.4\r\nClick==7.0\r\ncolorama==0.4.1\r\ncryptography==2.6.1\r\necdsa==0.13\r\nelastic-apm==4.2.1\r\nelasticsearch==7.0.2\r\nFlask==1.1.1\r\nflask-apispec==0.8.1\r\nFlask-JWT==0.3.2\r\nFlask-PyMongo==2.2.0\r\nflask-restplus==0.12.1\r\nfuture==0.17.1\r\nhvac==0.7.2\r\nidna==2.8\r\nitsdangerous==1.1.0\r\nJinja2==2.10.1\r\njsonschema==3.0.1\r\nMarkupSafe==1.1.1\r\nmarshmallow==2.19.2\r\nmore-itertools==7.0.0\r\npluggy==0.9.0\r\npsutil==5.6.1\r\npy==1.8.0\r\npyasn1==0.4.5\r\npycparser==2.19\r\nPyJWT==1.4.2\r\npymongo==3.7.2\r\npyOpenSSL==19.0.0\r\npyrsistent==0.14.11\r\npytest==4.4.0\r\npytest-flask==0.14.0\r\npytest-mock==1.10.3\r\npython-dateutil==2.8.0\r\npython3-logstash==0.4.80\r\npytz==2018.9\r\nPyYAML==5.1.1\r\nrequests==2.21.0\r\nrsa==4.0\r\nsix==1.12.0\r\nurllib3==1.24.1\r\nwebargs==5.2.0\r\nWerkzeug==0.15.1\r\nwincertstore==0.2\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport hashlib\nimport logging\nimport re\nimport ssl\n\nimport certifi\nimport urllib3\nfrom urllib3.exceptions import MaxRetryError, TimeoutError\n\nfrom elasticapm.transport.base import TransportException\nfrom elasticapm.transport.http_base import AsyncHTTPTransportBase, HTTPTransportBase\nfrom elasticapm.utils import compat, json_encoder, read_pem_file\n\nlogger = logging.getLogger(\"elasticapm.transport.http\")\n\n\nclass Transport(HTTPTransportBase):\n def __init__(self, url, **kwargs):\n super(Transport, self).__init__(url, **kwargs)\n url_parts = compat.urlparse.urlparse(url)\n pool_kwargs = {\"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": certifi.where(), \"block\": True}\n if self._server_cert:\n pool_kwargs.update(\n {\"assert_fingerprint\": self.cert_fingerprint, \"assert_hostname\": False, \"cert_reqs\": ssl.CERT_NONE}\n )\n del pool_kwargs[\"ca_certs\"]\n elif not self._verify_server_cert:\n pool_kwargs[\"cert_reqs\"] = ssl.CERT_NONE\n pool_kwargs[\"assert_hostname\"] = False\n proxies = compat.getproxies_environment()\n proxy_url = proxies.get(\"https\", proxies.get(\"http\", None))\n if proxy_url and not compat.proxy_bypass_environment(url_parts.netloc):\n self.http = urllib3.ProxyManager(proxy_url, **pool_kwargs)\n else:\n self.http = urllib3.PoolManager(**pool_kwargs)\n\n def send(self, data):\n response = None\n\n if compat.PY2 and isinstance(self._url, compat.text_type):\n url = self._url.encode(\"utf-8\")\n else:\n url = self._url\n try:\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=self._headers, timeout=self._timeout, preload_content=False\n )\n logger.debug(\"Sent request, url=%s size=%.2fkb status=%s\", url, len(data) / 1024.0, response.status)\n except Exception as e:\n print_trace = True\n if isinstance(e, MaxRetryError) and isinstance(e.reason, TimeoutError):\n message = \"Connection to APM Server timed out \" \"(url: %s, timeout: %s seconds)\" % (\n self._url,\n self._timeout,\n )\n print_trace = False\n else:\n message = \"Unable to reach APM Server: %s (url: %s)\" % (e, self._url)\n raise TransportException(message, data, print_trace=print_trace)\n body = response.read()\n if response.status >= 400:\n if response.status == 429: # rate-limited\n message = \"Temporarily rate limited: \"\n print_trace = False\n else:\n message = \"HTTP %s: \" % response.status\n print_trace = True\n message += body.decode(\"utf8\", errors=\"replace\")\n raise TransportException(message, data, print_trace=print_trace)\n return response.getheader(\"Location\")\n finally:\n if response:\n response.close()\n\n def get_config(self, current_version=None, keys=None):\n \"\"\"\n Gets configuration from a remote APM Server\n\n :param current_version: version of the current configuration\n :param keys: a JSON-serializable dict to identify this instance, e.g.\n {\n \"service\": {\n \"name\": \"foo\",\n \"environment\": \"bar\"\n }\n }\n :return: a three-tuple of new version, config dictionary and validity in seconds.\n Any element of 
the tuple can be None.\n \"\"\"\n url = self._config_url\n data = json_encoder.dumps(keys).encode(\"utf-8\")\n headers = self._headers.copy()\n max_age = 300\n if current_version:\n headers[\"If-None-Match\"] = current_version\n try:\n response = self.http.urlopen(\n \"POST\", url, body=data, headers=headers, timeout=self._timeout, preload_content=False\n )\n except (urllib3.exceptions.RequestError, urllib3.exceptions.HTTPError) as e:\n logger.debug(\"HTTP error while fetching remote config: %s\", compat.text_type(e))\n return current_version, None, max_age\n body = response.read()\n if \"Cache-Control\" in response.headers:\n try:\n max_age = int(next(re.finditer(r\"max-age=(\\d+)\", response.headers[\"Cache-Control\"])).groups()[0])\n except StopIteration:\n logger.debug(\"Could not parse Cache-Control header: %s\", response.headers[\"Cache-Control\"])\n if response.status == 304:\n # config is unchanged, return\n logger.debug(\"Configuration unchanged\")\n return current_version, None, max_age\n elif response.status >= 400:\n return None, None, max_age\n\n if not body:\n logger.debug(\"APM Server answered with empty body and status code %s\", response.status)\n return current_version, None, max_age\n\n return response.headers.get(\"Etag\"), json_encoder.loads(body.decode(\"utf-8\")), max_age\n\n @property\n def cert_fingerprint(self):\n if self._server_cert:\n with open(self._server_cert, \"rb\") as f:\n cert_data = read_pem_file(f)\n digest = hashlib.sha256()\n digest.update(cert_data)\n return digest.hexdigest()\n return None\n\n\nclass AsyncTransport(AsyncHTTPTransportBase, Transport):\n async_mode = True\n sync_transport = Transport\n", "path": "elasticapm/transport/http.py"}]} | 3,690 | 236 |
gh_patches_debug_53974 | rasdani/github-patches | git_diff | pre-commit__pre-commit-438 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cygwin python checking should happen after setup code
Regressed as part of #436
Before:
```
$ ./pre-commit/venv_cygwin_python/bin/pre-commit clean
An error has occurred: FatalError: git failed. Is it installed, and are you in a Git repository directory?
Check the log at ~/.pre-commit/pre-commit.log
```
Current master:
```
$ ./pre-commit/venv_cygwin_python/bin/pre-commit clean
An unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'rev-parse', '--show-toplevel')
Return code: 128
Expected return code: 0
Output: (none)
Errors:
fatal: Not a git repository (or any of the parent directories): .git
Check the log at ~/.pre-commit/pre-commit.log
```
</issue>
<code>
[start of pre_commit/main.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import sys
6
7 import pkg_resources
8
9 from pre_commit import color
10 from pre_commit import five
11 from pre_commit import git
12 from pre_commit.commands.autoupdate import autoupdate
13 from pre_commit.commands.clean import clean
14 from pre_commit.commands.install_uninstall import install
15 from pre_commit.commands.install_uninstall import uninstall
16 from pre_commit.commands.run import run
17 from pre_commit.error_handler import error_handler
18 from pre_commit.logging_handler import add_logging_handler
19 from pre_commit.runner import Runner
20
21
22 # https://github.com/pre-commit/pre-commit/issues/217
23 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
24 # to install packages to the wrong place. We don't want anything to deal with
25 # pyvenv
26 os.environ.pop('__PYVENV_LAUNCHER__', None)
27
28
29 def _add_color_option(parser):
30 parser.add_argument(
31 '--color', default='auto', type=color.use_color,
32 metavar='{' + ','.join(color.COLOR_CHOICES) + '}',
33 help='Whether to use color in output. Defaults to `%(default)s`.',
34 )
35
36
37 def main(argv=None):
38 argv = argv if argv is not None else sys.argv[1:]
39 argv = [five.to_text(arg) for arg in argv]
40 parser = argparse.ArgumentParser()
41
42 # http://stackoverflow.com/a/8521644/812183
43 parser.add_argument(
44 '-V', '--version',
45 action='version',
46 version='%(prog)s {}'.format(
47 pkg_resources.get_distribution('pre-commit').version
48 )
49 )
50
51 subparsers = parser.add_subparsers(dest='command')
52
53 install_parser = subparsers.add_parser(
54 'install', help='Install the pre-commit script.',
55 )
56 _add_color_option(install_parser)
57 install_parser.add_argument(
58 '-f', '--overwrite', action='store_true',
59 help='Overwrite existing hooks / remove migration mode.',
60 )
61 install_parser.add_argument(
62 '--install-hooks', action='store_true',
63 help=(
64 'Whether to install hook environments for all environments '
65 'in the config file.'
66 ),
67 )
68 install_parser.add_argument(
69 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
70 default='pre-commit',
71 )
72
73 uninstall_parser = subparsers.add_parser(
74 'uninstall', help='Uninstall the pre-commit script.',
75 )
76 _add_color_option(uninstall_parser)
77 uninstall_parser.add_argument(
78 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
79 default='pre-commit',
80 )
81
82 clean_parser = subparsers.add_parser(
83 'clean', help='Clean out pre-commit files.',
84 )
85 _add_color_option(clean_parser)
86
87 autoupdate_parser = subparsers.add_parser(
88 'autoupdate',
89 help="Auto-update pre-commit config to the latest repos' versions.",
90 )
91 _add_color_option(autoupdate_parser)
92
93 run_parser = subparsers.add_parser('run', help='Run hooks.')
94 _add_color_option(run_parser)
95 run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
96 run_parser.add_argument(
97 '--no-stash', default=False, action='store_true',
98 help='Use this option to prevent auto stashing of unstaged files.',
99 )
100 run_parser.add_argument(
101 '--verbose', '-v', action='store_true', default=False,
102 )
103 run_parser.add_argument(
104 '--origin', '-o',
105 help="The origin branch's commit_id when using `git push`.",
106 )
107 run_parser.add_argument(
108 '--source', '-s',
109 help="The remote branch's commit_id when using `git push`.",
110 )
111 run_parser.add_argument(
112 '--allow-unstaged-config', default=False, action='store_true',
113 help=(
114 'Allow an unstaged config to be present. Note that this will '
115 'be stashed before parsing unless --no-stash is specified.'
116 ),
117 )
118 run_parser.add_argument(
119 '--hook-stage', choices=('commit', 'push'), default='commit',
120 help='The stage during which the hook is fired e.g. commit or push.',
121 )
122 run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
123 run_mutex_group.add_argument(
124 '--all-files', '-a', action='store_true', default=False,
125 help='Run on all the files in the repo. Implies --no-stash.',
126 )
127 run_mutex_group.add_argument(
128 '--files', nargs='*', default=[],
129 help='Specific filenames to run hooks on.',
130 )
131
132 help = subparsers.add_parser(
133 'help', help='Show help for a specific command.',
134 )
135 help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
136
137 # Argparse doesn't really provide a way to use a `default` subparser
138 if len(argv) == 0:
139 argv = ['run']
140 args = parser.parse_args(argv)
141 if args.command == 'run':
142 args.files = [
143 os.path.relpath(os.path.abspath(filename), git.get_root())
144 for filename in args.files
145 ]
146
147 if args.command == 'help':
148 if args.help_cmd:
149 parser.parse_args([args.help_cmd, '--help'])
150 else:
151 parser.parse_args(['--help'])
152
153 with error_handler():
154 add_logging_handler(args.color)
155 git.check_for_cygwin_mismatch()
156 runner = Runner.create()
157
158 if args.command == 'install':
159 return install(
160 runner, overwrite=args.overwrite, hooks=args.install_hooks,
161 hook_type=args.hook_type,
162 )
163 elif args.command == 'uninstall':
164 return uninstall(runner, hook_type=args.hook_type)
165 elif args.command == 'clean':
166 return clean(runner)
167 elif args.command == 'autoupdate':
168 return autoupdate(runner)
169 elif args.command == 'run':
170 return run(runner, args)
171 else:
172 raise NotImplementedError(
173 'Command {} not implemented.'.format(args.command)
174 )
175
176 raise AssertionError(
177 'Command {} failed to exit with a returncode'.format(args.command)
178 )
179
180
181 if __name__ == '__main__':
182 exit(main())
183
[end of pre_commit/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/main.py b/pre_commit/main.py
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -152,8 +152,8 @@
with error_handler():
add_logging_handler(args.color)
- git.check_for_cygwin_mismatch()
runner = Runner.create()
+ git.check_for_cygwin_mismatch()
if args.command == 'install':
return install(
| {"golden_diff": "diff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -152,8 +152,8 @@\n \n with error_handler():\n add_logging_handler(args.color)\n- git.check_for_cygwin_mismatch()\n runner = Runner.create()\n+ git.check_for_cygwin_mismatch()\n \n if args.command == 'install':\n return install(\n", "issue": "cygwin python checking should happen after setup code\nRegressed as part of #436\r\n\r\nBefore:\r\n\r\n```\r\n$ ./pre-commit/venv_cygwin_python/bin/pre-commit clean\r\nAn error has occurred: FatalError: git failed. Is it installed, and are you in a Git repository directory?\r\nCheck the log at ~/.pre-commit/pre-commit.log\r\n```\r\n\r\nCurrent master:\r\n\r\n```\r\n$ ./pre-commit/venv_cygwin_python/bin/pre-commit clean\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/usr/bin/git', 'rev-parse', '--show-toplevel')\r\nReturn code: 128\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors:\r\n fatal: Not a git repository (or any of the parent directories): .git\r\n\r\n\r\nCheck the log at ~/.pre-commit/pre-commit.log\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.logging_handler import add_logging_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default='auto', type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. 
Defaults to `%(default)s`.',\n )\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help=(\n 'Allow an unstaged config to be present. Note that this will '\n 'be stashed before parsing unless --no-stash is specified.'\n ),\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. commit or push.',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n if args.command == 'run':\n args.files = [\n os.path.relpath(os.path.abspath(filename), git.get_root())\n for filename in args.files\n ]\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n add_logging_handler(args.color)\n git.check_for_cygwin_mismatch()\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]} | 2,523 | 96 |
gh_patches_debug_734 | rasdani/github-patches | git_diff | docker__docker-py-806 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auth fails with long passwords
See https://github.com/docker/docker/issues/16840
docker-py is encoding `X-Registry-Auth` with regular base64 and not the url safe version of base64 that jwt tokens use.
</issue>
<code>
[start of docker/auth/auth.py]
1 # Copyright 2013 dotCloud inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import base64
16 import fileinput
17 import json
18 import logging
19 import os
20 import warnings
21
22 import six
23
24 from .. import constants
25 from .. import errors
26
27 INDEX_NAME = 'index.docker.io'
28 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
29 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
30 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
31
32 log = logging.getLogger(__name__)
33
34
35 def resolve_repository_name(repo_name, insecure=False):
36 if insecure:
37 warnings.warn(
38 constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(
39 'resolve_repository_name()'
40 ), DeprecationWarning
41 )
42
43 if '://' in repo_name:
44 raise errors.InvalidRepository(
45 'Repository name cannot contain a scheme ({0})'.format(repo_name))
46 parts = repo_name.split('/', 1)
47 if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':
48 # This is a docker index repo (ex: foo/bar or ubuntu)
49 return INDEX_NAME, repo_name
50 if len(parts) < 2:
51 raise errors.InvalidRepository(
52 'Invalid repository name ({0})'.format(repo_name))
53
54 if 'index.docker.io' in parts[0]:
55 raise errors.InvalidRepository(
56 'Invalid repository name, try "{0}" instead'.format(parts[1])
57 )
58
59 return parts[0], parts[1]
60
61
62 def resolve_authconfig(authconfig, registry=None):
63 """
64 Returns the authentication data from the given auth configuration for a
65 specific registry. As with the Docker client, legacy entries in the config
66 with full URLs are stripped down to hostnames before checking for a match.
67 Returns None if no match was found.
68 """
69 # Default to the public index server
70 registry = convert_to_hostname(registry) if registry else INDEX_NAME
71 log.debug("Looking for auth entry for {0}".format(repr(registry)))
72
73 if registry in authconfig:
74 log.debug("Found {0}".format(repr(registry)))
75 return authconfig[registry]
76
77 for key, config in six.iteritems(authconfig):
78 if convert_to_hostname(key) == registry:
79 log.debug("Found {0}".format(repr(key)))
80 return config
81
82 log.debug("No entry found")
83 return None
84
85
86 def convert_to_hostname(url):
87 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
88
89
90 def encode_auth(auth_info):
91 return base64.b64encode(auth_info.get('username', '') + b':' +
92 auth_info.get('password', ''))
93
94
95 def decode_auth(auth):
96 if isinstance(auth, six.string_types):
97 auth = auth.encode('ascii')
98 s = base64.b64decode(auth)
99 login, pwd = s.split(b':', 1)
100 return login.decode('ascii'), pwd.decode('ascii')
101
102
103 def encode_header(auth):
104 auth_json = json.dumps(auth).encode('ascii')
105 return base64.b64encode(auth_json)
106
107
108 def parse_auth(entries):
109 """
110 Parses authentication entries
111
112 Args:
113 entries: Dict of authentication entries.
114
115 Returns:
116 Authentication registry.
117 """
118
119 conf = {}
120 for registry, entry in six.iteritems(entries):
121 username, password = decode_auth(entry['auth'])
122 log.debug(
123 'Found entry (registry={0}, username={1})'
124 .format(repr(registry), repr(username))
125 )
126 conf[registry] = {
127 'username': username,
128 'password': password,
129 'email': entry['email'],
130 'serveraddress': registry,
131 }
132 return conf
133
134
135 def load_config(config_path=None):
136 """
137 Loads authentication data from a Docker configuration file in the given
138 root directory or if config_path is passed use given path.
139 """
140 conf = {}
141 data = None
142
143 # Prefer ~/.docker/config.json.
144 config_file = config_path or os.path.join(os.path.expanduser('~'),
145 DOCKER_CONFIG_FILENAME)
146
147 log.debug("Trying {0}".format(config_file))
148
149 if os.path.exists(config_file):
150 try:
151 with open(config_file) as f:
152 for section, data in six.iteritems(json.load(f)):
153 if section != 'auths':
154 continue
155 log.debug("Found 'auths' section")
156 return parse_auth(data)
157 log.debug("Couldn't find 'auths' section")
158 except (IOError, KeyError, ValueError) as e:
159 # Likely missing new Docker config file or it's in an
160 # unknown format, continue to attempt to read old location
161 # and format.
162 log.debug(e)
163 pass
164 else:
165 log.debug("File doesn't exist")
166
167 config_file = config_path or os.path.join(os.path.expanduser('~'),
168 LEGACY_DOCKER_CONFIG_FILENAME)
169
170 log.debug("Trying {0}".format(config_file))
171
172 if not os.path.exists(config_file):
173 log.debug("File doesn't exist - returning empty config")
174 return {}
175
176 log.debug("Attempting to parse as JSON")
177 try:
178 with open(config_file) as f:
179 return parse_auth(json.load(f))
180 except Exception as e:
181 log.debug(e)
182 pass
183
184 # If that fails, we assume the configuration file contains a single
185 # authentication token for the public registry in the following format:
186 #
187 # auth = AUTH_TOKEN
188 # email = [email protected]
189 log.debug("Attempting to parse legacy auth file format")
190 try:
191 data = []
192 for line in fileinput.input(config_file):
193 data.append(line.strip().split(' = ')[1])
194 if len(data) < 2:
195 # Not enough data
196 raise errors.InvalidConfigFile(
197 'Invalid or empty configuration file!')
198
199 username, password = decode_auth(data[0])
200 conf[INDEX_NAME] = {
201 'username': username,
202 'password': password,
203 'email': data[1],
204 'serveraddress': INDEX_URL,
205 }
206 return conf
207 except Exception as e:
208 log.debug(e)
209 pass
210
211 log.debug("All parsing attempts failed - returning empty config")
212 return {}
213
[end of docker/auth/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docker/auth/auth.py b/docker/auth/auth.py
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -102,7 +102,7 @@
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
- return base64.b64encode(auth_json)
+ return base64.urlsafe_b64encode(auth_json)
def parse_auth(entries):
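The failure pattern suggests the daemon decodes this header with a URL-safe base64 decoder, so any `+` or `/` produced by the standard alphabet (more likely the longer the encoded credentials are) breaks authentication; switching to `urlsafe_b64encode`, as above, keeps the two sides in agreement. A small illustration of the two alphabets (the input bytes are chosen only to hit the differing characters):

```python
import base64

raw = b"\xfb\xff"                 # 6-bit groups land on indices 62 and 63
base64.b64encode(raw)             # b'+/8='  standard alphabet
base64.urlsafe_b64encode(raw)     # b'-_8='  URL-safe alphabet, as used by JWTs
```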
| {"golden_diff": "diff --git a/docker/auth/auth.py b/docker/auth/auth.py\n--- a/docker/auth/auth.py\n+++ b/docker/auth/auth.py\n@@ -102,7 +102,7 @@\n \n def encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n- return base64.b64encode(auth_json)\n+ return base64.urlsafe_b64encode(auth_json)\n \n \n def parse_auth(entries):\n", "issue": "Auth fails with long passwords\nSee https://github.com/docker/docker/issues/16840\n\ndocker-py is encoding `X-Registry-Auth` with regular base64 and not the url safe version of base64 that jwt tokens use.\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport fileinput\nimport json\nimport logging\nimport os\nimport warnings\n\nimport six\n\nfrom .. import constants\nfrom .. import errors\n\nINDEX_NAME = 'index.docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name, insecure=False):\n if insecure:\n warnings.warn(\n constants.INSECURE_REGISTRY_DEPRECATION_WARNING.format(\n 'resolve_repository_name()'\n ), DeprecationWarning\n )\n\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name))\n parts = repo_name.split('/', 1)\n if '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost':\n # This is a docker index repo (ex: foo/bar or ubuntu)\n return INDEX_NAME, repo_name\n if len(parts) < 2:\n raise errors.InvalidRepository(\n 'Invalid repository name ({0})'.format(repo_name))\n\n if 'index.docker.io' in parts[0]:\n raise errors.InvalidRepository(\n 'Invalid repository name, try \"{0}\" instead'.format(parts[1])\n )\n\n return parts[0], parts[1]\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = convert_to_hostname(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if convert_to_hostname(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef encode_auth(auth_info):\n return base64.b64encode(auth_info.get('username', '') + b':' +\n auth_info.get('password', ''))\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('ascii'), pwd.decode('ascii')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.b64encode(auth_json)\n\n\ndef parse_auth(entries):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry['email'],\n 'serveraddress': registry,\n }\n return conf\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n \"\"\"\n conf = {}\n data = None\n\n # Prefer ~/.docker/config.json.\n config_file = config_path or os.path.join(os.path.expanduser('~'),\n DOCKER_CONFIG_FILENAME)\n\n log.debug(\"Trying {0}\".format(config_file))\n\n if os.path.exists(config_file):\n try:\n with open(config_file) as f:\n for section, data in six.iteritems(json.load(f)):\n if section != 'auths':\n continue\n log.debug(\"Found 'auths' section\")\n return parse_auth(data)\n log.debug(\"Couldn't find 'auths' section\")\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n pass\n else:\n log.debug(\"File doesn't exist\")\n\n config_file = config_path or os.path.join(os.path.expanduser('~'),\n LEGACY_DOCKER_CONFIG_FILENAME)\n\n log.debug(\"Trying {0}\".format(config_file))\n\n if not os.path.exists(config_file):\n log.debug(\"File doesn't exist - returning empty config\")\n return {}\n\n log.debug(\"Attempting to parse as JSON\")\n try:\n with open(config_file) as f:\n return parse_auth(json.load(f))\n except Exception as e:\n log.debug(e)\n pass\n\n # If that fails, we assume the configuration file contains a single\n # authentication token for the public registry in the following format:\n #\n # auth = AUTH_TOKEN\n # email = [email protected]\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n for line in fileinput.input(config_file):\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise 
errors.InvalidConfigFile(\n 'Invalid or empty configuration file!')\n\n username, password = decode_auth(data[0])\n conf[INDEX_NAME] = {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n return conf\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}]} | 2,599 | 94 |
gh_patches_debug_37131 | rasdani/github-patches | git_diff | bokeh__bokeh-4963 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Updating of image ColorMapper
The ability to change the low/high of a ColorMapper for an already existing image (adjusting contrast/brightness) through widget callbacks on a server is desired. I could imagine this being accomplished by the following code, which incidentally doesn't throw any errors as is, it just doesn't do anything ;)
```
import numpy as np
from bokeh.plotting import figure, curdoc
from bokeh.layouts import row, layout
from bokeh.models import Slider
from bokeh.models.mappers import LinearColorMapper
from bokeh.palettes import Greys9
def change_image_contrast(attr, old, new):
fig_im.glyph.color_mapper.update(low=graph_min_slider.value, high=graph_max_slider.value)
fig_im.trigger('glyph', fig_im.glyph, fig_im.glyph)
graph_min_slider = Slider(title="Min", start=0, end=99, step=1, value=0)
graph_max_slider = Slider(title="Max", start=1, end=100, step=1, value=100)
graph_min_slider.on_change('value', change_image_contrast)
graph_max_slider.on_change('value', change_image_contrast)
fig = figure(plot_width=500, plot_height=500, x_range=(0, 10), y_range=(0, 10))
fig_im = fig.image(image=[np.random.randint(0, 100, (10, 10), dtype='int16')], x=[0], y=[0], dw=[10], dh=[10],
color_mapper=LinearColorMapper(low=0, high=100, palette=Greys9))
curdoc().add_root(layout([fig], [row([graph_min_slider, graph_max_slider])]))
```
</issue>
<code>
[start of bokeh/models/mappers.py]
1 """ Models for mapping values from one range or space to another.
2
3 """
4 from __future__ import absolute_import
5
6 from ..model import Model
7 from ..core.properties import abstract
8 from ..core.properties import Float, Color, Enum, Seq
9 from ..core.enums import Palette
10 from .. import palettes
11
12 @abstract
13 class ColorMapper(Model):
14 """ Base class for color mapper types. `ColorMapper`` is not
15 generally useful to instantiate on its own.
16
17 """
18
19 class LinearColorMapper(ColorMapper):
20 """ Map numbers in a range [*low*, *high*] linearly into a
21 sequence of colors (a palette).
22
23 For example, if the range is [0, 99] and the palette is
24 ``['red', 'green', 'blue']``, the values would be mapped as
25 follows::
26
27 x < 0 : 'red' # values < low are clamped
28 0 >= x < 33 : 'red'
29 33 >= x < 66 : 'green'
30 66 >= x < 99 : 'blue'
31 99 >= x : 'blue' # values > high are clamped
32
33 """
34
35 palette = Seq(Color, help="""
36 A sequence of colors to use as the target palette for mapping.
37
38 This property can also be set as a ``String``, to the name of
39 any of the palettes shown in :ref:`bokeh.palettes`.
40 """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal))
41
42 low = Float(help="""
43 The minimum value of the range to map into the palette. Values below
44 this are clamped to ``low``.
45 """)
46
47 high = Float(help="""
48 The maximum value of the range to map into the palette. Values above
49 this are clamped to ``high``.
50 """)
51
52 # TODO: (jc) what is the color code for transparent?
53 # TODO: (bev) better docstring
54 reserve_color = Color("#ffffff", help="""
55 Used by Abstract Rendering.
56 """)
57
58 # TODO: (bev) better docstring
59 reserve_val = Float(default=None, help="""
60 Used by Abstract Rendering.
61 """)
62
63 def __init__(self, palette=None, **kwargs):
64 if palette is not None: kwargs['palette'] = palette
65 super(LinearColorMapper, self).__init__(**kwargs)
66
67 class LogColorMapper(ColorMapper):
68 """ Map numbers in a range [*low*, *high*] into a
69 sequence of colors (a palette) on a natural logarithm scale.
70
71 For example, if the range is [0, 25] and the palette is
72 ``['red', 'green', 'blue']``, the values would be mapped as
73 follows::
74
75 x < 0 : 'red' # values < low are clamped
76 0 >= x < 2.72 : 'red' # math.e ** 1
77 2.72 >= x < 7.39 : 'green' # math.e ** 2
78 7.39 >= x < 20.09 : 'blue' # math.e ** 3
79 20.09 >= x : 'blue' # values > high are clamped
80
81 .. warning::
82 The LogColorMapper only works for images with scalar values that are
83 non-negative.
84
85 """
86
87 palette = Seq(Color, help="""
88 A sequence of colors to use as the target palette for mapping.
89
90 This property can also be set as a ``String``, to the name of
91 any of the palettes shown in :ref:`bokeh.palettes`.
92 """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal))
93
94 low = Float(help="""
95 The minimum value of the range to map into the palette. Values below
96 this are clamped to ``low``.
97 """)
98
99 high = Float(help="""
100 The maximum value of the range to map into the palette. Values above
101 this are clamped to ``high``.
102 """)
103
104 def __init__(self, palette=None, **kwargs):
105 if palette is not None: kwargs['palette'] = palette
106 super(LogColorMapper, self).__init__(**kwargs)
107
[end of bokeh/models/mappers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/mappers.py b/bokeh/models/mappers.py
--- a/bokeh/models/mappers.py
+++ b/bokeh/models/mappers.py
@@ -16,6 +16,17 @@
"""
+ palette = Seq(Color, help="""
+ A sequence of colors to use as the target palette for mapping.
+
+ This property can also be set as a ``String``, to the name of
+ any of the palettes shown in :ref:`bokeh.palettes`.
+ """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal))
+
+ def __init__(self, palette=None, **kwargs):
+ if palette is not None: kwargs['palette'] = palette
+ super(ColorMapper, self).__init__(**kwargs)
+
class LinearColorMapper(ColorMapper):
""" Map numbers in a range [*low*, *high*] linearly into a
sequence of colors (a palette).
@@ -32,13 +43,6 @@
"""
- palette = Seq(Color, help="""
- A sequence of colors to use as the target palette for mapping.
-
- This property can also be set as a ``String``, to the name of
- any of the palettes shown in :ref:`bokeh.palettes`.
- """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal))
-
low = Float(help="""
The minimum value of the range to map into the palette. Values below
this are clamped to ``low``.
@@ -60,10 +64,6 @@
Used by Abstract Rendering.
""")
- def __init__(self, palette=None, **kwargs):
- if palette is not None: kwargs['palette'] = palette
- super(LinearColorMapper, self).__init__(**kwargs)
-
class LogColorMapper(ColorMapper):
""" Map numbers in a range [*low*, *high*] into a
sequence of colors (a palette) on a natural logarithm scale.
@@ -84,13 +84,6 @@
"""
- palette = Seq(Color, help="""
- A sequence of colors to use as the target palette for mapping.
-
- This property can also be set as a ``String``, to the name of
- any of the palettes shown in :ref:`bokeh.palettes`.
- """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal))
-
low = Float(help="""
The minimum value of the range to map into the palette. Values below
this are clamped to ``low``.
@@ -100,7 +93,3 @@
The maximum value of the range to map into the palette. Values above
this are clamped to ``high``.
""")
-
- def __init__(self, palette=None, **kwargs):
- if palette is not None: kwargs['palette'] = palette
- super(LogColorMapper, self).__init__(**kwargs)
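The accepted change is a refactor of the Python models: the `palette` property and the palette-accepting `__init__` move from the two concrete mappers up to the abstract `ColorMapper` base class, so `LinearColorMapper` and `LogColorMapper` share a single definition. With that in place, the callback from the report can simply assign to the mapper's properties; whether the rendered image actually updates also depends on the browser-side (BokehJS) mapper reacting to those changes, which is outside this diff. A hedged sketch, where `color_mapper` is the `LinearColorMapper` instance from the report kept in its own variable:

```python
def change_image_contrast(attr, old, new):
    # Assign to the shared ColorMapper properties; assumes the client-side
    # mapper re-renders on change (not shown in this patch).
    color_mapper.low = graph_min_slider.value
    color_mapper.high = graph_max_slider.value
```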
| {"golden_diff": "diff --git a/bokeh/models/mappers.py b/bokeh/models/mappers.py\n--- a/bokeh/models/mappers.py\n+++ b/bokeh/models/mappers.py\n@@ -16,6 +16,17 @@\n \n \"\"\"\n \n+ palette = Seq(Color, help=\"\"\"\n+ A sequence of colors to use as the target palette for mapping.\n+\n+ This property can also be set as a ``String``, to the name of\n+ any of the palettes shown in :ref:`bokeh.palettes`.\n+ \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n+\n+ def __init__(self, palette=None, **kwargs):\n+ if palette is not None: kwargs['palette'] = palette\n+ super(ColorMapper, self).__init__(**kwargs)\n+\n class LinearColorMapper(ColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n sequence of colors (a palette).\n@@ -32,13 +43,6 @@\n \n \"\"\"\n \n- palette = Seq(Color, help=\"\"\"\n- A sequence of colors to use as the target palette for mapping.\n-\n- This property can also be set as a ``String``, to the name of\n- any of the palettes shown in :ref:`bokeh.palettes`.\n- \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n-\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n@@ -60,10 +64,6 @@\n Used by Abstract Rendering.\n \"\"\")\n \n- def __init__(self, palette=None, **kwargs):\n- if palette is not None: kwargs['palette'] = palette\n- super(LinearColorMapper, self).__init__(**kwargs)\n-\n class LogColorMapper(ColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\n@@ -84,13 +84,6 @@\n \n \"\"\"\n \n- palette = Seq(Color, help=\"\"\"\n- A sequence of colors to use as the target palette for mapping.\n-\n- This property can also be set as a ``String``, to the name of\n- any of the palettes shown in :ref:`bokeh.palettes`.\n- \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n-\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n@@ -100,7 +93,3 @@\n The maximum value of the range to map into the palette. Values above\n this are clamped to ``high``.\n \"\"\")\n-\n- def __init__(self, palette=None, **kwargs):\n- if palette is not None: kwargs['palette'] = palette\n- super(LogColorMapper, self).__init__(**kwargs)\n", "issue": "Updating of image ColorMapper\nThe ability to change the low/high of a ColorMapper for an already existing image (adjusting contrast/brightness) through widget callbacks on a server is desired. 
I could imagine this being accomplished by the following code, which incidentally doesn't throw any errors as is, it just doesn't do anything ;)\n\n```\nimport numpy as np\nfrom bokeh.plotting import figure, curdoc\nfrom bokeh.layouts import row, layout\nfrom bokeh.models import Slider\nfrom bokeh.models.mappers import LinearColorMapper\nfrom bokeh.palettes import Greys9\n\ndef change_image_contrast(attr, old, new):\n fig_im.glyph.color_mapper.update(low=graph_min_slider.value, high=graph_max_slider.value)\n fig_im.trigger('glyph', fig_im.glyph, fig_im.glyph)\n\ngraph_min_slider = Slider(title=\"Min\", start=0, end=99, step=1, value=0)\ngraph_max_slider = Slider(title=\"Max\", start=1, end=100, step=1, value=100)\n\ngraph_min_slider.on_change('value', change_image_contrast)\ngraph_max_slider.on_change('value', change_image_contrast)\n\nfig = figure(plot_width=500, plot_height=500, x_range=(0, 10), y_range=(0, 10))\n\nfig_im = fig.image(image=[np.random.randint(0, 100, (10, 10), dtype='int16')], x=[0], y=[0], dw=[10], dh=[10],\n color_mapper=LinearColorMapper(low=0, high=100, palette=Greys9))\n\ncurdoc().add_root(layout([fig], [row([graph_min_slider, graph_max_slider])]))\n```\n\n", "before_files": [{"content": "\"\"\" Models for mapping values from one range or space to another.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom ..model import Model\nfrom ..core.properties import abstract\nfrom ..core.properties import Float, Color, Enum, Seq\nfrom ..core.enums import Palette\nfrom .. import palettes\n\n@abstract\nclass ColorMapper(Model):\n \"\"\" Base class for color mapper types. `ColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\nclass LinearColorMapper(ColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n sequence of colors (a palette).\n\n For example, if the range is [0, 99] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 33 : 'red'\n 33 >= x < 66 : 'green'\n 66 >= x < 99 : 'blue'\n 99 >= x : 'blue' # values > high are clamped\n\n \"\"\"\n\n palette = Seq(Color, help=\"\"\"\n A sequence of colors to use as the target palette for mapping.\n\n This property can also be set as a ``String``, to the name of\n any of the palettes shown in :ref:`bokeh.palettes`.\n \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n \"\"\")\n\n high = Float(help=\"\"\"\n The maximum value of the range to map into the palette. 
Values above\n this are clamped to ``high``.\n \"\"\")\n\n # TODO: (jc) what is the color code for transparent?\n # TODO: (bev) better docstring\n reserve_color = Color(\"#ffffff\", help=\"\"\"\n Used by Abstract Rendering.\n \"\"\")\n\n # TODO: (bev) better docstring\n reserve_val = Float(default=None, help=\"\"\"\n Used by Abstract Rendering.\n \"\"\")\n\n def __init__(self, palette=None, **kwargs):\n if palette is not None: kwargs['palette'] = palette\n super(LinearColorMapper, self).__init__(**kwargs)\n\nclass LogColorMapper(ColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\n\n For example, if the range is [0, 25] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 2.72 : 'red' # math.e ** 1\n 2.72 >= x < 7.39 : 'green' # math.e ** 2\n 7.39 >= x < 20.09 : 'blue' # math.e ** 3\n 20.09 >= x : 'blue' # values > high are clamped\n\n .. warning::\n The LogColorMapper only works for images with scalar values that are\n non-negative.\n\n \"\"\"\n\n palette = Seq(Color, help=\"\"\"\n A sequence of colors to use as the target palette for mapping.\n\n This property can also be set as a ``String``, to the name of\n any of the palettes shown in :ref:`bokeh.palettes`.\n \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n \"\"\")\n\n high = Float(help=\"\"\"\n The maximum value of the range to map into the palette. Values above\n this are clamped to ``high``.\n \"\"\")\n\n def __init__(self, palette=None, **kwargs):\n if palette is not None: kwargs['palette'] = palette\n super(LogColorMapper, self).__init__(**kwargs)\n", "path": "bokeh/models/mappers.py"}]} | 2,095 | 670 |
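
Editor's note on the bokeh ColorMapper record above: the golden diff hoists the shared `palette` property and the positional-palette `__init__` out of the two concrete mappers into the `ColorMapper` base class. The sketch below illustrates that pattern in plain Python. The class names echo the bokeh models, but the property machinery (`Seq`, `Color`, `Float`) is replaced by ordinary attributes and the `map` helper is invented for demonstration, so this is not the bokeh implementation.

```python
class ColorMapper:
    """Base class: owns the palette and the positional-palette shortcut."""

    def __init__(self, palette=None, **kwargs):
        # Mirrors the hoisted __init__ in the golden diff: a positional
        # palette is folded into kwargs before the rest of the setup runs.
        if palette is not None:
            kwargs["palette"] = palette
        self.palette = kwargs.get("palette", [])
        self.low = kwargs.get("low")
        self.high = kwargs.get("high")


class LinearColorMapper(ColorMapper):
    """Maps values in [low, high] linearly onto the palette.

    Values outside the range are clamped, as in the original docstring.
    `map` is a demonstration helper, not part of the bokeh API.
    """

    def map(self, value):
        span = max(self.high - self.low, 1e-12)
        fraction = min(max((value - self.low) / span, 0.0), 1.0)
        index = min(int(fraction * len(self.palette)), len(self.palette) - 1)
        return self.palette[index]


if __name__ == "__main__":
    mapper = LinearColorMapper(["red", "green", "blue"], low=0, high=99)
    print([mapper.map(v) for v in (-5, 10, 50, 98, 120)])
    # -> ['red', 'red', 'green', 'blue', 'blue']
```
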
gh_patches_debug_13867 | rasdani/github-patches | git_diff | litestar-org__litestar-2885 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs: Channels run_in_background.py example does not work
### Summary
Hi,
I just tried to run the channels example "run_in_background.py" from the [website](https://docs.litestar.dev/2/usage/channels.html) (which translates to [run_in_background.py](https://github.com/litestar-org/litestar/blob/main/docs/examples/channels/run_in_background.py)) and found that it didn't work for me.
I'm running Python 3.8.10 in a virtual environment with Litestar 2.4.1.
The application contains exactly the same source code that is provided in the example file and was run using `uvicorn app:app --reload`
Log output:
```
INFO: Started reloader process [219108] using WatchFiles
Process SpawnProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/home/.../venv/lib/python3.8/site-packages/uvicorn/_subprocess.py", line 76, in subprocess_started
target(sockets=sockets)
File "/home/.../venv/lib/python3.8/site-packages/uvicorn/server.py", line 61, in run
return asyncio.run(self.serve(sockets=sockets))
File "/usr/lib/python3.8/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "uvloop/loop.pyx", line 1517, in uvloop.loop.Loop.run_until_complete
File "/home/.../venv/lib/python3.8/site-packages/uvicorn/server.py", line 68, in serve
config.load()
File "/home/.../venv/lib/python3.8/site-packages/uvicorn/config.py", line 467, in load
self.loaded_app = import_from_string(self.app)
File "/home/.../venv/lib/python3.8/site-packages/uvicorn/importer.py", line 21, in import_from_string
module = importlib.import_module(module_str)
File "/usr/lib/python3.8/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1014, in _gcd_import
File "<frozen importlib._bootstrap>", line 991, in _find_and_load
File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 848, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/.../source/app.py", line 18, in <module>
plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],
File "/home/.../venv/lib/python3.8/site-packages/litestar/channels/plugin.py", line 82, in __init__
raise ImproperlyConfiguredException("Must define either channels or set arbitrary_channels_allowed=True")
litestar.exceptions.http_exceptions.ImproperlyConfiguredException: 500: Must define either channels or set arbitrary_channels_allowed=True
```
This error is easy to resolve. I've changed
plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],
to
plugins=[ChannelsPlugin(backend=MemoryChannelsBackend(), channels=["general"])],
and the app started up successfully.
But there seems to be another problem. As soon as my browser connects to the websocket, I get the following warning:
```
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: Started reloader process [219218] using WatchFiles
INFO: Started server process [219220]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: ('127.0.0.1', 33682) - "WebSocket /ws" [accepted]
/home/...../source/app.py:10: RuntimeWarning: coroutine 'ChannelsPlugin.subscribe' was never awaited
async with channels.subscribe(["some_channel"]) as subscriber, subscriber.run_in_background(socket.send_text):
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
INFO: connection open
INFO: connection closed
```
I'm not sure how to resolve this problem. Probably the documentation does not reflect the current state of Litestar? Could you check if the example is still valid? Thank you :-)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2813">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2813/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2813/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
</issue>
<code>
[start of docs/examples/channels/run_in_background.py]
1 from litestar import Litestar, WebSocket, websocket
2 from litestar.channels import ChannelsPlugin
3 from litestar.channels.backends.memory import MemoryChannelsBackend
4
5
6 @websocket("/ws")
7 async def handler(socket: WebSocket, channels: ChannelsPlugin) -> None:
8 await socket.accept()
9
10 async with channels.subscribe(["some_channel"]) as subscriber, subscriber.run_in_background(socket.send_text):
11 while True:
12 await socket.receive_text()
13 # do something with the message here
14
15
16 app = Litestar(
17 [handler],
18 plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],
19 )
20
[end of docs/examples/channels/run_in_background.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/examples/channels/run_in_background.py b/docs/examples/channels/run_in_background.py
--- a/docs/examples/channels/run_in_background.py
+++ b/docs/examples/channels/run_in_background.py
@@ -7,13 +7,13 @@
async def handler(socket: WebSocket, channels: ChannelsPlugin) -> None:
await socket.accept()
- async with channels.subscribe(["some_channel"]) as subscriber, subscriber.run_in_background(socket.send_text):
+ async with await channels.subscribe(["some_channel"]) as subscriber, subscriber.run_in_background(socket.send_text):
while True:
- await socket.receive_text()
- # do something with the message here
+ response = await socket.receive_text()
+ await subscriber.send(response)
app = Litestar(
[handler],
- plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],
+ plugins=[ChannelsPlugin(backend=MemoryChannelsBackend(), channels=["some_channel"])],
)
| {"golden_diff": "diff --git a/docs/examples/channels/run_in_background.py b/docs/examples/channels/run_in_background.py\n--- a/docs/examples/channels/run_in_background.py\n+++ b/docs/examples/channels/run_in_background.py\n@@ -7,13 +7,13 @@\n async def handler(socket: WebSocket, channels: ChannelsPlugin) -> None:\n await socket.accept()\n \n- async with channels.subscribe([\"some_channel\"]) as subscriber, subscriber.run_in_background(socket.send_text):\n+ async with await channels.subscribe([\"some_channel\"]) as subscriber, subscriber.run_in_background(socket.send_text):\n while True:\n- await socket.receive_text()\n- # do something with the message here\n+ response = await socket.receive_text()\n+ await subscriber.send(response)\n \n \n app = Litestar(\n [handler],\n- plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],\n+ plugins=[ChannelsPlugin(backend=MemoryChannelsBackend(), channels=[\"some_channel\"])],\n )\n", "issue": "Docs: Channels run_in_background.py example does not work\n### Summary\r\n\r\nHi,\r\n\r\ni just tried to run the channels example \"run_in_background.py\" from the [website](https://docs.litestar.dev/2/usage/channels.html) (which translates to [run_in_background.py](https://github.com/litestar-org/litestar/blob/main/docs/examples/channels/run_in_background.py)) and found that it didn't work for me. \r\n\r\nI'm running Python 3.8.10 in a virtual environment with Litestar 2.4.1.\r\n\r\nThe application contains exacly the same source code that is provided in the example file and was run using `uvicorn app:app --reload`\r\n\r\nLog output: \r\n\r\n```\r\nINFO: Started reloader process [219108] using WatchFiles\r\nProcess SpawnProcess-1:\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.8/multiprocessing/process.py\", line 315, in _bootstrap\r\n self.run()\r\n File \"/usr/lib/python3.8/multiprocessing/process.py\", line 108, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/home/.../venv/lib/python3.8/site-packages/uvicorn/_subprocess.py\", line 76, in subprocess_started\r\n target(sockets=sockets)\r\n File \"/home/.../venv/lib/python3.8/site-packages/uvicorn/server.py\", line 61, in run\r\n return asyncio.run(self.serve(sockets=sockets))\r\n File \"/usr/lib/python3.8/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"uvloop/loop.pyx\", line 1517, in uvloop.loop.Loop.run_until_complete\r\n File \"/home/.../venv/lib/python3.8/site-packages/uvicorn/server.py\", line 68, in serve\r\n config.load()\r\n File \"/home/.../venv/lib/python3.8/site-packages/uvicorn/config.py\", line 467, in load\r\n self.loaded_app = import_from_string(self.app)\r\n File \"/home/.../venv/lib/python3.8/site-packages/uvicorn/importer.py\", line 21, in import_from_string\r\n module = importlib.import_module(module_str)\r\n File \"/usr/lib/python3.8/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1014, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 991, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 975, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 671, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 848, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/home/.../source/app.py\", line 18, in <module>\r\n 
plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],\r\n File \"/home/.../venv/lib/python3.8/site-packages/litestar/channels/plugin.py\", line 82, in __init__\r\n raise ImproperlyConfiguredException(\"Must define either channels or set arbitrary_channels_allowed=True\")\r\nlitestar.exceptions.http_exceptions.ImproperlyConfiguredException: 500: Must define either channels or set arbitrary_channels_allowed=True\r\n```\r\n\r\nThis error is easy to resolve. I've changed \r\n\r\n plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],\r\n\r\n to\r\n\r\n plugins=[ChannelsPlugin(backend=MemoryChannelsBackend(), channels=[\"general\"])],\r\n\r\nand the app started up successfully. \r\n\r\nBut there seems to be another problem. As soon as my browser connects to the websocket, I get the following warning:\r\n\r\n```\r\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\r\nINFO: Started reloader process [219218] using WatchFiles\r\nINFO: Started server process [219220]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nINFO: ('127.0.0.1', 33682) - \"WebSocket /ws\" [accepted]\r\n/home/...../source/app.py:10: RuntimeWarning: coroutine 'ChannelsPlugin.subscribe' was never awaited\r\n async with channels.subscribe([\"some_channel\"]) as subscriber, subscriber.run_in_background(socket.send_text):\r\nRuntimeWarning: Enable tracemalloc to get the object allocation traceback\r\nINFO: connection open\r\nINFO: connection closed\r\n```\r\n\r\nI'm not sure how to resolve this problem. Probably the documentation does not refect the current state of Litestar? Could you check if the example is still valid? Thank you :-) \r\n\r\n\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2813\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2813/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2813/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from litestar import Litestar, WebSocket, websocket\nfrom litestar.channels import ChannelsPlugin\nfrom litestar.channels.backends.memory import MemoryChannelsBackend\n\n\n@websocket(\"/ws\")\nasync def handler(socket: WebSocket, channels: ChannelsPlugin) -> None:\n await socket.accept()\n\n async with channels.subscribe([\"some_channel\"]) as subscriber, subscriber.run_in_background(socket.send_text):\n while True:\n await socket.receive_text()\n # do something with the message here\n\n\napp = Litestar(\n [handler],\n plugins=[ChannelsPlugin(backend=MemoryChannelsBackend())],\n)\n", "path": "docs/examples/channels/run_in_background.py"}]} | 2,092 | 201 |
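
Editor's note on the Litestar record above: for convenience, this is the docs example with the golden diff applied, reconstructed from the record's source file plus the patch. It assumes `litestar` is installed; consult the current Litestar documentation for the authoritative version.

```python
from litestar import Litestar, WebSocket, websocket
from litestar.channels import ChannelsPlugin
from litestar.channels.backends.memory import MemoryChannelsBackend


@websocket("/ws")
async def handler(socket: WebSocket, channels: ChannelsPlugin) -> None:
    await socket.accept()

    # subscribe() is a coroutine, so it must be awaited before entering the
    # async context manager; this is what the "never awaited" warning pointed at.
    async with await channels.subscribe(["some_channel"]) as subscriber, subscriber.run_in_background(socket.send_text):
        while True:
            response = await socket.receive_text()
            await subscriber.send(response)


app = Litestar(
    [handler],
    # The plugin needs an explicit channel list (or arbitrary_channels_allowed=True).
    plugins=[ChannelsPlugin(backend=MemoryChannelsBackend(), channels=["some_channel"])],
)
```
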
gh_patches_debug_5491 | rasdani/github-patches | git_diff | microsoft__torchgeo-579 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Landcover.ai download link
It looks like the landcover.ai domain name expired (breaking our download). The dataset is now hosted at https://landcover.ai.linuxpolska.com/.
</issue>
<code>
[start of torchgeo/datasets/landcoverai.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """LandCover.ai dataset."""
5
6 import glob
7 import hashlib
8 import os
9 from functools import lru_cache
10 from typing import Callable, Dict, Optional
11
12 import matplotlib.pyplot as plt
13 import numpy as np
14 import torch
15 from matplotlib.colors import ListedColormap
16 from PIL import Image
17 from torch import Tensor
18
19 from .geo import VisionDataset
20 from .utils import download_url, extract_archive, working_dir
21
22
23 class LandCoverAI(VisionDataset):
24 r"""LandCover.ai dataset.
25
26 The `LandCover.ai <https://landcover.ai/>`_ (Land Cover from Aerial Imagery)
27 dataset is a dataset for automatic mapping of buildings, woodlands, water and
28 roads from aerial images. This implementation is specifically for Version 1 of
29 Landcover.ai.
30
31 Dataset features:
32
33 * land cover from Poland, Central Europe
34 * three spectral bands - RGB
35 * 33 orthophotos with 25 cm per pixel resolution (~9000x9500 px)
36 * 8 orthophotos with 50 cm per pixel resolution (~4200x4700 px)
37 * total area of 216.27 km\ :sup:`2`
38
39 Dataset format:
40
41 * rasters are three-channel GeoTiffs with EPSG:2180 spatial reference system
42 * masks are single-channel GeoTiffs with EPSG:2180 spatial reference system
43
44 Dataset classes:
45
46 1. building (1.85 km\ :sup:`2`\ )
47 2. woodland (72.02 km\ :sup:`2`\ )
48 3. water (13.15 km\ :sup:`2`\ )
49 4. road (3.5 km\ :sup:`2`\ )
50
51 If you use this dataset in your research, please cite the following paper:
52
53 * https://arxiv.org/abs/2005.02264v3
54
55 .. note::
56
57 This dataset requires the following additional library to be installed:
58
59 * `opencv-python <https://pypi.org/project/opencv-python/>`_ to generate
60 the train/val/test split
61 """
62
63 url = "https://landcover.ai/download/landcover.ai.v1.zip"
64 filename = "landcover.ai.v1.zip"
65 md5 = "3268c89070e8734b4e91d531c0617e03"
66 sha256 = "15ee4ca9e3fd187957addfa8f0d74ac31bc928a966f76926e11b3c33ea76daa1"
67 classes = ["Background", "Building", "Woodland", "Water", "Road"]
68 cmap = ListedColormap(
69 [
70 [0.63921569, 1.0, 0.45098039],
71 [0.61176471, 0.61176471, 0.61176471],
72 [0.14901961, 0.45098039, 0.0],
73 [0.0, 0.77254902, 1.0],
74 [0.0, 0.0, 0.0],
75 ]
76 )
77
78 def __init__(
79 self,
80 root: str = "data",
81 split: str = "train",
82 transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,
83 download: bool = False,
84 checksum: bool = False,
85 ) -> None:
86 """Initialize a new LandCover.ai dataset instance.
87
88 Args:
89 root: root directory where dataset can be found
90 split: one of "train", "val", or "test"
91 transforms: a function/transform that takes input sample and its target as
92 entry and returns a transformed version
93 download: if True, download dataset and store it in the root directory
94 checksum: if True, check the MD5 of the downloaded files (may be slow)
95
96 Raises:
97 AssertionError: if ``split`` argument is invalid
98 RuntimeError: if ``download=False`` and data is not found, or checksums
99 don't match
100 """
101 assert split in ["train", "val", "test"]
102
103 self.root = root
104 self.split = split
105 self.transforms = transforms
106 self.download = download
107 self.checksum = checksum
108
109 self._verify()
110
111 with open(os.path.join(self.root, split + ".txt")) as f:
112 self.ids = f.readlines()
113
114 def __getitem__(self, index: int) -> Dict[str, Tensor]:
115 """Return an index within the dataset.
116
117 Args:
118 index: index to return
119
120 Returns:
121 data and label at that index
122 """
123 id_ = self.ids[index].rstrip()
124 sample = {"image": self._load_image(id_), "mask": self._load_target(id_)}
125
126 if self.transforms is not None:
127 sample = self.transforms(sample)
128
129 return sample
130
131 def __len__(self) -> int:
132 """Return the number of data points in the dataset.
133
134 Returns:
135 length of the dataset
136 """
137 return len(self.ids)
138
139 @lru_cache()
140 def _load_image(self, id_: str) -> Tensor:
141 """Load a single image.
142
143 Args:
144 id_: unique ID of the image
145
146 Returns:
147 the image
148 """
149 filename = os.path.join(self.root, "output", id_ + ".jpg")
150 with Image.open(filename) as img:
151 array: "np.typing.NDArray[np.int_]" = np.array(img)
152 tensor = torch.from_numpy(array)
153 # Convert from HxWxC to CxHxW
154 tensor = tensor.permute((2, 0, 1))
155 return tensor
156
157 @lru_cache()
158 def _load_target(self, id_: str) -> Tensor:
159 """Load the target mask for a single image.
160
161 Args:
162 id_: unique ID of the image
163
164 Returns:
165 the target mask
166 """
167 filename = os.path.join(self.root, "output", id_ + "_m.png")
168 with Image.open(filename) as img:
169 array: "np.typing.NDArray[np.int_]" = np.array(img.convert("L"))
170 tensor = torch.from_numpy(array)
171 return tensor
172
173 def _verify(self) -> None:
174 """Verify the integrity of the dataset.
175
176 Raises:
177 RuntimeError: if ``download=False`` but dataset is missing or checksum fails
178 """
179 # Check if the extracted files already exist
180 jpg = os.path.join(self.root, "output", "*_*.jpg")
181 png = os.path.join(self.root, "output", "*_*_m.png")
182 if glob.glob(jpg) and glob.glob(png):
183 return
184
185 # Check if the zip file has already been downloaded
186 pathname = os.path.join(self.root, self.filename)
187 if os.path.exists(pathname):
188 self._extract()
189 return
190
191 # Check if the user requested to download the dataset
192 if not self.download:
193 raise RuntimeError(
194 f"Dataset not found in `root={self.root}` and `download=False`, "
195 "either specify a different `root` directory or use `download=True` "
196 "to automaticaly download the dataset."
197 )
198
199 # Download the dataset
200 self._download()
201 self._extract()
202
203 def _download(self) -> None:
204 """Download the dataset."""
205 download_url(self.url, self.root, md5=self.md5 if self.checksum else None)
206
207 def _extract(self) -> None:
208 """Extract the dataset.
209
210 Raises:
211 AssertionError: if the checksum of split.py does not match
212 """
213 extract_archive(os.path.join(self.root, self.filename))
214
215 # Generate train/val/test splits
216 # Always check the sha256 of this file before executing
217 # to avoid malicious code injection
218 with working_dir(self.root):
219 with open("split.py") as f:
220 split = f.read().encode("utf-8")
221 assert hashlib.sha256(split).hexdigest() == self.sha256
222 exec(split)
223
224 def plot(
225 self,
226 sample: Dict[str, Tensor],
227 show_titles: bool = True,
228 suptitle: Optional[str] = None,
229 ) -> plt.Figure:
230 """Plot a sample from the dataset.
231
232 Args:
233 sample: a sample returned by :meth:`__getitem__`
234 show_titles: flag indicating whether to show titles above each panel
235 suptitle: optional string to use as a suptitle
236
237 Returns:
238 a matplotlib Figure with the rendered sample
239
240 .. versionadded:: 0.2
241 """
242 image = np.rollaxis(sample["image"].numpy(), 0, 3)
243 mask = sample["mask"].numpy()
244
245 num_panels = 2
246 showing_predictions = "prediction" in sample
247 if showing_predictions:
248 predictions = sample["prediction"].numpy()
249 num_panels += 1
250
251 fig, axs = plt.subplots(1, num_panels, figsize=(num_panels * 4, 5))
252 axs[0].imshow(image)
253 axs[0].axis("off")
254 axs[1].imshow(mask, vmin=0, vmax=4, cmap=self.cmap, interpolation="none")
255 axs[1].axis("off")
256 if show_titles:
257 axs[0].set_title("Image")
258 axs[1].set_title("Mask")
259
260 if showing_predictions:
261 axs[2].imshow(
262 predictions, vmin=0, vmax=4, cmap=self.cmap, interpolation="none"
263 )
264 axs[2].axis("off")
265 if show_titles:
266 axs[2].set_title("Predictions")
267
268 if suptitle is not None:
269 plt.suptitle(suptitle)
270 return fig
271
[end of torchgeo/datasets/landcoverai.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchgeo/datasets/landcoverai.py b/torchgeo/datasets/landcoverai.py
--- a/torchgeo/datasets/landcoverai.py
+++ b/torchgeo/datasets/landcoverai.py
@@ -60,7 +60,7 @@
the train/val/test split
"""
- url = "https://landcover.ai/download/landcover.ai.v1.zip"
+ url = "https://landcover.ai.linuxpolska.com/download/landcover.ai.v1.zip"
filename = "landcover.ai.v1.zip"
md5 = "3268c89070e8734b4e91d531c0617e03"
sha256 = "15ee4ca9e3fd187957addfa8f0d74ac31bc928a966f76926e11b3c33ea76daa1"
| {"golden_diff": "diff --git a/torchgeo/datasets/landcoverai.py b/torchgeo/datasets/landcoverai.py\n--- a/torchgeo/datasets/landcoverai.py\n+++ b/torchgeo/datasets/landcoverai.py\n@@ -60,7 +60,7 @@\n the train/val/test split\n \"\"\"\n \n- url = \"https://landcover.ai/download/landcover.ai.v1.zip\"\n+ url = \"https://landcover.ai.linuxpolska.com/download/landcover.ai.v1.zip\"\n filename = \"landcover.ai.v1.zip\"\n md5 = \"3268c89070e8734b4e91d531c0617e03\"\n sha256 = \"15ee4ca9e3fd187957addfa8f0d74ac31bc928a966f76926e11b3c33ea76daa1\"\n", "issue": "Landcover.ai download link\nIt looks like the landcover.ai domain name expired (breaking our download). The dataset is now hosted at https://landcover.ai.linuxpolska.com/.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"LandCover.ai dataset.\"\"\"\n\nimport glob\nimport hashlib\nimport os\nfrom functools import lru_cache\nfrom typing import Callable, Dict, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom matplotlib.colors import ListedColormap\nfrom PIL import Image\nfrom torch import Tensor\n\nfrom .geo import VisionDataset\nfrom .utils import download_url, extract_archive, working_dir\n\n\nclass LandCoverAI(VisionDataset):\n r\"\"\"LandCover.ai dataset.\n\n The `LandCover.ai <https://landcover.ai/>`_ (Land Cover from Aerial Imagery)\n dataset is a dataset for automatic mapping of buildings, woodlands, water and\n roads from aerial images. This implementation is specifically for Version 1 of\n Landcover.ai.\n\n Dataset features:\n\n * land cover from Poland, Central Europe\n * three spectral bands - RGB\n * 33 orthophotos with 25 cm per pixel resolution (~9000x9500 px)\n * 8 orthophotos with 50 cm per pixel resolution (~4200x4700 px)\n * total area of 216.27 km\\ :sup:`2`\n\n Dataset format:\n\n * rasters are three-channel GeoTiffs with EPSG:2180 spatial reference system\n * masks are single-channel GeoTiffs with EPSG:2180 spatial reference system\n\n Dataset classes:\n\n 1. building (1.85 km\\ :sup:`2`\\ )\n 2. woodland (72.02 km\\ :sup:`2`\\ )\n 3. water (13.15 km\\ :sup:`2`\\ )\n 4. road (3.5 km\\ :sup:`2`\\ )\n\n If you use this dataset in your research, please cite the following paper:\n\n * https://arxiv.org/abs/2005.02264v3\n\n .. 
note::\n\n This dataset requires the following additional library to be installed:\n\n * `opencv-python <https://pypi.org/project/opencv-python/>`_ to generate\n the train/val/test split\n \"\"\"\n\n url = \"https://landcover.ai/download/landcover.ai.v1.zip\"\n filename = \"landcover.ai.v1.zip\"\n md5 = \"3268c89070e8734b4e91d531c0617e03\"\n sha256 = \"15ee4ca9e3fd187957addfa8f0d74ac31bc928a966f76926e11b3c33ea76daa1\"\n classes = [\"Background\", \"Building\", \"Woodland\", \"Water\", \"Road\"]\n cmap = ListedColormap(\n [\n [0.63921569, 1.0, 0.45098039],\n [0.61176471, 0.61176471, 0.61176471],\n [0.14901961, 0.45098039, 0.0],\n [0.0, 0.77254902, 1.0],\n [0.0, 0.0, 0.0],\n ]\n )\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n transforms: Optional[Callable[[Dict[str, Tensor]], Dict[str, Tensor]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new LandCover.ai dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\", \"val\", or \"test\"\n transforms: a function/transform that takes input sample and its target as\n entry and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 of the downloaded files (may be slow)\n\n Raises:\n AssertionError: if ``split`` argument is invalid\n RuntimeError: if ``download=False`` and data is not found, or checksums\n don't match\n \"\"\"\n assert split in [\"train\", \"val\", \"test\"]\n\n self.root = root\n self.split = split\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n\n with open(os.path.join(self.root, split + \".txt\")) as f:\n self.ids = f.readlines()\n\n def __getitem__(self, index: int) -> Dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n id_ = self.ids[index].rstrip()\n sample = {\"image\": self._load_image(id_), \"mask\": self._load_target(id_)}\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.ids)\n\n @lru_cache()\n def _load_image(self, id_: str) -> Tensor:\n \"\"\"Load a single image.\n\n Args:\n id_: unique ID of the image\n\n Returns:\n the image\n \"\"\"\n filename = os.path.join(self.root, \"output\", id_ + \".jpg\")\n with Image.open(filename) as img:\n array: \"np.typing.NDArray[np.int_]\" = np.array(img)\n tensor = torch.from_numpy(array)\n # Convert from HxWxC to CxHxW\n tensor = tensor.permute((2, 0, 1))\n return tensor\n\n @lru_cache()\n def _load_target(self, id_: str) -> Tensor:\n \"\"\"Load the target mask for a single image.\n\n Args:\n id_: unique ID of the image\n\n Returns:\n the target mask\n \"\"\"\n filename = os.path.join(self.root, \"output\", id_ + \"_m.png\")\n with Image.open(filename) as img:\n array: \"np.typing.NDArray[np.int_]\" = np.array(img.convert(\"L\"))\n tensor = torch.from_numpy(array)\n return tensor\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\n\n Raises:\n RuntimeError: if ``download=False`` but dataset is missing or checksum fails\n \"\"\"\n # Check if the extracted files already exist\n jpg = os.path.join(self.root, \"output\", \"*_*.jpg\")\n png = os.path.join(self.root, \"output\", \"*_*_m.png\")\n if glob.glob(jpg) 
and glob.glob(png):\n return\n\n # Check if the zip file has already been downloaded\n pathname = os.path.join(self.root, self.filename)\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise RuntimeError(\n f\"Dataset not found in `root={self.root}` and `download=False`, \"\n \"either specify a different `root` directory or use `download=True` \"\n \"to automaticaly download the dataset.\"\n )\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset.\"\"\"\n download_url(self.url, self.root, md5=self.md5 if self.checksum else None)\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\n\n Raises:\n AssertionError: if the checksum of split.py does not match\n \"\"\"\n extract_archive(os.path.join(self.root, self.filename))\n\n # Generate train/val/test splits\n # Always check the sha256 of this file before executing\n # to avoid malicious code injection\n with working_dir(self.root):\n with open(\"split.py\") as f:\n split = f.read().encode(\"utf-8\")\n assert hashlib.sha256(split).hexdigest() == self.sha256\n exec(split)\n\n def plot(\n self,\n sample: Dict[str, Tensor],\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> plt.Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample returned by :meth:`__getitem__`\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional string to use as a suptitle\n\n Returns:\n a matplotlib Figure with the rendered sample\n\n .. versionadded:: 0.2\n \"\"\"\n image = np.rollaxis(sample[\"image\"].numpy(), 0, 3)\n mask = sample[\"mask\"].numpy()\n\n num_panels = 2\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n predictions = sample[\"prediction\"].numpy()\n num_panels += 1\n\n fig, axs = plt.subplots(1, num_panels, figsize=(num_panels * 4, 5))\n axs[0].imshow(image)\n axs[0].axis(\"off\")\n axs[1].imshow(mask, vmin=0, vmax=4, cmap=self.cmap, interpolation=\"none\")\n axs[1].axis(\"off\")\n if show_titles:\n axs[0].set_title(\"Image\")\n axs[1].set_title(\"Mask\")\n\n if showing_predictions:\n axs[2].imshow(\n predictions, vmin=0, vmax=4, cmap=self.cmap, interpolation=\"none\"\n )\n axs[2].axis(\"off\")\n if show_titles:\n axs[2].set_title(\"Predictions\")\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n return fig\n", "path": "torchgeo/datasets/landcoverai.py"}]} | 3,548 | 230 |
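
Editor's note on the torchgeo record above: the fix is purely a URL swap. The standard-library sketch below verifies an already downloaded archive against the MD5 recorded in the dataset class. The URL and hash are copied from the patched module; the `data/` directory is an assumption that matches the class's default `root`.

```python
import hashlib
from pathlib import Path

URL = "https://landcover.ai.linuxpolska.com/download/landcover.ai.v1.zip"
EXPECTED_MD5 = "3268c89070e8734b4e91d531c0617e03"


def md5_of(path: Path, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so large archives do not need to fit in memory.
    digest = hashlib.md5()
    with path.open("rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


if __name__ == "__main__":
    # Assumed location: the dataset's default root directory "data".
    archive = Path("data") / "landcover.ai.v1.zip"
    if archive.exists():
        ok = md5_of(archive) == EXPECTED_MD5
        print(f"{archive}: checksum {'OK' if ok else 'MISMATCH'} (source: {URL})")
    else:
        print(f"{archive} not found; download it from {URL} first.")
```
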
gh_patches_debug_30812 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3311 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider xpo_logistics is broken
During the global build at 2021-10-20-14-42-48, spider **xpo_logistics** failed with **0 features** and **1 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/xpo_logistics.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/xpo_logistics.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/xpo_logistics.geojson))
</issue>
<code>
[start of locations/spiders/xpo_logistics.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import re
4 import ast
5 from locations.items import GeojsonPointItem
6
7 class XPOLogisticsSpider(scrapy.Spider):
8 name = "xpo_logistics"
9 item_attributes = { 'brand': "XPO Logistics" }
10 allowed_domains = ["www.xpo.com"]
11 start_urls = (
12 'https://www.xpo.com/global-locations/',
13 )
14
15 def parse(self, response):
16 script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
17 data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
18 data = ast.literal_eval(data)
19
20 for store in data:
21 yield GeojsonPointItem(
22 lat=float(store['latitude']),
23 lon=float(store['longitude'].replace(',','')),
24 phone=store['telephone'],
25 ref=store['office_name'],
26 addr_full=store['street'],
27 city=store['city'],
28 state=store['state'],
29 postcode=store['postal_code'],
30 country=store['country'],
31 name=store['office_name']
32 )
33
34
35
[end of locations/spiders/xpo_logistics.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/xpo_logistics.py b/locations/spiders/xpo_logistics.py
--- a/locations/spiders/xpo_logistics.py
+++ b/locations/spiders/xpo_logistics.py
@@ -1,28 +1,29 @@
# -*- coding: utf-8 -*-
-import scrapy
-import re
import ast
+
+import scrapy
+
from locations.items import GeojsonPointItem
+
class XPOLogisticsSpider(scrapy.Spider):
name = "xpo_logistics"
- item_attributes = { 'brand': "XPO Logistics" }
+ item_attributes = { 'brand': "XPO Logistics", 'brand_wikidata': 'Q8042415' }
allowed_domains = ["www.xpo.com"]
start_urls = (
'https://www.xpo.com/global-locations/',
)
def parse(self, response):
- script = response.xpath('//script[contains(.,"globalLocationsArray")]').extract_first()
- data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]
- data = ast.literal_eval(data)
+ script = response.xpath('//script[@id="globalLocations"]/text()').extract_first()
+ data = ast.literal_eval(script)
for store in data:
yield GeojsonPointItem(
lat=float(store['latitude']),
lon=float(store['longitude'].replace(',','')),
phone=store['telephone'],
- ref=store['office_name'],
+ ref=f"{store['office_name']}-{store['postal_code']}",
addr_full=store['street'],
city=store['city'],
state=store['state'],
@@ -30,5 +31,3 @@
country=store['country'],
name=store['office_name']
)
-
-
| {"golden_diff": "diff --git a/locations/spiders/xpo_logistics.py b/locations/spiders/xpo_logistics.py\n--- a/locations/spiders/xpo_logistics.py\n+++ b/locations/spiders/xpo_logistics.py\n@@ -1,28 +1,29 @@\n # -*- coding: utf-8 -*-\n-import scrapy\n-import re\n import ast\n+\n+import scrapy\n+\n from locations.items import GeojsonPointItem\n \n+\n class XPOLogisticsSpider(scrapy.Spider):\n name = \"xpo_logistics\"\n- item_attributes = { 'brand': \"XPO Logistics\" }\n+ item_attributes = { 'brand': \"XPO Logistics\", 'brand_wikidata': 'Q8042415' }\n allowed_domains = [\"www.xpo.com\"]\n start_urls = (\n 'https://www.xpo.com/global-locations/',\n )\n \n def parse(self, response):\n- script = response.xpath('//script[contains(.,\"globalLocationsArray\")]').extract_first()\n- data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]\n- data = ast.literal_eval(data)\n+ script = response.xpath('//script[@id=\"globalLocations\"]/text()').extract_first()\n+ data = ast.literal_eval(script)\n \n for store in data:\n yield GeojsonPointItem(\n lat=float(store['latitude']),\n lon=float(store['longitude'].replace(',','')),\n phone=store['telephone'],\n- ref=store['office_name'],\n+ ref=f\"{store['office_name']}-{store['postal_code']}\",\n addr_full=store['street'],\n city=store['city'],\n state=store['state'],\n@@ -30,5 +31,3 @@\n country=store['country'],\n name=store['office_name']\n )\n-\n-\n", "issue": "Spider xpo_logistics is broken\nDuring the global build at 2021-10-20-14-42-48, spider **xpo_logistics** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/logs/xpo_logistics.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/xpo_logistics.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-10-20-14-42-48/output/xpo_logistics.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport ast\nfrom locations.items import GeojsonPointItem\n\nclass XPOLogisticsSpider(scrapy.Spider):\n name = \"xpo_logistics\"\n item_attributes = { 'brand': \"XPO Logistics\" }\n allowed_domains = [\"www.xpo.com\"]\n start_urls = (\n 'https://www.xpo.com/global-locations/',\n )\n\n def parse(self, response):\n script = response.xpath('//script[contains(.,\"globalLocationsArray\")]').extract_first()\n data = re.search(r'globalLocationsArray = (.*);', script).groups()[0]\n data = ast.literal_eval(data)\n\n for store in data:\n yield GeojsonPointItem(\n lat=float(store['latitude']),\n lon=float(store['longitude'].replace(',','')),\n phone=store['telephone'],\n ref=store['office_name'],\n addr_full=store['street'],\n city=store['city'],\n state=store['state'],\n postcode=store['postal_code'],\n country=store['country'],\n name=store['office_name']\n )\n\n\n", "path": "locations/spiders/xpo_logistics.py"}]} | 1,033 | 393 |
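
Editor's note on the alltheplaces record above: the patched spider reads the text of the page's `<script id="globalLocations">` element and parses it with `ast.literal_eval`. The sketch below isolates that parsing step with an invented sample payload (field names follow the spider, values are made up), so it runs without Scrapy or network access.

```python
import ast

# Invented sample of what the script tag's text looks like; not real data.
SAMPLE_SCRIPT_TEXT = """[
    {'office_name': 'Example Depot', 'latitude': '40.7128',
     'longitude': '-74.0060', 'telephone': '555-0100',
     'street': '1 Example Way', 'city': 'New York', 'state': 'NY',
     'postal_code': '10001', 'country': 'US'},
]"""


def parse_locations(script_text):
    # literal_eval only accepts Python literals, so arbitrary code embedded
    # in a scraped page cannot execute, unlike eval().
    return ast.literal_eval(script_text)


if __name__ == "__main__":
    for store in parse_locations(SAMPLE_SCRIPT_TEXT):
        # Same ref and coordinate handling as the patched spider.
        ref = f"{store['office_name']}-{store['postal_code']}"
        lat = float(store["latitude"])
        lon = float(store["longitude"].replace(",", ""))
        print(ref, lat, lon)
```
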
gh_patches_debug_27592 | rasdani/github-patches | git_diff | quantumlib__Cirq-3357 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Calling EngineProcessor.list_calibrations gives error
**Description of the issue**
Calling `EngineProcessor.list_calibrations` gives an error.
**How to reproduce the issue**
```python
import cirq
engine = cirq.google.Engine(project_id=PROJECT_ID,
proto_version=cirq.google.ProtoVersion.V2)
engine_processor = engine.get_processor('rainbow')
calibrations = engine_processor.list_calibrations()
```
Output:
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/Projects/Cirq/cirq/google/api/v2/program.py in grid_qubit_from_proto_id(proto_id)
102 row, col = parts
--> 103 return devices.GridQubit(row=int(row), col=int(col))
104 except ValueError:
ValueError: invalid literal for int() with base 10: 'q3'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-2-69914ce7bc88> in <module>
4 proto_version=cirq.google.ProtoVersion.V2)
5 engine_processor = engine.get_processor('rainbow')
----> 6 calibrations = engine_processor.list_calibrations()
~/Projects/Cirq/cirq/google/engine/engine_processor.py in list_calibrations(self, earliest_timestamp_seconds, latest_timestamp_seconds)
158 response = self.context.client.list_calibrations(
159 self.project_id, self.processor_id, filter_str)
--> 160 return [self._to_calibration(c.data) for c in list(response)]
161
162 def get_calibration(self, calibration_timestamp_seconds: int
~/Projects/Cirq/cirq/google/engine/engine_processor.py in <listcomp>(.0)
158 response = self.context.client.list_calibrations(
159 self.project_id, self.processor_id, filter_str)
--> 160 return [self._to_calibration(c.data) for c in list(response)]
161
162 def get_calibration(self, calibration_timestamp_seconds: int
~/Projects/Cirq/cirq/google/engine/engine_processor.py in _to_calibration(calibration_any)
130 metrics = v2.metrics_pb2.MetricsSnapshot.FromString(
131 calibration_any.value)
--> 132 return calibration.Calibration(metrics)
133
134 def list_calibrations(self,
~/Projects/Cirq/cirq/google/engine/calibration.py in __init__(self, calibration)
53 def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:
54 self.timestamp = calibration.timestamp_ms
---> 55 self._metric_dict = self._compute_metric_dict(calibration.metrics)
56
57 def _compute_metric_dict(
~/Projects/Cirq/cirq/google/engine/calibration.py in _compute_metric_dict(self, metrics)
68 ]
69 if metric.targets:
---> 70 qubits = tuple(
71 v2.grid_qubit_from_proto_id(t) for t in metric.targets)
72 results[name][qubits] = flat_values
~/Projects/Cirq/cirq/google/engine/calibration.py in <genexpr>(.0)
69 if metric.targets:
70 qubits = tuple(
---> 71 v2.grid_qubit_from_proto_id(t) for t in metric.targets)
72 results[name][qubits] = flat_values
73 else:
~/Projects/Cirq/cirq/google/api/v2/program.py in grid_qubit_from_proto_id(proto_id)
103 return devices.GridQubit(row=int(row), col=int(col))
104 except ValueError:
--> 105 raise ValueError(
106 'GridQubit proto id must be of the form <int>_<int> but was {}'.
107 format(proto_id))
ValueError: GridQubit proto id must be of the form <int>_<int> but was q3_2
```
**Cirq version**
0.9.0.dev
</issue>
<code>
[start of cirq/google/api/v2/program.py]
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import TYPE_CHECKING
16
17 from cirq import devices, ops
18
19 if TYPE_CHECKING:
20 import cirq
21
22
23 def qubit_to_proto_id(q: 'cirq.Qid') -> str:
24 """Return a proto id for a `cirq.Qid`.
25
26 For `cirq.GridQubit`s this id `{row}_{col}` where `{row}` is the integer
27 row of the grid qubit, and `{col}` is the integer column of the qubit.
28
29 For `cirq.NamedQubit`s this id is the name.
30
31 For `cirq.LineQubit`s this is string of the `x` attribute.
32 """
33 if isinstance(q, devices.GridQubit):
34 return '{}_{}'.format(q.row, q.col)
35 elif isinstance(q, ops.NamedQubit):
36 return q.name
37 elif isinstance(q, devices.LineQubit):
38 return '{}'.format(q.x)
39 else:
40 raise ValueError('Qubits of type {} do not support proto id'.format(
41 type(q)))
42
43
44 def qubit_from_proto_id(proto_id: str) -> 'cirq.Qid':
45 """Return a `cirq.Qid` for a proto id.
46
47 Proto IDs of the form {int}_{int} are parsed as GridQubits.
48
49 Proto IDs of the form {int} are parsed as LineQubits.
50
51 All other proto IDs are parsed as NamedQubits. Note that this will happily
52 accept any string; for circuits which explicitly use Grid or LineQubits,
53 prefer one of the specialized methods below.
54
55 Args:
56 proto_id: The id to convert.
57
58 Returns:
59 A `cirq.Qid` corresponding to the proto id.
60 """
61 num_coords = len(proto_id.split('_'))
62 if num_coords == 2:
63 try:
64 grid_q = grid_qubit_from_proto_id(proto_id)
65 return grid_q
66 except ValueError:
67 pass # Not a grid qubit.
68 elif num_coords == 1:
69 try:
70 line_q = line_qubit_from_proto_id(proto_id)
71 return line_q
72 except ValueError:
73 pass # Not a line qubit.
74
75 # named_qubit_from_proto has no failure condition.
76 named_q = named_qubit_from_proto_id(proto_id)
77 return named_q
78
79
80 def grid_qubit_from_proto_id(proto_id: str) -> 'cirq.GridQubit':
81 """Parse a proto id to a `cirq.GridQubit`.
82
83 Proto ids for grid qubits are of the form `{row}_{col}` where `{row}` is
84 the integer row of the grid qubit, and `{col}` is the integer column of
85 the qubit.
86
87 Args:
88 proto_id: The id to convert.
89
90 Returns:
91 A `cirq.GridQubit` corresponding to the proto id.
92
93 Raises:
94 ValueError: If the string not of the correct format.
95 """
96 parts = proto_id.split('_')
97 if len(parts) != 2:
98 raise ValueError(
99 'GridQubit proto id must be of the form <int>_<int> but was {}'.
100 format(proto_id))
101 try:
102 row, col = parts
103 return devices.GridQubit(row=int(row), col=int(col))
104 except ValueError:
105 raise ValueError(
106 'GridQubit proto id must be of the form <int>_<int> but was {}'.
107 format(proto_id))
108
109
110 def line_qubit_from_proto_id(proto_id: str) -> 'cirq.LineQubit':
111 """Parse a proto id to a `cirq.LineQubit`.
112
113 Proto ids for line qubits are integer strings representing the `x`
114 attribute of the line qubit.
115
116 Args:
117 proto_id: The id to convert.
118
119 Returns:
120 A `cirq.LineQubit` corresponding to the proto id.
121
122 Raises:
123 ValueError: If the string is not an integer.
124 """
125 try:
126 return devices.LineQubit(x=int(proto_id))
127 except ValueError:
128 raise ValueError(
129 'Line qubit proto id must be an int but was {}'.format(proto_id))
130
131
132 def named_qubit_from_proto_id(proto_id: str) -> 'cirq.NamedQubit':
133 """Parse a proto id to a `cirq.NamedQubit'
134
135 This simply returns a `cirq.NamedQubit` with a name equal to `proto_id`.
136 """
137 return ops.NamedQubit(proto_id)
138
[end of cirq/google/api/v2/program.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/google/api/v2/program.py b/cirq/google/api/v2/program.py
--- a/cirq/google/api/v2/program.py
+++ b/cirq/google/api/v2/program.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
+import re
from typing import TYPE_CHECKING
from cirq import devices, ops
@@ -19,6 +19,8 @@
if TYPE_CHECKING:
import cirq
+GRID_QUBIT_ID_PATTERN = r'^q?(-?\d+)_(-?\d+)$'
+
def qubit_to_proto_id(q: 'cirq.Qid') -> str:
"""Return a proto id for a `cirq.Qid`.
@@ -93,13 +95,14 @@
Raises:
ValueError: If the string not of the correct format.
"""
- parts = proto_id.split('_')
- if len(parts) != 2:
+
+ match = re.match(GRID_QUBIT_ID_PATTERN, proto_id)
+ if match is None:
raise ValueError(
'GridQubit proto id must be of the form <int>_<int> but was {}'.
format(proto_id))
try:
- row, col = parts
+ row, col = match.groups()
return devices.GridQubit(row=int(row), col=int(col))
except ValueError:
raise ValueError(
| {"golden_diff": "diff --git a/cirq/google/api/v2/program.py b/cirq/google/api/v2/program.py\n--- a/cirq/google/api/v2/program.py\n+++ b/cirq/google/api/v2/program.py\n@@ -11,7 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-\n+import re\n from typing import TYPE_CHECKING\n \n from cirq import devices, ops\n@@ -19,6 +19,8 @@\n if TYPE_CHECKING:\n import cirq\n \n+GRID_QUBIT_ID_PATTERN = r'^q?(-?\\d+)_(-?\\d+)$'\n+\n \n def qubit_to_proto_id(q: 'cirq.Qid') -> str:\n \"\"\"Return a proto id for a `cirq.Qid`.\n@@ -93,13 +95,14 @@\n Raises:\n ValueError: If the string not of the correct format.\n \"\"\"\n- parts = proto_id.split('_')\n- if len(parts) != 2:\n+\n+ match = re.match(GRID_QUBIT_ID_PATTERN, proto_id)\n+ if match is None:\n raise ValueError(\n 'GridQubit proto id must be of the form <int>_<int> but was {}'.\n format(proto_id))\n try:\n- row, col = parts\n+ row, col = match.groups()\n return devices.GridQubit(row=int(row), col=int(col))\n except ValueError:\n raise ValueError(\n", "issue": "Calling EngineProcessor.list_calibrations gives error\n**Description of the issue**\r\n\r\nCalling `EngineProcessor.list_calibrations` gives an error.\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\nimport cirq\r\n\r\nengine = cirq.google.Engine(project_id=PROJECT_ID,\r\n proto_version=cirq.google.ProtoVersion.V2)\r\nengine_processor = engine.get_processor('rainbow')\r\ncalibrations = engine_processor.list_calibrations()\r\n```\r\nOutput:\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n~/Projects/Cirq/cirq/google/api/v2/program.py in grid_qubit_from_proto_id(proto_id)\r\n 102 row, col = parts\r\n--> 103 return devices.GridQubit(row=int(row), col=int(col))\r\n 104 except ValueError:\r\n\r\nValueError: invalid literal for int() with base 10: 'q3'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nValueError Traceback (most recent call last)\r\n<ipython-input-2-69914ce7bc88> in <module>\r\n 4 proto_version=cirq.google.ProtoVersion.V2)\r\n 5 engine_processor = engine.get_processor('rainbow')\r\n----> 6 calibrations = engine_processor.list_calibrations()\r\n\r\n~/Projects/Cirq/cirq/google/engine/engine_processor.py in list_calibrations(self, earliest_timestamp_seconds, latest_timestamp_seconds)\r\n 158 response = self.context.client.list_calibrations(\r\n 159 self.project_id, self.processor_id, filter_str)\r\n--> 160 return [self._to_calibration(c.data) for c in list(response)]\r\n 161 \r\n 162 def get_calibration(self, calibration_timestamp_seconds: int\r\n\r\n~/Projects/Cirq/cirq/google/engine/engine_processor.py in <listcomp>(.0)\r\n 158 response = self.context.client.list_calibrations(\r\n 159 self.project_id, self.processor_id, filter_str)\r\n--> 160 return [self._to_calibration(c.data) for c in list(response)]\r\n 161 \r\n 162 def get_calibration(self, calibration_timestamp_seconds: int\r\n\r\n~/Projects/Cirq/cirq/google/engine/engine_processor.py in _to_calibration(calibration_any)\r\n 130 metrics = v2.metrics_pb2.MetricsSnapshot.FromString(\r\n 131 calibration_any.value)\r\n--> 132 return calibration.Calibration(metrics)\r\n 133 \r\n 134 def list_calibrations(self,\r\n\r\n~/Projects/Cirq/cirq/google/engine/calibration.py in __init__(self, calibration)\r\n 53 def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) 
-> None:\r\n 54 self.timestamp = calibration.timestamp_ms\r\n---> 55 self._metric_dict = self._compute_metric_dict(calibration.metrics)\r\n 56 \r\n 57 def _compute_metric_dict(\r\n\r\n~/Projects/Cirq/cirq/google/engine/calibration.py in _compute_metric_dict(self, metrics)\r\n 68 ]\r\n 69 if metric.targets:\r\n---> 70 qubits = tuple(\r\n 71 v2.grid_qubit_from_proto_id(t) for t in metric.targets)\r\n 72 results[name][qubits] = flat_values\r\n\r\n~/Projects/Cirq/cirq/google/engine/calibration.py in <genexpr>(.0)\r\n 69 if metric.targets:\r\n 70 qubits = tuple(\r\n---> 71 v2.grid_qubit_from_proto_id(t) for t in metric.targets)\r\n 72 results[name][qubits] = flat_values\r\n 73 else:\r\n\r\n~/Projects/Cirq/cirq/google/api/v2/program.py in grid_qubit_from_proto_id(proto_id)\r\n 103 return devices.GridQubit(row=int(row), col=int(col))\r\n 104 except ValueError:\r\n--> 105 raise ValueError(\r\n 106 'GridQubit proto id must be of the form <int>_<int> but was {}'.\r\n 107 format(proto_id))\r\n\r\nValueError: GridQubit proto id must be of the form <int>_<int> but was q3_2\r\n```\r\n\r\n**Cirq version**\r\n0.9.0.dev\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import TYPE_CHECKING\n\nfrom cirq import devices, ops\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef qubit_to_proto_id(q: 'cirq.Qid') -> str:\n \"\"\"Return a proto id for a `cirq.Qid`.\n\n For `cirq.GridQubit`s this id `{row}_{col}` where `{row}` is the integer\n row of the grid qubit, and `{col}` is the integer column of the qubit.\n\n For `cirq.NamedQubit`s this id is the name.\n\n For `cirq.LineQubit`s this is string of the `x` attribute.\n \"\"\"\n if isinstance(q, devices.GridQubit):\n return '{}_{}'.format(q.row, q.col)\n elif isinstance(q, ops.NamedQubit):\n return q.name\n elif isinstance(q, devices.LineQubit):\n return '{}'.format(q.x)\n else:\n raise ValueError('Qubits of type {} do not support proto id'.format(\n type(q)))\n\n\ndef qubit_from_proto_id(proto_id: str) -> 'cirq.Qid':\n \"\"\"Return a `cirq.Qid` for a proto id.\n\n Proto IDs of the form {int}_{int} are parsed as GridQubits.\n\n Proto IDs of the form {int} are parsed as LineQubits.\n\n All other proto IDs are parsed as NamedQubits. 
Note that this will happily\n accept any string; for circuits which explicitly use Grid or LineQubits,\n prefer one of the specialized methods below.\n\n Args:\n proto_id: The id to convert.\n\n Returns:\n A `cirq.Qid` corresponding to the proto id.\n \"\"\"\n num_coords = len(proto_id.split('_'))\n if num_coords == 2:\n try:\n grid_q = grid_qubit_from_proto_id(proto_id)\n return grid_q\n except ValueError:\n pass # Not a grid qubit.\n elif num_coords == 1:\n try:\n line_q = line_qubit_from_proto_id(proto_id)\n return line_q\n except ValueError:\n pass # Not a line qubit.\n\n # named_qubit_from_proto has no failure condition.\n named_q = named_qubit_from_proto_id(proto_id)\n return named_q\n\n\ndef grid_qubit_from_proto_id(proto_id: str) -> 'cirq.GridQubit':\n \"\"\"Parse a proto id to a `cirq.GridQubit`.\n\n Proto ids for grid qubits are of the form `{row}_{col}` where `{row}` is\n the integer row of the grid qubit, and `{col}` is the integer column of\n the qubit.\n\n Args:\n proto_id: The id to convert.\n\n Returns:\n A `cirq.GridQubit` corresponding to the proto id.\n\n Raises:\n ValueError: If the string not of the correct format.\n \"\"\"\n parts = proto_id.split('_')\n if len(parts) != 2:\n raise ValueError(\n 'GridQubit proto id must be of the form <int>_<int> but was {}'.\n format(proto_id))\n try:\n row, col = parts\n return devices.GridQubit(row=int(row), col=int(col))\n except ValueError:\n raise ValueError(\n 'GridQubit proto id must be of the form <int>_<int> but was {}'.\n format(proto_id))\n\n\ndef line_qubit_from_proto_id(proto_id: str) -> 'cirq.LineQubit':\n \"\"\"Parse a proto id to a `cirq.LineQubit`.\n\n Proto ids for line qubits are integer strings representing the `x`\n attribute of the line qubit.\n\n Args:\n proto_id: The id to convert.\n\n Returns:\n A `cirq.LineQubit` corresponding to the proto id.\n\n Raises:\n ValueError: If the string is not an integer.\n \"\"\"\n try:\n return devices.LineQubit(x=int(proto_id))\n except ValueError:\n raise ValueError(\n 'Line qubit proto id must be an int but was {}'.format(proto_id))\n\n\ndef named_qubit_from_proto_id(proto_id: str) -> 'cirq.NamedQubit':\n \"\"\"Parse a proto id to a `cirq.NamedQubit'\n\n This simply returns a `cirq.NamedQubit` with a name equal to `proto_id`.\n \"\"\"\n return ops.NamedQubit(proto_id)\n", "path": "cirq/google/api/v2/program.py"}]} | 2,894 | 338 |
gh_patches_debug_31509 | rasdani/github-patches | git_diff | pallets__click-240 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Single-character wrapped lines with long options
When I run the script at the bottom I get the following output. I would, obviously, expect ALSOLONG to be all together on the second row.
```
Usage: example.py relatively_long example [OPTIONS] FIRST SECOND LONGISH
A
L
S
O
L
O
N
G
Error: Missing argument "first".
```
##
``` py
import click
@click.group()
def foo():
pass
@foo.group()
def relatively_long():
pass
@relatively_long.command()
@click.argument('first')
@click.argument('second')
@click.argument('longish')
@click.argument('alsolong')
def example():
pass
foo()
```
</issue>
<code>
[start of click/formatting.py]
1 from contextlib import contextmanager
2 from .termui import get_terminal_size
3 from .parser import split_opt
4 from ._compat import term_len
5
6
7 def measure_table(rows):
8 widths = {}
9 for row in rows:
10 for idx, col in enumerate(row):
11 widths[idx] = max(widths.get(idx, 0), term_len(col))
12 return tuple(y for x, y in sorted(widths.items()))
13
14
15 def iter_rows(rows, col_count):
16 for row in rows:
17 row = tuple(row)
18 yield row + ('',) * (col_count - len(row))
19
20
21 def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
22 preserve_paragraphs=False):
23 """A helper function that intelligently wraps text. By default, it
24 assumes that it operates on a single paragraph of text but if the
25 `preserve_paragraphs` parameter is provided it will intelligently
26 handle paragraphs (defined by two empty lines).
27
28 If paragraphs are handled, a paragraph can be prefixed with an empty
29 line containing the ``\\b`` character (``\\x08``) to indicate that
30 no rewrapping should happen in that block.
31
32 :param text: the text that should be rewrapped.
33 :param width: the maximum width for the text.
34 :param initial_indent: the initial indent that should be placed on the
35 first line as a string.
36 :param subsequent_indent: the indent string that should be placed on
37 each consecutive line.
38 :param preserve_paragraphs: if this flag is set then the wrapping will
39 intelligently handle paragraphs.
40 """
41 from ._textwrap import TextWrapper
42 text = text.expandtabs()
43 wrapper = TextWrapper(width, initial_indent=initial_indent,
44 subsequent_indent=subsequent_indent,
45 replace_whitespace=False)
46 if not preserve_paragraphs:
47 return wrapper.fill(text)
48
49 p = []
50 buf = []
51 indent = None
52
53 def _flush_par():
54 if not buf:
55 return
56 if buf[0].strip() == '\b':
57 p.append((indent or 0, True, '\n'.join(buf[1:])))
58 else:
59 p.append((indent or 0, False, ' '.join(buf)))
60 del buf[:]
61
62 for line in text.splitlines():
63 if not line:
64 _flush_par()
65 indent = None
66 else:
67 if indent is None:
68 orig_len = term_len(line)
69 line = line.lstrip()
70 indent = orig_len - term_len(line)
71 buf.append(line)
72 _flush_par()
73
74 rv = []
75 for indent, raw, text in p:
76 with wrapper.extra_indent(' ' * indent):
77 if raw:
78 rv.append(wrapper.indent_only(text))
79 else:
80 rv.append(wrapper.fill(text))
81
82 return '\n\n'.join(rv)
83
84
85 class HelpFormatter(object):
86 """This class helps with formatting text-based help pages. It's
87 usually just needed for very special internal cases, but it's also
88 exposed so that developers can write their own fancy outputs.
89
90 At present, it always writes into memory.
91
92 :param indent_increment: the additional increment for each level.
93 :param width: the width for the text. This defaults to the terminal
94 width clamped to a maximum of 78.
95 """
96
97 def __init__(self, indent_increment=2, width=None, max_width=None):
98 self.indent_increment = indent_increment
99 if max_width is None:
100 max_width = 80
101 if width is None:
102 width = max(min(get_terminal_size()[0], max_width) - 2, 50)
103 self.width = width
104 self.current_indent = 0
105 self.buffer = []
106
107 def write(self, string):
108 """Writes a unicode string into the internal buffer."""
109 self.buffer.append(string)
110
111 def indent(self):
112 """Increases the indentation."""
113 self.current_indent += self.indent_increment
114
115 def dedent(self):
116 """Decreases the indentation."""
117 self.current_indent -= self.indent_increment
118
119 def write_usage(self, prog, args='', prefix='Usage: '):
120 """Writes a usage line into the buffer.
121
122 :param prog: the program name.
123 :param args: whitespace separated list of arguments.
124 :param prefix: the prefix for the first line.
125 """
126 prefix = '%*s%s' % (self.current_indent, prefix, prog)
127 self.write(prefix)
128
129 text_width = max(self.width - self.current_indent - term_len(prefix), 10)
130 indent = ' ' * (term_len(prefix) + 1)
131 self.write(wrap_text(args, text_width,
132 initial_indent=' ',
133 subsequent_indent=indent))
134
135 self.write('\n')
136
137 def write_heading(self, heading):
138 """Writes a heading into the buffer."""
139 self.write('%*s%s:\n' % (self.current_indent, '', heading))
140
141 def write_paragraph(self):
142 """Writes a paragraph into the buffer."""
143 if self.buffer:
144 self.write('\n')
145
146 def write_text(self, text):
147 """Writes re-indented text into the buffer. This rewraps and
148 preserves paragraphs.
149 """
150 text_width = max(self.width - self.current_indent, 11)
151 indent = ' ' * self.current_indent
152 self.write(wrap_text(text, text_width,
153 initial_indent=indent,
154 subsequent_indent=indent,
155 preserve_paragraphs=True))
156 self.write('\n')
157
158 def write_dl(self, rows, col_max=30, col_spacing=2):
159 """Writes a definition list into the buffer. This is how options
160 and commands are usually formatted.
161
162 :param rows: a list of two item tuples for the terms and values.
163 :param col_max: the maximum width of the first column.
164 :param col_spacing: the number of spaces between the first and
165 second column.
166 """
167 rows = list(rows)
168 widths = measure_table(rows)
169 if len(widths) != 2:
170 raise TypeError('Expected two columns for definition list')
171
172 first_col = min(widths[0], col_max) + col_spacing
173
174 for first, second in iter_rows(rows, len(widths)):
175 self.write('%*s%s' % (self.current_indent, '', first))
176 if not second:
177 self.write('\n')
178 continue
179 if term_len(first) <= first_col - col_spacing:
180 self.write(' ' * (first_col - term_len(first)))
181 else:
182 self.write('\n')
183 self.write(' ' * (first_col + self.current_indent))
184
185 text_width = max(self.width - first_col - 2, 10)
186 lines = iter(wrap_text(second, text_width).splitlines())
187 if lines:
188 self.write(next(lines) + '\n')
189 for line in lines:
190 self.write('%*s%s\n' % (
191 first_col + self.current_indent, '', line))
192 else:
193 self.write('\n')
194
195 @contextmanager
196 def section(self, name):
197 """Helpful context manager that writes a paragraph, a heading,
198 and the indents.
199
200 :param name: the section name that is written as heading.
201 """
202 self.write_paragraph()
203 self.write_heading(name)
204 self.indent()
205 try:
206 yield
207 finally:
208 self.dedent()
209
210 @contextmanager
211 def indentation(self):
212 """A context manager that increases the indentation."""
213 self.indent()
214 try:
215 yield
216 finally:
217 self.dedent()
218
219 def getvalue(self):
220 """Returns the buffer contents."""
221 return ''.join(self.buffer)
222
223
224 def join_options(options):
225 """Given a list of option strings this joins them in the most appropriate
226 way and returns them in the form ``(formatted_string,
227 any_prefix_is_slash)`` where the second item in the tuple is a flag that
228 indicates if any of the option prefixes was a slash.
229 """
230 rv = []
231 any_prefix_is_slash = False
232 for opt in options:
233 prefix = split_opt(opt)[0]
234 if prefix == '/':
235 any_prefix_is_slash = True
236 rv.append((len(prefix), opt))
237
238 rv.sort(key=lambda x: x[0])
239
240 rv = ', '.join(x[1] for x in rv)
241 return rv, any_prefix_is_slash
242
[end of click/formatting.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/click/formatting.py b/click/formatting.py
--- a/click/formatting.py
+++ b/click/formatting.py
@@ -18,6 +18,12 @@
yield row + ('',) * (col_count - len(row))
+def add_subsequent_indent(text, subsequent_indent):
+ lines = text.splitlines()
+ lines = [lines[0]] + [subsequent_indent + line for line in lines[1:]]
+ return '\n'.join(lines)
+
+
def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
preserve_paragraphs=False):
"""A helper function that intelligently wraps text. By default, it
@@ -40,11 +46,13 @@
"""
from ._textwrap import TextWrapper
text = text.expandtabs()
+ post_wrap_indent = subsequent_indent[:-1]
+ subsequent_indent = subsequent_indent[-1:]
wrapper = TextWrapper(width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
replace_whitespace=False)
if not preserve_paragraphs:
- return wrapper.fill(text)
+ return add_subsequent_indent(wrapper.fill(text), post_wrap_indent)
p = []
buf = []
@@ -75,9 +83,11 @@
for indent, raw, text in p:
with wrapper.extra_indent(' ' * indent):
if raw:
- rv.append(wrapper.indent_only(text))
+ rv.append(add_subsequent_indent(wrapper.indent_only(text),
+ post_wrap_indent))
else:
- rv.append(wrapper.fill(text))
+ rv.append(add_subsequent_indent(wrapper.fill(text),
+ post_wrap_indent))
return '\n\n'.join(rv)
| {"golden_diff": "diff --git a/click/formatting.py b/click/formatting.py\n--- a/click/formatting.py\n+++ b/click/formatting.py\n@@ -18,6 +18,12 @@\n yield row + ('',) * (col_count - len(row))\n \n \n+def add_subsequent_indent(text, subsequent_indent):\n+ lines = text.splitlines()\n+ lines = [lines[0]] + [subsequent_indent + line for line in lines[1:]]\n+ return '\\n'.join(lines)\n+\n+\n def wrap_text(text, width=78, initial_indent='', subsequent_indent='',\n preserve_paragraphs=False):\n \"\"\"A helper function that intelligently wraps text. By default, it\n@@ -40,11 +46,13 @@\n \"\"\"\n from ._textwrap import TextWrapper\n text = text.expandtabs()\n+ post_wrap_indent = subsequent_indent[:-1]\n+ subsequent_indent = subsequent_indent[-1:]\n wrapper = TextWrapper(width, initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False)\n if not preserve_paragraphs:\n- return wrapper.fill(text)\n+ return add_subsequent_indent(wrapper.fill(text), post_wrap_indent)\n \n p = []\n buf = []\n@@ -75,9 +83,11 @@\n for indent, raw, text in p:\n with wrapper.extra_indent(' ' * indent):\n if raw:\n- rv.append(wrapper.indent_only(text))\n+ rv.append(add_subsequent_indent(wrapper.indent_only(text),\n+ post_wrap_indent))\n else:\n- rv.append(wrapper.fill(text))\n+ rv.append(add_subsequent_indent(wrapper.fill(text),\n+ post_wrap_indent))\n \n return '\\n\\n'.join(rv)\n", "issue": "Single-character wrapped lines with long options\nWhen I run the script at the bottom I get the following output. I would, obviously, expect ALSOLONG to be all together on the second row.\n\n```\nUsage: example.py relatively_long example [OPTIONS] FIRST SECOND LONGISH\n A\n L\n S\n O\n L\n O\n N\n G\n\nError: Missing argument \"first\".\n```\n## \n\n``` py\nimport click\n\n\[email protected]()\ndef foo():\n pass\n\n\[email protected]()\ndef relatively_long():\n pass\n\n\n@relatively_long.command()\[email protected]('first')\[email protected]('second')\[email protected]('longish')\[email protected]('alsolong')\ndef example():\n pass\n\n\nfoo()\n```\n\n", "before_files": [{"content": "from contextlib import contextmanager\nfrom .termui import get_terminal_size\nfrom .parser import split_opt\nfrom ._compat import term_len\n\n\ndef measure_table(rows):\n widths = {}\n for row in rows:\n for idx, col in enumerate(row):\n widths[idx] = max(widths.get(idx, 0), term_len(col))\n return tuple(y for x, y in sorted(widths.items()))\n\n\ndef iter_rows(rows, col_count):\n for row in rows:\n row = tuple(row)\n yield row + ('',) * (col_count - len(row))\n\n\ndef wrap_text(text, width=78, initial_indent='', subsequent_indent='',\n preserve_paragraphs=False):\n \"\"\"A helper function that intelligently wraps text. 
By default, it\n assumes that it operates on a single paragraph of text but if the\n `preserve_paragraphs` parameter is provided it will intelligently\n handle paragraphs (defined by two empty lines).\n\n If paragraphs are handled, a paragraph can be prefixed with an empty\n line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n no rewrapping should happen in that block.\n\n :param text: the text that should be rewrapped.\n :param width: the maximum width for the text.\n :param initial_indent: the initial indent that should be placed on the\n first line as a string.\n :param subsequent_indent: the indent string that should be placed on\n each consecutive line.\n :param preserve_paragraphs: if this flag is set then the wrapping will\n intelligently handle paragraphs.\n \"\"\"\n from ._textwrap import TextWrapper\n text = text.expandtabs()\n wrapper = TextWrapper(width, initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False)\n if not preserve_paragraphs:\n return wrapper.fill(text)\n\n p = []\n buf = []\n indent = None\n\n def _flush_par():\n if not buf:\n return\n if buf[0].strip() == '\\b':\n p.append((indent or 0, True, '\\n'.join(buf[1:])))\n else:\n p.append((indent or 0, False, ' '.join(buf)))\n del buf[:]\n\n for line in text.splitlines():\n if not line:\n _flush_par()\n indent = None\n else:\n if indent is None:\n orig_len = term_len(line)\n line = line.lstrip()\n indent = orig_len - term_len(line)\n buf.append(line)\n _flush_par()\n\n rv = []\n for indent, raw, text in p:\n with wrapper.extra_indent(' ' * indent):\n if raw:\n rv.append(wrapper.indent_only(text))\n else:\n rv.append(wrapper.fill(text))\n\n return '\\n\\n'.join(rv)\n\n\nclass HelpFormatter(object):\n \"\"\"This class helps with formatting text-based help pages. It's\n usually just needed for very special internal cases, but it's also\n exposed so that developers can write their own fancy outputs.\n\n At present, it always writes into memory.\n\n :param indent_increment: the additional increment for each level.\n :param width: the width for the text. 
This defaults to the terminal\n width clamped to a maximum of 78.\n \"\"\"\n\n def __init__(self, indent_increment=2, width=None, max_width=None):\n self.indent_increment = indent_increment\n if max_width is None:\n max_width = 80\n if width is None:\n width = max(min(get_terminal_size()[0], max_width) - 2, 50)\n self.width = width\n self.current_indent = 0\n self.buffer = []\n\n def write(self, string):\n \"\"\"Writes a unicode string into the internal buffer.\"\"\"\n self.buffer.append(string)\n\n def indent(self):\n \"\"\"Increases the indentation.\"\"\"\n self.current_indent += self.indent_increment\n\n def dedent(self):\n \"\"\"Decreases the indentation.\"\"\"\n self.current_indent -= self.indent_increment\n\n def write_usage(self, prog, args='', prefix='Usage: '):\n \"\"\"Writes a usage line into the buffer.\n\n :param prog: the program name.\n :param args: whitespace separated list of arguments.\n :param prefix: the prefix for the first line.\n \"\"\"\n prefix = '%*s%s' % (self.current_indent, prefix, prog)\n self.write(prefix)\n\n text_width = max(self.width - self.current_indent - term_len(prefix), 10)\n indent = ' ' * (term_len(prefix) + 1)\n self.write(wrap_text(args, text_width,\n initial_indent=' ',\n subsequent_indent=indent))\n\n self.write('\\n')\n\n def write_heading(self, heading):\n \"\"\"Writes a heading into the buffer.\"\"\"\n self.write('%*s%s:\\n' % (self.current_indent, '', heading))\n\n def write_paragraph(self):\n \"\"\"Writes a paragraph into the buffer.\"\"\"\n if self.buffer:\n self.write('\\n')\n\n def write_text(self, text):\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n text_width = max(self.width - self.current_indent, 11)\n indent = ' ' * self.current_indent\n self.write(wrap_text(text, text_width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True))\n self.write('\\n')\n\n def write_dl(self, rows, col_max=30, col_spacing=2):\n \"\"\"Writes a definition list into the buffer. 
This is how options\n and commands are usually formatted.\n\n :param rows: a list of two item tuples for the terms and values.\n :param col_max: the maximum width of the first column.\n :param col_spacing: the number of spaces between the first and\n second column.\n \"\"\"\n rows = list(rows)\n widths = measure_table(rows)\n if len(widths) != 2:\n raise TypeError('Expected two columns for definition list')\n\n first_col = min(widths[0], col_max) + col_spacing\n\n for first, second in iter_rows(rows, len(widths)):\n self.write('%*s%s' % (self.current_indent, '', first))\n if not second:\n self.write('\\n')\n continue\n if term_len(first) <= first_col - col_spacing:\n self.write(' ' * (first_col - term_len(first)))\n else:\n self.write('\\n')\n self.write(' ' * (first_col + self.current_indent))\n\n text_width = max(self.width - first_col - 2, 10)\n lines = iter(wrap_text(second, text_width).splitlines())\n if lines:\n self.write(next(lines) + '\\n')\n for line in lines:\n self.write('%*s%s\\n' % (\n first_col + self.current_indent, '', line))\n else:\n self.write('\\n')\n\n @contextmanager\n def section(self, name):\n \"\"\"Helpful context manager that writes a paragraph, a heading,\n and the indents.\n\n :param name: the section name that is written as heading.\n \"\"\"\n self.write_paragraph()\n self.write_heading(name)\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n @contextmanager\n def indentation(self):\n \"\"\"A context manager that increases the indentation.\"\"\"\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n def getvalue(self):\n \"\"\"Returns the buffer contents.\"\"\"\n return ''.join(self.buffer)\n\n\ndef join_options(options):\n \"\"\"Given a list of option strings this joins them in the most appropriate\n way and returns them in the form ``(formatted_string,\n any_prefix_is_slash)`` where the second item in the tuple is a flag that\n indicates if any of the option prefixes was a slash.\n \"\"\"\n rv = []\n any_prefix_is_slash = False\n for opt in options:\n prefix = split_opt(opt)[0]\n if prefix == '/':\n any_prefix_is_slash = True\n rv.append((len(prefix), opt))\n\n rv.sort(key=lambda x: x[0])\n\n rv = ', '.join(x[1] for x in rv)\n return rv, any_prefix_is_slash\n", "path": "click/formatting.py"}]} | 3,121 | 374 |
gh_patches_debug_41271 | rasdani/github-patches | git_diff | facebookresearch__fairscale-67 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feat] Sync OSS.param_groups and the sharded optimizer param_groups
## π Feature
Make sure that all attributes (not just LR) are in sync in between the OSS.param_groups and the actual optimizer.
## Motivation
Some frameworks make it possible to alter any attribute here, not just LR (momentum, beta for Adam, ..). We do not currently support this and silently fail
## Pitch
Part of the not really well defined PyTorch optimizer features which are nice to have
## Alternatives
- At least add a warning when we're out of sync
- Nuke the .param_groups access
- Force a new OSS optimizer to be created everytime the user wants to change an attribute in param_groups
## Additional context
<!-- Add any other context or screenshots about the feature request here. -->
</issue>
<code>
[start of fairscale/optim/oss.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 import copy
7 from itertools import chain
8 import logging
9 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type
10
11 import torch
12 import torch.distributed as dist
13 from torch.optim import SGD, Optimizer
14
15 from .utils import broadcast_object, recursive_copy_to_device
16
17 if TYPE_CHECKING: # pragma: no cover
18 from torch.optim.optimizer import _params_t
19 else:
20 _params_t = Any
21
22
23 class OSS(Optimizer):
24 """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
25 optimizer and shards its state as described by ZeRO_.
26 ::
27 opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
28
29 .. _ZeRO: https://arxiv.org/abs/1910.02054
30
31 We use a greedy algorithm to pack a number of parameters
32 at each rank. Each parameter belongs to a single rank and
33 is not divided among rank.
34
35 After each rank completed their parameter update, they broadcast
36 the new version of the parameters to all other ranks to synchronize
37 the parameters for next round forward/backward computation.
38
39 Args:
40 params (list of tensors):
41 parameters to be optimized
42 Keyword Args:
43 optim (torch.nn.Optimizer):
44 optimizer to shard (default: SGD)
45 group (group):
46 torch.distributed group (default: group.WORLD)
47 """
48
49 optim: Optimizer
50 in_super_constructor: bool
51
52 def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):
53 # Hold all the model params in the root .param_groups
54 self.in_super_constructor = True
55 super().__init__(params, defaults)
56 self.in_super_constructor = False
57
58 # Build the wrapped optimizer, responsible for a shard of the params
59 self.group = group
60 self.rank = dist.get_rank(group)
61 split_param_groups = self.partition_parameters()
62 self.optim = optim(split_param_groups[self.rank], **defaults)
63
64 # Optional consolidated optimizer state
65 self._all_states: List[Dict[str, Any]] = []
66
67 # Current device is set by the parameters allocated to this rank
68 self._device = split_param_groups[self.rank][0]["params"][0].device
69
70 def partition_parameters(self) -> List[List[dict]]:
71 """Partitions parameters across distributed ranks.
72
73 Returns a list of param_groups (which is a list of dict) where each
74 element of the list contains the param_groups for a rank. Element 0
75 corresponds to rank 0, etc. We need all the ranks for the broadcast
76 inside step().
77 """
78 world_size = dist.get_world_size(self.group)
79 param_groups: List[List] = [list() for _ in range(world_size)]
80 sizes = [0] * world_size
81 for param_group in self.param_groups:
82 param_lists: List[List] = [list() for _ in range(world_size)]
83 for param in param_group["params"]:
84 # Add this param to rank with smallest size.
85 rank = sizes.index(min(sizes))
86 param_lists[rank].append(param)
87 sizes[rank] += param.numel()
88 for rank, params in enumerate(param_lists):
89 param_group_rank = copy.copy(param_group)
90 param_group_rank["params"] = params
91 param_groups[rank].append(param_group_rank)
92 return param_groups
93
94 # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.
95 # For example, the apex library contains fused optimizers with a step that supports extra kwargs.
96 def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
97 # Sync lr in case its been update by an LRScheduler.
98 self._sync_lr()
99
100 # Run the optimizer step on this shard only
101 loss = self.optim.step(closure=closure, **kwargs) # type: ignore
102
103 # Sync all the states
104 for rank, param_groups in enumerate(self.partition_parameters()):
105 for param_group in param_groups:
106 for param in param_group["params"]:
107 dist.broadcast(tensor=param, src=rank, group=self.group)
108 return loss
109
110 def local_state_dict(self) -> dict:
111 """ Gets this rank's state_dict. """
112 return self.optim.state_dict()
113
114 def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
115 """ Update the consolidated state_dict list, one per rank.
116
117 This needs to be called on all replicas """
118
119 # Sync lr in case its been update by an LRScheduler.
120 self._sync_lr()
121
122 if self.rank == recipient_rank:
123 # Pull the sharded state from all the other replicas
124 # Store all the states in order, rank by rank
125 logging.debug("Pulling the sharded optimizer state from all replicas")
126 self._all_states = self._collect_sharded_states()
127 else:
128 # Acknowledge broadcasts, and send this rank's shard when needed
129 self._broadcast_state_dict()
130
131 def state_dict(self) -> Dict[str, Any]:
132 """
133 Return the last known global optimizer state, which consist of a list of the shards.
134
135 NOTE: This is limited to the replica which was responsible for the consolidation.
136 The state may also not be up to date, depending on when `consolidate_state_dict` was last called.
137 """
138
139 assert (
140 len(self._all_states) > 0
141 ), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
142
143 return {
144 "state": [s["state"] for s in self._all_states],
145 "param_groups": [s["param_groups"] for s in self._all_states],
146 }
147
148 def load_local_state_dict(self, state_dict: dict) -> None:
149 """ Loads this rank's state_dict. """
150
151 self.optim.load_state_dict(state_dict)
152
153 # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)
154 # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268
155 groups = self.optim.param_groups
156 saved_groups = state_dict["param_groups"]
157 id_map = {
158 old_id: p
159 for old_id, p in zip(chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups)))
160 }
161 for k, v in state_dict["state"].items():
162 if k in id_map:
163 param = id_map[k]
164 self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)
165
166 # Restore the global param_groups (the params themselves are already correct)
167 for global_group, local_group in zip(self.param_groups, groups):
168 for k, v in local_group.items():
169 if k != "params":
170 global_group[k] = v
171
172 def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
173 """ Restore the global parameter groups as well as the shard """
174 # Dispatch this rank's state dictionary to the wrapped shard optimizer
175 self.load_local_state_dict(
176 {"state": state_dict["state"][self.rank], "param_groups": state_dict["param_groups"][self.rank]}
177 )
178
179 # Update the param_groups attribute for this instance
180 # TODO(ben)
181
182 def add_param_group(self, param_group: dict) -> None:
183 super().add_param_group(param_group)
184 if not self.in_super_constructor:
185 param_groups = self.partition_parameters()[self.rank]
186 if len(param_groups) == len(self.optim.param_groups) + 1:
187 self.optim.add_param_group(param_groups[-1])
188
189 def _sync_lr(self) -> None:
190 """Sync learning rate (needed to support LRScheduler)."""
191 for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
192 local_group["lr"] = global_group["lr"]
193
194 def _collect_sharded_states(self) -> List[Dict[str, Any]]:
195 """
196 Collect all the state shards, in CPU memory.
197 """
198 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
199 all_states: List[Dict[str, Any]] = []
200
201 for rank in range(dist.get_world_size(group=self.group)):
202 if rank == self.rank:
203 logging.debug("Saving self state")
204 all_states.append(
205 recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device("cpu"))
206 )
207
208 # Sync with other replicas
209 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
210 else:
211 # Fetch the optim state from the other replicas
212 logging.debug("Receiving state from rank %s ", rank)
213 replica_state = broadcast_object(
214 empty_buffer, src_rank=rank, group=self.group, dist_device=self._device
215 )
216
217 all_states.append(
218 recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device("cpu"))
219 )
220
221 logging.debug("State from rank %s received", rank)
222
223 return all_states
224
225 def _broadcast_state_dict(self) -> None:
226 """
227 Broadcast this rank's state shard, discard others
228 """
229 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)
230
231 for rank in range(dist.get_world_size(group=self.group)):
232 if rank == self.rank:
233 # Send the state to the reference replica
234 logging.debug(
235 "Sending the sharded optimizer state to the reference replica from rank %s", rank,
236 )
237 broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)
238 else:
239 # Discard this tensor/rank, broadcast necessary for syncing
240 logging.debug("Discarding broadcast from rank %s", rank)
241 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)
242
[end of fairscale/optim/oss.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py
--- a/fairscale/optim/oss.py
+++ b/fairscale/optim/oss.py
@@ -67,6 +67,12 @@
# Current device is set by the parameters allocated to this rank
self._device = split_param_groups[self.rank][0]["params"][0].device
+ # Sync local and global param_groups keys
+ for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
+ for k, v in local_group.items():
+ if k != "params":
+ global_group[k] = v
+
def partition_parameters(self) -> List[List[dict]]:
"""Partitions parameters across distributed ranks.
@@ -94,8 +100,8 @@
# NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.
# For example, the apex library contains fused optimizers with a step that supports extra kwargs.
def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:
- # Sync lr in case its been update by an LRScheduler.
- self._sync_lr()
+ # Sync oss param_groups attributes in case they've been updated by a scheduler.
+ self._sync_param_groups()
# Run the optimizer step on this shard only
loss = self.optim.step(closure=closure, **kwargs) # type: ignore
@@ -116,8 +122,8 @@
This needs to be called on all replicas """
- # Sync lr in case its been update by an LRScheduler.
- self._sync_lr()
+ # Sync lr and other attributes in case its been updated
+ self._sync_param_groups()
if self.rank == recipient_rank:
# Pull the sharded state from all the other replicas
@@ -176,9 +182,6 @@
{"state": state_dict["state"][self.rank], "param_groups": state_dict["param_groups"][self.rank]}
)
- # Update the param_groups attribute for this instance
- # TODO(ben)
-
def add_param_group(self, param_group: dict) -> None:
super().add_param_group(param_group)
if not self.in_super_constructor:
@@ -186,10 +189,13 @@
if len(param_groups) == len(self.optim.param_groups) + 1:
self.optim.add_param_group(param_groups[-1])
- def _sync_lr(self) -> None:
- """Sync learning rate (needed to support LRScheduler)."""
+ def _sync_param_groups(self) -> None:
+ """Sync learning rate and other optimizer attributes (needed to support schedulers)."""
for global_group, local_group in zip(self.param_groups, self.optim.param_groups):
- local_group["lr"] = global_group["lr"]
+ for k in local_group.keys():
+ if k != "params":
+ # Params have been sharded and should not be synced here
+ local_group[k] = global_group[k]
def _collect_sharded_states(self) -> List[Dict[str, Any]]:
"""
| {"golden_diff": "diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py\n--- a/fairscale/optim/oss.py\n+++ b/fairscale/optim/oss.py\n@@ -67,6 +67,12 @@\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n \n+ # Sync local and global param_groups keys\n+ for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n+ for k, v in local_group.items():\n+ if k != \"params\":\n+ global_group[k] = v\n+\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n \n@@ -94,8 +100,8 @@\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n- # Sync lr in case its been update by an LRScheduler.\n- self._sync_lr()\n+ # Sync oss param_groups attributes in case they've been updated by a scheduler.\n+ self._sync_param_groups()\n \n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n@@ -116,8 +122,8 @@\n \n This needs to be called on all replicas \"\"\"\n \n- # Sync lr in case its been update by an LRScheduler.\n- self._sync_lr()\n+ # Sync lr and other attributes in case its been updated\n+ self._sync_param_groups()\n \n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n@@ -176,9 +182,6 @@\n {\"state\": state_dict[\"state\"][self.rank], \"param_groups\": state_dict[\"param_groups\"][self.rank]}\n )\n \n- # Update the param_groups attribute for this instance\n- # TODO(ben)\n-\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n@@ -186,10 +189,13 @@\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n \n- def _sync_lr(self) -> None:\n- \"\"\"Sync learning rate (needed to support LRScheduler).\"\"\"\n+ def _sync_param_groups(self) -> None:\n+ \"\"\"Sync learning rate and other optimizer attributes (needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n- local_group[\"lr\"] = global_group[\"lr\"]\n+ for k in local_group.keys():\n+ if k != \"params\":\n+ # Params have been sharded and should not be synced here\n+ local_group[k] = global_group[k]\n \n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n", "issue": "[feat] Sync OSS.param_groups and the sharded optimizer param_groups\n## \ud83d\ude80 Feature\r\nMake sure that all attributes (not just LR) are in sync in between the OSS.param_groups and the actual optimizer. \r\n \r\n## Motivation\r\nSome frameworks make it possible to alter any attribute here, not just LR (momentum, beta for Adam, ..). We do not currently support this and silently fail\r\n\r\n## Pitch\r\nPart of the not really well defined PyTorch optimizer features which are nice to have\r\n\r\n## Alternatives\r\n- At least add a warning when we're out of sync\r\n- Nuke the .param_groups access\r\n- Force a new OSS optimizer to be created everytime the user wants to change an attribute in param_groups\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context or screenshots about the feature request here. 
-->\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. 
We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync lr in case its been update by an LRScheduler.\n self._sync_lr()\n\n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n\n # Sync all the states\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n dist.broadcast(tensor=param, src=rank, group=self.group)\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. \"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr in case its been update by an LRScheduler.\n self._sync_lr()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n assert (\n len(self._all_states) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": [s[\"param_groups\"] for s in self._all_states],\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. 
\"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict(\n {\"state\": state_dict[\"state\"][self.rank], \"param_groups\": state_dict[\"param_groups\"][self.rank]}\n )\n\n # Update the param_groups attribute for this instance\n # TODO(ben)\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_lr(self) -> None:\n \"\"\"Sync learning rate (needed to support LRScheduler).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n local_group[\"lr\"] = global_group[\"lr\"]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n 
broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}]} | 3,578 | 717 |