problem_id (string, length 18–22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, length 13–58) | prompt (string, length 1.71k–18.9k) | golden_diff (string, length 145–5.13k) | verification_info (string, length 465–23.6k) | num_tokens_prompt (int64, 556–4.1k) | num_tokens_diff (int64, 47–1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_28186
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-93
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dynamic blocks handling is partial
**Describe the bug**
An S3 bucket with a dynamic `logging` block is considered a violation, even if a value was set for the variable externally.
**To Reproduce**
Steps to reproduce the behavior:
S3 configuration:
```
resource "aws_s3_bucket" "bridgecrew_cws_bucket" {
count = var.existing_bucket_name == null ? 1 : 0
bucket = local.bucket_name
acl = "private"
versioning {
enabled = true
}
lifecycle_rule {
id = "Delete old log files"
enabled = true
noncurrent_version_expiration {
days = var.log_file_expiration
}
expiration {
days = var.log_file_expiration
}
}
dynamic "logging" {
for_each = var.logs_bucket_id != null ? [var.logs_bucket_id] : []
content {
target_bucket = logging.value
target_prefix = "/${local.bucket_name}"
}
}
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
kms_master_key_id = local.kms_key
sse_algorithm = "aws:kms"
}
}
}
tags = {
Name = "BridgecrewCWSBucket"
}
}
```
**Expected behavior**
The check should not fail
**Desktop (please complete the following information):**
- OS: mac OSX Catalina
- Checkov Version 1.0.167
Docker command in README.md is wrong
**Describe the bug**
The docker run command in the readme is incorrect and does not work. It should be:
docker run -v /user/tf:/tf bridgecrew/checkov -d /tf
</issue>
<code>
[start of checkov/terraform/parser.py]
1 import logging
2 import os
3 from os import path
4
5 import hcl2
6
7
8 class Parser:
9 logger = logging.getLogger(__name__)
10
11 def hcl2(self, directory, tf_definitions={}, parsing_errors={}):
12 modules_scan = []
13
14 for file in os.listdir(directory):
15 if file.endswith(".tf"):
16 tf_file = os.path.join(directory, file)
17 if tf_file not in tf_definitions.keys():
18 try:
19 with(open(tf_file, 'r')) as file:
20 file.seek(0)
21 dict = hcl2.load(file)
22 tf_defenition = dict
23 tf_definitions[tf_file] = tf_defenition
24 # TODO move from here
25 # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)
26
27 for modules in dict.get("module", []):
28 for module in modules.values():
29 relative_path = module['source'][0]
30 abs_path = os.path.join(directory, relative_path)
31 modules_scan.append(abs_path)
32 except Exception as e:
33 self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e)
34 parsing_errors[tf_file] = e
35 for m in modules_scan:
36 if path.exists(m):
37 self.hcl2(directory=m, tf_definitions=tf_definitions)
38
39 def parse_file(self, file, tf_definitions={}, parsing_errors={}):
40 if file.endswith(".tf"):
41 try:
42 with(open(file, 'r')) as tf_file:
43 tf_file.seek(0)
44 dict = hcl2.load(tf_file)
45 tf_defenition = dict
46 tf_definitions[file] = tf_defenition
47 except Exception as e:
48 self.logger.debug('failed while parsing file %s' % file, exc_info=e)
49 parsing_errors[file] = e
50
[end of checkov/terraform/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py
--- a/checkov/terraform/parser.py
+++ b/checkov/terraform/parser.py
@@ -18,13 +18,18 @@
try:
with(open(tf_file, 'r')) as file:
file.seek(0)
- dict = hcl2.load(file)
- tf_defenition = dict
- tf_definitions[tf_file] = tf_defenition
+ tf_definition = hcl2.load(file)
+ for resource_type in tf_definition.get('resource', []):
+ for resource in resource_type.values():
+ for named_resource in resource.values():
+ for dynamic_block in named_resource.get('dynamic', []):
+ for dynamic_field_name, dynamic_field_value in dynamic_block.items():
+ named_resource[dynamic_field_name] = dynamic_field_value['for_each']
+ tf_definitions[tf_file] = tf_definition
# TODO move from here
# tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)
- for modules in dict.get("module", []):
+ for modules in tf_definition.get("module", []):
for module in modules.values():
relative_path = module['source'][0]
abs_path = os.path.join(directory, relative_path)
|
{"golden_diff": "diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py\n--- a/checkov/terraform/parser.py\n+++ b/checkov/terraform/parser.py\n@@ -18,13 +18,18 @@\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n- dict = hcl2.load(file)\n- tf_defenition = dict\n- tf_definitions[tf_file] = tf_defenition\n+ tf_definition = hcl2.load(file)\n+ for resource_type in tf_definition.get('resource', []):\n+ for resource in resource_type.values():\n+ for named_resource in resource.values():\n+ for dynamic_block in named_resource.get('dynamic', []):\n+ for dynamic_field_name, dynamic_field_value in dynamic_block.items():\n+ named_resource[dynamic_field_name] = dynamic_field_value['for_each']\n+ tf_definitions[tf_file] = tf_definition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n \n- for modules in dict.get(\"module\", []):\n+ for modules in tf_definition.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n", "issue": "Dynamic blocks handling is partial\n**Describe the bug**\r\nAn S3 bucket with a dynamic `logging` block is considered a violation, even if a value was set for the variable externally.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nS3 configuration:\r\n```\r\nresource \"aws_s3_bucket\" \"bridgecrew_cws_bucket\" {\r\n count = var.existing_bucket_name == null ? 1 : 0\r\n\r\n bucket = local.bucket_name\r\n acl = \"private\"\r\n\r\n versioning {\r\n enabled = true\r\n }\r\n\r\n lifecycle_rule {\r\n id = \"Delete old log files\"\r\n enabled = true\r\n\r\n noncurrent_version_expiration {\r\n days = var.log_file_expiration\r\n }\r\n\r\n expiration {\r\n days = var.log_file_expiration\r\n }\r\n }\r\n\r\n dynamic \"logging\" {\r\n for_each = var.logs_bucket_id != null ? [var.logs_bucket_id] : []\r\n\r\n content {\r\n target_bucket = logging.value\r\n target_prefix = \"/${local.bucket_name}\"\r\n }\r\n }\r\n\r\n server_side_encryption_configuration {\r\n rule {\r\n apply_server_side_encryption_by_default {\r\n kms_master_key_id = local.kms_key\r\n sse_algorithm = \"aws:kms\"\r\n }\r\n }\r\n }\r\n\r\n tags = {\r\n Name = \"BridgecrewCWSBucket\"\r\n }\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nThe check should not fail\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: mac OSX Catalina\r\n - Checkov Version 1.0.167\r\n\r\n\nDocker command in README.md is wrong\n**Describe the bug**\r\nThe docker run command in the readme is incorrect and does not work. 
It should be: \r\ndocker run -v /user/tf:/tf bridgecrew/checkov -d /tf\r\n\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom os import path\n\nimport hcl2\n\n\nclass Parser:\n logger = logging.getLogger(__name__)\n\n def hcl2(self, directory, tf_definitions={}, parsing_errors={}):\n modules_scan = []\n\n for file in os.listdir(directory):\n if file.endswith(\".tf\"):\n tf_file = os.path.join(directory, file)\n if tf_file not in tf_definitions.keys():\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n dict = hcl2.load(file)\n tf_defenition = dict\n tf_definitions[tf_file] = tf_defenition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n\n for modules in dict.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n modules_scan.append(abs_path)\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e)\n parsing_errors[tf_file] = e\n for m in modules_scan:\n if path.exists(m):\n self.hcl2(directory=m, tf_definitions=tf_definitions)\n\n def parse_file(self, file, tf_definitions={}, parsing_errors={}):\n if file.endswith(\".tf\"):\n try:\n with(open(file, 'r')) as tf_file:\n tf_file.seek(0)\n dict = hcl2.load(tf_file)\n tf_defenition = dict\n tf_definitions[file] = tf_defenition\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % file, exc_info=e)\n parsing_errors[file] = e\n", "path": "checkov/terraform/parser.py"}]}
| 1,391 | 287 |
gh_patches_debug_15817
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-3588
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable manual register() ids in syft.grid.register()
**Is your feature request related to a problem? Please describe.**
It is a security risk for people to specify their own IDs given that GridNetwork will let you connect to anyone whose id you already know. Thus, we should disable the ability for people to specify their own ID and replace it with a randomly generated hash.
This hash should be printed with clear instructions ("Send this to whomever you'd like to connect with") when register() is called.
</issue>
<code>
[start of syft/grid/__init__.py]
1 from .network import Network
2
3 DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
4
5
6 def register(node_id: str, **kwargs):
7 """ Add this process as a new peer registering it in the grid network.
8
9 Args:
10 node_id: Id used to identify this node.
11 Returns:
12 peer: Peer Network instance.
13 """
14 if not kwargs:
15 args = args = {"max_size": None, "timeout": 444, "url": DEFAULT_NETWORK_URL}
16 else:
17 args = kwargs
18
19 peer = Network(node_id, **args)
20 peer.start()
21 return peer
22
[end of syft/grid/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py
--- a/syft/grid/__init__.py
+++ b/syft/grid/__init__.py
@@ -1,13 +1,12 @@
from .network import Network
+import uuid
DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com"
-def register(node_id: str, **kwargs):
+def register(**kwargs):
""" Add this process as a new peer registering it in the grid network.
- Args:
- node_id: Id used to identify this node.
Returns:
peer: Peer Network instance.
"""
@@ -16,6 +15,8 @@
else:
args = kwargs
- peer = Network(node_id, **args)
+ peer_id = str(uuid.uuid4())
+ peer = Network(peer_id, **args)
peer.start()
+
return peer
|
{"golden_diff": "diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py\n--- a/syft/grid/__init__.py\n+++ b/syft/grid/__init__.py\n@@ -1,13 +1,12 @@\n from .network import Network\n+import uuid\n \n DEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n \n \n-def register(node_id: str, **kwargs):\n+def register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n- Args:\n- node_id: Id used to identify this node.\n Returns:\n peer: Peer Network instance.\n \"\"\"\n@@ -16,6 +15,8 @@\n else:\n args = kwargs\n \n- peer = Network(node_id, **args)\n+ peer_id = str(uuid.uuid4())\n+ peer = Network(peer_id, **args)\n peer.start()\n+\n return peer\n", "issue": "Disable manual register() ids in syft.grid.register()\n**Is your feature request related to a problem? Please describe.**\r\nIt is a security risk for people to specify their own IDs given that GridNetwork will let you connect to anyone whose id you already know. Thus, we should disable the ability for people to specify their own ID and replace it with a randomly generated hash.\r\n\r\nThis hash should be printed with clear instructions (\"Send this to whomever you'd like to connect with\") when register() is called.\n", "before_files": [{"content": "from .network import Network\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(node_id: str, **kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Args:\n node_id: Id used to identify this node.\n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer = Network(node_id, **args)\n peer.start()\n return peer\n", "path": "syft/grid/__init__.py"}]}
| 828 | 219 |
gh_patches_debug_42215
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-1442
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ARIMA and AutoARIMA should return pandas.NA when value cannot be predicted, e.g., first index in-sample for start=0, d=1
Update 2021-09-15 - re-opened since a recurring issue.
Not a bug, but an interface convention - currently ARIMA, AutoARIMA and other learners fail when asked for a prediction that is undefined, e.g., difference parameter d=1 and first in-sample index (start=0).
It would be more sensible if `pandas.NA` is returned instead for such indices, perhaps a warning should be raised too that `pandas.NA` are produced.
In first instance, we should update ARIMA and AutoARIMA with this convention.
---
**Describe the bug**
When using `Imputer` with `forecaster = AutoARIMA()` the error "ValueError: In-sample predictions undefined for start=0 when d=1" is raised.
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-11-2d05f4822fd6> in <module>()
4
5 transformer = Imputer(method="forecaster", forecaster=AutoARIMA()).fit(Z=df.value)
----> 6 y_hat = transformer.transform(Z=df.value)
/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py in transform(self, Z, X)
102 z_pred = forecaster.fit(
103 z.fillna(method="ffill").fillna(method="backfill")
--> 104 ).predict(fh=fh_ins)
105 # fill with trend values
106 z = z.fillna(value=z_pred)
/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/_sktime.py in predict(self, fh, X, return_pred_int, alpha)
226 self.check_is_fitted()
227 self._set_fh(fh)
--> 228 return self._predict(self.fh, X, return_pred_int=return_pred_int, alpha=alpha)
229
230 def compute_pred_int(self, y_pred, alpha=DEFAULT_ALPHA):
/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict(self, fh, X, return_pred_int, alpha)
58 # all values are in-sample
59 elif fh.is_all_in_sample(self.cutoff):
---> 60 return self._predict_in_sample(fh_ins, **kwargs)
61
62 # both in-sample and out-of-sample values
/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict_in_sample(self, fh, X, return_pred_int, alpha)
80 X=X,
81 return_conf_int=return_pred_int,
---> 82 alpha=alpha,
83 )
84
/usr/local/lib/python3.7/dist-packages/pmdarima/utils/metaestimators.py in <lambda>(*args, **kwargs)
51
52 # lambda, but not partial, allows help() to work with update_wrapper
---> 53 out = (lambda *args, **kwargs: self.fn(obj, *args, **kwargs))
54 # update the docstring of the returned function
55 update_wrapper(out, self.fn)
/usr/local/lib/python3.7/dist-packages/pmdarima/arima/auto.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, typ, **kwargs)
242 return_conf_int=return_conf_int,
243 alpha=alpha,
--> 244 typ=typ,
245 )
246
/usr/local/lib/python3.7/dist-packages/pmdarima/arima/arima.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, **kwargs)
584 if start is not None and start < d:
585 raise ValueError("In-sample predictions undefined for start={0} "
--> 586 "when d={1}".format(start, d))
587
588 # if we fit with exog, make sure one was passed:
ValueError: In-sample predictions undefined for start=0 when d=1
```
**To Reproduce**
Data:
[df.csv](https://github.com/alan-turing-institute/sktime/files/6426660/df.csv)
```python
import pandas as pd
from sktime.transformations.series.impute import Imputer
from sktime.forecasting.arima import AutoARIMA
df = pd.read_csv('df.csv')
df['timestamp'] = pd.DatetimeIndex(df['timestamp']).to_period('D')
df = df.set_index('timestamp')
transformer = Imputer(method="forecaster", forecaster=AutoARIMA()).fit(Z=df.value)
y_hat = transformer.transform(Z=df.value)
```
**Expected behavior**
`transformer.transform(Z)` should return `Z` with NAn values replaced by AutoArima predictions.
**Additional context**
I think the problem is in `/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py` in `transform(self, Z, X)`, line 100, because all the points are being passed to `fh_ins` instead of just the points with NaN.
```python
100 fh_ins = -np.arange(len(z))
101 # fill NaN before fitting with ffill and backfill (heuristic)
102 z_pred = forecaster.fit(
103 z.fillna(method="ffill").fillna(method="backfill")
104 ).predict(fh=fh_ins)
105 # fill with trend values
106 z = z.fillna(value=z_pred)
```
**Versions**
<details>
```
System:
python: 3.7.10 (default, Feb 20 2021, 21:17:23) [GCC 7.5.0]
executable: /usr/bin/python3
machine: Linux-4.19.112+-x86_64-with-Ubuntu-18.04-bionic
Python dependencies:
pip: 19.3.1
setuptools: 56.0.0
sklearn: 0.24.2
sktime: 0.6.0
statsmodels: 0.12.2
numpy: 1.19.5
scipy: 1.4.1
Cython: 0.29.22
pandas: 1.1.5
matplotlib: 3.2.2
joblib: 1.0.1
numba: 0.51.2
pmdarima: 1.8.2
tsfresh: None
```
</details>
</issue>
<code>
[start of sktime/forecasting/base/adapters/_pmdarima.py]
1 # -*- coding: utf-8 -*-
2 # !/usr/bin/env python3 -u
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Implements adapter for pmdarima forecasters to be used in sktime framework."""
5
6 __author__ = ["Markus Löning", "Hongyi Yang"]
7 __all__ = ["_PmdArimaAdapter"]
8
9 import pandas as pd
10
11 from sktime.forecasting.base._base import DEFAULT_ALPHA
12 from sktime.forecasting.base import BaseForecaster
13
14
15 class _PmdArimaAdapter(BaseForecaster):
16 """Base class for interfacing pmdarima."""
17
18 _tags = {
19 "ignores-exogeneous-X": True,
20 "capability:pred_int": True,
21 "requires-fh-in-fit": False,
22 "handles-missing-data": False,
23 }
24
25 def __init__(self):
26 self._forecaster = None
27 super(_PmdArimaAdapter, self).__init__()
28
29 def _instantiate_model(self):
30 raise NotImplementedError("abstract method")
31
32 def _fit(self, y, X=None, fh=None, **fit_params):
33 """Fit to training data.
34
35 Parameters
36 ----------
37 y : pd.Series
38 Target time series to which to fit the forecaster.
39 fh : int, list, np.array or ForecastingHorizon, optional (default=None)
40 The forecasters horizon with the steps ahead to to predict.
41 X : pd.DataFrame, optional (default=None)
42 Exogenous variables are ignored
43
44 Returns
45 -------
46 self : returns an instance of self.
47 """
48 self._forecaster = self._instantiate_model()
49 self._forecaster.fit(y, X=X, **fit_params)
50 return self
51
52 def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):
53 # distinguish between in-sample and out-of-sample prediction
54 fh_oos = fh.to_out_of_sample(self.cutoff)
55 fh_ins = fh.to_in_sample(self.cutoff)
56
57 kwargs = {"X": X, "return_pred_int": return_pred_int, "alpha": alpha}
58
59 # all values are out-of-sample
60 if fh.is_all_out_of_sample(self.cutoff):
61 return self._predict_fixed_cutoff(fh_oos, **kwargs)
62
63 # all values are in-sample
64 elif fh.is_all_in_sample(self.cutoff):
65 return self._predict_in_sample(fh_ins, **kwargs)
66
67 # both in-sample and out-of-sample values
68 else:
69 y_ins = self._predict_in_sample(fh_ins, **kwargs)
70 y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)
71 return y_ins.append(y_oos)
72
73 def _predict_in_sample(
74 self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
75 ):
76 if isinstance(alpha, (list, tuple)):
77 raise NotImplementedError("multiple `alpha` values are not yet supported")
78
79 # for in-sample predictions, pmdarima requires zero-based
80 # integer indicies
81 start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
82 result = self._forecaster.predict_in_sample(
83 start=start,
84 end=end,
85 X=X,
86 return_conf_int=return_pred_int,
87 alpha=alpha,
88 )
89
90 fh_abs = fh.to_absolute(self.cutoff)
91 fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)
92 if return_pred_int:
93 # unpack and format results
94 y_pred, pred_int = result
95 y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)
96 pred_int = pd.DataFrame(
97 pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"]
98 )
99 return y_pred, pred_int
100
101 else:
102 return pd.Series(result[fh_idx], index=fh_abs)
103
104 def _predict_fixed_cutoff(
105 self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
106 ):
107 # make prediction
108 n_periods = int(fh.to_relative(self.cutoff)[-1])
109 result = self._forecaster.predict(
110 n_periods=n_periods,
111 X=X,
112 return_conf_int=return_pred_int,
113 alpha=alpha,
114 )
115
116 fh_abs = fh.to_absolute(self.cutoff)
117 fh_idx = fh.to_indexer(self.cutoff)
118 if return_pred_int:
119 y_pred, pred_int = result
120 y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)
121 pred_int = pd.DataFrame(
122 pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"]
123 )
124 return y_pred, pred_int
125 else:
126 return pd.Series(result[fh_idx], index=fh_abs)
127
128 def get_fitted_params(self):
129 """Get fitted parameters.
130
131 Returns
132 -------
133 fitted_params : dict
134 """
135 self.check_is_fitted()
136 names = self._get_fitted_param_names()
137 params = self._get_fitted_params()
138 fitted_params = {name: param for name, param in zip(names, params)}
139
140 if hasattr(self._forecaster, "model_"): # AutoARIMA
141 res = self._forecaster.model_.arima_res_
142 elif hasattr(self._forecaster, "arima_res_"): # ARIMA
143 res = self._forecaster.arima_res_
144 else:
145 res = None
146
147 for name in ["aic", "aicc", "bic", "hqic"]:
148 fitted_params[name] = getattr(res, name, None)
149
150 return fitted_params
151
152 def _get_fitted_params(self):
153 # Return parameter values under `arima_res_`
154 if hasattr(self._forecaster, "model_"): # AutoARIMA
155 return self._forecaster.model_.arima_res_._results.params
156 elif hasattr(self._forecaster, "arima_res_"): # ARIMA
157 return self._forecaster.arima_res_._results.params
158 else:
159 raise NotImplementedError()
160
161 def _get_fitted_param_names(self):
162 # Return parameter names under `arima_res_`
163 if hasattr(self._forecaster, "model_"): # AutoARIMA
164 return self._forecaster.model_.arima_res_._results.param_names
165 elif hasattr(self._forecaster, "arima_res_"): # ARIMA
166 return self._forecaster.arima_res_._results.param_names
167 else:
168 raise NotImplementedError()
169
170 def summary(self):
171 """Summary of the fitted model."""
172 return self._forecaster.summary()
173
[end of sktime/forecasting/base/adapters/_pmdarima.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sktime/forecasting/base/adapters/_pmdarima.py b/sktime/forecasting/base/adapters/_pmdarima.py
--- a/sktime/forecasting/base/adapters/_pmdarima.py
+++ b/sktime/forecasting/base/adapters/_pmdarima.py
@@ -7,7 +7,6 @@
__all__ = ["_PmdArimaAdapter"]
import pandas as pd
-
from sktime.forecasting.base._base import DEFAULT_ALPHA
from sktime.forecasting.base import BaseForecaster
@@ -66,9 +65,18 @@
# both in-sample and out-of-sample values
else:
- y_ins = self._predict_in_sample(fh_ins, **kwargs)
- y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)
- return y_ins.append(y_oos)
+ if return_pred_int:
+ y_ins_pred, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs)
+ y_oos_pred, y_oos_pred_int = self._predict_fixed_cutoff(
+ fh_oos, **kwargs
+ )
+ return y_ins_pred.append(y_oos_pred), y_ins_pred_int.append(
+ y_oos_pred_int
+ )
+ else:
+ y_ins = self._predict_in_sample(fh_ins, **kwargs)
+ y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)
+ return y_ins.append(y_oos)
def _predict_in_sample(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
@@ -76,9 +84,34 @@
if isinstance(alpha, (list, tuple)):
raise NotImplementedError("multiple `alpha` values are not yet supported")
- # for in-sample predictions, pmdarima requires zero-based
- # integer indicies
+ if hasattr(self, "order"):
+ diff_order = self.order[1]
+ else:
+ diff_order = self._forecaster.model_.order[1]
+
+ # Initialize return objects
+ fh_abs = fh.to_absolute(self.cutoff).to_numpy()
+ fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)
+ y_pred = pd.Series(index=fh_abs)
+ pred_int = pd.DataFrame(index=fh_abs, columns=["lower", "upper"])
+
+ # for in-sample predictions, pmdarima requires zero-based integer indicies
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
+ if start < 0:
+ # Can't forecasts earlier to train starting point
+ raise ValueError("Can't make predictions earlier to train starting point")
+ elif start < diff_order:
+ # Can't forecasts earlier to arima's differencing order
+ # But we return NaN for these supposedly forecastable points
+ start = diff_order
+ if end < start:
+ # since we might have forced `start` to surpass `end`
+ end = diff_order
+ # get rid of unforcastable points
+ fh_abs = fh_abs[fh_idx >= diff_order]
+ # reindex accordingly
+ fh_idx = fh_idx[fh_idx >= diff_order] - diff_order
+
result = self._forecaster.predict_in_sample(
start=start,
end=end,
@@ -87,19 +120,16 @@
alpha=alpha,
)
- fh_abs = fh.to_absolute(self.cutoff)
- fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)
if return_pred_int:
- # unpack and format results
- y_pred, pred_int = result
- y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)
- pred_int = pd.DataFrame(
- pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"]
+ # unpack results
+ y_pred.loc[fh_abs], pred_int.loc[fh_abs] = (
+ result[0][fh_idx],
+ result[1][fh_idx, :],
)
return y_pred, pred_int
-
else:
- return pd.Series(result[fh_idx], index=fh_abs)
+ y_pred.loc[fh_abs] = result[fh_idx]
+ return y_pred
def _predict_fixed_cutoff(
self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
|
{"golden_diff": "diff --git a/sktime/forecasting/base/adapters/_pmdarima.py b/sktime/forecasting/base/adapters/_pmdarima.py\n--- a/sktime/forecasting/base/adapters/_pmdarima.py\n+++ b/sktime/forecasting/base/adapters/_pmdarima.py\n@@ -7,7 +7,6 @@\n __all__ = [\"_PmdArimaAdapter\"]\n \n import pandas as pd\n-\n from sktime.forecasting.base._base import DEFAULT_ALPHA\n from sktime.forecasting.base import BaseForecaster\n \n@@ -66,9 +65,18 @@\n \n # both in-sample and out-of-sample values\n else:\n- y_ins = self._predict_in_sample(fh_ins, **kwargs)\n- y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n- return y_ins.append(y_oos)\n+ if return_pred_int:\n+ y_ins_pred, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs)\n+ y_oos_pred, y_oos_pred_int = self._predict_fixed_cutoff(\n+ fh_oos, **kwargs\n+ )\n+ return y_ins_pred.append(y_oos_pred), y_ins_pred_int.append(\n+ y_oos_pred_int\n+ )\n+ else:\n+ y_ins = self._predict_in_sample(fh_ins, **kwargs)\n+ y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n+ return y_ins.append(y_oos)\n \n def _predict_in_sample(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n@@ -76,9 +84,34 @@\n if isinstance(alpha, (list, tuple)):\n raise NotImplementedError(\"multiple `alpha` values are not yet supported\")\n \n- # for in-sample predictions, pmdarima requires zero-based\n- # integer indicies\n+ if hasattr(self, \"order\"):\n+ diff_order = self.order[1]\n+ else:\n+ diff_order = self._forecaster.model_.order[1]\n+\n+ # Initialize return objects\n+ fh_abs = fh.to_absolute(self.cutoff).to_numpy()\n+ fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n+ y_pred = pd.Series(index=fh_abs)\n+ pred_int = pd.DataFrame(index=fh_abs, columns=[\"lower\", \"upper\"])\n+\n+ # for in-sample predictions, pmdarima requires zero-based integer indicies\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n+ if start < 0:\n+ # Can't forecasts earlier to train starting point\n+ raise ValueError(\"Can't make predictions earlier to train starting point\")\n+ elif start < diff_order:\n+ # Can't forecasts earlier to arima's differencing order\n+ # But we return NaN for these supposedly forecastable points\n+ start = diff_order\n+ if end < start:\n+ # since we might have forced `start` to surpass `end`\n+ end = diff_order\n+ # get rid of unforcastable points\n+ fh_abs = fh_abs[fh_idx >= diff_order]\n+ # reindex accordingly\n+ fh_idx = fh_idx[fh_idx >= diff_order] - diff_order\n+\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n@@ -87,19 +120,16 @@\n alpha=alpha,\n )\n \n- fh_abs = fh.to_absolute(self.cutoff)\n- fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n if return_pred_int:\n- # unpack and format results\n- y_pred, pred_int = result\n- y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n- pred_int = pd.DataFrame(\n- pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n+ # unpack results\n+ y_pred.loc[fh_abs], pred_int.loc[fh_abs] = (\n+ result[0][fh_idx],\n+ result[1][fh_idx, :],\n )\n return y_pred, pred_int\n-\n else:\n- return pd.Series(result[fh_idx], index=fh_abs)\n+ y_pred.loc[fh_abs] = result[fh_idx]\n+ return y_pred\n \n def _predict_fixed_cutoff(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n", "issue": "ARIMA and AutoARIMA should return pandas.NA when value cannot be predicted, e.g., first index in-sample for start=0, d=1\nUpdate 2021-09-15 - re-opened since a recurring issue.\r\n\r\nNot a bug, but an interface convention - currently ARIMA, AutoARIMA and 
other learners fail when asked for a prediction that is undefined, e.g., difference parameter d=1 and first in-sample index (start=0).\r\n\r\nIt would be more sensible if `pandas.NA` is returned instead for such indices, perhaps a warning should be raised too that `pandas.NA` are produced.\r\n\r\nIn first instance, we should update ARIMA and AutoARIMA with this convention.\r\n\r\n---\r\n\r\n**Describe the bug**\r\n\r\nWhen using `Imputer` with `forecaster = AutoARIMA()` the error \"ValueError: In-sample predictions undefined for start=0 when d=1\" is raised.\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-11-2d05f4822fd6> in <module>()\r\n 4 \r\n 5 transformer = Imputer(method=\"forecaster\", forecaster=AutoARIMA()).fit(Z=df.value)\r\n----> 6 y_hat = transformer.transform(Z=df.value)\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py in transform(self, Z, X)\r\n 102 z_pred = forecaster.fit(\r\n 103 z.fillna(method=\"ffill\").fillna(method=\"backfill\")\r\n--> 104 ).predict(fh=fh_ins)\r\n 105 # fill with trend values\r\n 106 z = z.fillna(value=z_pred)\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/_sktime.py in predict(self, fh, X, return_pred_int, alpha)\r\n 226 self.check_is_fitted()\r\n 227 self._set_fh(fh)\r\n--> 228 return self._predict(self.fh, X, return_pred_int=return_pred_int, alpha=alpha)\r\n 229 \r\n 230 def compute_pred_int(self, y_pred, alpha=DEFAULT_ALPHA):\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict(self, fh, X, return_pred_int, alpha)\r\n 58 # all values are in-sample\r\n 59 elif fh.is_all_in_sample(self.cutoff):\r\n---> 60 return self._predict_in_sample(fh_ins, **kwargs)\r\n 61 \r\n 62 # both in-sample and out-of-sample values\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict_in_sample(self, fh, X, return_pred_int, alpha)\r\n 80 X=X,\r\n 81 return_conf_int=return_pred_int,\r\n---> 82 alpha=alpha,\r\n 83 )\r\n 84 \r\n\r\n/usr/local/lib/python3.7/dist-packages/pmdarima/utils/metaestimators.py in <lambda>(*args, **kwargs)\r\n 51 \r\n 52 # lambda, but not partial, allows help() to work with update_wrapper\r\n---> 53 out = (lambda *args, **kwargs: self.fn(obj, *args, **kwargs))\r\n 54 # update the docstring of the returned function\r\n 55 update_wrapper(out, self.fn)\r\n\r\n/usr/local/lib/python3.7/dist-packages/pmdarima/arima/auto.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, typ, **kwargs)\r\n 242 return_conf_int=return_conf_int,\r\n 243 alpha=alpha,\r\n--> 244 typ=typ,\r\n 245 )\r\n 246 \r\n\r\n/usr/local/lib/python3.7/dist-packages/pmdarima/arima/arima.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, **kwargs)\r\n 584 if start is not None and start < d:\r\n 585 raise ValueError(\"In-sample predictions undefined for start={0} \"\r\n--> 586 \"when d={1}\".format(start, d))\r\n 587 \r\n 588 # if we fit with exog, make sure one was passed:\r\n\r\nValueError: In-sample predictions undefined for start=0 when d=1\r\n```\r\n\r\n**To Reproduce**\r\n\r\nData:\r\n[df.csv](https://github.com/alan-turing-institute/sktime/files/6426660/df.csv) \r\n\r\n```python\r\nimport pandas as pd\r\nfrom sktime.transformations.series.impute import Imputer\r\nfrom sktime.forecasting.arima import AutoARIMA\r\n\r\ndf = pd.read_csv('df.csv') \r\ndf['timestamp'] = 
pd.DatetimeIndex(df['timestamp']).to_period('D')\r\ndf = df.set_index('timestamp')\r\n\r\ntransformer = Imputer(method=\"forecaster\", forecaster=AutoARIMA()).fit(Z=df.value)\r\ny_hat = transformer.transform(Z=df.value)\r\n```\r\n\r\n**Expected behavior**\r\n\r\n`transformer.transform(Z)` should return `Z` with NAn values replaced by AutoArima predictions.\r\n\r\n**Additional context**\r\n\r\nI think the problem is in `/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py` in `transform(self, Z, X)`, line 100, because all the points are being passed to `fh_ins` instead of just the points with NaN.\r\n\r\n```python\r\n100 fh_ins = -np.arange(len(z))\r\n101 # fill NaN before fitting with ffill and backfill (heuristic)\r\n102 z_pred = forecaster.fit(\r\n103 z.fillna(method=\"ffill\").fillna(method=\"backfill\")\r\n104 ).predict(fh=fh_ins)\r\n105 # fill with trend values\r\n106 z = z.fillna(value=z_pred)\r\n\r\n```\r\n\r\n**Versions**\r\n<details>\r\n\r\n```\r\nSystem:\r\n python: 3.7.10 (default, Feb 20 2021, 21:17:23) [GCC 7.5.0]\r\nexecutable: /usr/bin/python3\r\n machine: Linux-4.19.112+-x86_64-with-Ubuntu-18.04-bionic\r\n\r\nPython dependencies:\r\n pip: 19.3.1\r\n setuptools: 56.0.0\r\n sklearn: 0.24.2\r\n sktime: 0.6.0\r\n statsmodels: 0.12.2\r\n numpy: 1.19.5\r\n scipy: 1.4.1\r\n Cython: 0.29.22\r\n pandas: 1.1.5\r\n matplotlib: 3.2.2\r\n joblib: 1.0.1\r\n numba: 0.51.2\r\n pmdarima: 1.8.2\r\n tsfresh: None\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for pmdarima forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"Markus L\u00f6ning\", \"Hongyi Yang\"]\n__all__ = [\"_PmdArimaAdapter\"]\n\nimport pandas as pd\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base import BaseForecaster\n\n\nclass _PmdArimaAdapter(BaseForecaster):\n \"\"\"Base class for interfacing pmdarima.\"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n }\n\n def __init__(self):\n self._forecaster = None\n super(_PmdArimaAdapter, self).__init__()\n\n def _instantiate_model(self):\n raise NotImplementedError(\"abstract method\")\n\n def _fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list, np.array or ForecastingHorizon, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._forecaster = self._instantiate_model()\n self._forecaster.fit(y, X=X, **fit_params)\n return self\n\n def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n # distinguish between in-sample and out-of-sample prediction\n fh_oos = fh.to_out_of_sample(self.cutoff)\n fh_ins = fh.to_in_sample(self.cutoff)\n\n kwargs = {\"X\": X, \"return_pred_int\": return_pred_int, \"alpha\": alpha}\n\n # all values are out-of-sample\n if fh.is_all_out_of_sample(self.cutoff):\n return self._predict_fixed_cutoff(fh_oos, **kwargs)\n\n # all values are in-sample\n elif fh.is_all_in_sample(self.cutoff):\n return self._predict_in_sample(fh_ins, **kwargs)\n\n # both in-sample and 
out-of-sample values\n else:\n y_ins = self._predict_in_sample(fh_ins, **kwargs)\n y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n return y_ins.append(y_oos)\n\n def _predict_in_sample(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n if isinstance(alpha, (list, tuple)):\n raise NotImplementedError(\"multiple `alpha` values are not yet supported\")\n\n # for in-sample predictions, pmdarima requires zero-based\n # integer indicies\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n X=X,\n return_conf_int=return_pred_int,\n alpha=alpha,\n )\n\n fh_abs = fh.to_absolute(self.cutoff)\n fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n if return_pred_int:\n # unpack and format results\n y_pred, pred_int = result\n y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n pred_int = pd.DataFrame(\n pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n )\n return y_pred, pred_int\n\n else:\n return pd.Series(result[fh_idx], index=fh_abs)\n\n def _predict_fixed_cutoff(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n # make prediction\n n_periods = int(fh.to_relative(self.cutoff)[-1])\n result = self._forecaster.predict(\n n_periods=n_periods,\n X=X,\n return_conf_int=return_pred_int,\n alpha=alpha,\n )\n\n fh_abs = fh.to_absolute(self.cutoff)\n fh_idx = fh.to_indexer(self.cutoff)\n if return_pred_int:\n y_pred, pred_int = result\n y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n pred_int = pd.DataFrame(\n pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n )\n return y_pred, pred_int\n else:\n return pd.Series(result[fh_idx], index=fh_abs)\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n self.check_is_fitted()\n names = self._get_fitted_param_names()\n params = self._get_fitted_params()\n fitted_params = {name: param for name, param in zip(names, params)}\n\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n res = self._forecaster.model_.arima_res_\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n res = self._forecaster.arima_res_\n else:\n res = None\n\n for name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(res, name, None)\n\n return fitted_params\n\n def _get_fitted_params(self):\n # Return parameter values under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.params\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.params\n else:\n raise NotImplementedError()\n\n def _get_fitted_param_names(self):\n # Return parameter names under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.param_names\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.param_names\n else:\n raise NotImplementedError()\n\n def summary(self):\n \"\"\"Summary of the fitted model.\"\"\"\n return self._forecaster.summary()\n", "path": "sktime/forecasting/base/adapters/_pmdarima.py"}]}
| 4,012 | 998 |
gh_patches_debug_22494
|
rasdani/github-patches
|
git_diff
|
tobymao__sqlglot-3129
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Athena Iceberg Tables parsing issue
Hi,
I want to parse a SQL Statement that creates an Iceberg table on Athena:
```sql
create table if not exists tmp.mytable (
name string
)
location 's3://bucket/tmp/mytable/'
tblproperties (
'table_type'='iceberg',
'format'='parquet'
);
```
running
```python
stmts = sqlglot.parse(sql, read=sqlglot.Dialects.ATHENA)
stmts[0].sql()
```
returns:
```sql
CREATE TABLE IF NOT EXISTS tmp.mytable
(name TEXT)
LOCATION 's3://bucket/tmp/mytable/'
WITH (
table_type='iceberg',
FORMAT='parquet'
)
```
Unfortunately, the syntax in Athena is different for Iceberg Tables and Hive-style tables.
The parsed statement should look like this:
```sql
CREATE TABLE IF NOT EXISTS tmp.mytable
(name STRING)
LOCATION 's3://bucket/tmp/mytable/'
TBLPROPERTIES (
'table_type'='iceberg',
'FORMAT'='parquet'
)
```
Instead of WITH -> TBLPROPERTIES
The keys in the this block are wrapped in upper quotes and iceberg has slightly different data types. In this case STRING instead of TEXT
https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-supported-data-types.html
https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html
</issue>
<code>
[start of sqlglot/dialects/athena.py]
1 from __future__ import annotations
2
3 from sqlglot.dialects.trino import Trino
4 from sqlglot.tokens import TokenType
5
6
7 class Athena(Trino):
8 class Parser(Trino.Parser):
9 STATEMENT_PARSERS = {
10 **Trino.Parser.STATEMENT_PARSERS,
11 TokenType.USING: lambda self: self._parse_as_command(self._prev),
12 }
13
[end of sqlglot/dialects/athena.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py
--- a/sqlglot/dialects/athena.py
+++ b/sqlglot/dialects/athena.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+from sqlglot import exp
from sqlglot.dialects.trino import Trino
from sqlglot.tokens import TokenType
@@ -10,3 +11,27 @@
**Trino.Parser.STATEMENT_PARSERS,
TokenType.USING: lambda self: self._parse_as_command(self._prev),
}
+
+ class Generator(Trino.Generator):
+ PROPERTIES_LOCATION = {
+ **Trino.Generator.PROPERTIES_LOCATION,
+ exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,
+ }
+
+ TYPE_MAPPING = {
+ **Trino.Generator.TYPE_MAPPING,
+ exp.DataType.Type.TEXT: "STRING",
+ }
+
+ TRANSFORMS = {
+ **Trino.Generator.TRANSFORMS,
+ exp.FileFormatProperty: lambda self, e: f"'FORMAT'={self.sql(e, 'this')}",
+ }
+
+ def property_sql(self, expression: exp.Property) -> str:
+ return (
+ f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}"
+ )
+
+ def with_properties(self, properties: exp.Properties) -> str:
+ return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
|
{"golden_diff": "diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py\n--- a/sqlglot/dialects/athena.py\n+++ b/sqlglot/dialects/athena.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+from sqlglot import exp\n from sqlglot.dialects.trino import Trino\n from sqlglot.tokens import TokenType\n \n@@ -10,3 +11,27 @@\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n+\n+ class Generator(Trino.Generator):\n+ PROPERTIES_LOCATION = {\n+ **Trino.Generator.PROPERTIES_LOCATION,\n+ exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,\n+ }\n+\n+ TYPE_MAPPING = {\n+ **Trino.Generator.TYPE_MAPPING,\n+ exp.DataType.Type.TEXT: \"STRING\",\n+ }\n+\n+ TRANSFORMS = {\n+ **Trino.Generator.TRANSFORMS,\n+ exp.FileFormatProperty: lambda self, e: f\"'FORMAT'={self.sql(e, 'this')}\",\n+ }\n+\n+ def property_sql(self, expression: exp.Property) -> str:\n+ return (\n+ f\"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}\"\n+ )\n+\n+ def with_properties(self, properties: exp.Properties) -> str:\n+ return self.properties(properties, prefix=self.seg(\"TBLPROPERTIES\"))\n", "issue": "Athena Iceberg Tables parsing issue\nHi,\r\nI want to parse a SQL Statement that creates an Iceberg table on Athena:\r\n\r\n```sql\r\ncreate table if not exists tmp.mytable (\r\n name string\r\n)\r\nlocation 's3://bucket/tmp/mytable/'\r\ntblproperties (\r\n 'table_type'='iceberg',\r\n 'format'='parquet'\r\n);\r\n```\r\nrunning \r\n```python\r\nstmts = sqlglot.parse(sql, read=sqlglot.Dialects.ATHENA)\r\nstmts[0].sql()\r\n```\r\nreturns:\r\n```sql\r\nCREATE TABLE IF NOT EXISTS tmp.mytable \r\n (name TEXT) \r\nLOCATION 's3://bucket/tmp/mytable/' \r\nWITH (\r\n table_type='iceberg', \r\n FORMAT='parquet'\r\n)\r\n```\r\n\r\nUnfortunately, the syntax in Athena is different for Iceberg Tables and Hive-style tables.\r\n\r\nThe parsed statement should look like this:\r\n\r\n```sql\r\nCREATE TABLE IF NOT EXISTS tmp.mytable \r\n (name STRING) \r\nLOCATION 's3://bucket/tmp/mytable/' \r\nTBLPROPERTIES (\r\n 'table_type'='iceberg', \r\n 'FORMAT'='parquet'\r\n)\r\n```\r\n\r\nInstead of WITH -> TBLPROPERTIES\r\nThe keys in the this block are wrapped in upper quotes and iceberg has slightly different data types. In this case STRING instead of TEXT\r\n\r\nhttps://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-supported-data-types.html\r\nhttps://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot.dialects.trino import Trino\nfrom sqlglot.tokens import TokenType\n\n\nclass Athena(Trino):\n class Parser(Trino.Parser):\n STATEMENT_PARSERS = {\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n", "path": "sqlglot/dialects/athena.py"}]}
| 971 | 341 |
gh_patches_debug_25411
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-338
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add README to PyPI
# Description
At the moment we have no README for the [PyPI page](https://pypi.org/project/pyhf/0.0.15/). The addition of one would be a nice touch (even though I assume that most users will discover the project through GitHub).
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup, find_packages
4
5 extras_require = {
6 'tensorflow': [
7 'tensorflow>=1.10.0',
8 'tensorflow-probability==0.3.0',
9 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass
10 'setuptools<=39.1.0',
11 ],
12 'torch': ['torch>=0.4.0'],
13 'mxnet': [
14 'mxnet>=1.0.0',
15 'requests<2.19.0,>=2.18.4',
16 'numpy<1.15.0,>=1.8.2',
17 'requests<2.19.0,>=2.18.4',
18 ],
19 # 'dask': [
20 # 'dask[array]'
21 # ],
22 'xmlimport': ['uproot'],
23 'minuit': ['iminuit'],
24 'develop': [
25 'pyflakes',
26 'pytest>=3.5.1',
27 'pytest-cov>=2.5.1',
28 'pytest-benchmark[histogram]',
29 'pytest-console-scripts',
30 'python-coveralls',
31 'coverage>=4.0', # coveralls
32 'matplotlib',
33 'jupyter',
34 'nbdime',
35 'uproot>=3.0.0',
36 'papermill',
37 'graphviz',
38 'bumpversion',
39 'sphinx',
40 'sphinxcontrib-bibtex',
41 'sphinxcontrib-napoleon',
42 'sphinx_rtd_theme',
43 'nbsphinx',
44 'm2r',
45 'jsonpatch',
46 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
47 'pre-commit',
48 'black;python_version>="3.6"', # Black is Python3 only
49 ],
50 }
51 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
52
53 setup(
54 name='pyhf',
55 version='0.0.15',
56 description='(partial) pure python histfactory implementation',
57 url='https://github.com/diana-hep/pyhf',
58 author='Lukas Heinrich',
59 author_email='[email protected]',
60 license='Apache',
61 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',
62 classifiers=[
63 "Programming Language :: Python :: 2",
64 "Programming Language :: Python :: 2.7",
65 "Programming Language :: Python :: 3",
66 "Programming Language :: Python :: 3.6",
67 ],
68 packages=find_packages(),
69 include_package_data=True,
70 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
71 install_requires=[
72 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet
73 'click>=6.0', # for console scripts,
74 'tqdm', # for readxml
75 'six', # for modifiers
76 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
77 'jsonpatch',
78 ],
79 extras_require=extras_require,
80 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
81 dependency_links=[],
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,11 @@
#!/usr/bin/env python
from setuptools import setup, find_packages
+from os import path
+
+this_directory = path.abspath(path.dirname(__file__))
+with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
+ long_description = readme_md.read()
extras_require = {
'tensorflow': [
@@ -46,6 +51,7 @@
'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now
'pre-commit',
'black;python_version>="3.6"', # Black is Python3 only
+ 'twine',
],
}
extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
@@ -54,6 +60,8 @@
name='pyhf',
version='0.0.15',
description='(partial) pure python histfactory implementation',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
url='https://github.com/diana-hep/pyhf',
author='Lukas Heinrich',
author_email='[email protected]',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,11 @@\n #!/usr/bin/env python\n \n from setuptools import setup, find_packages\n+from os import path\n+\n+this_directory = path.abspath(path.dirname(__file__))\n+with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n+ long_description = readme_md.read()\n \n extras_require = {\n 'tensorflow': [\n@@ -46,6 +51,7 @@\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n+ 'twine',\n ],\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n@@ -54,6 +60,8 @@\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n", "issue": "Add README to PyPI\n# Description\r\n\r\nAt the moment we have no README for the [PyPI page](https://pypi.org/project/pyhf/0.0.15/). The addition of one would be a nice touch (even though I assume that most users will discover the project through GitHub).\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': 
['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
| 1,525 | 289 |
gh_patches_debug_27906
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2735
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Brainstorming for 1.10 / 2.0 roadmap
# Description
Now that 1.9 has been released for some time, it is time to discuss what we want to work on for the next release.
- Release date?
- Scope 1.10 (normal release) or 2.0 (release with breaking changes)?
- What do we want to work on?
## Ideas
Ideas from the last meeting:
- introduce snappymail.
- rootless containers
- probably requires init container
- replacement of Socrates. ghostwheel42 will work on this (if he finds time).
- A security/keys page for Mailu admin interface.
- On this page you can configure/change/generate keys for all your Mail domains.
- DNS overview
- Have some UI that tells you if DNS is correctly configured.
- Have a look at BuildX
- means we can build for multiple platforms including ARM
- means we remove the build arm script
- means CI will be changed massively.
 - For ARM, we could maybe build once per week to make sure the build time for normal builds is not too long.
- autoconfiguration for email clients?
- automx: https://rseichter.github.io/automx2/?
- integrate whatever we choose with mailu (part of mailu)?
 - also a good time to drop starttls for imap/pop/smtp (sending mail).
- So only support direct SSL/TLS
 - Could be done via an environment variable. When not configured, starttls is supported. If configured (default for new deployments), it is disabled.
- Another idea is to disable starttls, and report a custom error (when you use explicit starttls) that you must switch to implicit SSL/TLS port 465.
- Full 2 factor authentication with XOAUTH2
- Too large in scope for this release. But preparations could be made.
 - Means we need autoconfiguration. Otherwise the email client will not use XOAUTH2.
- means using single sign on via identity provider (which mailu could be as well). This opens the door to use other identity providers in the future.
Feel free to suggest your own ideas
## Misc
For small non-critical issues/features I suggest we do not put them on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want them included.
What are your thoughts? Please share your feedback.
Regardless the above wall of text, feel free to mention any feature/issue you would like included in the next release.
## My 2 cents
For release date we could maybe aim around August/September?
We are all very busy with our personal life now. This should give enough time to implement new features. Of course a decided date is not set in stone. It could still be moved forward/backwards.
https://github.com/Mailu/Mailu/blob/master/design/mailu-directory-structure.md
This is what I was thinking about for the scope. Changing the directory structure must be part of a major release with breaking changes. Do we want to make this change for this release or postpone this?
Personally I'd like to check
- switching to SnappyMail
- rootless containers
- BuildX. At least investigate what changes are required.
Feel free to suggest your own ideas.
</issue>
<code>
[start of docs/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4
5 import os
6
7 extensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme']
8 templates_path = ['_templates']
9 source_suffix = '.rst'
10 master_doc = 'index'
11 project = 'Mailu'
12 copyright = '2018, Mailu authors'
13 author = 'Mailu authors'
14 version = release = os.environ.get('VERSION', 'master')
15 language = 'en'
16 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml']
17 pygments_style = 'sphinx'
18 todo_include_todos = False
19 html_theme = 'sphinx_rtd_theme'
20 html_title = 'Mailu, Docker based mail server'
21 html_static_path = []
22 htmlhelp_basename = 'Mailudoc'
23
24 # Custom sidebar templates, must be a dictionary that maps document names
25 # to template names.
26 html_sidebars = {
27 '**': [
28 'relations.html',
29 'searchbox.html',
30 ]
31 }
32
33 # Theme options
34 html_context = {
35 'display_github': True,
36 'github_user': 'mailu',
37 'github_repo': 'mailu',
38 'github_version': version,
39 'stable_version': '1.9',
40 'versions': [
41 ('1.8', '/1.8/'),
42 ('1.9', '/1.9/'),
43 ('master', '/master/')
44 ],
45 'conf_py_path': '/docs/'
46 }
47
[end of docs/conf.py]
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 import ipaddress
5
6 DEFAULT_CONFIG = {
7 # Specific to the admin UI
8 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
9 'BABEL_DEFAULT_LOCALE': 'en',
10 'BABEL_DEFAULT_TIMEZONE': 'UTC',
11 'BOOTSTRAP_SERVE_LOCAL': True,
12 'RATELIMIT_STORAGE_URL': '',
13 'DEBUG': False,
14 'DEBUG_PROFILER': False,
15 'DEBUG_TB_INTERCEPT_REDIRECTS': False,
16 'DEBUG_ASSETS': '',
17 'DOMAIN_REGISTRATION': False,
18 'TEMPLATES_AUTO_RELOAD': True,
19 'MEMORY_SESSIONS': False,
20 'FETCHMAIL_ENABLED': True,
21 'MAILU_VERSION': 'unknown',
22 # Database settings
23 'DB_FLAVOR': None,
24 'DB_USER': 'mailu',
25 'DB_PW': None,
26 'DB_HOST': 'database',
27 'DB_NAME': 'mailu',
28 'SQLITE_DATABASE_FILE': 'data/main.db',
29 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
30 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',
31 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
32 # Statistics management
33 'INSTANCE_ID_PATH': '/data/instance',
34 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
35 # Common configuration variables
36 'SECRET_KEY': 'changeMe',
37 'DOMAIN': 'mailu.io',
38 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
39 'POSTMASTER': 'postmaster',
40 'WILDCARD_SENDERS': '',
41 'TLS_FLAVOR': 'cert',
42 'INBOUND_TLS_ENFORCE': False,
43 'DEFER_ON_TLS_ERROR': True,
44 'AUTH_RATELIMIT_IP': '5/hour',
45 'AUTH_RATELIMIT_IP_V4_MASK': 24,
46 'AUTH_RATELIMIT_IP_V6_MASK': 48,
47 'AUTH_RATELIMIT_USER': '50/day',
48 'AUTH_RATELIMIT_EXEMPTION': '',
49 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
50 'DISABLE_STATISTICS': False,
51 # Mail settings
52 'DMARC_RUA': None,
53 'DMARC_RUF': None,
54 'WELCOME': False,
55 'WELCOME_SUBJECT': 'Dummy welcome topic',
56 'WELCOME_BODY': 'Dummy welcome body',
57 'DKIM_SELECTOR': 'dkim',
58 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
59 'DEFAULT_QUOTA': 1000000000,
60 'MESSAGE_RATELIMIT': '200/day',
61 'MESSAGE_RATELIMIT_EXEMPTION': '',
62 'RECIPIENT_DELIMITER': '',
63 # Web settings
64 'SITENAME': 'Mailu',
65 'WEBSITE': 'https://mailu.io',
66 'ADMIN': 'none',
67 'WEB_ADMIN': '/admin',
68 'WEB_WEBMAIL': '/webmail',
69 'WEBMAIL': 'none',
70 'RECAPTCHA_PUBLIC_KEY': '',
71 'RECAPTCHA_PRIVATE_KEY': '',
72 'LOGO_URL': None,
73 'LOGO_BACKGROUND': None,
74 # Advanced settings
75 'API': False,
76 'WEB_API': '/api',
77 'API_TOKEN': None,
78 'LOG_LEVEL': 'WARNING',
79 'SESSION_KEY_BITS': 128,
80 'SESSION_TIMEOUT': 3600,
81 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
82 'SESSION_COOKIE_SECURE': None,
83 'CREDENTIAL_ROUNDS': 12,
84 'TLS_PERMISSIVE': True,
85 'TZ': 'Etc/UTC',
86 'DEFAULT_SPAM_THRESHOLD': 80,
87 'PROXY_AUTH_WHITELIST': '',
88 'PROXY_AUTH_HEADER': 'X-Auth-Email',
89 'PROXY_AUTH_CREATE': False,
90 'PROXY_AUTH_LOGOUT_URL': None,
91 'SUBNET': '192.168.203.0/24',
92 'SUBNET6': None,
93 }
94
95 class ConfigManager:
96 """ Naive configuration manager that uses environment only
97 """
98
99 DB_TEMPLATES = {
100 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
101 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
102 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
103 }
104
105 def __init__(self):
106 self.config = dict()
107
108 def __get_env(self, key, value):
109 key_file = key + "_FILE"
110 if key_file in os.environ:
111 with open(os.environ.get(key_file)) as file:
112 value_from_file = file.read()
113 return value_from_file.strip()
114 else:
115 return os.environ.get(key, value)
116
117 def __coerce_value(self, value):
118 if isinstance(value, str) and value.lower() in ('true','yes'):
119 return True
120 elif isinstance(value, str) and value.lower() in ('false', 'no'):
121 return False
122 return value
123
124 def init_app(self, app):
125 # get current app config
126 self.config.update(app.config)
127 # get environment variables
128 for key in os.environ:
129 if key.endswith('_ADDRESS'):
130 self.config[key] = os.environ[key]
131
132 self.config.update({
133 key: self.__coerce_value(self.__get_env(key, value))
134 for key, value in DEFAULT_CONFIG.items()
135 })
136
137 # automatically set the sqlalchemy string
138 if self.config['DB_FLAVOR']:
139 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
140 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
141
142 if not self.config.get('RATELIMIT_STORAGE_URL'):
143 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
144
145 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
146 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
147 self.config['SESSION_COOKIE_HTTPONLY'] = True
148 if self.config['SESSION_COOKIE_SECURE'] is None:
149 self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'
150 self.config['SESSION_PERMANENT'] = True
151 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
152 self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])
153 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
154 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
155 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
156 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
157 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
158 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
159 self.config['HOSTNAMES'] = ','.join(hostnames)
160 self.config['HOSTNAME'] = hostnames[0]
161 self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])
162 self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)
163 try:
164 self.config['MAILU_VERSION'] = open('/version', 'r').read()
165 except FileNotFoundError:
166 pass
167
168 # update the app config
169 app.config.update(self.config)
170
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -31,7 +31,7 @@
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
# Statistics management
'INSTANCE_ID_PATH': '/data/instance',
- 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
+ 'STATS_ENDPOINT': '20.{}.stats.mailu.io',
# Common configuration variables
'SECRET_KEY': 'changeMe',
'DOMAIN': 'mailu.io',
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -9,7 +9,7 @@
source_suffix = '.rst'
master_doc = 'index'
project = 'Mailu'
-copyright = '2018, Mailu authors'
+copyright = '2016, Mailu authors'
author = 'Mailu authors'
version = release = os.environ.get('VERSION', 'master')
language = 'en'
@@ -25,7 +25,7 @@
# to template names.
html_sidebars = {
'**': [
- 'relations.html',
+ 'relations.html',
'searchbox.html',
]
}
@@ -36,10 +36,10 @@
'github_user': 'mailu',
'github_repo': 'mailu',
'github_version': version,
- 'stable_version': '1.9',
+ 'stable_version': '2.0',
'versions': [
- ('1.8', '/1.8/'),
('1.9', '/1.9/'),
+ ('2.0', '/2.0/'),
('master', '/master/')
],
'conf_py_path': '/docs/'
|
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -31,7 +31,7 @@\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n- 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n+ 'STATS_ENDPOINT': '20.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\ndiff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -9,7 +9,7 @@\n source_suffix = '.rst'\n master_doc = 'index'\n project = 'Mailu'\n-copyright = '2018, Mailu authors'\n+copyright = '2016, Mailu authors'\n author = 'Mailu authors'\n version = release = os.environ.get('VERSION', 'master')\n language = 'en'\n@@ -25,7 +25,7 @@\n # to template names.\n html_sidebars = {\n '**': [\n- 'relations.html', \n+ 'relations.html',\n 'searchbox.html',\n ]\n }\n@@ -36,10 +36,10 @@\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n- 'stable_version': '1.9',\n+ 'stable_version': '2.0',\n 'versions': [\n- ('1.8', '/1.8/'),\n ('1.9', '/1.9/'),\n+ ('2.0', '/2.0/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n", "issue": "Brainstorming for 1.10 / 2.0 roadmap\n# Description\r\nNow 1.9 has been released for some time, it is time to discuss what we want to work on for the next release.\r\n\r\n- Release date?\r\n- Scope 1.10 (normal release) or 2.0 (release with breaking changes)? \r\n- What do we want to work on?\r\n\r\n## Ideas\r\n\r\nIdeas from the last meeting:\r\n- introduce snappymail.\r\n- rootless containers\r\n - probably requires init container\r\n- replacement of Socrates. ghostwheel42 will work on this (if he finds time).\r\n- A security/keys page for Mailu admin interface. \r\n - On this page you can configure/change/generate keys for all your Mail domains.\r\n - DNS overview \r\n - Have some UI that tells you if DNS is correctly configured.\r\n- Have a look at BuildX\r\n - means we can build for multiple platforms including ARM\r\n - means we remove the build arm script\r\n - means CI will be changed massively.\r\n - For ARM, we could maybe build once per week to make sure the build time for normal builts is not too long.\r\n- autoconfiguration for email clients?\r\n - automx: https://rseichter.github.io/automx2/?\r\n - integrate whatever we choose with mailu (part of mailu)?\r\n - also good time to drop starttls for imap/pop/smtp(sending mail).\r\n - So only support direct SSL/TLS\r\n - Could be done via environment_variable. When not configured, then starttls supported. If configured (default for new deployments) then it is disabled.\r\n - Another idea is to disable starttls, and report a custom error (when you use explicit starttls) that you must switch to implicit SSL/TLS port 465.\r\n- Full 2 factor authentication with xoath2 \r\n - Too large in scope for this release. But preparations could be made.\r\n - Means we need autoconfiguration. Otherwise the email client will not use xoath2.\r\n - means using single sign on via identity provider (which mailu could be as well). This opens the door to use other identity providers in the future.\r\n\r\nFeel free to suggest your own ideas\r\n\r\n## Misc\r\nFor small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included.\r\n\r\nWhat are your thoughts? 
Please share your feedback.\r\nRegardless the above wall of text, feel free to mention any feature/issue you would like included in the next release.\r\n\r\n## My 2 cents\r\nFor release date we could maybe aim around August/September?\r\nWe are all very busy with our personal life now. This should give enough time to implement new features. Of course a decided date is not set in stone. It could still be moved forward/backwards. \r\n\r\nhttps://github.com/Mailu/Mailu/blob/master/design/mailu-directory-structure.md\r\nThis is what I was thinking about for the scope. Changing the directory structure must be part of a major release with breaking changes. Do we want to make this change for this release or postpone this?\r\n\r\nPersonally I'd like to check \r\n - switching to SnappyMail \r\n - rootless containers\r\n - BuildX. At least investigate what changes are required.\r\n\r\nFeel free to suggest your own ideas. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n\nimport os\n\nextensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = 'Mailu'\ncopyright = '2018, Mailu authors'\nauthor = 'Mailu authors'\nversion = release = os.environ.get('VERSION', 'master')\nlanguage = 'en'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml']\npygments_style = 'sphinx'\ntodo_include_todos = False\nhtml_theme = 'sphinx_rtd_theme'\nhtml_title = 'Mailu, Docker based mail server'\nhtml_static_path = []\nhtmlhelp_basename = 'Mailudoc'\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\nhtml_sidebars = {\n '**': [\n 'relations.html', \n 'searchbox.html',\n ]\n}\n\n# Theme options\nhtml_context = {\n 'display_github': True,\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n 'stable_version': '1.9',\n 'versions': [\n ('1.8', '/1.8/'),\n ('1.9', '/1.9/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n}\n", "path": "docs/conf.py"}, {"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '5/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 48,\n 
'AUTH_RATELIMIT_USER': '50/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 'Etc/UTC',\n 'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'PROXY_AUTH_LOGOUT_URL': None,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n 
self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n try:\n self.config['MAILU_VERSION'] = open('/version', 'r').read()\n except FileNotFoundError:\n pass\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}]}
| 3,832 | 405 |
gh_patches_debug_42306
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1600
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect language set on Provider during locale fallback
* Faker version: 11.3.0
When faker is initialised with a locale not implemented by some of the built-in providers, it falls back to the DEFAULT_LOCALE for these providers. However, the language set on the provider’s instance in `__lang__` is still the requested locale and not the locale in effect.
This is due to `Factory._find_provider_class` not returning the locale it chose (see https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L102-L104) to `Factory._get_provider_class`, which then proceeds to return the locale value as it was passed in in the first place. Thus, `provider.__lang__` does not contain the found locale (as the variable name `lang_found` would suggest): https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L61
### Expected behavior
`provider.__lang__` should be set to the actual language / locale being used by the provider.
### Actual behavior
`provider.__lang__` is set to the locale that was requested but is not offered by this provider.
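### Repro sketch
A minimal way to observe the mismatch. This snippet is not part of the original report: the chosen locale and the `get_providers()` call are assumptions — any locale from `AVAILABLE_LOCALES` that at least one localized provider does not implement will do, and the proxy is assumed to forward `get_providers()` to the underlying `Generator`.
```python
from faker import Faker

# Assumption: "en_IN" is an available Faker locale overall, but some localized
# providers ship no en_IN module and therefore fall back to DEFAULT_LOCALE.
fake = Faker("en_IN")

for provider in fake.get_providers():
    # With the bug, a provider that silently fell back still reports
    # __lang__ == "en_IN" instead of the locale that was actually loaded.
    print(provider.__provider__, provider.__lang__)
```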
</issue>
<code>
[start of faker/documentor.py]
1 import inspect
2 import warnings
3
4 from typing import Any, Dict, List, Optional, Tuple, Union
5
6 from .generator import Generator
7 from .providers import BaseProvider
8 from .proxy import Faker
9
10
11 class Documentor:
12 def __init__(self, generator: Union[Generator, Faker]) -> None:
13 """
14 :param generator: a localized Generator with providers filled,
15 for which to write the documentation
16 :type generator: faker.Generator()
17 """
18 self.generator = generator
19 self.max_name_len: int = 0
20 self.already_generated: List[str] = []
21
22 def get_formatters(
23 self,
24 locale: Optional[str] = None,
25 excludes: Optional[List[str]] = None,
26 **kwargs: Any,
27 ) -> List[Tuple[BaseProvider, Dict[str, str]]]:
28 self.max_name_len = 0
29 self.already_generated = [] if excludes is None else excludes[:]
30 formatters = []
31 providers: List[BaseProvider] = self.generator.get_providers()
32 for provider in providers[::-1]: # reverse
33 if locale and provider.__lang__ != locale:
34 continue
35 formatters.append(
36 (provider, self.get_provider_formatters(provider, **kwargs)),
37 )
38 return formatters
39
40 def get_provider_formatters(
41 self,
42 provider: BaseProvider,
43 prefix: str = "fake.",
44 with_args: bool = True,
45 with_defaults: bool = True,
46 ) -> Dict[str, str]:
47 formatters = {}
48
49 for name, method in inspect.getmembers(provider, inspect.ismethod):
50 # skip 'private' method and inherited methods
51 if name.startswith("_") or name in self.already_generated:
52 continue
53
54 arguments = []
55 faker_args: List[str] = []
56 faker_kwargs = {}
57
58 if name == "binary":
59 faker_kwargs["length"] = 1024
60 elif name in ["zip", "tar"]:
61 faker_kwargs.update(
62 {
63 "uncompressed_size": 1024,
64 "min_file_size": 512,
65 }
66 )
67
68 if with_args:
69 # retrieve all parameter
70 argspec = inspect.getfullargspec(method)
71
72 lst = [x for x in argspec.args if x not in ["self", "cls"]]
73 for i, arg in enumerate(lst):
74
75 if argspec.defaults and with_defaults:
76
77 try:
78 default = argspec.defaults[i]
79 if isinstance(default, str):
80 default = repr(default)
81 else:
82 # TODO check default type
83 default = f"{default}"
84
85 arg = f"{arg}={default}"
86
87 except IndexError:
88 pass
89
90 arguments.append(arg)
91 if with_args == "first":
92 break
93
94 if with_args != "first":
95 if argspec.varargs:
96 arguments.append("*" + argspec.varargs)
97 if argspec.varkw:
98 arguments.append("**" + argspec.varkw)
99
100 # build fake method signature
101 signature = f"{prefix}{name}({', '.join(arguments)})"
102
103 try:
104 # make a fake example
105 example = self.generator.format(name, *faker_args, **faker_kwargs)
106 except (AttributeError, ValueError) as e:
107 warnings.warn(str(e))
108 continue
109 formatters[signature] = example
110
111 self.max_name_len = max(self.max_name_len, len(signature))
112 self.already_generated.append(name)
113
114 return formatters
115
116 @staticmethod
117 def get_provider_name(provider_class: BaseProvider) -> str:
118 return provider_class.__provider__
119
[end of faker/documentor.py]
[start of faker/factory.py]
1 import locale as pylocale
2 import logging
3 import sys
4
5 from importlib import import_module
6 from typing import Any, List, Optional, Tuple
7
8 from .config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS
9 from .generator import Generator
10 from .utils.loading import list_module
11
12 logger = logging.getLogger(__name__)
13
14 # identify if python is being run in interactive mode. If so, disable logging.
15 inREPL = bool(getattr(sys, "ps1", False))
16 if inREPL:
17 logger.setLevel(logging.CRITICAL)
18 else:
19 logger.debug("Not in REPL -> leaving logger event level as is.")
20
21
22 class Factory:
23 @classmethod
24 def create(
25 cls,
26 locale: Optional[str] = None,
27 providers: Optional[List[str]] = None,
28 generator: Generator = None,
29 includes: Optional[List[str]] = None,
30 # Should we use weightings (more realistic) or weight every element equally (faster)?
31 # By default, use weightings for backwards compatibility & realism
32 use_weighting: bool = True,
33 **config: Any,
34 ) -> Generator:
35 if includes is None:
36 includes = []
37
38 # fix locale to package name
39 locale = locale.replace("-", "_") if locale else DEFAULT_LOCALE
40 locale = pylocale.normalize(locale).split(".")[0]
41 if locale not in AVAILABLE_LOCALES:
42 msg = f"Invalid configuration for faker locale `{locale}`"
43 raise AttributeError(msg)
44
45 config["locale"] = locale
46 config["use_weighting"] = use_weighting
47 providers = providers or PROVIDERS
48
49 providers += includes
50
51 faker = generator or Generator(**config)
52
53 for prov_name in providers:
54 if prov_name == "faker.providers":
55 continue
56
57 prov_cls, lang_found = cls._get_provider_class(prov_name, locale)
58 provider = prov_cls(faker)
59 provider.__use_weighting__ = use_weighting
60 provider.__provider__ = prov_name
61 provider.__lang__ = lang_found
62 faker.add_provider(provider)
63
64 return faker
65
66 @classmethod
67 def _get_provider_class(cls, provider: str, locale: Optional[str] = "") -> Tuple[Any, Optional[str]]:
68
69 provider_class = cls._find_provider_class(provider, locale)
70
71 if provider_class:
72 return provider_class, locale
73
74 if locale and locale != DEFAULT_LOCALE:
75 # fallback to default locale
76 provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)
77 if provider_class:
78 return provider_class, DEFAULT_LOCALE
79
80 # fallback to no locale
81 provider_class = cls._find_provider_class(provider)
82 if provider_class:
83 return provider_class, None
84
85 msg = f"Unable to find provider `{provider}` with locale `{locale}`"
86 raise ValueError(msg)
87
88 @classmethod
89 def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any:
90
91 provider_module = import_module(provider_path)
92
93 if getattr(provider_module, "localized", False):
94
95 logger.debug(
96 "Looking for locale `%s` in provider `%s`.",
97 locale,
98 provider_module.__name__,
99 )
100
101 available_locales = list_module(provider_module)
102 if not locale or locale not in available_locales:
103 unavailable_locale = locale
104 locale = getattr(provider_module, "default_locale", DEFAULT_LOCALE)
105 logger.debug(
106 "Specified locale `%s` is not available for "
107 "provider `%s`. Locale reset to `%s` for this "
108 "provider.",
109 unavailable_locale,
110 provider_module.__name__,
111 locale,
112 )
113 else:
114 logger.debug(
115 "Provider `%s` has been localized to `%s`.",
116 provider_module.__name__,
117 locale,
118 )
119
120 path = f"{provider_path}.{locale}"
121 provider_module = import_module(path)
122
123 else:
124
125 logger.debug(
126 "Provider `%s` does not feature localization. "
127 "Specified locale `%s` is not utilized for this "
128 "provider.",
129 provider_module.__name__,
130 locale,
131 )
132
133 if locale is not None:
134 provider_module = import_module(provider_path)
135
136 return provider_module.Provider # type: ignore
137
[end of faker/factory.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/documentor.py b/faker/documentor.py
--- a/faker/documentor.py
+++ b/faker/documentor.py
@@ -30,7 +30,7 @@
formatters = []
providers: List[BaseProvider] = self.generator.get_providers()
for provider in providers[::-1]: # reverse
- if locale and provider.__lang__ != locale:
+ if locale and provider.__lang__ and provider.__lang__ != locale:
continue
formatters.append(
(provider, self.get_provider_formatters(provider, **kwargs)),
diff --git a/faker/factory.py b/faker/factory.py
--- a/faker/factory.py
+++ b/faker/factory.py
@@ -54,7 +54,7 @@
if prov_name == "faker.providers":
continue
- prov_cls, lang_found = cls._get_provider_class(prov_name, locale)
+ prov_cls, lang_found, _ = cls._find_provider_class(prov_name, locale)
provider = prov_cls(faker)
provider.__use_weighting__ = use_weighting
provider.__provider__ = prov_name
@@ -64,31 +64,14 @@
return faker
@classmethod
- def _get_provider_class(cls, provider: str, locale: Optional[str] = "") -> Tuple[Any, Optional[str]]:
-
- provider_class = cls._find_provider_class(provider, locale)
-
- if provider_class:
- return provider_class, locale
-
- if locale and locale != DEFAULT_LOCALE:
- # fallback to default locale
- provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)
- if provider_class:
- return provider_class, DEFAULT_LOCALE
-
- # fallback to no locale
- provider_class = cls._find_provider_class(provider)
- if provider_class:
- return provider_class, None
-
- msg = f"Unable to find provider `{provider}` with locale `{locale}`"
- raise ValueError(msg)
-
- @classmethod
- def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any:
+ def _find_provider_class(
+ cls,
+ provider_path: str,
+ locale: Optional[str] = None,
+ ) -> Tuple[Any, Optional[str], Optional[str]]:
provider_module = import_module(provider_path)
+ default_locale = getattr(provider_module, "default_locale", "")
if getattr(provider_module, "localized", False):
@@ -101,7 +84,7 @@
available_locales = list_module(provider_module)
if not locale or locale not in available_locales:
unavailable_locale = locale
- locale = getattr(provider_module, "default_locale", DEFAULT_LOCALE)
+ locale = default_locale or DEFAULT_LOCALE
logger.debug(
"Specified locale `%s` is not available for "
"provider `%s`. Locale reset to `%s` for this "
@@ -122,15 +105,14 @@
else:
- logger.debug(
- "Provider `%s` does not feature localization. "
- "Specified locale `%s` is not utilized for this "
- "provider.",
- provider_module.__name__,
- locale,
- )
-
- if locale is not None:
- provider_module = import_module(provider_path)
+ if locale:
+ logger.debug(
+ "Provider `%s` does not feature localization. "
+ "Specified locale `%s` is not utilized for this "
+ "provider.",
+ provider_module.__name__,
+ locale,
+ )
+ locale = default_locale = None
- return provider_module.Provider # type: ignore
+ return provider_module.Provider, locale, default_locale # type: ignore
|
{"golden_diff": "diff --git a/faker/documentor.py b/faker/documentor.py\n--- a/faker/documentor.py\n+++ b/faker/documentor.py\n@@ -30,7 +30,7 @@\n formatters = []\n providers: List[BaseProvider] = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n- if locale and provider.__lang__ != locale:\n+ if locale and provider.__lang__ and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\ndiff --git a/faker/factory.py b/faker/factory.py\n--- a/faker/factory.py\n+++ b/faker/factory.py\n@@ -54,7 +54,7 @@\n if prov_name == \"faker.providers\":\n continue\n \n- prov_cls, lang_found = cls._get_provider_class(prov_name, locale)\n+ prov_cls, lang_found, _ = cls._find_provider_class(prov_name, locale)\n provider = prov_cls(faker)\n provider.__use_weighting__ = use_weighting\n provider.__provider__ = prov_name\n@@ -64,31 +64,14 @@\n return faker\n \n @classmethod\n- def _get_provider_class(cls, provider: str, locale: Optional[str] = \"\") -> Tuple[Any, Optional[str]]:\n-\n- provider_class = cls._find_provider_class(provider, locale)\n-\n- if provider_class:\n- return provider_class, locale\n-\n- if locale and locale != DEFAULT_LOCALE:\n- # fallback to default locale\n- provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n- if provider_class:\n- return provider_class, DEFAULT_LOCALE\n-\n- # fallback to no locale\n- provider_class = cls._find_provider_class(provider)\n- if provider_class:\n- return provider_class, None\n-\n- msg = f\"Unable to find provider `{provider}` with locale `{locale}`\"\n- raise ValueError(msg)\n-\n- @classmethod\n- def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any:\n+ def _find_provider_class(\n+ cls,\n+ provider_path: str,\n+ locale: Optional[str] = None,\n+ ) -> Tuple[Any, Optional[str], Optional[str]]:\n \n provider_module = import_module(provider_path)\n+ default_locale = getattr(provider_module, \"default_locale\", \"\")\n \n if getattr(provider_module, \"localized\", False):\n \n@@ -101,7 +84,7 @@\n available_locales = list_module(provider_module)\n if not locale or locale not in available_locales:\n unavailable_locale = locale\n- locale = getattr(provider_module, \"default_locale\", DEFAULT_LOCALE)\n+ locale = default_locale or DEFAULT_LOCALE\n logger.debug(\n \"Specified locale `%s` is not available for \"\n \"provider `%s`. Locale reset to `%s` for this \"\n@@ -122,15 +105,14 @@\n \n else:\n \n- logger.debug(\n- \"Provider `%s` does not feature localization. \"\n- \"Specified locale `%s` is not utilized for this \"\n- \"provider.\",\n- provider_module.__name__,\n- locale,\n- )\n-\n- if locale is not None:\n- provider_module = import_module(provider_path)\n+ if locale:\n+ logger.debug(\n+ \"Provider `%s` does not feature localization. \"\n+ \"Specified locale `%s` is not utilized for this \"\n+ \"provider.\",\n+ provider_module.__name__,\n+ locale,\n+ )\n+ locale = default_locale = None\n \n- return provider_module.Provider # type: ignore\n+ return provider_module.Provider, locale, default_locale # type: ignore\n", "issue": "Incorrect language set on Provider during locale fallback\n* Faker version: 11.3.0\r\n\r\nWhen faker is initialised with a locale not implemented by some of the built-in providers, it falls back to the DEFAULT_LOCALE for these providers. 
However, the language set on the provider\u2019s instance in `__lang__` is still the requested locale and not the locale in effect.\r\n\r\nThis is due to `Factory._find_provider_class` not returning the locale it chose, in: https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L102-L104 to `Factory._get_provider_class` which then proceeds to return the locale value as it was passed in at the first place. Thus, `provider.__lang__` does not contain the found locale (as the variable name `lang_found` would suggest): https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L61\r\n\r\n### Expected behavior\r\n\r\n`provider.__lang__` should be set to the actual language / locale being used by the provider.\r\n\r\n### Actual behavior\r\n\r\n`provider.__lang__` is set to the locale that was requested but is not offered by this provider.\n", "before_files": [{"content": "import inspect\nimport warnings\n\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom .generator import Generator\nfrom .providers import BaseProvider\nfrom .proxy import Faker\n\n\nclass Documentor:\n def __init__(self, generator: Union[Generator, Faker]) -> None:\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len: int = 0\n self.already_generated: List[str] = []\n\n def get_formatters(\n self,\n locale: Optional[str] = None,\n excludes: Optional[List[str]] = None,\n **kwargs: Any,\n ) -> List[Tuple[BaseProvider, Dict[str, str]]]:\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers: List[BaseProvider] = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\n )\n return formatters\n\n def get_provider_formatters(\n self,\n provider: BaseProvider,\n prefix: str = \"fake.\",\n with_args: bool = True,\n with_defaults: bool = True,\n ) -> Dict[str, str]:\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n # skip 'private' method and inherited methods\n if name.startswith(\"_\") or name in self.already_generated:\n continue\n\n arguments = []\n faker_args: List[str] = []\n faker_kwargs = {}\n\n if name == \"binary\":\n faker_kwargs[\"length\"] = 1024\n elif name in [\"zip\", \"tar\"]:\n faker_kwargs.update(\n {\n \"uncompressed_size\": 1024,\n \"min_file_size\": 512,\n }\n )\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getfullargspec(method)\n\n lst = [x for x in argspec.args if x not in [\"self\", \"cls\"]]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if isinstance(default, str):\n default = repr(default)\n else:\n # TODO check default type\n default = f\"{default}\"\n\n arg = f\"{arg}={default}\"\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == \"first\":\n break\n\n if with_args != \"first\":\n if argspec.varargs:\n arguments.append(\"*\" + argspec.varargs)\n if argspec.varkw:\n arguments.append(\"**\" + argspec.varkw)\n\n # build fake method signature\n signature = f\"{prefix}{name}({', '.join(arguments)})\"\n\n try:\n # make a fake example\n example = self.generator.format(name, *faker_args, 
**faker_kwargs)\n except (AttributeError, ValueError) as e:\n warnings.warn(str(e))\n continue\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class: BaseProvider) -> str:\n return provider_class.__provider__\n", "path": "faker/documentor.py"}, {"content": "import locale as pylocale\nimport logging\nimport sys\n\nfrom importlib import import_module\nfrom typing import Any, List, Optional, Tuple\n\nfrom .config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS\nfrom .generator import Generator\nfrom .utils.loading import list_module\n\nlogger = logging.getLogger(__name__)\n\n# identify if python is being run in interactive mode. If so, disable logging.\ninREPL = bool(getattr(sys, \"ps1\", False))\nif inREPL:\n logger.setLevel(logging.CRITICAL)\nelse:\n logger.debug(\"Not in REPL -> leaving logger event level as is.\")\n\n\nclass Factory:\n @classmethod\n def create(\n cls,\n locale: Optional[str] = None,\n providers: Optional[List[str]] = None,\n generator: Generator = None,\n includes: Optional[List[str]] = None,\n # Should we use weightings (more realistic) or weight every element equally (faster)?\n # By default, use weightings for backwards compatibility & realism\n use_weighting: bool = True,\n **config: Any,\n ) -> Generator:\n if includes is None:\n includes = []\n\n # fix locale to package name\n locale = locale.replace(\"-\", \"_\") if locale else DEFAULT_LOCALE\n locale = pylocale.normalize(locale).split(\".\")[0]\n if locale not in AVAILABLE_LOCALES:\n msg = f\"Invalid configuration for faker locale `{locale}`\"\n raise AttributeError(msg)\n\n config[\"locale\"] = locale\n config[\"use_weighting\"] = use_weighting\n providers = providers or PROVIDERS\n\n providers += includes\n\n faker = generator or Generator(**config)\n\n for prov_name in providers:\n if prov_name == \"faker.providers\":\n continue\n\n prov_cls, lang_found = cls._get_provider_class(prov_name, locale)\n provider = prov_cls(faker)\n provider.__use_weighting__ = use_weighting\n provider.__provider__ = prov_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _get_provider_class(cls, provider: str, locale: Optional[str] = \"\") -> Tuple[Any, Optional[str]]:\n\n provider_class = cls._find_provider_class(provider, locale)\n\n if provider_class:\n return provider_class, locale\n\n if locale and locale != DEFAULT_LOCALE:\n # fallback to default locale\n provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n if provider_class:\n return provider_class, DEFAULT_LOCALE\n\n # fallback to no locale\n provider_class = cls._find_provider_class(provider)\n if provider_class:\n return provider_class, None\n\n msg = f\"Unable to find provider `{provider}` with locale `{locale}`\"\n raise ValueError(msg)\n\n @classmethod\n def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any:\n\n provider_module = import_module(provider_path)\n\n if getattr(provider_module, \"localized\", False):\n\n logger.debug(\n \"Looking for locale `%s` in provider `%s`.\",\n locale,\n provider_module.__name__,\n )\n\n available_locales = list_module(provider_module)\n if not locale or locale not in available_locales:\n unavailable_locale = locale\n locale = getattr(provider_module, \"default_locale\", DEFAULT_LOCALE)\n logger.debug(\n \"Specified locale `%s` is not available for \"\n \"provider 
`%s`. Locale reset to `%s` for this \"\n \"provider.\",\n unavailable_locale,\n provider_module.__name__,\n locale,\n )\n else:\n logger.debug(\n \"Provider `%s` has been localized to `%s`.\",\n provider_module.__name__,\n locale,\n )\n\n path = f\"{provider_path}.{locale}\"\n provider_module = import_module(path)\n\n else:\n\n logger.debug(\n \"Provider `%s` does not feature localization. \"\n \"Specified locale `%s` is not utilized for this \"\n \"provider.\",\n provider_module.__name__,\n locale,\n )\n\n if locale is not None:\n provider_module = import_module(provider_path)\n\n return provider_module.Provider # type: ignore\n", "path": "faker/factory.py"}]}
| 3,110 | 841 |
gh_patches_debug_11225
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-1865
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support memoization of tuples
We already support lists, so implementing an `id_for_memo` for tuple types is straightforward.
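For context, a quick illustration of the current behaviour (not from the issue itself; the argument values are made up). Tuples are not registered with the `id_for_memo` singledispatch, so they fall through to the default handler shown in the file below, which raises `ValueError` — the natural fix is a handler that mirrors `id_for_memo_list`.
```python
from parsl.dataflow.memoization import id_for_memo

id_for_memo([1, 2, 3])   # works: the registered list handler normalises each element
id_for_memo((1, 2, 3))   # raises ValueError: unknown type for memoization: <class 'tuple'>
```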
</issue>
<code>
[start of parsl/dataflow/memoization.py]
1 import hashlib
2 from functools import singledispatch
3 import logging
4 from parsl.serialize import serialize
5 import types
6
7 logger = logging.getLogger(__name__)
8
9
10 @singledispatch
11 def id_for_memo(obj, output_ref=False):
12 """This should return a byte sequence which identifies the supplied
13 value for memoization purposes: for any two calls of id_for_memo,
14 the byte sequence should be the same when the "same" value is supplied,
15 and different otherwise.
16
17 "same" is in quotes about because sameness is not as straightforward as
18 serialising out the content.
19
20 For example, for two dicts x, y:
21
22 x = {"a":3, "b":4}
23 y = {"b":4, "a":3}
24
25 then: x == y, but their serialization is not equal, and some other
26 functions on x and y are not equal: list(x.keys()) != list(y.keys())
27
28
29 id_for_memo is invoked with output_ref=True when the parameter is an
30 output reference (a value in the outputs=[] parameter of an app
31 invocation).
32
33 Memo hashing might be different for such parameters: for example, a
34 user might choose to hash input File content so that changing the
35 content of an input file invalidates memoization. This does not make
36 sense to do for output files: there is no meaningful content stored
37 where an output filename points at memoization time.
38 """
39 logger.error("id_for_memo attempted on unknown type {}".format(type(obj)))
40 raise ValueError("unknown type for memoization: {}".format(type(obj)))
41
42
43 @id_for_memo.register(str)
44 @id_for_memo.register(int)
45 @id_for_memo.register(float)
46 @id_for_memo.register(types.FunctionType)
47 @id_for_memo.register(type(None))
48 def id_for_memo_serialize(obj, output_ref=False):
49 return serialize(obj)
50
51
52 @id_for_memo.register(list)
53 def id_for_memo_list(denormalized_list, output_ref=False):
54 if type(denormalized_list) != list:
55 raise ValueError("id_for_memo_list cannot work on subclasses of list")
56
57 normalized_list = []
58
59 for e in denormalized_list:
60 normalized_list.append(id_for_memo(e, output_ref=output_ref))
61
62 return serialize(normalized_list)
63
64
65 @id_for_memo.register(dict)
66 def id_for_memo_dict(denormalized_dict, output_ref=False):
67 """This normalises the keys and values of the supplied dictionary.
68
69 When output_ref=True, the values are normalised as output refs, but
70 the keys are not.
71 """
72 if type(denormalized_dict) != dict:
73 raise ValueError("id_for_memo_dict cannot work on subclasses of dict")
74
75 keys = sorted(denormalized_dict)
76
77 normalized_list = []
78 for k in keys:
79 normalized_list.append(id_for_memo(k))
80 normalized_list.append(id_for_memo(denormalized_dict[k], output_ref=output_ref))
81 return serialize(normalized_list)
82
83
84 class Memoizer(object):
85 """Memoizer is responsible for ensuring that identical work is not repeated.
86
87 When a task is repeated, i.e., the same function is called with the same exact arguments, the
88 result from a previous execution is reused. `wiki <https://en.wikipedia.org/wiki/Memoization>`_
89
90 The memoizer implementation here does not collapse duplicate calls
91 at call time, but works **only** when the result of a previous
92 call is available at the time the duplicate call is made.
93
94 For instance::
95
96 No advantage from Memoization helps
97 memoization here: here:
98
99 TaskA TaskB
100 | TaskA |
101 | | TaskA done (TaskB)
102 | | | (TaskB)
103 done | |
104 done |
105 done
106
107 The memoizer creates a lookup table by hashing the function name
108 and its inputs, and storing the results of the function.
109
110 When a task is ready for launch, i.e., all of its arguments
111 have resolved, we add its hash to the task datastructure.
112 """
113
114 def __init__(self, dfk, memoize=True, checkpoint={}):
115 """Initialize the memoizer.
116
117 Args:
118 - dfk (DFK obj): The DFK object
119
120 KWargs:
121 - memoize (Bool): enable memoization or not.
122 - checkpoint (Dict): A checkpoint loaded as a dict.
123 """
124 self.dfk = dfk
125 self.memoize = memoize
126
127 if self.memoize:
128 logger.info("App caching initialized")
129 self.memo_lookup_table = checkpoint
130 else:
131 logger.info("App caching disabled for all apps")
132 self.memo_lookup_table = {}
133
134 def make_hash(self, task):
135 """Create a hash of the task inputs.
136
137 If this fails here, then all ipp calls are also likely to fail due to failure
138 at serialization.
139
140 Args:
141 - task (dict) : Task dictionary from dfk.tasks
142
143 Returns:
144 - hash (str) : A unique hash string
145 """
146 # Function name TODO: Add fn body later
147
148 t = []
149
150 # if kwargs contains an outputs parameter, that parameter is removed
151 # and normalised differently - with output_ref set to True.
152 # kwargs listed in ignore_for_cache will also be removed
153
154 filtered_kw = task['kwargs'].copy()
155
156 ignore_list = task['ignore_for_cache']
157
158 logger.debug("Ignoring these kwargs for checkpointing: {}".format(ignore_list))
159 for k in ignore_list:
160 logger.debug("Ignoring kwarg {}".format(k))
161 del filtered_kw[k]
162
163 if 'outputs' in task['kwargs']:
164 outputs = task['kwargs']['outputs']
165 del filtered_kw['outputs']
166 t = t + [id_for_memo(outputs, output_ref=True)] # TODO: use append?
167
168 t = t + [id_for_memo(filtered_kw)]
169 t = t + [id_for_memo(task['func_name']),
170 id_for_memo(task['fn_hash']),
171 id_for_memo(task['args'])]
172
173 x = b''.join(t)
174 hashedsum = hashlib.md5(x).hexdigest()
175 return hashedsum
176
177 def check_memo(self, task_id, task):
178 """Create a hash of the task and its inputs and check the lookup table for this hash.
179
180 If present, the results are returned. The result is a tuple indicating whether a memo
181 exists and the result, since a None result is possible and could be confusing.
182 This seems like a reasonable option without relying on a cache_miss exception.
183
184 Args:
185 - task(task) : task from the dfk.tasks table
186
187 Returns:
188 - Result (Future): A completed future containing the memoized result
189
190 This call will also set task['hashsum'] to the unique hashsum for the func+inputs.
191 """
192 if not self.memoize or not task['memoize']:
193 task['hashsum'] = None
194 logger.debug("Task {} will not be memoized".format(task_id))
195 return None
196
197 hashsum = self.make_hash(task)
198 logger.debug("Task {} has memoization hash {}".format(task_id, hashsum))
199 result = None
200 if hashsum in self.memo_lookup_table:
201 result = self.memo_lookup_table[hashsum]
202 logger.info("Task %s using result from cache", task_id)
203 else:
204 logger.info("Task %s had no result in cache", task_id)
205
206 task['hashsum'] = hashsum
207
208 return result
209
210 def hash_lookup(self, hashsum):
211 """Lookup a hash in the memoization table.
212
213 Args:
214 - hashsum (str): The same hashes used to uniquely identify apps+inputs
215
216 Returns:
217 - Lookup result
218
219 Raises:
220 - KeyError: if hash not in table
221 """
222 return self.memo_lookup_table[hashsum]
223
224 def update_memo(self, task_id, task, r):
225 """Updates the memoization lookup table with the result from a task.
226
227 Args:
228 - task_id (int): Integer task id
229 - task (dict) : A task dict from dfk.tasks
230 - r (Result future): Result future
231
232 A warning is issued when a hash collision occurs during the update.
233 This is not likely.
234 """
235 if not self.memoize or not task['memoize'] or 'hashsum' not in task:
236 return
237
238 if task['hashsum'] in self.memo_lookup_table:
239 logger.info('Updating app cache entry with latest %s:%s call' %
240 (task['func_name'], task_id))
241 self.memo_lookup_table[task['hashsum']] = r
242 else:
243 self.memo_lookup_table[task['hashsum']] = r
244
[end of parsl/dataflow/memoization.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/dataflow/memoization.py b/parsl/dataflow/memoization.py
--- a/parsl/dataflow/memoization.py
+++ b/parsl/dataflow/memoization.py
@@ -62,6 +62,19 @@
return serialize(normalized_list)
+@id_for_memo.register(tuple)
+def id_for_memo_tuple(denormalized_tuple, output_ref=False):
+ if type(denormalized_tuple) != tuple:
+ raise ValueError("id_for_memo_tuple cannot work on subclasses of tuple")
+
+ normalized_list = []
+
+ for e in denormalized_tuple:
+ normalized_list.append(id_for_memo(e, output_ref=output_ref))
+
+ return serialize(normalized_list)
+
+
@id_for_memo.register(dict)
def id_for_memo_dict(denormalized_dict, output_ref=False):
"""This normalises the keys and values of the supplied dictionary.
|
{"golden_diff": "diff --git a/parsl/dataflow/memoization.py b/parsl/dataflow/memoization.py\n--- a/parsl/dataflow/memoization.py\n+++ b/parsl/dataflow/memoization.py\n@@ -62,6 +62,19 @@\n return serialize(normalized_list)\n \n \n+@id_for_memo.register(tuple)\n+def id_for_memo_tuple(denormalized_tuple, output_ref=False):\n+ if type(denormalized_tuple) != tuple:\n+ raise ValueError(\"id_for_memo_tuple cannot work on subclasses of tuple\")\n+\n+ normalized_list = []\n+\n+ for e in denormalized_tuple:\n+ normalized_list.append(id_for_memo(e, output_ref=output_ref))\n+\n+ return serialize(normalized_list)\n+\n+\n @id_for_memo.register(dict)\n def id_for_memo_dict(denormalized_dict, output_ref=False):\n \"\"\"This normalises the keys and values of the supplied dictionary.\n", "issue": "Support memoization of tuples\nWe already support lists, so implementing an `id_for_memo` for tuple types is straightforward. \r\n\n", "before_files": [{"content": "import hashlib\nfrom functools import singledispatch\nimport logging\nfrom parsl.serialize import serialize\nimport types\n\nlogger = logging.getLogger(__name__)\n\n\n@singledispatch\ndef id_for_memo(obj, output_ref=False):\n \"\"\"This should return a byte sequence which identifies the supplied\n value for memoization purposes: for any two calls of id_for_memo,\n the byte sequence should be the same when the \"same\" value is supplied,\n and different otherwise.\n\n \"same\" is in quotes about because sameness is not as straightforward as\n serialising out the content.\n\n For example, for two dicts x, y:\n\n x = {\"a\":3, \"b\":4}\n y = {\"b\":4, \"a\":3}\n\n then: x == y, but their serialization is not equal, and some other\n functions on x and y are not equal: list(x.keys()) != list(y.keys())\n\n\n id_for_memo is invoked with output_ref=True when the parameter is an\n output reference (a value in the outputs=[] parameter of an app\n invocation).\n\n Memo hashing might be different for such parameters: for example, a\n user might choose to hash input File content so that changing the\n content of an input file invalidates memoization. 
This does not make\n sense to do for output files: there is no meaningful content stored\n where an output filename points at memoization time.\n \"\"\"\n logger.error(\"id_for_memo attempted on unknown type {}\".format(type(obj)))\n raise ValueError(\"unknown type for memoization: {}\".format(type(obj)))\n\n\n@id_for_memo.register(str)\n@id_for_memo.register(int)\n@id_for_memo.register(float)\n@id_for_memo.register(types.FunctionType)\n@id_for_memo.register(type(None))\ndef id_for_memo_serialize(obj, output_ref=False):\n return serialize(obj)\n\n\n@id_for_memo.register(list)\ndef id_for_memo_list(denormalized_list, output_ref=False):\n if type(denormalized_list) != list:\n raise ValueError(\"id_for_memo_list cannot work on subclasses of list\")\n\n normalized_list = []\n\n for e in denormalized_list:\n normalized_list.append(id_for_memo(e, output_ref=output_ref))\n\n return serialize(normalized_list)\n\n\n@id_for_memo.register(dict)\ndef id_for_memo_dict(denormalized_dict, output_ref=False):\n \"\"\"This normalises the keys and values of the supplied dictionary.\n\n When output_ref=True, the values are normalised as output refs, but\n the keys are not.\n \"\"\"\n if type(denormalized_dict) != dict:\n raise ValueError(\"id_for_memo_dict cannot work on subclasses of dict\")\n\n keys = sorted(denormalized_dict)\n\n normalized_list = []\n for k in keys:\n normalized_list.append(id_for_memo(k))\n normalized_list.append(id_for_memo(denormalized_dict[k], output_ref=output_ref))\n return serialize(normalized_list)\n\n\nclass Memoizer(object):\n \"\"\"Memoizer is responsible for ensuring that identical work is not repeated.\n\n When a task is repeated, i.e., the same function is called with the same exact arguments, the\n result from a previous execution is reused. 
`wiki <https://en.wikipedia.org/wiki/Memoization>`_\n\n The memoizer implementation here does not collapse duplicate calls\n at call time, but works **only** when the result of a previous\n call is available at the time the duplicate call is made.\n\n For instance::\n\n No advantage from Memoization helps\n memoization here: here:\n\n TaskA TaskB\n | TaskA |\n | | TaskA done (TaskB)\n | | | (TaskB)\n done | |\n done |\n done\n\n The memoizer creates a lookup table by hashing the function name\n and its inputs, and storing the results of the function.\n\n When a task is ready for launch, i.e., all of its arguments\n have resolved, we add its hash to the task datastructure.\n \"\"\"\n\n def __init__(self, dfk, memoize=True, checkpoint={}):\n \"\"\"Initialize the memoizer.\n\n Args:\n - dfk (DFK obj): The DFK object\n\n KWargs:\n - memoize (Bool): enable memoization or not.\n - checkpoint (Dict): A checkpoint loaded as a dict.\n \"\"\"\n self.dfk = dfk\n self.memoize = memoize\n\n if self.memoize:\n logger.info(\"App caching initialized\")\n self.memo_lookup_table = checkpoint\n else:\n logger.info(\"App caching disabled for all apps\")\n self.memo_lookup_table = {}\n\n def make_hash(self, task):\n \"\"\"Create a hash of the task inputs.\n\n If this fails here, then all ipp calls are also likely to fail due to failure\n at serialization.\n\n Args:\n - task (dict) : Task dictionary from dfk.tasks\n\n Returns:\n - hash (str) : A unique hash string\n \"\"\"\n # Function name TODO: Add fn body later\n\n t = []\n\n # if kwargs contains an outputs parameter, that parameter is removed\n # and normalised differently - with output_ref set to True.\n # kwargs listed in ignore_for_cache will also be removed\n\n filtered_kw = task['kwargs'].copy()\n\n ignore_list = task['ignore_for_cache']\n\n logger.debug(\"Ignoring these kwargs for checkpointing: {}\".format(ignore_list))\n for k in ignore_list:\n logger.debug(\"Ignoring kwarg {}\".format(k))\n del filtered_kw[k]\n\n if 'outputs' in task['kwargs']:\n outputs = task['kwargs']['outputs']\n del filtered_kw['outputs']\n t = t + [id_for_memo(outputs, output_ref=True)] # TODO: use append?\n\n t = t + [id_for_memo(filtered_kw)]\n t = t + [id_for_memo(task['func_name']),\n id_for_memo(task['fn_hash']),\n id_for_memo(task['args'])]\n\n x = b''.join(t)\n hashedsum = hashlib.md5(x).hexdigest()\n return hashedsum\n\n def check_memo(self, task_id, task):\n \"\"\"Create a hash of the task and its inputs and check the lookup table for this hash.\n\n If present, the results are returned. 
The result is a tuple indicating whether a memo\n exists and the result, since a None result is possible and could be confusing.\n This seems like a reasonable option without relying on a cache_miss exception.\n\n Args:\n - task(task) : task from the dfk.tasks table\n\n Returns:\n - Result (Future): A completed future containing the memoized result\n\n This call will also set task['hashsum'] to the unique hashsum for the func+inputs.\n \"\"\"\n if not self.memoize or not task['memoize']:\n task['hashsum'] = None\n logger.debug(\"Task {} will not be memoized\".format(task_id))\n return None\n\n hashsum = self.make_hash(task)\n logger.debug(\"Task {} has memoization hash {}\".format(task_id, hashsum))\n result = None\n if hashsum in self.memo_lookup_table:\n result = self.memo_lookup_table[hashsum]\n logger.info(\"Task %s using result from cache\", task_id)\n else:\n logger.info(\"Task %s had no result in cache\", task_id)\n\n task['hashsum'] = hashsum\n\n return result\n\n def hash_lookup(self, hashsum):\n \"\"\"Lookup a hash in the memoization table.\n\n Args:\n - hashsum (str): The same hashes used to uniquely identify apps+inputs\n\n Returns:\n - Lookup result\n\n Raises:\n - KeyError: if hash not in table\n \"\"\"\n return self.memo_lookup_table[hashsum]\n\n def update_memo(self, task_id, task, r):\n \"\"\"Updates the memoization lookup table with the result from a task.\n\n Args:\n - task_id (int): Integer task id\n - task (dict) : A task dict from dfk.tasks\n - r (Result future): Result future\n\n A warning is issued when a hash collision occurs during the update.\n This is not likely.\n \"\"\"\n if not self.memoize or not task['memoize'] or 'hashsum' not in task:\n return\n\n if task['hashsum'] in self.memo_lookup_table:\n logger.info('Updating app cache entry with latest %s:%s call' %\n (task['func_name'], task_id))\n self.memo_lookup_table[task['hashsum']] = r\n else:\n self.memo_lookup_table[task['hashsum']] = r\n", "path": "parsl/dataflow/memoization.py"}]}
| 3,120 | 208 |
gh_patches_debug_7584
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-774
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
QEMU uses binfmt root instead of pwndbg.qemu.root()
This bit here should probably use pwndbg.qemu.root() instead of using the module variable directly:
https://github.com/pwndbg/pwndbg/blob/609284cee279de345dcb0706e11a0b56abe349f4/pwndbg/file.py#L35
</issue>
<code>
[start of pwndbg/file.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Retrieve files from the debuggee's filesystem. Useful when
5 debugging a remote process over SSH or similar, where e.g.
6 /proc/FOO/maps is needed from the remote system.
7 """
8 from __future__ import absolute_import
9 from __future__ import division
10 from __future__ import print_function
11 from __future__ import unicode_literals
12
13 import binascii
14 import os
15 import tempfile
16
17 import gdb
18
19 import pwndbg.qemu
20 import pwndbg.remote
21 import pwndbg.symbol
22
23
24 def get_file(path):
25 """
26 Downloads the specified file from the system where the current process is
27 being debugged.
28
29 Returns:
30 The local path to the file
31 """
32 local_path = path
33
34 if pwndbg.qemu.root():
35 return os.path.join(pwndbg.qemu.binfmt_root, path)
36 elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():
37 local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)
38 error = None
39 try:
40 error = gdb.execute('remote get "%s" "%s"' % (path, local_path),
41 to_string=True)
42 except gdb.error as e:
43 error = e
44
45 if error:
46 raise OSError("Could not download remote file %r:\n" \
47 "Error: %s" % (path, error))
48
49 return local_path
50
51 def get(path):
52 """
53 Retrieves the contents of the specified file on the system
54 where the current process is being debugged.
55
56 Returns:
57 A byte array, or None.
58 """
59 local_path = get_file(path)
60
61 try:
62 with open(local_path,'rb') as f:
63 return f.read()
64 except:
65 return b''
66
67 def readlink(path):
68 """readlink(path) -> str
69
70 Read the link specified by 'path' on the system being debugged.
71
72 Handles local, qemu-usermode, and remote debugging cases.
73 """
74 is_qemu = pwndbg.qemu.is_qemu_usermode()
75
76 if is_qemu:
77 if not os.path.exists(path):
78 path = os.path.join(pwndbg.qemu.root(), path)
79
80 if is_qemu or not pwndbg.remote.is_remote():
81 try:
82 return os.readlink(path)
83 except Exception:
84 return ''
85
86 #
87 # Hurray unexposed packets!
88 #
89 # The 'vFile:readlink:' packet does exactly what it sounds like,
90 # but there is no API exposed to do this and there is also no
91 # command exposed... so we have to send the packet manually.
92 #
93 cmd = 'maintenance packet vFile:readlink:%s'
94
95 # The path must be uppercase hex-encoded and NULL-terminated.
96 path += '\x00'
97 path = binascii.hexlify(path.encode())
98 path = path.upper()
99 path = path.decode()
100
101 result = gdb.execute(cmd % path, from_tty=False, to_string=True)
102
103 """
104 sending: "vFile:readlink:2F70726F632F3130303839302F66642F3000"
105 received: "Fc;pipe:[98420]"
106
107 sending: "vFile:readlink:2F70726F632F3130303839302F66642F333300"
108 received: "F-1,2"
109 """
110
111 _, data = result.split('\n', 1)
112
113 # Sanity check
114 expected = 'received: "F'
115 if not data.startswith(expected):
116 return ''
117
118 # Negative values are errors
119 data = data[len(expected):]
120 if data[0] == '-':
121 return ''
122
123 # If non-negative, there will be a hex-encoded length followed
124 # by a semicolon.
125 n, data = data.split(';', 1)
126
127 n = int(n, 16)
128 if n < 0:
129 return ''
130
131 # The result is quoted by GDB, strip the quote and newline.
132 # I have no idea how well it handles other crazy stuff.
133 ending = '"\n'
134 data = data[:-len(ending)]
135
136 return data
137
[end of pwndbg/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/file.py b/pwndbg/file.py
--- a/pwndbg/file.py
+++ b/pwndbg/file.py
@@ -30,9 +30,9 @@
The local path to the file
"""
local_path = path
-
- if pwndbg.qemu.root():
- return os.path.join(pwndbg.qemu.binfmt_root, path)
+ qemu_root = pwndbg.qemu.root()
+ if qemu_root:
+ return os.path.join(qemu_root, path)
elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():
local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)
error = None
|
{"golden_diff": "diff --git a/pwndbg/file.py b/pwndbg/file.py\n--- a/pwndbg/file.py\n+++ b/pwndbg/file.py\n@@ -30,9 +30,9 @@\n The local path to the file\n \"\"\"\n local_path = path\n-\n- if pwndbg.qemu.root():\n- return os.path.join(pwndbg.qemu.binfmt_root, path)\n+ qemu_root = pwndbg.qemu.root()\n+ if qemu_root:\n+ return os.path.join(qemu_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n", "issue": "QEMU uses binfmt root instead of pwndbg.qemu.root()\nThis bit here should probably use pwndbg.qemu.root() instead of using the module variable directly: \r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/609284cee279de345dcb0706e11a0b56abe349f4/pwndbg/file.py#L35\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRetrieve files from the debuggee's filesystem. Useful when\ndebugging a remote process over SSH or similar, where e.g.\n/proc/FOO/maps is needed from the remote system.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport binascii\nimport os\nimport tempfile\n\nimport gdb\n\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.symbol\n\n\ndef get_file(path):\n \"\"\"\n Downloads the specified file from the system where the current process is\n being debugged.\n\n Returns:\n The local path to the file\n \"\"\"\n local_path = path\n\n if pwndbg.qemu.root():\n return os.path.join(pwndbg.qemu.binfmt_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n try:\n error = gdb.execute('remote get \"%s\" \"%s\"' % (path, local_path),\n to_string=True)\n except gdb.error as e:\n error = e\n\n if error:\n raise OSError(\"Could not download remote file %r:\\n\" \\\n \"Error: %s\" % (path, error))\n\n return local_path\n\ndef get(path):\n \"\"\"\n Retrieves the contents of the specified file on the system\n where the current process is being debugged.\n\n Returns:\n A byte array, or None.\n \"\"\"\n local_path = get_file(path)\n\n try:\n with open(local_path,'rb') as f:\n return f.read()\n except:\n return b''\n\ndef readlink(path):\n \"\"\"readlink(path) -> str\n\n Read the link specified by 'path' on the system being debugged.\n\n Handles local, qemu-usermode, and remote debugging cases.\n \"\"\"\n is_qemu = pwndbg.qemu.is_qemu_usermode()\n\n if is_qemu:\n if not os.path.exists(path):\n path = os.path.join(pwndbg.qemu.root(), path)\n\n if is_qemu or not pwndbg.remote.is_remote():\n try:\n return os.readlink(path)\n except Exception:\n return ''\n\n #\n # Hurray unexposed packets!\n #\n # The 'vFile:readlink:' packet does exactly what it sounds like,\n # but there is no API exposed to do this and there is also no\n # command exposed... 
so we have to send the packet manually.\n #\n cmd = 'maintenance packet vFile:readlink:%s'\n\n # The path must be uppercase hex-encoded and NULL-terminated.\n path += '\\x00'\n path = binascii.hexlify(path.encode())\n path = path.upper()\n path = path.decode()\n\n result = gdb.execute(cmd % path, from_tty=False, to_string=True)\n\n \"\"\"\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F3000\"\n received: \"Fc;pipe:[98420]\"\n\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F333300\"\n received: \"F-1,2\"\n \"\"\"\n\n _, data = result.split('\\n', 1)\n\n # Sanity check\n expected = 'received: \"F'\n if not data.startswith(expected):\n return ''\n\n # Negative values are errors\n data = data[len(expected):]\n if data[0] == '-':\n return ''\n\n # If non-negative, there will be a hex-encoded length followed\n # by a semicolon.\n n, data = data.split(';', 1)\n\n n = int(n, 16)\n if n < 0:\n return ''\n\n # The result is quoted by GDB, strip the quote and newline.\n # I have no idea how well it handles other crazy stuff.\n ending = '\"\\n'\n data = data[:-len(ending)]\n\n return data\n", "path": "pwndbg/file.py"}]}
| 1,897 | 160 |
gh_patches_debug_32529
|
rasdani/github-patches
|
git_diff
|
OpenMined__PySyft-2254
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Syft Keras bug on Windows
Relevant slack discussion: https://openmined.slack.com/archives/C6DEWA4FR/p1559899875021800
Bug:
It looks like the problem here is that the `tfe.config` is being saved in a location that is not a valid filepath in Windows. As a result, there is likely a file with the name `/tmp/tfe.config` being saved in some folder on the machine, as opposed to a file with the name `tfe.config` being saved in the root subdirectory called `tmp`.
The fix for this should use `os.path` to figure out which filepath the tfe.config should be saved to, and then the logging messages should print the OS-specific CLI command for launching each `TFEWorker` process.
</issue>
<code>
[start of syft/workers/tfe.py]
1 """To be extended in the near future."""
2 from collections import OrderedDict
3 import logging
4 import subprocess
5
6 import tf_encrypted as tfe
7
8
9 logger = logging.getLogger("tf_encrypted")
10
11
12 class TFEWorker:
13 # TODO(Morten) this should be turned into a proxy, with existing code
14 # extracted into a new component that's launched via a script
15
16 def __init__(self, host=None, auto_managed=True):
17 self.host = host
18 self._server_process = None
19 self._auto_managed = auto_managed
20
21 def start(self, player_name, *workers):
22 if self.host is None:
23 # we're running using a tfe.LocalConfig which doesn't require us to do anything
24 return
25
26 config_filename = "/tmp/tfe.config"
27
28 config, _ = self.config_from_workers(workers)
29 config.save(config_filename)
30
31 if self._auto_managed:
32 cmd = "python -m tf_encrypted.player --config {} {}".format(
33 config_filename, player_name
34 )
35 self._server_process = subprocess.Popen(cmd.split(" "))
36 else:
37 logger.info(
38 "If not done already, please launch the following "
39 "command in a terminal on host '%s':\n"
40 "'python -m tf_encrypted.player --config %s %s'\n"
41 "This can be done automatically in a local subprocess by "
42 "setting `auto_managed=True` when instantiating a TFEWorker.",
43 self.host,
44 config_filename,
45 player_name,
46 )
47
48 def stop(self):
49 if self.host is None:
50 # we're running using a tfe.LocalConfig which doesn't require us to do anything
51 return
52
53 if self._auto_managed:
54 if self._server_process is None:
55 return
56 self._server_process.kill()
57 self._server_process.communicate()
58 self._server_process = None
59 else:
60 logger.info("Please terminate the process on host '%s'.", self.host)
61
62 def connect_to_model(self, input_shape, output_shape, *workers):
63 config, _ = self.config_from_workers(workers)
64 tfe.set_config(config)
65
66 prot = tfe.protocol.SecureNN(
67 config.get_player("server0"), config.get_player("server1"), config.get_player("server2")
68 )
69 tfe.set_protocol(prot)
70
71 self._tf_client = tfe.serving.QueueClient(
72 input_shape=input_shape, output_shape=output_shape
73 )
74
75 sess = tfe.Session(config=config)
76 self._tf_session = sess
77
78 def query_model(self, data):
79 self.query_model_async(data)
80 return self.query_model_join()
81
82 def query_model_async(self, data):
83 self._tf_client.send_input(self._tf_session, data)
84
85 def query_model_join(self):
86 return self._tf_client.receive_output(self._tf_session)
87
88 @classmethod
89 def config_from_workers(cls, workers):
90 if len(workers) != 3:
91 raise ValueError("Expected three workers but {} were given".format(len(workers)))
92
93 player_to_worker_mapping = OrderedDict()
94 player_to_worker_mapping["server0"] = workers[0]
95 player_to_worker_mapping["server1"] = workers[1]
96 player_to_worker_mapping["server2"] = workers[2]
97
98 use_local_config = all(worker.host is None for worker in workers)
99 if use_local_config:
100 config = tfe.LocalConfig(
101 player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False
102 )
103 return config, player_to_worker_mapping
104
105 # use tfe.RemoteConfig
106 hostmap = OrderedDict(
107 [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()]
108 )
109 config = tfe.RemoteConfig(hostmap)
110 return config, player_to_worker_mapping
111
[end of syft/workers/tfe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/syft/workers/tfe.py b/syft/workers/tfe.py
--- a/syft/workers/tfe.py
+++ b/syft/workers/tfe.py
@@ -1,12 +1,15 @@
"""To be extended in the near future."""
from collections import OrderedDict
import logging
+import os
import subprocess
+import tempfile
import tf_encrypted as tfe
logger = logging.getLogger("tf_encrypted")
+_TMP_DIR = tempfile.gettempdir()
class TFEWorker:
@@ -23,26 +26,24 @@
# we're running using a tfe.LocalConfig which doesn't require us to do anything
return
- config_filename = "/tmp/tfe.config"
+ config_filename = os.path.join(_TMP_DIR, "tfe.config")
config, _ = self.config_from_workers(workers)
config.save(config_filename)
+ launch_cmd = "python -m tf_encrypted.player --config {} {}".format(
+ config_filename, player_name
+ )
if self._auto_managed:
- cmd = "python -m tf_encrypted.player --config {} {}".format(
- config_filename, player_name
- )
- self._server_process = subprocess.Popen(cmd.split(" "))
+ self._server_process = subprocess.Popen(launch_cmd.split(" "))
else:
logger.info(
"If not done already, please launch the following "
- "command in a terminal on host '%s':\n"
- "'python -m tf_encrypted.player --config %s %s'\n"
+ "command in a terminal on host %s: '%s'\n"
"This can be done automatically in a local subprocess by "
- "setting `auto_managed=True` when instantiating a TFEWorker.",
+ "setting `auto_managed=True` when instantiating a TFEWorker.\n",
self.host,
- config_filename,
- player_name,
+ launch_cmd,
)
def stop(self):
|
{"golden_diff": "diff --git a/syft/workers/tfe.py b/syft/workers/tfe.py\n--- a/syft/workers/tfe.py\n+++ b/syft/workers/tfe.py\n@@ -1,12 +1,15 @@\n \"\"\"To be extended in the near future.\"\"\"\n from collections import OrderedDict\n import logging\n+import os\n import subprocess\n+import tempfile\n \n import tf_encrypted as tfe\n \n \n logger = logging.getLogger(\"tf_encrypted\")\n+_TMP_DIR = tempfile.gettempdir()\n \n \n class TFEWorker:\n@@ -23,26 +26,24 @@\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n \n- config_filename = \"/tmp/tfe.config\"\n+ config_filename = os.path.join(_TMP_DIR, \"tfe.config\")\n \n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n \n+ launch_cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n+ config_filename, player_name\n+ )\n if self._auto_managed:\n- cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n- config_filename, player_name\n- )\n- self._server_process = subprocess.Popen(cmd.split(\" \"))\n+ self._server_process = subprocess.Popen(launch_cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n- \"command in a terminal on host '%s':\\n\"\n- \"'python -m tf_encrypted.player --config %s %s'\\n\"\n+ \"command in a terminal on host %s: '%s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n- \"setting `auto_managed=True` when instantiating a TFEWorker.\",\n+ \"setting `auto_managed=True` when instantiating a TFEWorker.\\n\",\n self.host,\n- config_filename,\n- player_name,\n+ launch_cmd,\n )\n \n def stop(self):\n", "issue": "Syft Keras bug on Windows\nRelevant slack discussion: https://openmined.slack.com/archives/C6DEWA4FR/p1559899875021800\r\n\r\nBug:\r\n\r\n\r\nIt looks like the problem here is that the `tfe.config` is being saved in a location that is not a valid filepath in Windows. 
As a result, there is likely a file with the name `/tmp/tfe.config` being saved in some folder on the machine, as opposed to a file with the name `tfe.config` being saved in the root subdirectory called `tmp`.\r\n\r\nThe fix for this should use `os.path` to figure out which filepath the tfe.config should be saved to, and then the logging messages should print the OS-specific CLI command for launching each `TFEWorker` process.\n", "before_files": [{"content": "\"\"\"To be extended in the near future.\"\"\"\nfrom collections import OrderedDict\nimport logging\nimport subprocess\n\nimport tf_encrypted as tfe\n\n\nlogger = logging.getLogger(\"tf_encrypted\")\n\n\nclass TFEWorker:\n # TODO(Morten) this should be turned into a proxy, with existing code\n # extracted into a new component that's launched via a script\n\n def __init__(self, host=None, auto_managed=True):\n self.host = host\n self._server_process = None\n self._auto_managed = auto_managed\n\n def start(self, player_name, *workers):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n config_filename = \"/tmp/tfe.config\"\n\n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n\n if self._auto_managed:\n cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n config_filename, player_name\n )\n self._server_process = subprocess.Popen(cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n \"command in a terminal on host '%s':\\n\"\n \"'python -m tf_encrypted.player --config %s %s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n \"setting `auto_managed=True` when instantiating a TFEWorker.\",\n self.host,\n config_filename,\n player_name,\n )\n\n def stop(self):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n if self._auto_managed:\n if self._server_process is None:\n return\n self._server_process.kill()\n self._server_process.communicate()\n self._server_process = None\n else:\n logger.info(\"Please terminate the process on host '%s'.\", self.host)\n\n def connect_to_model(self, input_shape, output_shape, *workers):\n config, _ = self.config_from_workers(workers)\n tfe.set_config(config)\n\n prot = tfe.protocol.SecureNN(\n config.get_player(\"server0\"), config.get_player(\"server1\"), config.get_player(\"server2\")\n )\n tfe.set_protocol(prot)\n\n self._tf_client = tfe.serving.QueueClient(\n input_shape=input_shape, output_shape=output_shape\n )\n\n sess = tfe.Session(config=config)\n self._tf_session = sess\n\n def query_model(self, data):\n self.query_model_async(data)\n return self.query_model_join()\n\n def query_model_async(self, data):\n self._tf_client.send_input(self._tf_session, data)\n\n def query_model_join(self):\n return self._tf_client.receive_output(self._tf_session)\n\n @classmethod\n def config_from_workers(cls, workers):\n if len(workers) != 3:\n raise ValueError(\"Expected three workers but {} were given\".format(len(workers)))\n\n player_to_worker_mapping = OrderedDict()\n player_to_worker_mapping[\"server0\"] = workers[0]\n player_to_worker_mapping[\"server1\"] = workers[1]\n player_to_worker_mapping[\"server2\"] = workers[2]\n\n use_local_config = all(worker.host is None for worker in workers)\n if use_local_config:\n config = tfe.LocalConfig(\n player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False\n )\n return config, player_to_worker_mapping\n\n # use 
tfe.RemoteConfig\n hostmap = OrderedDict(\n [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()]\n )\n config = tfe.RemoteConfig(hostmap)\n return config, player_to_worker_mapping\n", "path": "syft/workers/tfe.py"}]}
| 1,828 | 442 |
gh_patches_debug_15049
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-1077
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MAINT: New release?
It has been ~6 months, time for a release? Also it might be necessary to work with the imminent 1.2 release, not sure. But it's generally better if `mne-*` release before `mne` itself. mne-connectivity and mne-realtime have released recently. WDYT?
</issue>
<code>
[start of mne_bids/__init__.py]
1 """MNE software for easily interacting with BIDS compatible datasets."""
2
3 __version__ = '0.11.dev0'
4 from mne_bids import commands
5 from mne_bids.report import make_report
6 from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,
7 print_dir_tree, get_entities_from_fname,
8 search_folder_for_text, get_bids_path_from_fname)
9 from mne_bids.read import get_head_mri_trans, read_raw_bids
10 from mne_bids.utils import get_anonymization_daysback
11 from mne_bids.write import (make_dataset_description, write_anat,
12 write_raw_bids, mark_channels,
13 write_meg_calibration, write_meg_crosstalk,
14 get_anat_landmarks, anonymize_dataset)
15 from mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks
16 from mne_bids.inspect import inspect_dataset
17 from mne_bids.dig import (template_to_head, convert_montage_to_ras,
18 convert_montage_to_mri)
19
[end of mne_bids/__init__.py]
[start of doc/conf.py]
1 """Configure details for documentation with sphinx."""
2 import os
3 import sys
4 from datetime import date
5
6 import sphinx_gallery # noqa: F401
7 from sphinx_gallery.sorting import ExampleTitleSortKey
8
9 import mne_bids
10
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 curdir = os.path.dirname(__file__)
16 sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))
17 sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
18
19
20 # -- General configuration ------------------------------------------------
21
22 # If your documentation needs a minimal Sphinx version, state it here.
23 #
24 # needs_sphinx = '1.0'
25
26 # Add any Sphinx extension module names here, as strings. They can be
27 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
28 # ones.
29 extensions = [
30 'sphinx.ext.githubpages',
31 'sphinx.ext.autodoc',
32 'sphinx.ext.mathjax',
33 'sphinx.ext.viewcode',
34 'sphinx.ext.autosummary',
35 'sphinx.ext.doctest',
36 'sphinx.ext.intersphinx',
37 'sphinx_gallery.gen_gallery',
38 'numpydoc',
39 'sphinx_copybutton',
40 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py
41 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py
42 ]
43
44 # configure sphinx-copybutton
45 copybutton_prompt_text = r">>> |\.\.\. |\$ "
46 copybutton_prompt_is_regexp = True
47
48 # configure numpydoc
49 numpydoc_xref_param_type = True
50 numpydoc_class_members_toctree = False
51 numpydoc_attributes_as_param_list = True
52 numpydoc_xref_aliases = {
53 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',
54 'path-like': ':term:`path-like <mne:path-like>`',
55 'array-like': ':term:`array_like <numpy:array_like>`',
56 'int': ':class:`int <python:int>`',
57 'bool': ':class:`bool <python:bool>`',
58 'float': ':class:`float <python:float>`',
59 'list': ':class:`list <python:list>`',
60 'tuple': ':class:`tuple <python:tuple>`',
61 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',
62 }
63 numpydoc_xref_ignore = {
64 # words
65 'instance', 'instances', 'of'
66 }
67
68
69 # generate autosummary even if no references
70 autosummary_generate = True
71 autodoc_default_options = {'inherited-members': None}
72 default_role = 'autolink' # XXX silently allows bad syntax, someone should fix
73
74 # configure linkcheck
75 # https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder
76 linkcheck_retries = 2
77 linkcheck_rate_limit_timeout = 15.0
78 linkcheck_ignore = [
79 r'https://www.researchgate.net/profile/.*',
80 ]
81
82 # The suffix(es) of source filenames.
83 # You can specify multiple suffix as a list of string:
84 #
85 # source_suffix = ['.rst', '.md']
86 source_suffix = '.rst'
87
88 # The master toctree document.
89 master_doc = 'index'
90
91 # General information about the project.
92 project = u'MNE-BIDS'
93 td = date.today()
94 copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,
95 td.isoformat())
96
97 author = u'MNE Developers'
98
99 # The version info for the project you're documenting, acts as replacement for
100 # |version| and |release|, also used in various other places throughout the
101 # built documents.
102 #
103 # The short X.Y version.
104 version = mne_bids.__version__
105 # The full version, including alpha/beta/rc tags.
106 release = version
107
108 # List of patterns, relative to source directory, that match files and
109 # directories to ignore when looking for source files.
110 # This patterns also effect to html_static_path and html_extra_path
111 exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',
112 '.DS_Store']
113
114 # HTML options (e.g., theme)
115 html_show_sourcelink = False
116 html_copy_source = False
117
118 html_theme = 'pydata_sphinx_theme'
119
120 # Add any paths that contain templates here, relative to this directory.
121 templates_path = ['_templates']
122 html_static_path = ['_static']
123 html_css_files = ['style.css']
124
125 # Theme options are theme-specific and customize the look and feel of a theme
126 # further. For a list of options available for each theme, see the
127 # documentation.
128 html_theme_options = {
129 'icon_links': [
130 dict(name='GitHub',
131 url='https://github.com/mne-tools/mne-bids',
132 icon='fab fa-github-square'),
133 dict(name='Discourse',
134 url='https://mne.discourse.group/tags/mne-bids',
135 icon='fab fa-discourse'),
136 ],
137 'icon_links_label': 'Quick Links', # for screen reader
138 'use_edit_page_button': False,
139 'navigation_with_keys': False,
140 'show_toc_level': 1,
141 'navbar_end': ['version-switcher', 'navbar-icon-links'],
142 }
143
144 html_context = {
145 'versions_dropdown': {
146 'dev': 'v0.11 (devel)',
147 'stable': 'v0.10 (stable)',
148 'v0.9': 'v0.9',
149 'v0.8': 'v0.8',
150 'v0.7': 'v0.7',
151 'v0.6': 'v0.6',
152 'v0.5': 'v0.5',
153 'v0.4': 'v0.4',
154 'v0.3': 'v0.3',
155 'v0.2': 'v0.2',
156 'v0.1': 'v0.1',
157 },
158 }
159
160 html_sidebars = {}
161
162 # Example configuration for intersphinx: refer to the Python standard library.
163 intersphinx_mapping = {
164 'python': ('https://docs.python.org/3', None),
165 'mne': ('https://mne.tools/dev', None),
166 'numpy': ('https://numpy.org/devdocs', None),
167 'scipy': ('https://scipy.github.io/devdocs', None),
168 'matplotlib': ('https://matplotlib.org', None),
169 'nilearn': ('http://nilearn.github.io/stable', None),
170 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),
171 'nibabel': ('https://nipy.org/nibabel', None),
172 }
173 intersphinx_timeout = 5
174
175 # Resolve binder filepath_prefix. From the docs:
176 # "A prefix to append to the filepath in the Binder links. You should use this
177 # if you will store your built documentation in a sub-folder of a repository,
178 # instead of in the root."
179 # we will store dev docs in a `dev` subdirectory and all other docs in a
180 # directory "v" + version_str. E.g., "v0.3"
181 if 'dev' in version:
182 filepath_prefix = 'dev'
183 else:
184 filepath_prefix = 'v{}'.format(version)
185
186 sphinx_gallery_conf = {
187 'doc_module': 'mne_bids',
188 'reference_url': {
189 'mne_bids': None,
190 },
191 'backreferences_dir': 'generated',
192 'examples_dirs': '../examples',
193 'within_subsection_order': ExampleTitleSortKey,
194 'gallery_dirs': 'auto_examples',
195 'filename_pattern': '^((?!sgskip).)*$',
196 'binder': {
197 # Required keys
198 'org': 'mne-tools',
199 'repo': 'mne-bids',
200 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.
201 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).
202 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.
203 'dependencies': [
204 '../test_requirements.txt',
205 './requirements.txt',
206 ],
207 }
208 }
209
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -143,8 +143,9 @@
html_context = {
'versions_dropdown': {
- 'dev': 'v0.11 (devel)',
- 'stable': 'v0.10 (stable)',
+ 'dev': 'v0.12 (devel)',
+ 'stable': 'v0.11 (stable)',
+ 'v0.10': 'v0.10',
'v0.9': 'v0.9',
'v0.8': 'v0.8',
'v0.7': 'v0.7',
diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py
--- a/mne_bids/__init__.py
+++ b/mne_bids/__init__.py
@@ -1,6 +1,6 @@
"""MNE software for easily interacting with BIDS compatible datasets."""
-__version__ = '0.11.dev0'
+__version__ = '0.11'
from mne_bids import commands
from mne_bids.report import make_report
from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,
|
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -143,8 +143,9 @@\n \n html_context = {\n 'versions_dropdown': {\n- 'dev': 'v0.11 (devel)',\n- 'stable': 'v0.10 (stable)',\n+ 'dev': 'v0.12 (devel)',\n+ 'stable': 'v0.11 (stable)',\n+ 'v0.10': 'v0.10',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\ndiff --git a/mne_bids/__init__.py b/mne_bids/__init__.py\n--- a/mne_bids/__init__.py\n+++ b/mne_bids/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n \n-__version__ = '0.11.dev0'\n+__version__ = '0.11'\n from mne_bids import commands\n from mne_bids.report import make_report\n from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,\n", "issue": "MAINT: New release?\nIt has been ~6 months, time for a release? Also it might be necessary to work with the imminent 1.2 release, not sure. But it's generally better if `mne-*` release before `mne` itself. mne-connectivity and mne-realtime have released recently. WDYT?\n", "before_files": [{"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = '0.11.dev0'\nfrom mne_bids import commands\nfrom mne_bids.report import make_report\nfrom mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,\n print_dir_tree, get_entities_from_fname,\n search_folder_for_text, get_bids_path_from_fname)\nfrom mne_bids.read import get_head_mri_trans, read_raw_bids\nfrom mne_bids.utils import get_anonymization_daysback\nfrom mne_bids.write import (make_dataset_description, write_anat,\n write_raw_bids, mark_channels,\n write_meg_calibration, write_meg_crosstalk,\n get_anat_landmarks, anonymize_dataset)\nfrom mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks\nfrom mne_bids.inspect import inspect_dataset\nfrom mne_bids.dig import (template_to_head, convert_montage_to_ras,\n convert_montage_to_mri)\n", "path": "mne_bids/__init__.py"}, {"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. 
|\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.11 (devel)',\n 'stable': 'v0.10 (stable)',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}]}
| 3,286 | 292 |
gh_patches_debug_41321
|
rasdani/github-patches
|
git_diff
|
xorbitsai__inference-118
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BUG: chatglm hang
</issue>
<code>
[start of xinference/model/llm/chatglm.py]
1 # Copyright 2022-2023 XProbe Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import time
17 import uuid
18 from pathlib import Path
19 from typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union
20
21 from ...types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
22 from .core import Model
23
24 if TYPE_CHECKING:
25 from .. import ModelSpec
26
27 logger = logging.getLogger(__name__)
28
29
30 class ChatglmCppModelConfig(TypedDict, total=False):
31 pass
32
33
34 class ChatglmCppGenerateConfig(TypedDict, total=False):
35 max_tokens: int
36 top_p: float
37 temperature: float
38 stream: bool
39
40
41 class ChatglmCppChatModel(Model):
42 def __init__(
43 self,
44 model_uid: str,
45 model_spec: "ModelSpec",
46 model_path: str,
47 model_config: Optional[ChatglmCppModelConfig] = None,
48 ):
49 super().__init__(model_uid, model_spec)
50 self._llm = None
51 self._model_path = model_path
52 if model_spec.model_name == "chatglm":
53 self.max_context_length = 2048
54 elif model_spec.model_name == "chatglm2":
55 self.max_context_length = 8192
56 else:
57 raise ValueError(
58 f"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2."
59 )
60
61 # just a placeholder for now as the chatglm_cpp repo doesn't support model config.
62 self._model_config = model_config
63
64 @classmethod
65 def _sanitize_generate_config(
66 cls,
67 chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],
68 ) -> ChatglmCppGenerateConfig:
69 if chatglmcpp_generate_config is None:
70 chatglmcpp_generate_config = ChatglmCppGenerateConfig()
71 chatglmcpp_generate_config.setdefault("max_tokens", 256)
72 chatglmcpp_generate_config.setdefault("temperature", 0.95)
73 chatglmcpp_generate_config.setdefault("top_p", 0.8)
74 chatglmcpp_generate_config.setdefault("stream", False)
75 return chatglmcpp_generate_config
76
77 def load(self):
78 try:
79 import chatglm_cpp
80 except ImportError:
81 error_message = "Failed to import module 'chatglm_cpp'"
82 installation_guide = [
83 "Please make sure 'chatglm_cpp' is installed. ",
84 "You can install it by running the following command in the terminal:\n",
85 "pip install git+https://github.com/li-plus/chatglm.cpp.git@main\n\n",
86 "Or visit the original git repo if the above command fails:\n",
87 "https://github.com/li-plus/chatglm.cpp",
88 ]
89
90 raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}")
91
92 self._llm = chatglm_cpp.Pipeline(Path(self._model_path))
93
94 @staticmethod
95 def _convert_raw_text_chunks_to_chat(
96 tokens: Iterator[str], model_name: str
97 ) -> Iterator[ChatCompletionChunk]:
98 yield {
99 "id": "chat" + f"cmpl-{str(uuid.uuid4())}",
100 "model": model_name,
101 "object": "chat.completion.chunk",
102 "created": int(time.time()),
103 "choices": [
104 {
105 "index": 0,
106 "delta": {
107 "role": "assistant",
108 },
109 "finish_reason": None,
110 }
111 ],
112 }
113 for token in enumerate(tokens):
114 yield {
115 "id": "chat" + f"cmpl-{str(uuid.uuid4())}",
116 "model": model_name,
117 "object": "chat.completion.chunk",
118 "created": int(time.time()),
119 "choices": [
120 {
121 "index": 0,
122 "delta": {
123 "content": token[1],
124 },
125 "finish_reason": None,
126 }
127 ],
128 }
129
130 @staticmethod
131 def _convert_raw_text_completion_to_chat(
132 text: str, model_name: str
133 ) -> ChatCompletion:
134 return {
135 "id": "chat" + f"cmpl-{str(uuid.uuid4())}",
136 "model": model_name,
137 "object": "chat.completion",
138 "created": int(time.time()),
139 "choices": [
140 {
141 "index": 0,
142 "message": {
143 "role": "assistant",
144 "content": text,
145 },
146 "finish_reason": None,
147 }
148 ],
149 "usage": {
150 "prompt_tokens": -1,
151 "completion_tokens": -1,
152 "total_tokens": -1,
153 },
154 }
155
156 def chat(
157 self,
158 prompt: str,
159 chat_history: Optional[List[ChatCompletionMessage]] = None,
160 generate_config: Optional[ChatglmCppGenerateConfig] = None,
161 ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
162 if chat_history is not None:
163 chat_history_list = [message["content"] for message in chat_history]
164 else:
165 chat_history_list = []
166
167 chat_history_list.append(prompt)
168 logger.debug("Full conversation history:\n%s", str(chat_history_list))
169
170 generate_config = self._sanitize_generate_config(generate_config)
171
172 assert self._llm is not None
173
174 if generate_config.get("stream", False):
175 it = self._llm.stream_chat(
176 chat_history_list,
177 max_context_length=self.max_context_length,
178 max_length=generate_config["max_tokens"],
179 temperature=generate_config["temperature"],
180 top_p=generate_config["top_p"],
181 )
182 assert not isinstance(it, str)
183 return self._convert_raw_text_chunks_to_chat(it, self.model_uid)
184 else:
185 c = self._llm.chat(
186 chat_history_list,
187 max_context_length=self.max_context_length,
188 max_length=generate_config["max_tokens"],
189 temperature=generate_config["temperature"],
190 top_p=generate_config["top_p"],
191 )
192 assert not isinstance(c, Iterator)
193 return self._convert_raw_text_completion_to_chat(c, self.model_uid)
194
[end of xinference/model/llm/chatglm.py]
[start of examples/chat.py]
1 from typing import List
2
3 from xinference.client import Client
4 from xinference.types import ChatCompletionMessage
5
6 if __name__ == '__main__':
7 import argparse
8
9 parser = argparse.ArgumentParser()
10
11 parser.add_argument("--endpoint", type=str, help="Xinference endpoint, required")
12 parser.add_argument("--model_name", type=str, help="Name of the model, required")
13 parser.add_argument(
14 "--model_size_in_billions", type=int, required=False, help="Size of the model in billions", )
15 parser.add_argument("--model_format", type=str, required=False, help="Format of the model", )
16 parser.add_argument("--quantization", type=str, required=False, help="Quantization")
17
18 args = parser.parse_args()
19
20 endpoint = args.endpoint
21 model_name = args.model_name
22 model_size_in_billions = args.model_size_in_billions
23 model_format = args.model_format
24 quantization = args.quantization
25
26 print(f"Xinference endpoint: {endpoint}")
27 print(f"Model Name: {model_name}")
28 print(f"Model Size (in billions): {model_size_in_billions}")
29 print(f"Model Format: {model_format}")
30 print(f"Quantization: {quantization}")
31
32 client = Client(endpoint)
33 model_uid = client.launch_model(
34 model_name=model_name,
35 model_size_in_billions=model_size_in_billions,
36 model_format=model_format,
37 quantization=quantization,
38 n_ctx=2048
39 )
40 model = client.get_model(model_uid)
41
42 chat_history: List["ChatCompletionMessage"] = []
43 while True:
44 prompt = input("you: ")
45 completion = model.chat(
46 prompt,
47 chat_history,
48 generate_config={"max_tokens": 1024}
49 )
50 content = completion["choices"][0]["message"]["content"]
51 print(f"{model_name}: {content}")
52 chat_history.append(
53 ChatCompletionMessage(role="user", content=prompt)
54 )
55 chat_history.append(
56 ChatCompletionMessage(role="assistant", content=content)
57 )
58
[end of examples/chat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/chat.py b/examples/chat.py
--- a/examples/chat.py
+++ b/examples/chat.py
@@ -30,13 +30,7 @@
print(f"Quantization: {quantization}")
client = Client(endpoint)
- model_uid = client.launch_model(
- model_name=model_name,
- model_size_in_billions=model_size_in_billions,
- model_format=model_format,
- quantization=quantization,
- n_ctx=2048
- )
+ model_uid = client.launch_model(model_name, n_ctx=2048)
model = client.get_model(model_uid)
chat_history: List["ChatCompletionMessage"] = []
diff --git a/xinference/model/llm/chatglm.py b/xinference/model/llm/chatglm.py
--- a/xinference/model/llm/chatglm.py
+++ b/xinference/model/llm/chatglm.py
@@ -22,6 +22,8 @@
from .core import Model
if TYPE_CHECKING:
+ from chatglm_cpp import Pipeline
+
from .. import ModelSpec
logger = logging.getLogger(__name__)
@@ -47,16 +49,8 @@
model_config: Optional[ChatglmCppModelConfig] = None,
):
super().__init__(model_uid, model_spec)
- self._llm = None
+ self._llm: Optional["Pipeline"] = None
self._model_path = model_path
- if model_spec.model_name == "chatglm":
- self.max_context_length = 2048
- elif model_spec.model_name == "chatglm2":
- self.max_context_length = 8192
- else:
- raise ValueError(
- f"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2."
- )
# just a placeholder for now as the chatglm_cpp repo doesn't support model config.
self._model_config = model_config
@@ -68,9 +62,6 @@
) -> ChatglmCppGenerateConfig:
if chatglmcpp_generate_config is None:
chatglmcpp_generate_config = ChatglmCppGenerateConfig()
- chatglmcpp_generate_config.setdefault("max_tokens", 256)
- chatglmcpp_generate_config.setdefault("temperature", 0.95)
- chatglmcpp_generate_config.setdefault("top_p", 0.8)
chatglmcpp_generate_config.setdefault("stream", False)
return chatglmcpp_generate_config
@@ -174,20 +165,12 @@
if generate_config.get("stream", False):
it = self._llm.stream_chat(
chat_history_list,
- max_context_length=self.max_context_length,
- max_length=generate_config["max_tokens"],
- temperature=generate_config["temperature"],
- top_p=generate_config["top_p"],
)
assert not isinstance(it, str)
return self._convert_raw_text_chunks_to_chat(it, self.model_uid)
else:
c = self._llm.chat(
chat_history_list,
- max_context_length=self.max_context_length,
- max_length=generate_config["max_tokens"],
- temperature=generate_config["temperature"],
- top_p=generate_config["top_p"],
)
assert not isinstance(c, Iterator)
return self._convert_raw_text_completion_to_chat(c, self.model_uid)
|
{"golden_diff": "diff --git a/examples/chat.py b/examples/chat.py\n--- a/examples/chat.py\n+++ b/examples/chat.py\n@@ -30,13 +30,7 @@\n print(f\"Quantization: {quantization}\")\n \n client = Client(endpoint)\n- model_uid = client.launch_model(\n- model_name=model_name,\n- model_size_in_billions=model_size_in_billions,\n- model_format=model_format,\n- quantization=quantization,\n- n_ctx=2048\n- )\n+ model_uid = client.launch_model(model_name, n_ctx=2048)\n model = client.get_model(model_uid)\n \n chat_history: List[\"ChatCompletionMessage\"] = []\ndiff --git a/xinference/model/llm/chatglm.py b/xinference/model/llm/chatglm.py\n--- a/xinference/model/llm/chatglm.py\n+++ b/xinference/model/llm/chatglm.py\n@@ -22,6 +22,8 @@\n from .core import Model\n \n if TYPE_CHECKING:\n+ from chatglm_cpp import Pipeline\n+\n from .. import ModelSpec\n \n logger = logging.getLogger(__name__)\n@@ -47,16 +49,8 @@\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_spec)\n- self._llm = None\n+ self._llm: Optional[\"Pipeline\"] = None\n self._model_path = model_path\n- if model_spec.model_name == \"chatglm\":\n- self.max_context_length = 2048\n- elif model_spec.model_name == \"chatglm2\":\n- self.max_context_length = 8192\n- else:\n- raise ValueError(\n- f\"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2.\"\n- )\n \n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n@@ -68,9 +62,6 @@\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n- chatglmcpp_generate_config.setdefault(\"max_tokens\", 256)\n- chatglmcpp_generate_config.setdefault(\"temperature\", 0.95)\n- chatglmcpp_generate_config.setdefault(\"top_p\", 0.8)\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n \n@@ -174,20 +165,12 @@\n if generate_config.get(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n- max_context_length=self.max_context_length,\n- max_length=generate_config[\"max_tokens\"],\n- temperature=generate_config[\"temperature\"],\n- top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n- max_context_length=self.max_context_length,\n- max_length=generate_config[\"max_tokens\"],\n- temperature=generate_config[\"temperature\"],\n- top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "issue": "BUG: chatglm hang\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union\n\nfrom ...types import ChatCompletion, ChatCompletionChunk, 
ChatCompletionMessage\nfrom .core import Model\n\nif TYPE_CHECKING:\n from .. import ModelSpec\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatglmCppModelConfig(TypedDict, total=False):\n pass\n\n\nclass ChatglmCppGenerateConfig(TypedDict, total=False):\n max_tokens: int\n top_p: float\n temperature: float\n stream: bool\n\n\nclass ChatglmCppChatModel(Model):\n def __init__(\n self,\n model_uid: str,\n model_spec: \"ModelSpec\",\n model_path: str,\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_spec)\n self._llm = None\n self._model_path = model_path\n if model_spec.model_name == \"chatglm\":\n self.max_context_length = 2048\n elif model_spec.model_name == \"chatglm2\":\n self.max_context_length = 8192\n else:\n raise ValueError(\n f\"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2.\"\n )\n\n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n\n @classmethod\n def _sanitize_generate_config(\n cls,\n chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n chatglmcpp_generate_config.setdefault(\"max_tokens\", 256)\n chatglmcpp_generate_config.setdefault(\"temperature\", 0.95)\n chatglmcpp_generate_config.setdefault(\"top_p\", 0.8)\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n\n def load(self):\n try:\n import chatglm_cpp\n except ImportError:\n error_message = \"Failed to import module 'chatglm_cpp'\"\n installation_guide = [\n \"Please make sure 'chatglm_cpp' is installed. \",\n \"You can install it by running the following command in the terminal:\\n\",\n \"pip install git+https://github.com/li-plus/chatglm.cpp.git@main\\n\\n\",\n \"Or visit the original git repo if the above command fails:\\n\",\n \"https://github.com/li-plus/chatglm.cpp\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._llm = chatglm_cpp.Pipeline(Path(self._model_path))\n\n @staticmethod\n def _convert_raw_text_chunks_to_chat(\n tokens: Iterator[str], model_name: str\n ) -> Iterator[ChatCompletionChunk]:\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"role\": \"assistant\",\n },\n \"finish_reason\": None,\n }\n ],\n }\n for token in enumerate(tokens):\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"content\": token[1],\n },\n \"finish_reason\": None,\n }\n ],\n }\n\n @staticmethod\n def _convert_raw_text_completion_to_chat(\n text: str, model_name: str\n ) -> ChatCompletion:\n return {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": text,\n },\n \"finish_reason\": None,\n }\n ],\n \"usage\": {\n \"prompt_tokens\": -1,\n \"completion_tokens\": -1,\n \"total_tokens\": -1,\n },\n }\n\n def chat(\n self,\n prompt: str,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: 
Optional[ChatglmCppGenerateConfig] = None,\n ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:\n if chat_history is not None:\n chat_history_list = [message[\"content\"] for message in chat_history]\n else:\n chat_history_list = []\n\n chat_history_list.append(prompt)\n logger.debug(\"Full conversation history:\\n%s\", str(chat_history_list))\n\n generate_config = self._sanitize_generate_config(generate_config)\n\n assert self._llm is not None\n\n if generate_config.get(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n max_context_length=self.max_context_length,\n max_length=generate_config[\"max_tokens\"],\n temperature=generate_config[\"temperature\"],\n top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n max_context_length=self.max_context_length,\n max_length=generate_config[\"max_tokens\"],\n temperature=generate_config[\"temperature\"],\n top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "path": "xinference/model/llm/chatglm.py"}, {"content": "from typing import List\n\nfrom xinference.client import Client\nfrom xinference.types import ChatCompletionMessage\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--endpoint\", type=str, help=\"Xinference endpoint, required\")\n parser.add_argument(\"--model_name\", type=str, help=\"Name of the model, required\")\n parser.add_argument(\n \"--model_size_in_billions\", type=int, required=False, help=\"Size of the model in billions\", )\n parser.add_argument(\"--model_format\", type=str, required=False, help=\"Format of the model\", )\n parser.add_argument(\"--quantization\", type=str, required=False, help=\"Quantization\")\n\n args = parser.parse_args()\n\n endpoint = args.endpoint\n model_name = args.model_name\n model_size_in_billions = args.model_size_in_billions\n model_format = args.model_format\n quantization = args.quantization\n\n print(f\"Xinference endpoint: {endpoint}\")\n print(f\"Model Name: {model_name}\")\n print(f\"Model Size (in billions): {model_size_in_billions}\")\n print(f\"Model Format: {model_format}\")\n print(f\"Quantization: {quantization}\")\n\n client = Client(endpoint)\n model_uid = client.launch_model(\n model_name=model_name,\n model_size_in_billions=model_size_in_billions,\n model_format=model_format,\n quantization=quantization,\n n_ctx=2048\n )\n model = client.get_model(model_uid)\n\n chat_history: List[\"ChatCompletionMessage\"] = []\n while True:\n prompt = input(\"you: \")\n completion = model.chat(\n prompt,\n chat_history,\n generate_config={\"max_tokens\": 1024}\n )\n content = completion[\"choices\"][0][\"message\"][\"content\"]\n print(f\"{model_name}: {content}\")\n chat_history.append(\n ChatCompletionMessage(role=\"user\", content=prompt)\n )\n chat_history.append(\n ChatCompletionMessage(role=\"assistant\", content=content)\n )\n", "path": "examples/chat.py"}]}
| 3,019 | 745 |
gh_patches_debug_26035
|
rasdani/github-patches
|
git_diff
|
canonical__microk8s-4046
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
update homebrew formula to newest microk8s version (1.27) and ubuntu version (22.04) - otherwise Mac users can't use it. 
#### Summary
The formula currently on Homebrew as of June 2023 points to Ubuntu version 18.04 and MicroK8s version 1.26. This makes it nearly impossible for Mac users to use it.
#### Why is this important?
A lot has changed since then. The instructions do not work in the present day, leading newbies like myself to waste precious time assuming the fault is theirs :)
#### Are you interested in contributing to this feature?
yep definitely.
</issue>
<code>
[start of installer/common/definitions.py]
1 MAX_CHARACTERS_WRAP: int = 120
2 command_descriptions = {
3 "add-node": "Adds a node to a cluster",
4 "ambassador": "Ambassador API Gateway and Ingress",
5 "cilium": "The cilium client",
6 "config": "Print the kubeconfig",
7 "ctr": "The containerd client",
8 "dashboard-proxy": "Enable the Kubernetes dashboard and proxy to host",
9 "dbctl": "Backup and restore the Kubernetes datastore",
10 "disable": "Disables running add-ons",
11 "enable": "Enables useful add-ons",
12 "helm": "The helm client",
13 "helm3": "The helm3 client",
14 "inspect": "Checks the cluster and gathers logs",
15 "istioctl": "The istio client",
16 "join": "Joins this instance as a node to a cluster",
17 "kubectl": "The kubernetes client",
18 "leave": "Disconnects this node from any cluster it has joined",
19 "linkerd": "The linkerd client",
20 "refresh-certs": "Refresh the CA certificates in this deployment",
21 "remove-node": "Removes a node from the cluster",
22 "reset": "Cleans the cluster from all workloads",
23 "start": "Starts the kubernetes cluster",
24 "status": "Displays the status of the cluster",
25 "stop": "Stops the kubernetes cluster",
26 }
27 DEFAULT_CORES: int = 2
28 DEFAULT_MEMORY_GB: int = 4
29 DEFAULT_DISK_GB: int = 50
30 DEFAULT_ASSUME: bool = False
31 DEFAULT_CHANNEL: str = "1.26/stable"
32 DEFAULT_IMAGE: str = "18.04"
33
34 MIN_CORES: int = 2
35 MIN_MEMORY_GB: int = 2
36 MIN_DISK_GB: int = 10
37
[end of installer/common/definitions.py]
[start of installer/vm_providers/_multipass/_windows.py]
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright (C) 2018 Canonical Ltd
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 import logging
18 import os.path
19 import requests
20 import shutil
21 import simplejson
22 import subprocess
23 import sys
24 import tempfile
25
26 from progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength
27
28 from common.file_utils import calculate_sha3_384, is_dumb_terminal
29 from vm_providers.errors import (
30 ProviderMultipassDownloadFailed,
31 ProviderMultipassInstallationFailed,
32 )
33
34 if sys.platform == "win32":
35 import winreg
36
37
38 logger = logging.getLogger(__name__)
39
40
41 _MULTIPASS_RELEASES_API_URL = "https://api.github.com/repos/canonical/multipass/releases"
42 _MULTIPASS_DL_VERSION = "1.11.1"
43 _MULTIPASS_DL_NAME = "multipass-{version}+win-win64.exe".format(version=_MULTIPASS_DL_VERSION)
44
45 # Download multipass installer and calculate hash:
46 # python3 -c "from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))" # noqa: E501
47 _MULTIPASS_DL_SHA3_384 = "7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b" # noqa: E501
48
49
50 def windows_reload_multipass_path_env():
51 """Update PATH to include installed Multipass, if not already set."""
52
53 assert sys.platform == "win32"
54
55 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Environment")
56
57 paths = os.environ["PATH"].split(";")
58
59 # Drop empty placeholder for trailing comma, if present.
60 if paths[-1] == "":
61 del paths[-1]
62
63 reg_user_path, _ = winreg.QueryValueEx(key, "Path")
64 for path in reg_user_path.split(";"):
65 if path not in paths and "Multipass" in path:
66 paths.append(path)
67
68 # Restore path with trailing comma.
69 os.environ["PATH"] = ";".join(paths) + ";"
70
71
72 def _run_installer(installer_path: str, echoer):
73 """Execute multipass installer."""
74
75 echoer.info("Installing Multipass...")
76
77 # Multipass requires administrative privileges to install, which requires
78 # the use of `runas` functionality. Some of the options included:
79 # (1) https://stackoverflow.com/a/34216774
80 # (2) ShellExecuteW and wait on installer by attempting to delete it.
81 # Windows would prevent us from deleting installer with a PermissionError:
82 # PermissionError: [WinError 32] The process cannot access the file because
83 # it is being used by another process: <path>
84 # (3) Use PowerShell's "Start-Process" with RunAs verb as shown below.
85 # None of the options are quite ideal, but #3 will do.
86 cmd = """
87 & {{
88 try {{
89 $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru
90 }} catch {{
91 [Environment]::Exit(1)
92 }}
93 }}
94 """.format(
95 path=installer_path
96 )
97
98 try:
99 subprocess.check_call(["powershell.exe", "-Command", cmd])
100 except subprocess.CalledProcessError:
101 raise ProviderMultipassInstallationFailed("error launching installer")
102
103 # Reload path environment to see if we can find multipass now.
104 windows_reload_multipass_path_env()
105
106 if not shutil.which("multipass.exe"):
107 # Installation failed.
108 raise ProviderMultipassInstallationFailed("installation did not complete successfully")
109
110 echoer.info("Multipass installation completed successfully.")
111
112
113 def _requests_exception_hint(e: requests.RequestException) -> str:
114 # Use the __doc__ description to give the user a hint. It seems to be a
115 # a decent option over trying to enumerate all of possible types.
116 if e.__doc__:
117 split_lines = e.__doc__.splitlines()
118 if split_lines:
119 return e.__doc__.splitlines()[0].decode().strip()
120
121 # Should never get here.
122 return "unknown download error"
123
124
125 def _fetch_installer_url() -> str:
126 """Verify version set is a valid
127 ref in GitHub and return the full
128 URL.
129 """
130
131 try:
132 resp = requests.get(_MULTIPASS_RELEASES_API_URL)
133 except requests.RequestException as e:
134 raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))
135
136 try:
137 data = resp.json()
138 except simplejson.JSONDecodeError:
139 raise ProviderMultipassDownloadFailed(
140 "failed to fetch valid release data from {}".format(_MULTIPASS_RELEASES_API_URL)
141 )
142
143 for assets in data:
144 for asset in assets.get("assets", list()):
145 # Find matching name.
146 if asset.get("name") != _MULTIPASS_DL_NAME:
147 continue
148
149 return asset.get("browser_download_url")
150
151 # Something changed we don't know about - we will simply categorize
152 # all possible events as an updated version we do not yet know about.
153 raise ProviderMultipassDownloadFailed("ref specified is not a valid ref in GitHub")
154
155
156 def _download_multipass(dl_dir: str, echoer) -> str:
157 """Creates temporary Downloads installer to temp directory."""
158
159 dl_url = _fetch_installer_url()
160 dl_basename = os.path.basename(dl_url)
161 dl_path = os.path.join(dl_dir, dl_basename)
162
163 echoer.info("Downloading Multipass installer...\n{} -> {}".format(dl_url, dl_path))
164
165 try:
166 request = requests.get(dl_url, stream=True, allow_redirects=True)
167 request.raise_for_status()
168 download_requests_stream(request, dl_path)
169 except requests.RequestException as e:
170 raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))
171
172 digest = calculate_sha3_384(dl_path)
173 if digest != _MULTIPASS_DL_SHA3_384:
174 raise ProviderMultipassDownloadFailed(
175 "download failed verification (expected={} but found={})".format(
176 _MULTIPASS_DL_SHA3_384, digest
177 )
178 )
179
180 echoer.info("Verified installer successfully...")
181 return dl_path
182
183
184 def windows_install_multipass(echoer) -> None:
185 """Download and install multipass."""
186
187 assert sys.platform == "win32"
188
189 dl_dir = tempfile.mkdtemp()
190 dl_path = _download_multipass(dl_dir, echoer)
191 _run_installer(dl_path, echoer)
192
193 # Cleanup.
194 shutil.rmtree(dl_dir)
195
196
197 def _init_progress_bar(total_length, destination, message=None):
198 if not message:
199 message = "Downloading {!r}".format(os.path.basename(destination))
200
201 valid_length = total_length and total_length > 0
202
203 if valid_length and is_dumb_terminal():
204 widgets = [message, " ", Percentage()]
205 maxval = total_length
206 elif valid_length and not is_dumb_terminal():
207 widgets = [message, Bar(marker="=", left="[", right="]"), " ", Percentage()]
208 maxval = total_length
209 elif not valid_length and is_dumb_terminal():
210 widgets = [message]
211 maxval = UnknownLength
212 else:
213 widgets = [message, AnimatedMarker()]
214 maxval = UnknownLength
215
216 return ProgressBar(widgets=widgets, maxval=maxval)
217
218
219 def download_requests_stream(request_stream, destination, message=None, total_read=0):
220 """This is a facility to download a request with nice progress bars."""
221
222 # Doing len(request_stream.content) may defeat the purpose of a
223 # progress bar
224 total_length = 0
225 if not request_stream.headers.get("Content-Encoding", ""):
226 total_length = int(request_stream.headers.get("Content-Length", "0"))
227 # Content-Length in the case of resuming will be
228 # Content-Length - total_read so we add back up to have the feel of
229 # resuming
230 if os.path.exists(destination):
231 total_length += total_read
232
233 progress_bar = _init_progress_bar(total_length, destination, message)
234 progress_bar.start()
235
236 if os.path.exists(destination):
237 mode = "ab"
238 else:
239 mode = "wb"
240 with open(destination, mode) as destination_file:
241 for buf in request_stream.iter_content(1024):
242 destination_file.write(buf)
243 if not is_dumb_terminal():
244 total_read += len(buf)
245 progress_bar.update(total_read)
246 progress_bar.finish()
247
[end of installer/vm_providers/_multipass/_windows.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/installer/common/definitions.py b/installer/common/definitions.py
--- a/installer/common/definitions.py
+++ b/installer/common/definitions.py
@@ -28,8 +28,8 @@
DEFAULT_MEMORY_GB: int = 4
DEFAULT_DISK_GB: int = 50
DEFAULT_ASSUME: bool = False
-DEFAULT_CHANNEL: str = "1.26/stable"
-DEFAULT_IMAGE: str = "18.04"
+DEFAULT_CHANNEL: str = "1.27/stable"
+DEFAULT_IMAGE: str = "22.04"
MIN_CORES: int = 2
MIN_MEMORY_GB: int = 2
diff --git a/installer/vm_providers/_multipass/_windows.py b/installer/vm_providers/_multipass/_windows.py
--- a/installer/vm_providers/_multipass/_windows.py
+++ b/installer/vm_providers/_multipass/_windows.py
@@ -39,12 +39,12 @@
_MULTIPASS_RELEASES_API_URL = "https://api.github.com/repos/canonical/multipass/releases"
-_MULTIPASS_DL_VERSION = "1.11.1"
+_MULTIPASS_DL_VERSION = "1.12.0"
_MULTIPASS_DL_NAME = "multipass-{version}+win-win64.exe".format(version=_MULTIPASS_DL_VERSION)
# Download multipass installer and calculate hash:
# python3 -c "from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))" # noqa: E501
-_MULTIPASS_DL_SHA3_384 = "7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b" # noqa: E501
+_MULTIPASS_DL_SHA3_384 = "ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee" # noqa: E501
def windows_reload_multipass_path_env():
|
{"golden_diff": "diff --git a/installer/common/definitions.py b/installer/common/definitions.py\n--- a/installer/common/definitions.py\n+++ b/installer/common/definitions.py\n@@ -28,8 +28,8 @@\n DEFAULT_MEMORY_GB: int = 4\n DEFAULT_DISK_GB: int = 50\n DEFAULT_ASSUME: bool = False\n-DEFAULT_CHANNEL: str = \"1.26/stable\"\n-DEFAULT_IMAGE: str = \"18.04\"\n+DEFAULT_CHANNEL: str = \"1.27/stable\"\n+DEFAULT_IMAGE: str = \"22.04\"\n \n MIN_CORES: int = 2\n MIN_MEMORY_GB: int = 2\ndiff --git a/installer/vm_providers/_multipass/_windows.py b/installer/vm_providers/_multipass/_windows.py\n--- a/installer/vm_providers/_multipass/_windows.py\n+++ b/installer/vm_providers/_multipass/_windows.py\n@@ -39,12 +39,12 @@\n \n \n _MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n-_MULTIPASS_DL_VERSION = \"1.11.1\"\n+_MULTIPASS_DL_VERSION = \"1.12.0\"\n _MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n \n # Download multipass installer and calculate hash:\n # python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n-_MULTIPASS_DL_SHA3_384 = \"7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b\" # noqa: E501\n+_MULTIPASS_DL_SHA3_384 = \"ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee\" # noqa: E501\n \n \n def windows_reload_multipass_path_env():\n", "issue": "update homebrew formula to newest microk8s version (1.27) and ubuntu version (22.04)- otherwise Mac Users can't use it. \n#### Summary\r\nThe latest present formula on homebrew as of June 2023 point to ubuntu version 18.04 and microk8s version 1.26. This makes it near to impossible for mac users to use it.\r\n\r\n#### Why is this important?\r\nLot has changed since that time. 
The instructions do not work in the present day, leading to newbies like myself wasting precious time, assuming the fault is theirs :)\r\n\r\n#### Are you interested in contributing to this feature?\r\nyep definitely.\r\n\n", "before_files": [{"content": "MAX_CHARACTERS_WRAP: int = 120\ncommand_descriptions = {\n \"add-node\": \"Adds a node to a cluster\",\n \"ambassador\": \"Ambassador API Gateway and Ingress\",\n \"cilium\": \"The cilium client\",\n \"config\": \"Print the kubeconfig\",\n \"ctr\": \"The containerd client\",\n \"dashboard-proxy\": \"Enable the Kubernetes dashboard and proxy to host\",\n \"dbctl\": \"Backup and restore the Kubernetes datastore\",\n \"disable\": \"Disables running add-ons\",\n \"enable\": \"Enables useful add-ons\",\n \"helm\": \"The helm client\",\n \"helm3\": \"The helm3 client\",\n \"inspect\": \"Checks the cluster and gathers logs\",\n \"istioctl\": \"The istio client\",\n \"join\": \"Joins this instance as a node to a cluster\",\n \"kubectl\": \"The kubernetes client\",\n \"leave\": \"Disconnects this node from any cluster it has joined\",\n \"linkerd\": \"The linkerd client\",\n \"refresh-certs\": \"Refresh the CA certificates in this deployment\",\n \"remove-node\": \"Removes a node from the cluster\",\n \"reset\": \"Cleans the cluster from all workloads\",\n \"start\": \"Starts the kubernetes cluster\",\n \"status\": \"Displays the status of the cluster\",\n \"stop\": \"Stops the kubernetes cluster\",\n}\nDEFAULT_CORES: int = 2\nDEFAULT_MEMORY_GB: int = 4\nDEFAULT_DISK_GB: int = 50\nDEFAULT_ASSUME: bool = False\nDEFAULT_CHANNEL: str = \"1.26/stable\"\nDEFAULT_IMAGE: str = \"18.04\"\n\nMIN_CORES: int = 2\nMIN_MEMORY_GB: int = 2\nMIN_DISK_GB: int = 10\n", "path": "installer/common/definitions.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2018 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport os.path\nimport requests\nimport shutil\nimport simplejson\nimport subprocess\nimport sys\nimport tempfile\n\nfrom progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength\n\nfrom common.file_utils import calculate_sha3_384, is_dumb_terminal\nfrom vm_providers.errors import (\n ProviderMultipassDownloadFailed,\n ProviderMultipassInstallationFailed,\n)\n\nif sys.platform == \"win32\":\n import winreg\n\n\nlogger = logging.getLogger(__name__)\n\n\n_MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n_MULTIPASS_DL_VERSION = \"1.11.1\"\n_MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n\n# Download multipass installer and calculate hash:\n# python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n_MULTIPASS_DL_SHA3_384 = \"7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b\" # noqa: E501\n\n\ndef windows_reload_multipass_path_env():\n \"\"\"Update PATH to include installed Multipass, if not already set.\"\"\"\n\n assert sys.platform == \"win32\"\n\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Environment\")\n\n paths = os.environ[\"PATH\"].split(\";\")\n\n # Drop empty placeholder for trailing comma, if present.\n if paths[-1] == \"\":\n del paths[-1]\n\n reg_user_path, _ = winreg.QueryValueEx(key, \"Path\")\n for path in reg_user_path.split(\";\"):\n if path not in paths and \"Multipass\" in path:\n paths.append(path)\n\n # Restore path with trailing comma.\n os.environ[\"PATH\"] = \";\".join(paths) + \";\"\n\n\ndef _run_installer(installer_path: str, echoer):\n \"\"\"Execute multipass installer.\"\"\"\n\n echoer.info(\"Installing Multipass...\")\n\n # Multipass requires administrative privileges to install, which requires\n # the use of `runas` functionality. Some of the options included:\n # (1) https://stackoverflow.com/a/34216774\n # (2) ShellExecuteW and wait on installer by attempting to delete it.\n # Windows would prevent us from deleting installer with a PermissionError:\n # PermissionError: [WinError 32] The process cannot access the file because\n # it is being used by another process: <path>\n # (3) Use PowerShell's \"Start-Process\" with RunAs verb as shown below.\n # None of the options are quite ideal, but #3 will do.\n cmd = \"\"\"\n & {{\n try {{\n $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru\n }} catch {{\n [Environment]::Exit(1)\n }}\n }}\n \"\"\".format(\n path=installer_path\n )\n\n try:\n subprocess.check_call([\"powershell.exe\", \"-Command\", cmd])\n except subprocess.CalledProcessError:\n raise ProviderMultipassInstallationFailed(\"error launching installer\")\n\n # Reload path environment to see if we can find multipass now.\n windows_reload_multipass_path_env()\n\n if not shutil.which(\"multipass.exe\"):\n # Installation failed.\n raise ProviderMultipassInstallationFailed(\"installation did not complete successfully\")\n\n echoer.info(\"Multipass installation completed successfully.\")\n\n\ndef _requests_exception_hint(e: requests.RequestException) -> str:\n # Use the __doc__ description to give the user a hint. 
It seems to be a\n # a decent option over trying to enumerate all of possible types.\n if e.__doc__:\n split_lines = e.__doc__.splitlines()\n if split_lines:\n return e.__doc__.splitlines()[0].decode().strip()\n\n # Should never get here.\n return \"unknown download error\"\n\n\ndef _fetch_installer_url() -> str:\n \"\"\"Verify version set is a valid\n ref in GitHub and return the full\n URL.\n \"\"\"\n\n try:\n resp = requests.get(_MULTIPASS_RELEASES_API_URL)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n try:\n data = resp.json()\n except simplejson.JSONDecodeError:\n raise ProviderMultipassDownloadFailed(\n \"failed to fetch valid release data from {}\".format(_MULTIPASS_RELEASES_API_URL)\n )\n\n for assets in data:\n for asset in assets.get(\"assets\", list()):\n # Find matching name.\n if asset.get(\"name\") != _MULTIPASS_DL_NAME:\n continue\n\n return asset.get(\"browser_download_url\")\n\n # Something changed we don't know about - we will simply categorize\n # all possible events as an updated version we do not yet know about.\n raise ProviderMultipassDownloadFailed(\"ref specified is not a valid ref in GitHub\")\n\n\ndef _download_multipass(dl_dir: str, echoer) -> str:\n \"\"\"Creates temporary Downloads installer to temp directory.\"\"\"\n\n dl_url = _fetch_installer_url()\n dl_basename = os.path.basename(dl_url)\n dl_path = os.path.join(dl_dir, dl_basename)\n\n echoer.info(\"Downloading Multipass installer...\\n{} -> {}\".format(dl_url, dl_path))\n\n try:\n request = requests.get(dl_url, stream=True, allow_redirects=True)\n request.raise_for_status()\n download_requests_stream(request, dl_path)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n digest = calculate_sha3_384(dl_path)\n if digest != _MULTIPASS_DL_SHA3_384:\n raise ProviderMultipassDownloadFailed(\n \"download failed verification (expected={} but found={})\".format(\n _MULTIPASS_DL_SHA3_384, digest\n )\n )\n\n echoer.info(\"Verified installer successfully...\")\n return dl_path\n\n\ndef windows_install_multipass(echoer) -> None:\n \"\"\"Download and install multipass.\"\"\"\n\n assert sys.platform == \"win32\"\n\n dl_dir = tempfile.mkdtemp()\n dl_path = _download_multipass(dl_dir, echoer)\n _run_installer(dl_path, echoer)\n\n # Cleanup.\n shutil.rmtree(dl_dir)\n\n\ndef _init_progress_bar(total_length, destination, message=None):\n if not message:\n message = \"Downloading {!r}\".format(os.path.basename(destination))\n\n valid_length = total_length and total_length > 0\n\n if valid_length and is_dumb_terminal():\n widgets = [message, \" \", Percentage()]\n maxval = total_length\n elif valid_length and not is_dumb_terminal():\n widgets = [message, Bar(marker=\"=\", left=\"[\", right=\"]\"), \" \", Percentage()]\n maxval = total_length\n elif not valid_length and is_dumb_terminal():\n widgets = [message]\n maxval = UnknownLength\n else:\n widgets = [message, AnimatedMarker()]\n maxval = UnknownLength\n\n return ProgressBar(widgets=widgets, maxval=maxval)\n\n\ndef download_requests_stream(request_stream, destination, message=None, total_read=0):\n \"\"\"This is a facility to download a request with nice progress bars.\"\"\"\n\n # Doing len(request_stream.content) may defeat the purpose of a\n # progress bar\n total_length = 0\n if not request_stream.headers.get(\"Content-Encoding\", \"\"):\n total_length = int(request_stream.headers.get(\"Content-Length\", \"0\"))\n # Content-Length in the case 
of resuming will be\n # Content-Length - total_read so we add back up to have the feel of\n # resuming\n if os.path.exists(destination):\n total_length += total_read\n\n progress_bar = _init_progress_bar(total_length, destination, message)\n progress_bar.start()\n\n if os.path.exists(destination):\n mode = \"ab\"\n else:\n mode = \"wb\"\n with open(destination, mode) as destination_file:\n for buf in request_stream.iter_content(1024):\n destination_file.write(buf)\n if not is_dumb_terminal():\n total_read += len(buf)\n progress_bar.update(total_read)\n progress_bar.finish()\n", "path": "installer/vm_providers/_multipass/_windows.py"}]}
| 3,883 | 602 |
gh_patches_debug_16541
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-2051
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Advancing msrest version dependency
### Use this [query](https://github.com/Microsoft/botbuilder-python/issues?q=is%3Aissue+is%3Aopen++label%3Afeature-request+) to search for the most popular feature requests.
_No open issues are reported for msrest._
**Is your feature request related to a problem? Please describe.**
When installing a solution accelerator for OpenAI, the solution requires the bot framework. However, aligning the requirements requires downgrading the msrest package.
botbuilder-schema 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible.
botframework-connector 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible.
Several key azure packages depend on msrest:
azure-mgmt-authorization 3.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.
azure-mgmt-containerregistry 10.1.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.
azure-mgmt-resource 22.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.
azure-mgmt-search 9.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.
azure-mgmt-storage 21.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.
**Describe the solution you'd like**
Align the entire botbuilder-python with msrest >= 0.7.1
**Describe alternatives you've considered**
Since my part of the group collaboration does not require development on botbuilder-python, my workaround is installing msrest back to 0.7.1. Though, it would be good for botbuilder-python to be aligned with the azure-mgmt packages.
**Additional context**
Our team is forking this solution accelerator for customers deploying a solution which includes Azure OpenAI, Azure Cognitive Search, and Azure CosmosDB (in addition to the bot), so the alignment will be important since the lost functionality in azure-mgmt is unknown. The original source is at https://github.com/MSUSAzureAccelerators/Azure-Cognitive-Search-Azure-OpenAI-Accelerator
</issue>
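(Aside, not part of the original issue: the snippet below is a minimal sketch for checking the reported version conflict locally. It uses only the Python standard library (importlib.metadata, available from Python 3.8); the package list is copied from the error messages quoted above, and the script itself is an illustrative assumption, not code from this repository.)

```python
# Sketch: print the installed versions of the packages named in the conflict report.
# Assumes Python 3.8+ so importlib.metadata is available without extra installs.
from importlib.metadata import PackageNotFoundError, version

packages = [
    "msrest",
    "botbuilder-schema",
    "botframework-connector",
    "azure-mgmt-resource",
    "azure-mgmt-storage",
]

for name in packages:
    try:
        print(f"{name}=={version(name)}")
    except PackageNotFoundError:
        print(f"{name} is not installed")
```

Running `pip check` in the same environment reports the same incompatibilities that pip prints during installation, which is a quick way to confirm whether the msrest pin is still the blocker.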
<code>
[start of libraries/botframework-connector/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 NAME = "botframework-connector"
8 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0"
9 REQUIRES = [
10 "msrest==0.6.*",
11 # "requests>=2.23.0,<2.26",
12 "PyJWT>=2.4.0",
13 "botbuilder-schema==4.15.0",
14 "msal==1.*",
15 ]
16
17 root = os.path.abspath(os.path.dirname(__file__))
18
19 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
20 long_description = f.read()
21
22 setup(
23 name=NAME,
24 version=VERSION,
25 description="Microsoft Bot Framework Bot Builder SDK for Python.",
26 author="Microsoft",
27 url="https://www.github.com/Microsoft/botbuilder-python",
28 keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"],
29 install_requires=REQUIRES,
30 packages=[
31 "botframework.connector",
32 "botframework.connector.auth",
33 "botframework.connector.async_mixin",
34 "botframework.connector.operations",
35 "botframework.connector.models",
36 "botframework.connector.aio",
37 "botframework.connector.aio.operations_async",
38 "botframework.connector.skills",
39 "botframework.connector.teams",
40 "botframework.connector.teams.operations",
41 "botframework.connector.token_api",
42 "botframework.connector.token_api.aio",
43 "botframework.connector.token_api.aio.operations_async",
44 "botframework.connector.token_api.models",
45 "botframework.connector.token_api.operations",
46 ],
47 include_package_data=True,
48 long_description=long_description,
49 long_description_content_type="text/x-rst",
50 license="MIT",
51 classifiers=[
52 "Programming Language :: Python :: 3.7",
53 "Intended Audience :: Developers",
54 "License :: OSI Approved :: MIT License",
55 "Operating System :: OS Independent",
56 "Development Status :: 5 - Production/Stable",
57 "Topic :: Scientific/Engineering :: Artificial Intelligence",
58 ],
59 )
60
[end of libraries/botframework-connector/setup.py]
[start of libraries/botbuilder-schema/setup.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 import os
5 from setuptools import setup
6
7 NAME = "botbuilder-schema"
8 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0"
9 REQUIRES = ["msrest==0.6.*", "urllib3<2.0.0"]
10
11 root = os.path.abspath(os.path.dirname(__file__))
12
13 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
14 long_description = f.read()
15
16 setup(
17 name=NAME,
18 version=VERSION,
19 description="BotBuilder Schema",
20 author="Microsoft",
21 url="https://github.com/Microsoft/botbuilder-python",
22 keywords=["BotBuilderSchema", "bots", "ai", "botframework", "botbuilder"],
23 long_description=long_description,
24 long_description_content_type="text/x-rst",
25 license="MIT",
26 install_requires=REQUIRES,
27 packages=[
28 "botbuilder.schema",
29 "botbuilder.schema.teams",
30 ],
31 include_package_data=True,
32 classifiers=[
33 "Programming Language :: Python :: 3.7",
34 "Intended Audience :: Developers",
35 "License :: OSI Approved :: MIT License",
36 "Operating System :: OS Independent",
37 "Development Status :: 5 - Production/Stable",
38 "Topic :: Scientific/Engineering :: Artificial Intelligence",
39 ],
40 )
41
[end of libraries/botbuilder-schema/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py
--- a/libraries/botbuilder-schema/setup.py
+++ b/libraries/botbuilder-schema/setup.py
@@ -6,7 +6,7 @@
NAME = "botbuilder-schema"
VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0"
-REQUIRES = ["msrest==0.6.*", "urllib3<2.0.0"]
+REQUIRES = ["msrest== 0.7.*", "urllib3<2.0.0"]
root = os.path.abspath(os.path.dirname(__file__))
diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py
--- a/libraries/botframework-connector/setup.py
+++ b/libraries/botframework-connector/setup.py
@@ -7,7 +7,7 @@
NAME = "botframework-connector"
VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0"
REQUIRES = [
- "msrest==0.6.*",
+ "msrest==0.7.*",
# "requests>=2.23.0,<2.26",
"PyJWT>=2.4.0",
"botbuilder-schema==4.15.0",
|
{"golden_diff": "diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py\n--- a/libraries/botbuilder-schema/setup.py\n+++ b/libraries/botbuilder-schema/setup.py\n@@ -6,7 +6,7 @@\n \r\n NAME = \"botbuilder-schema\"\r\n VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\r\n-REQUIRES = [\"msrest==0.6.*\", \"urllib3<2.0.0\"]\r\n+REQUIRES = [\"msrest== 0.7.*\", \"urllib3<2.0.0\"]\r\n \r\n root = os.path.abspath(os.path.dirname(__file__))\r\n \r\ndiff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py\n--- a/libraries/botframework-connector/setup.py\n+++ b/libraries/botframework-connector/setup.py\n@@ -7,7 +7,7 @@\n NAME = \"botframework-connector\"\n VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\n REQUIRES = [\n- \"msrest==0.6.*\",\n+ \"msrest==0.7.*\",\n # \"requests>=2.23.0,<2.26\",\n \"PyJWT>=2.4.0\",\n \"botbuilder-schema==4.15.0\",\n", "issue": "Advancing msrest version dependency\n### Use this [query](https://github.com/Microsoft/botbuilder-python/issues?q=is%3Aissue+is%3Aopen++label%3Afeature-request+) to search for the most popular feature requests.\r\n\r\n_No open issues are reported for msrest._\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nWhen installing a solution accelerator for OpenAI, the solution requires the bot framework. However, aligning the requirements requires downgrading the msrest package.\r\n\r\nbotbuilder-schema 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible.\r\nbotframework-connector 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible.\r\n\r\nSeveral key azure packages depend on msrest:\r\nazure-mgmt-authorization 3.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-containerregistry 10.1.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-resource 22.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-search 9.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-storage 21.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\n\r\n**Describe the solution you'd like**\r\nAlign the entire botbuilder-python with msrest >= 0.7.1 \r\n\r\n**Describe alternatives you've considered**\r\nSince my part of the group collaboration does not require development on the botbuilder-python, my workaround in installing msrest back to 0.7.1. Though, it would be good for botbuilder-python to be aligned with the azure-mgmt packages.\r\n\r\n**Additional context**\r\nOur team is forking this solution accelerator for customers deploying an accelerator solution which includes resources including Azure OpenAI, Azure Cognitive Search, and Azure CosmosDB (in addition to the bot), so the alignment will be important since the lost functionality in azure-mgmt is unknown. The original source is at https://github.com/MSUSAzureAccelerators/Azure-Cognitive-Search-Azure-OpenAI-Accelerator\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"msrest==0.6.*\",\n # \"requests>=2.23.0,<2.26\",\n \"PyJWT>=2.4.0\",\n \"botbuilder-schema==4.15.0\",\n \"msal==1.*\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.skills\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.aio.operations_async\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nimport os\r\nfrom setuptools import setup\r\n\r\nNAME = \"botbuilder-schema\"\r\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\r\nREQUIRES = [\"msrest==0.6.*\", \"urllib3<2.0.0\"]\r\n\r\nroot = os.path.abspath(os.path.dirname(__file__))\r\n\r\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\r\n long_description = f.read()\r\n\r\nsetup(\r\n name=NAME,\r\n version=VERSION,\r\n description=\"BotBuilder Schema\",\r\n author=\"Microsoft\",\r\n url=\"https://github.com/Microsoft/botbuilder-python\",\r\n keywords=[\"BotBuilderSchema\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\r\n long_description=long_description,\r\n long_description_content_type=\"text/x-rst\",\r\n license=\"MIT\",\r\n install_requires=REQUIRES,\r\n packages=[\r\n \"botbuilder.schema\",\r\n \"botbuilder.schema.teams\",\r\n ],\r\n include_package_data=True,\r\n classifiers=[\r\n \"Programming Language :: Python :: 3.7\",\r\n \"Intended Audience :: Developers\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n \"Development Status :: 5 - Production/Stable\",\r\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\r\n ],\r\n)\r\n", "path": "libraries/botbuilder-schema/setup.py"}]}
| 2,067 | 308 |
gh_patches_debug_15306
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-2531
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
</issue>
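(Aside, not part of the original issue: a minimal sketch of the behavioural difference the issue refers to, assuming the concern is Python 2's truncating integer division. The helper function below is a hypothetical stand-in for illustration, not code from this repository.)

```python
# Sketch: with the __future__ import, `/` is true division on Python 2 and 3 alike,
# so the `1. * x / y` idiom is no longer needed to avoid truncation.
from __future__ import division


def proportion(part, whole):
    # Hypothetical helper: returns a float even when both arguments are ints.
    return part / whole


assert proportion(3, 8) == 0.375
```

On Python 3, `/` is already true division, so the import is a no-op there and only matters for code that still has to run under Python 2.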
<code>
[start of great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py]
1 from typing import Optional
2
3 from great_expectations.core import ExpectationConfiguration
4 from great_expectations.execution_engine import (
5 ExecutionEngine,
6 PandasExecutionEngine,
7 SparkDFExecutionEngine,
8 )
9 from great_expectations.execution_engine.sqlalchemy_execution_engine import (
10 SqlAlchemyExecutionEngine,
11 )
12 from great_expectations.expectations.metrics.column_aggregate_metric import (
13 ColumnMetricProvider,
14 column_aggregate_partial,
15 column_aggregate_value,
16 )
17 from great_expectations.expectations.metrics.column_aggregate_metric import sa as sa
18 from great_expectations.expectations.metrics.metric_provider import metric_value
19 from great_expectations.validator.validation_graph import MetricConfiguration
20
21
22 def unique_proportion(_metrics):
23 total_values = _metrics.get("table.row_count")
24 unique_values = _metrics.get("column.distinct_values.count")
25 null_count = _metrics.get("column_values.nonnull.unexpected_count")
26
27 if total_values > 0:
28 return unique_values / (total_values - null_count)
29 else:
30 return 0
31
32
33 class ColumnUniqueProportion(ColumnMetricProvider):
34 metric_name = "column.unique_proportion"
35
36 @metric_value(engine=PandasExecutionEngine)
37 def _pandas(*args, metrics, **kwargs):
38 return unique_proportion(metrics)
39
40 @metric_value(engine=SqlAlchemyExecutionEngine)
41 def _sqlalchemy(*args, metrics, **kwargs):
42 return unique_proportion(metrics)
43
44 @metric_value(engine=SparkDFExecutionEngine)
45 def _spark(*args, metrics, **kwargs):
46 return unique_proportion(metrics)
47
48 @classmethod
49 def _get_evaluation_dependencies(
50 cls,
51 metric: MetricConfiguration,
52 configuration: Optional[ExpectationConfiguration] = None,
53 execution_engine: Optional[ExecutionEngine] = None,
54 runtime_configuration: Optional[dict] = None,
55 ):
56 table_domain_kwargs = {
57 k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
58 }
59 return {
60 "column.distinct_values.count": MetricConfiguration(
61 "column.distinct_values.count", metric.metric_domain_kwargs
62 ),
63 "table.row_count": MetricConfiguration(
64 "table.row_count", table_domain_kwargs
65 ),
66 "column_values.nonnull.unexpected_count": MetricConfiguration(
67 "column_values.nonnull.unexpected_count", metric.metric_domain_kwargs
68 ),
69 }
70
[end of great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py
--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py
+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py
@@ -20,11 +20,13 @@
def unique_proportion(_metrics):
+ """Computes the proportion of unique non-null values out of all non-null values"""
total_values = _metrics.get("table.row_count")
unique_values = _metrics.get("column.distinct_values.count")
null_count = _metrics.get("column_values.nonnull.unexpected_count")
- if total_values > 0:
+ # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)
+ if total_values > 0 and total_values != null_count:
return unique_values / (total_values - null_count)
else:
return 0
|
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n@@ -20,11 +20,13 @@\n \n \n def unique_proportion(_metrics):\n+ \"\"\"Computes the proportion of unique non-null values out of all non-null values\"\"\"\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n \n- if total_values > 0:\n+ # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)\n+ if total_values > 0 and total_values != null_count:\n return unique_values / (total_values - null_count)\n else:\n return 0\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Optional\n\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.execution_engine import (\n ExecutionEngine,\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n)\nfrom great_expectations.execution_engine.sqlalchemy_execution_engine import (\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import (\n ColumnMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import sa as sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.validator.validation_graph import MetricConfiguration\n\n\ndef unique_proportion(_metrics):\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n\n if total_values > 0:\n return unique_values / (total_values - null_count)\n else:\n return 0\n\n\nclass ColumnUniqueProportion(ColumnMetricProvider):\n metric_name = \"column.unique_proportion\"\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n table_domain_kwargs = {\n k: v for k, v in metric.metric_domain_kwargs.items() if k != \"column\"\n }\n return {\n \"column.distinct_values.count\": MetricConfiguration(\n \"column.distinct_values.count\", metric.metric_domain_kwargs\n ),\n \"table.row_count\": MetricConfiguration(\n \"table.row_count\", table_domain_kwargs\n ),\n \"column_values.nonnull.unexpected_count\": MetricConfiguration(\n \"column_values.nonnull.unexpected_count\", metric.metric_domain_kwargs\n ),\n }\n", "path": 
"great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py"}]}
| 1,220 | 253 |
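A quick illustration of the guard added in the patch above: for an all-null column, `total_values - null_count` is zero, so the unguarded version divides by zero. A minimal standalone sketch of the same logic, using hypothetical metric values rather than anything taken from the repository:

```python
# Sketch of the guarded proportion computation from the patch above.
# The metric values below are hypothetical, not from a real validation run.
def unique_proportion(metrics):
    total_values = metrics.get("table.row_count")
    unique_values = metrics.get("column.distinct_values.count")
    null_count = metrics.get("column_values.nonnull.unexpected_count")
    # Guard: an all-null column would make the denominator zero.
    if total_values > 0 and total_values != null_count:
        return unique_values / (total_values - null_count)
    return 0

all_null_column = {
    "table.row_count": 10,
    "column.distinct_values.count": 0,
    "column_values.nonnull.unexpected_count": 10,
}
print(unique_proportion(all_null_column))  # 0, instead of ZeroDivisionError
```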
gh_patches_debug_3587 | rasdani/github-patches | git_diff | streamlit__streamlit-1942 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crazy error message shown when two widgets have the same key
# Steps to reproduce
1. Run this code
```
import streamlit as st
st.button("OK")
st.button("OK")
```
2. Observe!
## Expected behavior:
You should get one button plus an error message explaining you can't have two `st.button` calls with the same key.
## Actual behavior:
The error message complains about `st.-3952690150221448179` :scream_cat:
See screenshot:

## Is this a regression?
yes
# Debug info
- Streamlit version: 0.65.0
</issue>
<code>
[start of lib/streamlit/elements/utils.py]
1 import textwrap
2
3 from streamlit import type_util
4 from streamlit.report_thread import get_report_ctx
5 from streamlit.errors import DuplicateWidgetID
6 from typing import Optional, Any
7
8
9 class NoValue(object):
10 """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget()
11 call to return None. This is needed because `DeltaGenerator._enqueue`
12 replaces `None` with a `DeltaGenerator` (for use in non-widget elements).
13 """
14
15 pass
16
17
18 def _clean_text(text):
19 return textwrap.dedent(str(text)).strip()
20
21
22 def _build_duplicate_widget_message(
23 widget_func_name: str, user_key: Optional[str] = None
24 ) -> str:
25 if user_key is not None:
26 message = textwrap.dedent(
27 """
28 There are multiple identical `st.{widget_type}` widgets with
29 `key='{user_key}'`.
30
31 To fix this, please make sure that the `key` argument is unique for
32 each `st.{widget_type}` you create.
33 """
34 )
35 else:
36 message = textwrap.dedent(
37 """
38 There are multiple identical `st.{widget_type}` widgets with the
39 same generated key.
40
41 (When a widget is created, it's assigned an internal key based on
42 its structure. Multiple widgets with an identical structure will
43 result in the same internal key, which causes this error.)
44
45 To fix this, please pass a unique `key` argument to
46 `st.{widget_type}`.
47 """
48 )
49
50 return message.strip("\n").format(widget_type=widget_func_name, user_key=user_key)
51
52
53 def _set_widget_id(
54 element_type: str,
55 element_proto: Any,
56 user_key: Optional[str] = None,
57 widget_func_name: Optional[str] = None,
58 ) -> None:
59 """Set the widget id.
60
61 Parameters
62 ----------
63 element_type : str
64 The type of the element as stored in proto.
65 element_proto : proto
66 The proto of the specified type (e.g. Button/Multiselect/Slider proto)
67 user_key : str or None
68 Optional user-specified key to use for the widget ID.
69 If this is None, we'll generate an ID by hashing the element.
70 widget_func_name : str or None
71 The widget's DeltaGenerator function name, if it's different from
72 its element_type. Custom components are a special case: they all have
73 the element_type "component_instance", but are instantiated with
74 dynamically-named functions.
75
76 """
77
78 if widget_func_name is None:
79 widget_func_name = element_type
80
81 # Identify the widget with a hash of type + contents
82 element_hash = hash((element_type, element_proto.SerializeToString()))
83 if user_key is not None:
84 widget_id = "%s-%s" % (user_key, element_hash)
85 else:
86 widget_id = "%s" % element_hash
87
88 ctx = get_report_ctx()
89 if ctx is not None:
90 added = ctx.widget_ids_this_run.add(widget_id)
91 if not added:
92 raise DuplicateWidgetID(
93 _build_duplicate_widget_message(widget_id, user_key)
94 )
95 element_proto.id = widget_id
96
97
98 def _get_widget_ui_value(
99 element_type: str,
100 element_proto: Any,
101 user_key: Optional[str] = None,
102 widget_func_name: Optional[str] = None,
103 ) -> Any:
104 """Get the widget ui_value from the report context.
105 NOTE: This function should be called after the proto has been filled.
106
107 Parameters
108 ----------
109 element_type : str
110 The type of the element as stored in proto.
111 element : proto
112 The proto of the specified type (e.g. Button/Multiselect/Slider proto)
113 user_key : str
114 Optional user-specified string to use as the widget ID.
115 If this is None, we'll generate an ID by hashing the element.
116 widget_func_name : str or None
117 The widget's DeltaGenerator function name, if it's different from
118 its element_type. Custom components are a special case: they all have
119 the element_type "component_instance", but are instantiated with
120 dynamically-named functions.
121
122 Returns
123 -------
124 ui_value : any
125 The value of the widget set by the client or
126 the default value passed. If the report context
127 doesn't exist, None will be returned.
128
129 """
130 _set_widget_id(element_type, element_proto, user_key, widget_func_name)
131 ctx = get_report_ctx()
132 ui_value = ctx.widgets.get_widget_value(element_proto.id) if ctx else None
133 return ui_value
134
135
136 def last_index_for_melted_dataframes(data):
137 if type_util.is_dataframe_compatible(data):
138 data = type_util.convert_anything_to_df(data)
139
140 if data.index.size > 0:
141 return data.index[-1]
142
143 return None
144
[end of lib/streamlit/elements/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/streamlit/elements/utils.py b/lib/streamlit/elements/utils.py
--- a/lib/streamlit/elements/utils.py
+++ b/lib/streamlit/elements/utils.py
@@ -90,7 +90,7 @@
added = ctx.widget_ids_this_run.add(widget_id)
if not added:
raise DuplicateWidgetID(
- _build_duplicate_widget_message(widget_id, user_key)
+ _build_duplicate_widget_message(widget_func_name, user_key)
)
element_proto.id = widget_id
|
{"golden_diff": "diff --git a/lib/streamlit/elements/utils.py b/lib/streamlit/elements/utils.py\n--- a/lib/streamlit/elements/utils.py\n+++ b/lib/streamlit/elements/utils.py\n@@ -90,7 +90,7 @@\n added = ctx.widget_ids_this_run.add(widget_id)\n if not added:\n raise DuplicateWidgetID(\n- _build_duplicate_widget_message(widget_id, user_key)\n+ _build_duplicate_widget_message(widget_func_name, user_key)\n )\n element_proto.id = widget_id\n", "issue": "Crazy error message shown when two widgets have the same key\n# Steps to reproduce\r\n\r\n1. Run this code\r\n ```\r\n import streamlit as st\r\n\r\n st.button(\"OK\")\r\n st.button(\"OK\")\r\n ```\r\n2. Observe!\r\n\r\n## Expected behavior:\r\n\r\nYou should get one button plus an error message explaining you can't have to `st.button` calls with the same key.\r\n\r\n## Actual behavior:\r\n\r\nThe error message complains about `st.-3952690150221448179` :scream_cat: \r\n\r\nSee screenshot:\r\n\r\n\r\n## Is this a regression?\r\n\r\nyes\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.65.0\r\n\n", "before_files": [{"content": "import textwrap\n\nfrom streamlit import type_util\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.errors import DuplicateWidgetID\nfrom typing import Optional, Any\n\n\nclass NoValue(object):\n \"\"\"Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget()\n call to return None. This is needed because `DeltaGenerator._enqueue`\n replaces `None` with a `DeltaGenerator` (for use in non-widget elements).\n \"\"\"\n\n pass\n\n\ndef _clean_text(text):\n return textwrap.dedent(str(text)).strip()\n\n\ndef _build_duplicate_widget_message(\n widget_func_name: str, user_key: Optional[str] = None\n) -> str:\n if user_key is not None:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with\n `key='{user_key}'`.\n\n To fix this, please make sure that the `key` argument is unique for\n each `st.{widget_type}` you create.\n \"\"\"\n )\n else:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with the\n same generated key.\n\n (When a widget is created, it's assigned an internal key based on\n its structure. Multiple widgets with an identical structure will\n result in the same internal key, which causes this error.)\n\n To fix this, please pass a unique `key` argument to\n `st.{widget_type}`.\n \"\"\"\n )\n\n return message.strip(\"\\n\").format(widget_type=widget_func_name, user_key=user_key)\n\n\ndef _set_widget_id(\n element_type: str,\n element_proto: Any,\n user_key: Optional[str] = None,\n widget_func_name: Optional[str] = None,\n) -> None:\n \"\"\"Set the widget id.\n\n Parameters\n ----------\n element_type : str\n The type of the element as stored in proto.\n element_proto : proto\n The proto of the specified type (e.g. Button/Multiselect/Slider proto)\n user_key : str or None\n Optional user-specified key to use for the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n widget_func_name : str or None\n The widget's DeltaGenerator function name, if it's different from\n its element_type. 
Custom components are a special case: they all have\n the element_type \"component_instance\", but are instantiated with\n dynamically-named functions.\n\n \"\"\"\n\n if widget_func_name is None:\n widget_func_name = element_type\n\n # Identify the widget with a hash of type + contents\n element_hash = hash((element_type, element_proto.SerializeToString()))\n if user_key is not None:\n widget_id = \"%s-%s\" % (user_key, element_hash)\n else:\n widget_id = \"%s\" % element_hash\n\n ctx = get_report_ctx()\n if ctx is not None:\n added = ctx.widget_ids_this_run.add(widget_id)\n if not added:\n raise DuplicateWidgetID(\n _build_duplicate_widget_message(widget_id, user_key)\n )\n element_proto.id = widget_id\n\n\ndef _get_widget_ui_value(\n element_type: str,\n element_proto: Any,\n user_key: Optional[str] = None,\n widget_func_name: Optional[str] = None,\n) -> Any:\n \"\"\"Get the widget ui_value from the report context.\n NOTE: This function should be called after the proto has been filled.\n\n Parameters\n ----------\n element_type : str\n The type of the element as stored in proto.\n element : proto\n The proto of the specified type (e.g. Button/Multiselect/Slider proto)\n user_key : str\n Optional user-specified string to use as the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n widget_func_name : str or None\n The widget's DeltaGenerator function name, if it's different from\n its element_type. Custom components are a special case: they all have\n the element_type \"component_instance\", but are instantiated with\n dynamically-named functions.\n\n Returns\n -------\n ui_value : any\n The value of the widget set by the client or\n the default value passed. If the report context\n doesn't exist, None will be returned.\n\n \"\"\"\n _set_widget_id(element_type, element_proto, user_key, widget_func_name)\n ctx = get_report_ctx()\n ui_value = ctx.widgets.get_widget_value(element_proto.id) if ctx else None\n return ui_value\n\n\ndef last_index_for_melted_dataframes(data):\n if type_util.is_dataframe_compatible(data):\n data = type_util.convert_anything_to_df(data)\n\n if data.index.size > 0:\n return data.index[-1]\n\n return None\n", "path": "lib/streamlit/elements/utils.py"}]}
| 2,136 | 112 |
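The confusing `st.-3952690150221448179` in the issue above comes from formatting the hash-based `widget_id` into the `{widget_type}` slot of the error message; the patch passes `widget_func_name` instead. A minimal sketch of the difference, with a made-up hash value rather than output from a real Streamlit run:

```python
# Sketch of the message formatting behind the bug above.
# The widget id below is a made-up hash, not from an actual Streamlit session.
import textwrap

def build_duplicate_widget_message(widget_type):
    message = textwrap.dedent(
        """
        There are multiple identical `st.{widget_type}` widgets with the
        same generated key. To fix this, please pass a unique `key` argument
        to `st.{widget_type}`.
        """
    )
    return message.strip("\n").format(widget_type=widget_type)

widget_id = "-3952690150221448179"
print(build_duplicate_widget_message(widget_id))   # buggy call: mentions `st.-3952690150221448179`
print(build_duplicate_widget_message("button"))    # fixed call:  mentions `st.button`
```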
gh_patches_debug_23329 | rasdani/github-patches | git_diff | fossasia__open-event-server-6647 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tickets can be created with end date greater than event end date
**Describe the bug**
Tickets can be created with end date greater than event end date.
**Expected behavior**
There should be a server-side check to ensure that this doesn't happen
**Additional context**
- [x] Taking this up
</issue>
<code>
[start of app/api/tickets.py]
1 from flask import request, current_app
2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
3 from flask_rest_jsonapi.exceptions import ObjectNotFound
4 from flask_jwt_extended import current_user, verify_jwt_in_request
5 from sqlalchemy.orm.exc import NoResultFound
6
7 from app.api.bootstrap import api
8 from app.api.helpers.db import safe_query
9 from app.api.helpers.permission_manager import has_access
10 from app.api.helpers.query import event_query
11 from app.api.helpers.utilities import require_relationship
12 from app.api.schema.tickets import TicketSchema, TicketSchemaPublic
13 from app.models import db
14 from app.models.access_code import AccessCode
15 from app.models.discount_code import DiscountCode
16 from app.models.order import Order
17 from app.models.ticket import Ticket, TicketTag, ticket_tags_table
18 from app.models.event import Event
19 from app.models.ticket_holder import TicketHolder
20 from app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity
21 from app.api.helpers.db import get_count
22
23 class TicketListPost(ResourceList):
24 """
25 Create and List Tickets
26 """
27 def before_post(self, args, kwargs, data):
28 """
29 before post method to check for required relationship and proper permission
30 :param args:
31 :param kwargs:
32 :param data:
33 :return:
34 """
35 require_relationship(['event'], data)
36 if not has_access('is_coorganizer', event_id=data['event']):
37 raise ObjectNotFound({'parameter': 'event_id'},
38 "Event: {} not found".format(data['event']))
39
40 if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),
41 deleted_at=None)) > 0:
42 raise ConflictException({'pointer': '/data/attributes/name'}, "Ticket already exists")
43
44 def before_create_object(self, data, view_kwargs):
45 """
46 before create method to check if paid ticket has a paymentMethod enabled
47 :param data:
48 :param view_kwargs:
49 :return:
50 """
51 if data.get('type') == 'paid' and data.get('event'):
52 try:
53 event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()
54 except NoResultFound:
55 raise UnprocessableEntity({'event_id': data['event']}, "Event does not exist")
56 if not event.is_payment_enabled():
57 raise UnprocessableEntity(
58 {'event_id': data['event']}, "Event having paid ticket must have a payment method")
59
60 schema = TicketSchema
61 methods = ['POST', ]
62 data_layer = {'session': db.session,
63 'model': Ticket,
64 'methods': {
65 'before_create_object': before_create_object,
66 'before_post': before_post
67 }}
68
69
70 class TicketList(ResourceList):
71 """
72 List Tickets based on different params
73 """
74 def before_get(self, args, view_kwargs):
75 """
76 before get method to get the resource id for assigning schema
77 :param args:
78 :param view_kwargs:
79 :return:
80 """
81 if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):
82 self.schema = TicketSchemaPublic
83
84 def query(self, view_kwargs):
85 """
86 query method for resource list
87 :param view_kwargs:
88 :return:
89 """
90
91 if 'Authorization' in request.headers:
92 verify_jwt_in_request()
93 if current_user.is_super_admin or current_user.is_admin:
94 query_ = self.session.query(Ticket)
95 elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):
96 query_ = self.session.query(Ticket)
97 else:
98 query_ = self.session.query(Ticket).filter_by(is_hidden=False)
99 else:
100 query_ = self.session.query(Ticket).filter_by(is_hidden=False)
101
102 if view_kwargs.get('ticket_tag_id'):
103 ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')
104 query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)
105 query_ = event_query(self, query_, view_kwargs)
106 if view_kwargs.get('access_code_id'):
107 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')
108 # access_code - ticket :: many-to-many relationship
109 query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))
110
111 if view_kwargs.get('discount_code_id'):
112 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')
113 # discount_code - ticket :: many-to-many relationship
114 query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))
115
116 if view_kwargs.get('order_identifier'):
117 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')
118 ticket_ids = []
119 for ticket in order.tickets:
120 ticket_ids.append(ticket.id)
121 query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))
122
123 return query_
124
125 view_kwargs = True
126 methods = ['GET', ]
127 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
128 fetch_as="event_id", model=Ticket, methods="POST",
129 check=lambda a: a.get('event_id') or a.get('event_identifier')),)
130 schema = TicketSchema
131 data_layer = {'session': db.session,
132 'model': Ticket,
133 'methods': {
134 'query': query,
135 }}
136
137
138 class TicketDetail(ResourceDetail):
139 """
140 Ticket Resource
141 """
142 def before_get(self, args, view_kwargs):
143 """
144 before get method to get the resource id for assigning schema
145 :param args:
146 :param view_kwargs:
147 :return:
148 """
149 if view_kwargs.get('attendee_id'):
150 self.schema = TicketSchemaPublic
151
152 def before_get_object(self, view_kwargs):
153 """
154 before get object method to get the resource id for fetching details
155 :param view_kwargs:
156 :return:
157 """
158 if view_kwargs.get('attendee_id') is not None:
159 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')
160 if attendee.ticket_id is not None:
161 view_kwargs['id'] = attendee.ticket_id
162 else:
163 view_kwargs['id'] = None
164
165 def before_update_object(self, ticket, data, view_kwargs):
166 """
167 method to check if paid ticket has payment method before updating ticket object
168 :param ticket:
169 :param data:
170 :param view_kwargs:
171 :return:
172 """
173 if ticket.type == 'paid':
174 try:
175 event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()
176 except NoResultFound:
177 raise UnprocessableEntity({'event_id': ticket.event.id}, "Event does not exist")
178 if not event.is_payment_enabled():
179 raise UnprocessableEntity(
180 {'event_id': ticket.event.id}, "Event having paid ticket must have a payment method")
181
182 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
183 fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),)
184 schema = TicketSchema
185 data_layer = {'session': db.session,
186 'model': Ticket,
187 'methods': {
188 'before_get_object': before_get_object,
189 'before_update_object': before_update_object
190 }}
191
192
193 class TicketRelationshipRequired(ResourceRelationship):
194 """
195 Tickets Relationship (Required)
196 """
197 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
198 fetch_as="event_id", model=Ticket, methods="PATCH"),)
199 methods = ['GET', 'PATCH']
200 schema = TicketSchema
201 data_layer = {'session': db.session,
202 'model': Ticket}
203
204
205 class TicketRelationshipOptional(ResourceRelationship):
206 """
207 Tickets Relationship (Optional)
208 """
209 decorators = (api.has_permission('is_coorganizer', fetch='event_id',
210 fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),)
211 schema = TicketSchema
212 data_layer = {'session': db.session,
213 'model': Ticket}
214
[end of app/api/tickets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/tickets.py b/app/api/tickets.py
--- a/app/api/tickets.py
+++ b/app/api/tickets.py
@@ -48,14 +48,20 @@
:param view_kwargs:
:return:
"""
- if data.get('type') == 'paid' and data.get('event'):
+ if data.get('event'):
try:
event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()
except NoResultFound:
raise UnprocessableEntity({'event_id': data['event']}, "Event does not exist")
- if not event.is_payment_enabled():
- raise UnprocessableEntity(
- {'event_id': data['event']}, "Event having paid ticket must have a payment method")
+
+ if data.get('type') == 'paid':
+ if not event.is_payment_enabled():
+ raise UnprocessableEntity(
+ {'event_id': data['event']}, "Event having paid ticket must have a payment method")
+
+ if data.get('sales_ends_at') > event.ends_at:
+ raise UnprocessableEntity({'sales_ends_at': '/data/attributes/sales-ends-at'},
+ "Ticket end date cannot be greater than event end date")
schema = TicketSchema
methods = ['POST', ]
|
{"golden_diff": "diff --git a/app/api/tickets.py b/app/api/tickets.py\n--- a/app/api/tickets.py\n+++ b/app/api/tickets.py\n@@ -48,14 +48,20 @@\n :param view_kwargs:\n :return:\n \"\"\"\n- if data.get('type') == 'paid' and data.get('event'):\n+ if data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n- if not event.is_payment_enabled():\n- raise UnprocessableEntity(\n- {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n+\n+ if data.get('type') == 'paid':\n+ if not event.is_payment_enabled():\n+ raise UnprocessableEntity(\n+ {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n+\n+ if data.get('sales_ends_at') > event.ends_at:\n+ raise UnprocessableEntity({'sales_ends_at': '/data/attributes/sales-ends-at'},\n+ \"Ticket end date cannot be greater than event end date\")\n \n schema = TicketSchema\n methods = ['POST', ]\n", "issue": "Tickets can be created with end date greater than event end date\n**Describe the bug**\r\nTickets can be created with end date greater than event end date.\r\n\r\n\r\n**Expected behavior**\r\nThere should be a server-side check to check that this doesn't happen\r\n\r\n\r\n**Additional context**\r\n- [x] Taking this up\n", "before_files": [{"content": "from flask import request, current_app\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom flask_jwt_extended import current_user, verify_jwt_in_request\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.tickets import TicketSchema, TicketSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.models.ticket_holder import TicketHolder\nfrom app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity\nfrom app.api.helpers.db import get_count\n\nclass TicketListPost(ResourceList):\n \"\"\"\n Create and List Tickets\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),\n deleted_at=None)) > 0:\n raise ConflictException({'pointer': '/data/attributes/name'}, \"Ticket already exists\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create method to check if paid ticket has a paymentMethod enabled\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('type') == 'paid' and data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except 
NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n\n schema = TicketSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_create_object': before_create_object,\n 'before_post': before_post\n }}\n\n\nclass TicketList(ResourceList):\n \"\"\"\n List Tickets based on different params\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):\n self.schema = TicketSchemaPublic\n\n def query(self, view_kwargs):\n \"\"\"\n query method for resource list\n :param view_kwargs:\n :return:\n \"\"\"\n\n if 'Authorization' in request.headers:\n verify_jwt_in_request()\n if current_user.is_super_admin or current_user.is_admin:\n query_ = self.session.query(Ticket)\n elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):\n query_ = self.session.query(Ticket)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n\n if view_kwargs.get('ticket_tag_id'):\n ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('access_code_id'):\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n # access_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))\n\n if view_kwargs.get('discount_code_id'):\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n # discount_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n ticket_ids = []\n for ticket in order.tickets:\n ticket_ids.append(ticket.id)\n query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))\n\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"POST\",\n check=lambda a: a.get('event_id') or a.get('event_identifier')),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'query': query,\n }}\n\n\nclass TicketDetail(ResourceDetail):\n \"\"\"\n Ticket Resource\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id'):\n self.schema = TicketSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 
'attendee_id')\n if attendee.ticket_id is not None:\n view_kwargs['id'] = attendee.ticket_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, ticket, data, view_kwargs):\n \"\"\"\n method to check if paid ticket has payment method before updating ticket object\n :param ticket:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if ticket.type == 'paid':\n try:\n event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': ticket.event.id}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': ticket.event.id}, \"Event having paid ticket must have a payment method\")\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object\n }}\n\n\nclass TicketRelationshipRequired(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n\n\nclass TicketRelationshipOptional(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Optional)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n", "path": "app/api/tickets.py"}]}
| 2,904 | 288 |
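The patch above adds the missing server-side comparison between the ticket's `sales_ends_at` and the event's `ends_at`. A minimal sketch of that check in isolation; the real code raises `UnprocessableEntity` inside the API layer, while this sketch uses plain `datetime` objects, a generic `ValueError`, and hypothetical dates:

```python
# Sketch of the end-date check introduced by the patch above.
# Dates are hypothetical; the real implementation raises UnprocessableEntity.
from datetime import datetime

def validate_ticket_end_date(sales_ends_at: datetime, event_ends_at: datetime) -> None:
    if sales_ends_at > event_ends_at:
        raise ValueError("Ticket end date cannot be greater than event end date")

event_ends_at = datetime(2020, 1, 31, 18, 0)
validate_ticket_end_date(datetime(2020, 1, 30, 12, 0), event_ends_at)   # passes silently
try:
    validate_ticket_end_date(datetime(2020, 2, 2, 12, 0), event_ends_at)
except ValueError as err:
    print(err)  # Ticket end date cannot be greater than event end date
```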
gh_patches_debug_35583 | rasdani/github-patches | git_diff | lightly-ai__lightly-431 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error: number of embeddings does not match number of samples
I got an error while using `lightly-magic` on a larger dataset.
My gut feeling tells me that this could be related to the number of workers or the batch size. Let's debug and fix the issue :)
I used the following command:
```
lightly-magic token='TOKEN' dataset_id='DATASET_ID' input_dir=/datasets/data trainer.max_epochs=40 loader.batch_size=256 loader.num_workers=12
```
The dataset consists of `74202` images.
```
Epoch 39: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████| 289/289 [02:24<00:00, 2.00it/s, loss=4.66, v_num=0]
Best model is stored at: /datasets/lightly_outputs/2021-06-07/07-44-35/lightly_epoch_9.ckpt
########## Starting to embed your dataset.
Compute efficiency: 0.03: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 290/290 [00:31<00:00, 9.25it/s]
Embeddings are stored at /datasets/lightly_outputs/2021-06-07/07-44-35/embeddings.csv
########## Starting to upload your dataset to the Lightly platform.
Uploading images (with 12 workers).
0%|▏ | 99/74202 [00:09<1:31:21, 13.52imgs/s]/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly_utils/image_processing/metadata.py:53: RuntimeWarning: divide by zero encountered in double_scalars
return float(np.where(std == 0., 0, mean / std))
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 74202/74202 [3:47:52<00:00, 4.35imgs/s]Finished the upload of the dataset.
Starting upload of embeddings.
Traceback (most recent call last):
File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py", line 80, in lightly_cli
return _lightly_cli(cfg)
File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py", line 38, in _lightly_cli
_upload_cli(cfg)
File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/upload_cli.py", line 76, in _upload_cli
path_to_embeddings_csv=path_to_embeddings, name=name
File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py", line 71, in upload_embeddings
path_to_embeddings_csv=path_to_embeddings_csv)
File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py", line 104, in _order_csv_by_filenames
raise ValueError(f'There are {len(filenames)} rows in the embedding file, but '
ValueError: There are 74202 rows in the embedding file, but 74208 filenames/samples on the server.
Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 74202/74202 [3:48:50<00:00, 5.40imgs/s]
```
</issue>
<code>
[start of lightly/api/api_workflow_upload_dataset.py]
1 import os
2 import warnings
3 from concurrent.futures.thread import ThreadPoolExecutor
4 from typing import Union
5
6 import lightly_utils.image_processing
7 import tqdm
8
9 from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls
10 from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest
11 from lightly.api.utils import check_filename, PIL_to_bytes
12 from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest
13 from lightly.data.dataset import LightlyDataset
14
15 from lightly.api.utils import retry
16
17 from lightly_utils import image_processing
18
19
20
21 class _UploadDatasetMixin:
22
23 def upload_dataset(self, input: Union[str, LightlyDataset], max_workers: int = 8,
24 mode: str = 'thumbnails', verbose: bool = True):
25 """Uploads a dataset to to the Lightly cloud solution.
26
27 Args:
28 input:
29 one of the following:
30 - the path to the dataset, e.g. "path/to/dataset"
31 - the dataset in form of a LightlyDataset
32 max_workers:
33 Maximum number of workers uploading images in parallel.
34 max_requests:
35 Maximum number of requests a single worker can do before he has
36 to wait for the others.
37 mode:
38 One of [full, thumbnails, metadata]. Whether to upload thumbnails,
39 full images, or metadata only.
40
41 Raises:
42 ValueError if dataset is too large or input has the wrong type
43 RuntimeError if the connection to the server failed.
44
45 """
46 no_tags_on_server = len(self._get_all_tags())
47 if no_tags_on_server > 0:
48 warnings.warn(f"Dataset with id {self.dataset_id} has already been completely uploaded to the platform. Skipping upload.")
49 return
50
51 # Check input variable 'input'
52 if isinstance(input, str):
53 dataset = LightlyDataset(input_dir=input)
54 elif isinstance(input, LightlyDataset):
55 dataset = input
56 else:
57 raise ValueError(f"input must either be a LightlyDataset or the path to the dataset as str, "
58 f"but is of type {type(input)}")
59
60 # check the allowed dataset size
61 max_dataset_size_str = self.quota_api.get_quota_maximum_dataset_size()
62 max_dataset_size = int(max_dataset_size_str)
63 if len(dataset) > max_dataset_size:
64 msg = f'Your dataset has {len(dataset)} samples which'
65 msg += f' is more than the allowed maximum of {max_dataset_size}'
66 raise ValueError(msg)
67
68 # handle the case where len(dataset) < max_workers
69 max_workers = min(len(dataset), max_workers)
70 max_workers = max(max_workers, 1)
71
72 # upload the samples
73 if verbose:
74 print(f'Uploading images (with {max_workers} workers).', flush=True)
75
76 pbar = tqdm.tqdm(unit='imgs', total=len(dataset))
77 tqdm_lock = tqdm.tqdm.get_lock()
78
79 # calculate the files size more efficiently
80 lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0
81
82 # define lambda function for concurrent upload
83 def lambda_(i):
84 # load image
85 image, label, filename = dataset[i]
86 filepath = dataset.get_filepath_from_filename(filename, image)
87 # try to upload image
88 try:
89 self._upload_single_image(
90 image=image,
91 label=label,
92 filename=filename,
93 filepath=filepath,
94 mode=mode,
95 )
96 success = True
97 except Exception as e:
98 warnings.warn(f"Upload of image {filename} failed with error {e}")
99 success = False
100
101 # update the progress bar
102 tqdm_lock.acquire() # lock
103 pbar.update(1) # update
104 tqdm_lock.release() # unlock
105 # return whether the upload was successful
106 return success
107
108 with ThreadPoolExecutor(max_workers=max_workers) as executor:
109 results = list(executor.map(
110 lambda_, [i for i in range(len(dataset))], chunksize=1))
111
112 if not all(results):
113 msg = 'Warning: Unsuccessful upload(s)! '
114 msg += 'This could cause problems when uploading embeddings.'
115 msg += 'Failed at image: {}'.format(results.index(False))
116 warnings.warn(msg)
117
118 # set image type of data and create initial tag
119 if mode == 'full':
120 img_type = 'full'
121 elif mode == 'thumbnails':
122 img_type = 'thumbnail'
123 else:
124 img_type = 'meta'
125
126 initial_tag_create_request = InitialTagCreateRequest(img_type=img_type, creator=TagCreator.USER_PIP)
127 self.tags_api.create_initial_tag_by_dataset_id(body=initial_tag_create_request, dataset_id=self.dataset_id)
128
129 def _upload_single_image(self, image, label, filename: str, filepath: str, mode):
130 """Uploads a single image to the Lightly platform.
131
132 """
133 self.samples_api: SamplesApi
134
135 # check whether the filename is too long
136 basename = filename
137 if not check_filename(basename):
138 msg = (f'Filename {basename} is longer than the allowed maximum of '
139 'characters and will be skipped.')
140 warnings.warn(msg)
141 return False
142
143 # calculate metadata, and check if corrupted
144 metadata = image_processing.Metadata(image).to_dict()
145 metadata["sizeInBytes"] = os.path.getsize(filepath)
146
147 # try to get exif data
148 try:
149 exifdata = image_processing.Exifdata(image)
150 except Exception:
151 exifdata = None
152
153 # generate thumbnail if necessary
154 thumbname = None
155 if not metadata['is_corrupted'] and mode in ["thumbnails", "full"]:
156 thumbname = '.'.join(basename.split('.')[:-1]) + '_thumb.webp'
157
158 body = SampleCreateRequest(
159 file_name=basename,
160 thumb_name=thumbname,
161 meta_data=metadata,
162 exif=exifdata if exifdata is None else exifdata.to_dict(),
163 )
164 sample_id = retry(
165 self.samples_api.create_sample_by_dataset_id,
166 body=body,
167 dataset_id=self.dataset_id
168 ).id
169
170 if not metadata['is_corrupted'] and mode in ["thumbnails", "full"]:
171
172 def upload_thumbnail(image, signed_url):
173 thumbnail = image_processing.Thumbnail(image)
174 image_to_upload = thumbnail.to_bytes()
175 retry(
176 self.upload_file_with_signed_url,
177 image_to_upload,
178 signed_url
179 )
180 thumbnail.thumbnail.close()
181
182 def upload_full_image(filepath, signed_url):
183 with open(filepath, 'rb') as image_to_upload:
184 retry(
185 self.upload_file_with_signed_url,
186 image_to_upload,
187 signed_url
188 )
189
190 if mode == "thumbnails":
191 thumbnail_url = retry(
192 self.samples_api.get_sample_image_write_url_by_id,
193 dataset_id=self.dataset_id,
194 sample_id=sample_id,
195 is_thumbnail=True
196 )
197 upload_thumbnail(image, thumbnail_url)
198 elif mode == "full":
199 sample_write_urls: SampleWriteUrls = retry(
200 self.samples_api.get_sample_image_write_urls_by_id, dataset_id=self.dataset_id, sample_id=sample_id
201 )
202 upload_thumbnail(image, sample_write_urls.thumb)
203 upload_full_image(filepath, sample_write_urls.full)
204
205
206
207 image.close()
208
209
210
[end of lightly/api/api_workflow_upload_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lightly/api/api_workflow_upload_dataset.py b/lightly/api/api_workflow_upload_dataset.py
--- a/lightly/api/api_workflow_upload_dataset.py
+++ b/lightly/api/api_workflow_upload_dataset.py
@@ -1,12 +1,12 @@
import os
import warnings
from concurrent.futures.thread import ThreadPoolExecutor
-from typing import Union
+from typing import Union, List
import lightly_utils.image_processing
import tqdm
-from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls
+from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls, SampleData
from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest
from lightly.api.utils import check_filename, PIL_to_bytes
from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest
@@ -73,16 +73,29 @@
if verbose:
print(f'Uploading images (with {max_workers} workers).', flush=True)
- pbar = tqdm.tqdm(unit='imgs', total=len(dataset))
- tqdm_lock = tqdm.tqdm.get_lock()
# calculate the files size more efficiently
lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0
+ # get the filenames of the samples already on the server
+ self.samples_api: SamplesApi
+ samples: List[SampleData] = self.samples_api.get_samples_by_dataset_id(dataset_id=self.dataset_id)
+ filenames = [sample.file_name for sample in samples]
+ if len(filenames) > 0:
+ print(f"Found {len(filenames)} images already on the server, they are skipped during the upload.")
+ filenames_set = set(filenames)
+
+ pbar = tqdm.tqdm(unit='imgs', total=len(dataset)-len(filenames))
+ tqdm_lock = tqdm.tqdm.get_lock()
+
# define lambda function for concurrent upload
def lambda_(i):
# load image
image, label, filename = dataset[i]
+ if filename in filenames_set:
+ # the sample was already uploaded
+ return True
+
filepath = dataset.get_filepath_from_filename(filename, image)
# try to upload image
try:
|
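The patch above addresses the row/sample count mismatch by fetching the filenames already present on the server once, keeping them in a set, and skipping them during upload so duplicates are not created. A minimal sketch of that set-based skip, with hypothetical filenames and a stub upload function:

```python
# Sketch of the skip-already-uploaded logic from the patch above.
# Filenames and the upload function are hypothetical stand-ins.
def upload_missing(local_filenames, server_filenames, upload_fn):
    already_on_server = set(server_filenames)  # O(1) membership checks
    uploaded = 0
    for name in local_filenames:
        if name in already_on_server:
            continue  # sample already exists on the server, skip it
        upload_fn(name)
        uploaded += 1
    return uploaded

local = ["img_0.jpg", "img_1.jpg", "img_2.jpg"]
server = ["img_1.jpg"]
print(upload_missing(local, server, lambda name: None))  # 2
```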
{"golden_diff": "diff --git a/lightly/api/api_workflow_upload_dataset.py b/lightly/api/api_workflow_upload_dataset.py\n--- a/lightly/api/api_workflow_upload_dataset.py\n+++ b/lightly/api/api_workflow_upload_dataset.py\n@@ -1,12 +1,12 @@\n import os\n import warnings\n from concurrent.futures.thread import ThreadPoolExecutor\n-from typing import Union\n+from typing import Union, List\n \n import lightly_utils.image_processing\n import tqdm\n \n-from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls\n+from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls, SampleData\n from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest\n from lightly.api.utils import check_filename, PIL_to_bytes\n from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest\n@@ -73,16 +73,29 @@\n if verbose:\n print(f'Uploading images (with {max_workers} workers).', flush=True)\n \n- pbar = tqdm.tqdm(unit='imgs', total=len(dataset))\n- tqdm_lock = tqdm.tqdm.get_lock()\n \n # calculate the files size more efficiently\n lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0\n \n+ # get the filenames of the samples already on the server\n+ self.samples_api: SamplesApi\n+ samples: List[SampleData] = self.samples_api.get_samples_by_dataset_id(dataset_id=self.dataset_id)\n+ filenames = [sample.file_name for sample in samples]\n+ if len(filenames) > 0:\n+ print(f\"Found {len(filenames)} images already on the server, they are skipped during the upload.\")\n+ filenames_set = set(filenames)\n+\n+ pbar = tqdm.tqdm(unit='imgs', total=len(dataset)-len(filenames))\n+ tqdm_lock = tqdm.tqdm.get_lock()\n+\n # define lambda function for concurrent upload\n def lambda_(i):\n # load image\n image, label, filename = dataset[i]\n+ if filename in filenames_set:\n+ # the sample was already uploaded\n+ return True\n+\n filepath = dataset.get_filepath_from_filename(filename, image)\n # try to upload image\n try:\n", "issue": "Error: number of embeddings does not match number of samples\nI got an error while using `lightly-magic` on a larger dataset.\r\nMy gut feeling tells me that this could be related to the number of workers or the batch size. 
Let's debug and fix the issue :)\r\n\r\nI used the following command:\r\n```\r\nlightly-magic token='TOKEN' dataset_id='DATASET_ID' input_dir=/datasets/data trainer.max_epochs=40 loader.batch_size=256 loader.num_workers=12\r\n```\r\n\r\nThe dataset consists of `74202` images.\r\n\r\n```\r\nEpoch 39: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 289/289 [02:24<00:00, 2.00it/s, loss=4.66, v_num=0]\r\nBest model is stored at: /datasets/lightly_outputs/2021-06-07/07-44-35/lightly_epoch_9.ckpt \r\n########## Starting to embed your dataset.\r\nCompute efficiency: 0.03: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 290/290 [00:31<00:00, 9.25it/s]\r\nEmbeddings are stored at /datasets/lightly_outputs/2021-06-07/07-44-35/embeddings.csv \r\n########## Starting to upload your dataset to the Lightly platform.\r\nUploading images (with 12 workers).\r\n 0%|\u258f | 99/74202 [00:09<1:31:21, 13.52imgs/s]/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly_utils/image_processing/metadata.py:53: RuntimeWarning: divide by zero encountered in double_scalars \r\n return float(np.where(std == 0., 0, mean / std))\r\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 74202/74202 [3:47:52<00:00, 4.35imgs/s]Finished the upload of the dataset.\r\nStarting upload of embeddings.\r\nTraceback (most recent call last):\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py\", line 80, in lightly_cli \r\n return _lightly_cli(cfg)\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py\", 
line 38, in _lightly_cli \r\n _upload_cli(cfg)\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/upload_cli.py\", line 76, in _upload_cli \r\n path_to_embeddings_csv=path_to_embeddings, name=name\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py\", line 71, in upload_embeddings \r\n path_to_embeddings_csv=path_to_embeddings_csv)\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py\", line 104, in _order_csv_by_filenames \r\n raise ValueError(f'There are {len(filenames)} rows in the embedding file, but '\r\nValueError: There are 74202 rows in the embedding file, but 74208 filenames/samples on the server. \r\n\r\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\r\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 74202/74202 [3:48:50<00:00, 5.40imgs/s]\r\n```\n", "before_files": [{"content": "import os\nimport warnings\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom typing import Union\n\nimport lightly_utils.image_processing\nimport tqdm\n\nfrom lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls\nfrom lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest\nfrom lightly.api.utils import check_filename, PIL_to_bytes\nfrom lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest\nfrom lightly.data.dataset import LightlyDataset\n\nfrom lightly.api.utils import retry\n\nfrom lightly_utils import image_processing\n\n\n\nclass _UploadDatasetMixin:\n\n def upload_dataset(self, input: Union[str, LightlyDataset], max_workers: int = 8,\n mode: str = 'thumbnails', verbose: bool = True):\n \"\"\"Uploads a dataset to to the Lightly cloud solution.\n\n Args:\n input:\n one of the following:\n - the path to the dataset, e.g. \"path/to/dataset\"\n - the dataset in form of a LightlyDataset\n max_workers:\n Maximum number of workers uploading images in parallel.\n max_requests:\n Maximum number of requests a single worker can do before he has\n to wait for the others.\n mode:\n One of [full, thumbnails, metadata]. Whether to upload thumbnails,\n full images, or metadata only.\n\n Raises:\n ValueError if dataset is too large or input has the wrong type\n RuntimeError if the connection to the server failed.\n\n \"\"\"\n no_tags_on_server = len(self._get_all_tags())\n if no_tags_on_server > 0:\n warnings.warn(f\"Dataset with id {self.dataset_id} has already been completely uploaded to the platform. 
Skipping upload.\")\n return\n\n # Check input variable 'input'\n if isinstance(input, str):\n dataset = LightlyDataset(input_dir=input)\n elif isinstance(input, LightlyDataset):\n dataset = input\n else:\n raise ValueError(f\"input must either be a LightlyDataset or the path to the dataset as str, \"\n f\"but is of type {type(input)}\")\n\n # check the allowed dataset size\n max_dataset_size_str = self.quota_api.get_quota_maximum_dataset_size()\n max_dataset_size = int(max_dataset_size_str)\n if len(dataset) > max_dataset_size:\n msg = f'Your dataset has {len(dataset)} samples which'\n msg += f' is more than the allowed maximum of {max_dataset_size}'\n raise ValueError(msg)\n\n # handle the case where len(dataset) < max_workers\n max_workers = min(len(dataset), max_workers)\n max_workers = max(max_workers, 1)\n\n # upload the samples\n if verbose:\n print(f'Uploading images (with {max_workers} workers).', flush=True)\n\n pbar = tqdm.tqdm(unit='imgs', total=len(dataset))\n tqdm_lock = tqdm.tqdm.get_lock()\n\n # calculate the files size more efficiently\n lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0\n\n # define lambda function for concurrent upload\n def lambda_(i):\n # load image\n image, label, filename = dataset[i]\n filepath = dataset.get_filepath_from_filename(filename, image)\n # try to upload image\n try:\n self._upload_single_image(\n image=image,\n label=label,\n filename=filename,\n filepath=filepath,\n mode=mode,\n )\n success = True\n except Exception as e:\n warnings.warn(f\"Upload of image {filename} failed with error {e}\")\n success = False\n\n # update the progress bar\n tqdm_lock.acquire() # lock\n pbar.update(1) # update\n tqdm_lock.release() # unlock\n # return whether the upload was successful\n return success\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n results = list(executor.map(\n lambda_, [i for i in range(len(dataset))], chunksize=1))\n\n if not all(results):\n msg = 'Warning: Unsuccessful upload(s)! 
'\n msg += 'This could cause problems when uploading embeddings.'\n msg += 'Failed at image: {}'.format(results.index(False))\n warnings.warn(msg)\n\n # set image type of data and create initial tag\n if mode == 'full':\n img_type = 'full'\n elif mode == 'thumbnails':\n img_type = 'thumbnail'\n else:\n img_type = 'meta'\n\n initial_tag_create_request = InitialTagCreateRequest(img_type=img_type, creator=TagCreator.USER_PIP)\n self.tags_api.create_initial_tag_by_dataset_id(body=initial_tag_create_request, dataset_id=self.dataset_id)\n\n def _upload_single_image(self, image, label, filename: str, filepath: str, mode):\n \"\"\"Uploads a single image to the Lightly platform.\n\n \"\"\"\n self.samples_api: SamplesApi\n\n # check whether the filename is too long\n basename = filename\n if not check_filename(basename):\n msg = (f'Filename {basename} is longer than the allowed maximum of '\n 'characters and will be skipped.')\n warnings.warn(msg)\n return False\n\n # calculate metadata, and check if corrupted\n metadata = image_processing.Metadata(image).to_dict()\n metadata[\"sizeInBytes\"] = os.path.getsize(filepath)\n\n # try to get exif data\n try:\n exifdata = image_processing.Exifdata(image)\n except Exception:\n exifdata = None\n\n # generate thumbnail if necessary\n thumbname = None\n if not metadata['is_corrupted'] and mode in [\"thumbnails\", \"full\"]:\n thumbname = '.'.join(basename.split('.')[:-1]) + '_thumb.webp'\n\n body = SampleCreateRequest(\n file_name=basename,\n thumb_name=thumbname,\n meta_data=metadata,\n exif=exifdata if exifdata is None else exifdata.to_dict(),\n )\n sample_id = retry(\n self.samples_api.create_sample_by_dataset_id,\n body=body,\n dataset_id=self.dataset_id\n ).id\n\n if not metadata['is_corrupted'] and mode in [\"thumbnails\", \"full\"]:\n\n def upload_thumbnail(image, signed_url):\n thumbnail = image_processing.Thumbnail(image)\n image_to_upload = thumbnail.to_bytes()\n retry(\n self.upload_file_with_signed_url,\n image_to_upload,\n signed_url\n )\n thumbnail.thumbnail.close()\n\n def upload_full_image(filepath, signed_url):\n with open(filepath, 'rb') as image_to_upload:\n retry(\n self.upload_file_with_signed_url,\n image_to_upload,\n signed_url\n )\n\n if mode == \"thumbnails\":\n thumbnail_url = retry(\n self.samples_api.get_sample_image_write_url_by_id,\n dataset_id=self.dataset_id,\n sample_id=sample_id,\n is_thumbnail=True\n )\n upload_thumbnail(image, thumbnail_url)\n elif mode == \"full\":\n sample_write_urls: SampleWriteUrls = retry(\n self.samples_api.get_sample_image_write_urls_by_id, dataset_id=self.dataset_id, sample_id=sample_id\n )\n upload_thumbnail(image, sample_write_urls.thumb)\n upload_full_image(filepath, sample_write_urls.full)\n\n\n\n image.close()\n\n\n", "path": "lightly/api/api_workflow_upload_dataset.py"}]}
| 3,571 | 483 |
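The golden diff in the row above fixes the mismatch by asking the server which filenames already exist and skipping them during the upload. A minimal sketch of that skip pattern, using illustrative names and in-memory data instead of the real lightly `SamplesApi`:

```python
# Sketch of the skip logic: anything already on the server is neither
# re-uploaded nor re-counted, so embeddings and samples stay in sync.
existing_on_server = {"img_0001.jpg", "img_0002.jpg"}      # assumed server state
local_dataset = ["img_0001.jpg", "img_0002.jpg", "img_0003.jpg"]

uploaded = 0
for filename in local_dataset:
    if filename in existing_on_server:
        continue                      # already uploaded, skip it
    uploaded += 1                     # the real code uploads image + metadata here
print(f"uploaded {uploaded} new file(s)")                  # -> uploaded 1 new file(s)
```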
gh_patches_debug_1543
|
rasdani/github-patches
|
git_diff
|
ansible__molecule-659
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
If raw_env_vars contains a var whose value is not a string, molecule silently fails to execute Ansible
# Issue Type
- Bug report
# Molecule and Ansible details
```
ansible --version
2.2.0.0
molecule --version
1.14.1.dev37
```
Sha256 of the commit is 8eed3f539f0b30b04d98f8ab418a2a5d55cce01f; molecule --version for the source checkout seems to be stuck on 1.14.
- Molecule installation method: source
# Desired Behaviour
There is no possible key value pair in the ``ansible: {raw_env_vars: {}}`` section that prevents Ansible from getting executed
# Actual Behaviour (Bug report only)
Except from molecule.yml:
```
raw_env_vars:
RETRY_FILES_ENABLED: 0
```
```
molecule --debug syntax; echo $?
--> Checking playbook's syntax...
DEBUG: COMMAND
/usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff
255
```
```
$ /usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff; echo $?
Using $ROLE_DIR/ansible.cfg as config file
Set default localhost to localhost
3 plays in tests/test.yml
playbook: tests/test.yml
0
```
The same happens with ``molecule converge``.
This is especially bad since it is really hard to debug: there is no error message at all.
I am not even sure whether this is molecule's fault, because [the doc for sh.py's `_env` arg only mentions that it must be a dict, not that all values must be string types](https://amoffat.github.io/sh/sections/special_arguments.html?highlight=_env#env).
</issue>
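The fix that was merged (see the diff later in this row) casts every non-string value to text before it reaches the environment dict handed to sh.py. The snippet below is a standalone sketch of that coercion using plain `os` data, not molecule's actual call path:

```python
# Coerce raw_env_vars values to strings before passing them on as environment
# variables; the molecule patch does the same with unicode() (Python 2 code base).
import os

raw_env_vars = {"RETRY_FILES_ENABLED": 0, "ANSIBLE_FORCE_COLOR": "true"}

env = os.environ.copy()
for key, value in raw_env_vars.items():
    if not isinstance(value, str):
        value = str(value)            # 0 -> "0"
    env[key] = value

assert all(isinstance(v, str) for v in env.values())
```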
<code>
[start of molecule/ansible_playbook.py]
1 # Copyright (c) 2015-2016 Cisco Systems, Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to
5 # deal in the Software without restriction, including without limitation the
6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 import os
22 import sh
23
24 from molecule import util
25
26
27 class AnsiblePlaybook(object):
28 def __init__(self,
29 args,
30 connection_params,
31 raw_ansible_args=None,
32 _env=None,
33 _out=util.callback_info,
34 _err=util.callback_error,
35 debug=False):
36 """
37 Sets up requirements for ansible-playbook and returns None.
38
39 :param args: A dict containing arguments to pass to ansible-playbook.
40 :param connection_params: A dict containing driver specific connection
41 params to pass to ansible-playbook.
42 :param _env: An optional environment to pass to underlying :func:`sh`
43 call.
44 :param _out: An optional function to process STDOUT for underlying
45 :func:`sh` call.
46 :param _err: An optional function to process STDERR for underlying
47 :func:`sh` call.
48 :param debug: An optional bool to toggle debug output.
49 :return: None
50 """
51 self._playbook = None
52 self._ansible = None
53 self._cli = {}
54 self._cli_pos = []
55 self._raw_ansible_args = raw_ansible_args
56 self._env = _env if _env else os.environ.copy()
57 self._debug = debug
58
59 for k, v in args.iteritems():
60 self.parse_arg(k, v)
61
62 for k, v in connection_params.items():
63 self.add_cli_arg(k, v)
64
65 self.add_env_arg('PYTHONUNBUFFERED', '1')
66 self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')
67
68 self.add_cli_arg('_out', _out)
69 self.add_cli_arg('_err', _err)
70
71 @property
72 def env(self):
73 return self._env
74
75 def bake(self):
76 """
77 Bake ansible-playbook command so it's ready to execute and returns
78 None.
79
80 :return: None
81 """
82 self._ansible = sh.ansible_playbook.bake(
83 self._playbook, *self._cli_pos, _env=self._env, **self._cli)
84 if self._raw_ansible_args:
85 self._ansible = self._ansible.bake(self._raw_ansible_args)
86
87 def parse_arg(self, name, value):
88 """
89 Adds argument to CLI or environment and returns None.
90
91 :param name: A string containing the name of argument to be added.
92 :param value: The value of argument to be added.
93 :return: None
94 """
95
96 if name == 'raw_env_vars':
97 for k, v in value.iteritems():
98 self.add_env_arg(k, v)
99 return
100
101 if name == 'host_key_checking':
102 self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())
103 return
104
105 if name == 'raw_ssh_args':
106 self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))
107 return
108
109 if name == 'config_file':
110 self.add_env_arg('ANSIBLE_CONFIG', value)
111 return
112
113 if name == 'playbook':
114 self._playbook = value
115 return
116
117 if name == 'host_vars' or name == 'group_vars':
118 return
119
120 # verbose is weird, must be -vvvv not verbose=vvvv
121 if name == 'verbose' and value:
122 # for cases where someone passes in verbose: True
123 if value is True:
124 value = 'vvvv'
125 self._cli_pos.append('-' + value)
126 return
127
128 self.add_cli_arg(name, value)
129
130 def add_cli_arg(self, name, value):
131 """
132 Adds argument to CLI passed to ansible-playbook and returns None.
133
134 :param name: A string containing the name of argument to be added.
135 :param value: The value of argument to be added.
136 :return: None
137 """
138 if value:
139 self._cli[name] = value
140
141 def remove_cli_arg(self, name):
142 """
143 Removes CLI argument and returns None.
144
145 :param name: A string containing the name of argument to be removed.
146 :return: None
147 """
148 self._cli.pop(name, None)
149
150 def add_env_arg(self, name, value):
151 """
152 Adds argument to environment passed to ansible-playbook and returns
153 None.
154
155 :param name: A string containing the name of argument to be added.
156 :param value: The value of argument to be added.
157 :return: None
158 """
159 self._env[name] = value
160
161 def remove_env_arg(self, name):
162 """
163 Removes environment argument and returns None.
164
165 :param name: A string containing the name of argument to be removed.
166 :return: None
167 """
168 self._env.pop(name, None)
169
170 def execute(self, hide_errors=False):
171 """
172 Executes ansible-playbook and returns command's stdout.
173
174 :param hide_errors: An optional bool to toggle output of errors.
175 :return: The command's output, otherwise sys.exit on command failure.
176 """
177 if self._ansible is None:
178 self.bake()
179
180 try:
181 return None, util.run_command(
182 self._ansible, debug=self._debug).stdout
183 except sh.ErrorReturnCode as e:
184 if not hide_errors:
185 util.print_error(str(e))
186
187 return e.exit_code, None
188
[end of molecule/ansible_playbook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/molecule/ansible_playbook.py b/molecule/ansible_playbook.py
--- a/molecule/ansible_playbook.py
+++ b/molecule/ansible_playbook.py
@@ -95,6 +95,8 @@
if name == 'raw_env_vars':
for k, v in value.iteritems():
+ if not isinstance(v, basestring):
+ v = unicode(v)
self.add_env_arg(k, v)
return
|
{"golden_diff": "diff --git a/molecule/ansible_playbook.py b/molecule/ansible_playbook.py\n--- a/molecule/ansible_playbook.py\n+++ b/molecule/ansible_playbook.py\n@@ -95,6 +95,8 @@\n \n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n+ if not isinstance(v, basestring):\n+ v = unicode(v)\n self.add_env_arg(k, v)\n return\n", "issue": "If raw_env_vars contains a var whose value is not a string, molecule silently fails to execute Ansible\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nansible --version\r\n2.2.0.0\r\n\r\nmolecule --version\r\n\r\n1.14.1.dev37\r\n```\r\n\r\nSha256 of commit is 8eed3f539f0b30b04d98f8ab418a2a5d55cce01f, molecule --version for source checkout seems to be stuck on 1.14\r\n\r\n- Molecule installation method: source\r\n\r\n# Desired Behaviour\r\n\r\nThere is no possible key value pair in the ``ansible: {raw_env_vars: {}}`` section that prevents Ansible from getting executed\r\n\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nExcept from molecule.yml:\r\n\r\n```\r\n raw_env_vars:\r\n RETRY_FILES_ENABLED: 0\r\n```\r\n\r\n```\r\nmolecule --debug syntax; echo $?\r\n--> Checking playbook's syntax...\r\nDEBUG: COMMAND\r\n/usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff\r\n255\r\n```\r\n\r\n```\r\n$ /usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff; echo $?\r\nUsing $ROLE_DIR/ansible.cfg as config file\r\nSet default localhost to localhost\r\n3 plays in tests/test.yml\r\n\r\nplaybook: tests/test.yml\r\n0\r\n```\r\nThe same happens with ``molecule converge``.\r\n\r\nThis is especially bad since that one is really hard to debug, because there is no error message at all.\r\n\r\nI am not even sure if this is the fault of molecule, because [the doc to sh.pys `_env` arg only mentions it must be a dict, not that all values must be string types](https://amoffat.github.io/sh/sections/special_arguments.html?highlight=_env#env).\n", "before_files": [{"content": "# Copyright (c) 2015-2016 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport sh\n\nfrom molecule import util\n\n\nclass AnsiblePlaybook(object):\n def __init__(self,\n args,\n connection_params,\n raw_ansible_args=None,\n _env=None,\n _out=util.callback_info,\n _err=util.callback_error,\n debug=False):\n \"\"\"\n Sets up requirements for ansible-playbook and returns None.\n\n :param args: A dict containing arguments to pass to ansible-playbook.\n :param connection_params: A dict containing driver specific connection\n params to pass to ansible-playbook.\n :param _env: An optional environment to pass to underlying :func:`sh`\n call.\n :param _out: An optional function to process STDOUT for underlying\n :func:`sh` call.\n :param _err: An optional function to process STDERR for underlying\n :func:`sh` call.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n self._playbook = None\n self._ansible = None\n self._cli = {}\n self._cli_pos = []\n self._raw_ansible_args = raw_ansible_args\n self._env = _env if _env else os.environ.copy()\n self._debug = debug\n\n for k, v in args.iteritems():\n self.parse_arg(k, v)\n\n for k, v in connection_params.items():\n self.add_cli_arg(k, v)\n\n self.add_env_arg('PYTHONUNBUFFERED', '1')\n self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')\n\n self.add_cli_arg('_out', _out)\n self.add_cli_arg('_err', _err)\n\n @property\n def env(self):\n return self._env\n\n def bake(self):\n \"\"\"\n Bake ansible-playbook command so it's ready to execute and returns\n None.\n\n :return: None\n \"\"\"\n self._ansible = sh.ansible_playbook.bake(\n self._playbook, *self._cli_pos, _env=self._env, **self._cli)\n if self._raw_ansible_args:\n self._ansible = self._ansible.bake(self._raw_ansible_args)\n\n def parse_arg(self, name, value):\n \"\"\"\n Adds argument to CLI or environment and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n\n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n self.add_env_arg(k, v)\n return\n\n if name == 'host_key_checking':\n self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())\n return\n\n if name == 'raw_ssh_args':\n self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))\n return\n\n if name == 'config_file':\n self.add_env_arg('ANSIBLE_CONFIG', value)\n return\n\n if name == 'playbook':\n self._playbook = value\n return\n\n if name == 'host_vars' or name == 'group_vars':\n return\n\n # verbose is weird, must be -vvvv not verbose=vvvv\n if name == 'verbose' and value:\n # for cases where someone passes in verbose: True\n if value is True:\n value = 'vvvv'\n self._cli_pos.append('-' + value)\n return\n\n self.add_cli_arg(name, value)\n\n def add_cli_arg(self, name, value):\n \"\"\"\n Adds argument to CLI passed to ansible-playbook and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n if value:\n self._cli[name] = value\n\n def remove_cli_arg(self, name):\n \"\"\"\n Removes CLI argument and returns None.\n\n :param name: A string containing the name of argument to be removed.\n :return: None\n \"\"\"\n self._cli.pop(name, None)\n\n def add_env_arg(self, name, 
value):\n \"\"\"\n Adds argument to environment passed to ansible-playbook and returns\n None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n self._env[name] = value\n\n def remove_env_arg(self, name):\n \"\"\"\n Removes environment argument and returns None.\n\n :param name: A string containing the name of argument to be removed.\n :return: None\n \"\"\"\n self._env.pop(name, None)\n\n def execute(self, hide_errors=False):\n \"\"\"\n Executes ansible-playbook and returns command's stdout.\n\n :param hide_errors: An optional bool to toggle output of errors.\n :return: The command's output, otherwise sys.exit on command failure.\n \"\"\"\n if self._ansible is None:\n self.bake()\n\n try:\n return None, util.run_command(\n self._ansible, debug=self._debug).stdout\n except sh.ErrorReturnCode as e:\n if not hide_errors:\n util.print_error(str(e))\n\n return e.exit_code, None\n", "path": "molecule/ansible_playbook.py"}]}
| 2,837 | 100 |
gh_patches_debug_6408
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py]
1 #!/usr/bin/env python
2 # -*- encoding: utf-8 -*-
3
4 """
5 Initialize new tokenizer for continual pre-training
6 """
7
8 import argparse
9 import os
10 import json
11 from typing import List, Union
12
13 from transformers.models.llama.tokenization_llama import LlamaTokenizer
14 from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
15
16 from colossalai.logging import get_dist_logger
17
18 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
19
20 logger = get_dist_logger()
21
22
23 def expand_vocab_tokenizer(
24 source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]
25 ) -> None:
26 """Expand tokenizer for continue pre-training."""
27 if os.path.exists(target_tokenizer_dir):
28 raise RuntimeError(f"Find existed directory {target_tokenizer_dir}")
29
30 source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)
31 logger.info(source_tokenizer)
32 source_sp_processor = source_tokenizer.sp_model
33 source_spm = sp_pb2_model.ModelProto()
34 source_spm.ParseFromString(source_sp_processor.serialized_model_proto())
35
36 logger.info(f"Source tokenizer size: {len(source_sp_processor)}")
37
38 # Add new tokens to source tokenizer.
39 source_spm_tokens = set([p.piece for p in source_spm.pieces])
40 for piece in new_tokens:
41 assert isinstance(piece, str), f"Invalid token({piece}) type {type(piece)}"
42 if piece in source_spm_tokens:
43 # Skip existed token.
44 continue
45 new_p = sp_pb2_model.ModelProto().SentencePiece()
46 new_p.piece = piece
47 new_p.score = 0
48 source_spm.pieces.append(new_p)
49 logger.info(f"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}")
50
51 # Save
52 os.makedirs(target_tokenizer_dir)
53 target_tokenizer_model_path = os.path.join(target_tokenizer_dir, "tokenizer.model")
54 with open(file=target_tokenizer_model_path, mode="wb") as fp:
55 fp.write(source_spm.SerializeToString())
56
57 target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)
58 target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)
59 logger.info(f"Successfully save expand tokenizer to {target_tokenizer_dir}")
60
61
62 def main():
63 parser = argparse.ArgumentParser()
64 parser.add_argument(
65 "--source_tokenizer_dir", type=str, required=True, default=None, help="Source tokenizer directory"
66 )
67 parser.add_argument(
68 "--target_tokenizer_dir", type=str, required=True, default=None, help="Target tokenizer directory"
69 )
70 parser.add_argument(
71 "--expand_tokens_file",
72 type=str,
73 required=True,
74 default=None,
75 help="Path of the file containing tokens to be extended",
76 )
77 args = parser.parse_args()
78
79 expand_tokens = []
80 with open(file=args.expand_tokens_file, mode="r", encoding="utf-8") as fp_reader:
81 for line in fp_reader:
82 item = json.loads(line)
83 # e.g., {"piece": "你好"}
84 token = item["piece"]
85 if token in expand_tokens:
86 continue
87 expand_tokens.append(token)
88 expand_tokens.sort(key=lambda t: len(t), reverse=False)
89
90 expand_vocab_tokenizer(
91 source_tokenizer_dir=args.source_tokenizer_dir,
92 target_tokenizer_dir=args.target_tokenizer_dir,
93 new_tokens=expand_tokens,
94 )
95
96
97 if __name__ == "__main__":
98 main()
99
[end of applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py
--- a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py
+++ b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py
@@ -6,12 +6,12 @@
"""
import argparse
-import os
import json
+import os
from typing import List, Union
-from transformers.models.llama.tokenization_llama import LlamaTokenizer
from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model
+from transformers.models.llama.tokenization_llama import LlamaTokenizer
from colossalai.logging import get_dist_logger
|
{"golden_diff": "diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n--- a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n+++ b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n@@ -6,12 +6,12 @@\n \"\"\"\n \n import argparse\n-import os\n import json\n+import os\n from typing import List, Union\n \n-from transformers.models.llama.tokenization_llama import LlamaTokenizer\n from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n+from transformers.models.llama.tokenization_llama import LlamaTokenizer\n \n from colossalai.logging import get_dist_logger\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\nInitialize new tokenizer for continual pre-training\n\"\"\"\n\nimport argparse\nimport os\nimport json\nfrom typing import List, Union\n\nfrom transformers.models.llama.tokenization_llama import LlamaTokenizer\nfrom sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n\nfrom colossalai.logging import get_dist_logger\n\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n\nlogger = get_dist_logger()\n\n\ndef expand_vocab_tokenizer(\n source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]\n) -> None:\n \"\"\"Expand tokenizer for continue pre-training.\"\"\"\n if os.path.exists(target_tokenizer_dir):\n raise RuntimeError(f\"Find existed directory {target_tokenizer_dir}\")\n\n source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)\n logger.info(source_tokenizer)\n source_sp_processor = source_tokenizer.sp_model\n source_spm = sp_pb2_model.ModelProto()\n source_spm.ParseFromString(source_sp_processor.serialized_model_proto())\n\n logger.info(f\"Source tokenizer size: {len(source_sp_processor)}\")\n\n # Add new tokens to source tokenizer.\n source_spm_tokens = set([p.piece for p in source_spm.pieces])\n for piece in new_tokens:\n assert isinstance(piece, str), f\"Invalid token({piece}) type {type(piece)}\"\n if piece in source_spm_tokens:\n # Skip existed token.\n continue\n new_p = sp_pb2_model.ModelProto().SentencePiece()\n new_p.piece = piece\n new_p.score = 0\n source_spm.pieces.append(new_p)\n logger.info(f\"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}\")\n\n # Save\n os.makedirs(target_tokenizer_dir)\n target_tokenizer_model_path = os.path.join(target_tokenizer_dir, \"tokenizer.model\")\n with open(file=target_tokenizer_model_path, mode=\"wb\") as fp:\n fp.write(source_spm.SerializeToString())\n\n target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)\n target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)\n logger.info(f\"Successfully save expand tokenizer to {target_tokenizer_dir}\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--source_tokenizer_dir\", type=str, required=True, default=None, help=\"Source tokenizer directory\"\n )\n parser.add_argument(\n \"--target_tokenizer_dir\", type=str, required=True, default=None, help=\"Target tokenizer directory\"\n )\n parser.add_argument(\n \"--expand_tokens_file\",\n type=str,\n required=True,\n default=None,\n help=\"Path of the file containing tokens to be extended\",\n )\n args = parser.parse_args()\n\n expand_tokens = []\n 
with open(file=args.expand_tokens_file, mode=\"r\", encoding=\"utf-8\") as fp_reader:\n for line in fp_reader:\n item = json.loads(line)\n # e.g., {\"piece\": \"\u4f60\u597d\"}\n token = item[\"piece\"]\n if token in expand_tokens:\n continue\n expand_tokens.append(token)\n expand_tokens.sort(key=lambda t: len(t), reverse=False)\n\n expand_vocab_tokenizer(\n source_tokenizer_dir=args.source_tokenizer_dir,\n target_tokenizer_dir=args.target_tokenizer_dir,\n new_tokens=expand_tokens,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py"}]}
| 1,542 | 191 |
gh_patches_debug_16830
|
rasdani/github-patches
|
git_diff
|
voicepaw__so-vits-svc-fork-45
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prepare model for inference
**Is your feature request related to a problem? Please describe.**
The first inference takes the longest, whereas subsequent inferences are faster. Since the first inference normally happens inside the sounddevice callback, it's likely that the audio will not be processed in time and will end up delayed.
**Describe the solution you'd like**
After loading the model, run an initial inference with some dummy data, perhaps torch.zeros of appropriate sizes.
**Additional context**
On my computer with an RTX 3050, the first inference takes about 3 seconds to complete. After that I get a realtime coef of ~28.
</issue>
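The merged fix (shown in the diff later in this row) does exactly this: one throwaway inference on a second of zeros right after the audio devices are logged. Below is a stripped-down sketch of the warm-up idea with a plain PyTorch module; the layer sizes and timing are stand-ins, not the real `Svc` model:

```python
# Warm-up sketch: one dummy forward pass pays the one-off cost (CUDA context,
# kernel loading, cuDNN autotuning) before the latency-sensitive callback runs.
import time
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = torch.nn.Sequential(
    torch.nn.Linear(16000, 256), torch.nn.ReLU(), torch.nn.Linear(256, 16000)
).to(device).eval()

dummy = torch.zeros(1, 16000, device=device)   # roughly "1 second" of silence
with torch.no_grad():
    t0 = time.time()
    model(dummy)                               # slow first call
    print(f"warm-up: {time.time() - t0:.3f}s")
    t0 = time.time()
    model(dummy)                               # subsequent calls are typically faster
    print(f"second call: {time.time() - t0:.3f}s")
```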
<code>
[start of src/so_vits_svc_fork/inference_main.py]
1 from __future__ import annotations
2
3 from logging import getLogger
4 from pathlib import Path
5 from typing import Literal
6
7 import librosa
8 import numpy as np
9 import soundfile
10 import torch
11
12 from .inference.infer_tool import RealtimeVC, RealtimeVC2, Svc
13
14 LOG = getLogger(__name__)
15
16
17 def infer(
18 *,
19 # paths
20 input_path: Path | str,
21 output_path: Path | str,
22 model_path: Path | str,
23 config_path: Path | str,
24 # svc config
25 speaker: int | str,
26 cluster_model_path: Path | str | None = None,
27 transpose: int = 0,
28 auto_predict_f0: bool = False,
29 cluster_infer_ratio: float = 0,
30 noise_scale: float = 0.4,
31 f0_method: Literal["crepe", "parselmouth", "dio", "harvest"] = "crepe",
32 # slice config
33 db_thresh: int = -40,
34 pad_seconds: float = 0.5,
35 chunk_seconds: float = 0.5,
36 absolute_thresh: bool = False,
37 device: Literal["cpu", "cuda"] = "cuda" if torch.cuda.is_available() else "cpu",
38 ):
39 model_path = Path(model_path)
40 output_path = Path(output_path)
41 input_path = Path(input_path)
42 config_path = Path(config_path)
43 cluster_model_path = Path(cluster_model_path) if cluster_model_path else None
44 svc_model = Svc(
45 net_g_path=model_path.as_posix(),
46 config_path=config_path.as_posix(),
47 cluster_model_path=cluster_model_path.as_posix()
48 if cluster_model_path
49 else None,
50 device=device,
51 )
52
53 audio, _ = librosa.load(input_path, sr=svc_model.target_sample)
54 audio = svc_model.infer_silence(
55 audio.astype(np.float32),
56 speaker=speaker,
57 transpose=transpose,
58 auto_predict_f0=auto_predict_f0,
59 cluster_infer_ratio=cluster_infer_ratio,
60 noise_scale=noise_scale,
61 f0_method=f0_method,
62 db_thresh=db_thresh,
63 pad_seconds=pad_seconds,
64 chunk_seconds=chunk_seconds,
65 absolute_thresh=absolute_thresh,
66 )
67
68 soundfile.write(output_path, audio, svc_model.target_sample)
69
70
71 def realtime(
72 *,
73 # paths
74 model_path: Path | str,
75 config_path: Path | str,
76 # svc config
77 speaker: str,
78 cluster_model_path: Path | str | None = None,
79 transpose: int = 0,
80 auto_predict_f0: bool = False,
81 cluster_infer_ratio: float = 0,
82 noise_scale: float = 0.4,
83 f0_method: Literal["crepe", "parselmouth", "dio", "harvest"] = "crepe",
84 # slice config
85 db_thresh: int = -40,
86 pad_seconds: float = 0.5,
87 chunk_seconds: float = 0.5,
88 # realtime config
89 crossfade_seconds: float = 0.05,
90 block_seconds: float = 0.5,
91 version: int = 2,
92 input_device: int | str | None = None,
93 output_device: int | str | None = None,
94 device: Literal["cpu", "cuda"] = "cuda" if torch.cuda.is_available() else "cpu",
95 ):
96 import sounddevice as sd
97
98 model_path = Path(model_path)
99 config_path = Path(config_path)
100 cluster_model_path = Path(cluster_model_path) if cluster_model_path else None
101 svc_model = Svc(
102 net_g_path=model_path.as_posix(),
103 config_path=config_path.as_posix(),
104 cluster_model_path=cluster_model_path.as_posix()
105 if cluster_model_path
106 else None,
107 device=device,
108 )
109 if version == 1:
110 model = RealtimeVC(
111 svc_model=svc_model,
112 crossfade_len=int(crossfade_seconds * svc_model.target_sample),
113 )
114 else:
115 model = RealtimeVC2(
116 svc_model=svc_model,
117 )
118
119 # LOG all device info
120 devices = sd.query_devices()
121 LOG.info(f"Device: {devices}")
122 if isinstance(input_device, str):
123 input_device_candidates = [
124 i for i, d in enumerate(devices) if d["name"] == input_device
125 ]
126 if len(input_device_candidates) == 0:
127 LOG.warning(f"Input device {input_device} not found, using default")
128 input_device = None
129 else:
130 input_device = input_device_candidates[0]
131 if isinstance(output_device, str):
132 output_device_candidates = [
133 i for i, d in enumerate(devices) if d["name"] == output_device
134 ]
135 if len(output_device_candidates) == 0:
136 LOG.warning(f"Output device {output_device} not found, using default")
137 output_device = None
138 else:
139 output_device = output_device_candidates[0]
140 if input_device is None or input_device >= len(devices):
141 input_device = sd.default.device[0]
142 if output_device is None or output_device >= len(devices):
143 output_device = sd.default.device[1]
144 LOG.info(
145 f"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}"
146 )
147
148 def callback(
149 indata: np.ndarray,
150 outdata: np.ndarray,
151 frames: int,
152 time: int,
153 status: sd.CallbackFlags,
154 ) -> None:
155 LOG.debug(
156 f"Frames: {frames}, Status: {status}, Shape: {indata.shape}, Time: {time}"
157 )
158
159 kwargs = dict(
160 input_audio=indata.mean(axis=1).astype(np.float32),
161 # svc config
162 speaker=speaker,
163 transpose=transpose,
164 auto_predict_f0=auto_predict_f0,
165 cluster_infer_ratio=cluster_infer_ratio,
166 noise_scale=noise_scale,
167 f0_method=f0_method,
168 # slice config
169 db_thresh=db_thresh,
170 # pad_seconds=pad_seconds,
171 chunk_seconds=chunk_seconds,
172 )
173 if version == 1:
174 kwargs["pad_seconds"] = pad_seconds
175 outdata[:] = model.process(
176 **kwargs,
177 ).reshape(-1, 1)
178
179 with sd.Stream(
180 device=(input_device, output_device),
181 channels=1,
182 callback=callback,
183 samplerate=svc_model.target_sample,
184 blocksize=int(block_seconds * svc_model.target_sample),
185 ):
186 while True:
187 sd.sleep(1)
188
[end of src/so_vits_svc_fork/inference_main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/so_vits_svc_fork/inference_main.py b/src/so_vits_svc_fork/inference_main.py
--- a/src/so_vits_svc_fork/inference_main.py
+++ b/src/so_vits_svc_fork/inference_main.py
@@ -145,6 +145,21 @@
f"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}"
)
+ # the model realtime coef is somewhat significantly low only in the first inference
+ # there could be no better way to warm up the model than to do a dummy inference
+ # (there are not differences in the behavior of the model between the first and the later inferences)
+ # so we do a dummy inference to warm up the model (1 second of audio)
+ LOG.info("Warming up the model...")
+ svc_model.infer(
+ speaker=speaker,
+ transpose=transpose,
+ auto_predict_f0=auto_predict_f0,
+ cluster_infer_ratio=cluster_infer_ratio,
+ noise_scale=noise_scale,
+ f0_method=f0_method,
+ audio=np.zeros(svc_model.target_sample, dtype=np.float32),
+ )
+
def callback(
indata: np.ndarray,
outdata: np.ndarray,
|
{"golden_diff": "diff --git a/src/so_vits_svc_fork/inference_main.py b/src/so_vits_svc_fork/inference_main.py\n--- a/src/so_vits_svc_fork/inference_main.py\n+++ b/src/so_vits_svc_fork/inference_main.py\n@@ -145,6 +145,21 @@\n f\"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}\"\n )\n \n+ # the model realtime coef is somewhat significantly low only in the first inference\n+ # there could be no better way to warm up the model than to do a dummy inference\n+ # (there are not differences in the behavior of the model between the first and the later inferences)\n+ # so we do a dummy inference to warm up the model (1 second of audio)\n+ LOG.info(\"Warming up the model...\")\n+ svc_model.infer(\n+ speaker=speaker,\n+ transpose=transpose,\n+ auto_predict_f0=auto_predict_f0,\n+ cluster_infer_ratio=cluster_infer_ratio,\n+ noise_scale=noise_scale,\n+ f0_method=f0_method,\n+ audio=np.zeros(svc_model.target_sample, dtype=np.float32),\n+ )\n+\n def callback(\n indata: np.ndarray,\n outdata: np.ndarray,\n", "issue": "Prepare model for inference\n**Is your feature request related to a problem? Please describe.**\r\nThe first time inference occurs takes the longest, where as the next inferences afterwards are faster. Since normally the first time is in the sounddevice callback, it's likely that audio will not be processed in time and will end up delayed.\r\n\r\n**Describe the solution you'd like**\r\nAfter loading the model, run an initial inference with some dummy data, perhaps torch.zeros of appropriate sizes.\r\n\r\n**Additional context**\r\nOn my computer with a RTX 3050, the first time inference takes about 3 seconds to complete. Otherwise I get a Realtime coef of ~28\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Literal\n\nimport librosa\nimport numpy as np\nimport soundfile\nimport torch\n\nfrom .inference.infer_tool import RealtimeVC, RealtimeVC2, Svc\n\nLOG = getLogger(__name__)\n\n\ndef infer(\n *,\n # paths\n input_path: Path | str,\n output_path: Path | str,\n model_path: Path | str,\n config_path: Path | str,\n # svc config\n speaker: int | str,\n cluster_model_path: Path | str | None = None,\n transpose: int = 0,\n auto_predict_f0: bool = False,\n cluster_infer_ratio: float = 0,\n noise_scale: float = 0.4,\n f0_method: Literal[\"crepe\", \"parselmouth\", \"dio\", \"harvest\"] = \"crepe\",\n # slice config\n db_thresh: int = -40,\n pad_seconds: float = 0.5,\n chunk_seconds: float = 0.5,\n absolute_thresh: bool = False,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n):\n model_path = Path(model_path)\n output_path = Path(output_path)\n input_path = Path(input_path)\n config_path = Path(config_path)\n cluster_model_path = Path(cluster_model_path) if cluster_model_path else None\n svc_model = Svc(\n net_g_path=model_path.as_posix(),\n config_path=config_path.as_posix(),\n cluster_model_path=cluster_model_path.as_posix()\n if cluster_model_path\n else None,\n device=device,\n )\n\n audio, _ = librosa.load(input_path, sr=svc_model.target_sample)\n audio = svc_model.infer_silence(\n audio.astype(np.float32),\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n db_thresh=db_thresh,\n pad_seconds=pad_seconds,\n chunk_seconds=chunk_seconds,\n absolute_thresh=absolute_thresh,\n )\n\n 
soundfile.write(output_path, audio, svc_model.target_sample)\n\n\ndef realtime(\n *,\n # paths\n model_path: Path | str,\n config_path: Path | str,\n # svc config\n speaker: str,\n cluster_model_path: Path | str | None = None,\n transpose: int = 0,\n auto_predict_f0: bool = False,\n cluster_infer_ratio: float = 0,\n noise_scale: float = 0.4,\n f0_method: Literal[\"crepe\", \"parselmouth\", \"dio\", \"harvest\"] = \"crepe\",\n # slice config\n db_thresh: int = -40,\n pad_seconds: float = 0.5,\n chunk_seconds: float = 0.5,\n # realtime config\n crossfade_seconds: float = 0.05,\n block_seconds: float = 0.5,\n version: int = 2,\n input_device: int | str | None = None,\n output_device: int | str | None = None,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n):\n import sounddevice as sd\n\n model_path = Path(model_path)\n config_path = Path(config_path)\n cluster_model_path = Path(cluster_model_path) if cluster_model_path else None\n svc_model = Svc(\n net_g_path=model_path.as_posix(),\n config_path=config_path.as_posix(),\n cluster_model_path=cluster_model_path.as_posix()\n if cluster_model_path\n else None,\n device=device,\n )\n if version == 1:\n model = RealtimeVC(\n svc_model=svc_model,\n crossfade_len=int(crossfade_seconds * svc_model.target_sample),\n )\n else:\n model = RealtimeVC2(\n svc_model=svc_model,\n )\n\n # LOG all device info\n devices = sd.query_devices()\n LOG.info(f\"Device: {devices}\")\n if isinstance(input_device, str):\n input_device_candidates = [\n i for i, d in enumerate(devices) if d[\"name\"] == input_device\n ]\n if len(input_device_candidates) == 0:\n LOG.warning(f\"Input device {input_device} not found, using default\")\n input_device = None\n else:\n input_device = input_device_candidates[0]\n if isinstance(output_device, str):\n output_device_candidates = [\n i for i, d in enumerate(devices) if d[\"name\"] == output_device\n ]\n if len(output_device_candidates) == 0:\n LOG.warning(f\"Output device {output_device} not found, using default\")\n output_device = None\n else:\n output_device = output_device_candidates[0]\n if input_device is None or input_device >= len(devices):\n input_device = sd.default.device[0]\n if output_device is None or output_device >= len(devices):\n output_device = sd.default.device[1]\n LOG.info(\n f\"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}\"\n )\n\n def callback(\n indata: np.ndarray,\n outdata: np.ndarray,\n frames: int,\n time: int,\n status: sd.CallbackFlags,\n ) -> None:\n LOG.debug(\n f\"Frames: {frames}, Status: {status}, Shape: {indata.shape}, Time: {time}\"\n )\n\n kwargs = dict(\n input_audio=indata.mean(axis=1).astype(np.float32),\n # svc config\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n # slice config\n db_thresh=db_thresh,\n # pad_seconds=pad_seconds,\n chunk_seconds=chunk_seconds,\n )\n if version == 1:\n kwargs[\"pad_seconds\"] = pad_seconds\n outdata[:] = model.process(\n **kwargs,\n ).reshape(-1, 1)\n\n with sd.Stream(\n device=(input_device, output_device),\n channels=1,\n callback=callback,\n samplerate=svc_model.target_sample,\n blocksize=int(block_seconds * svc_model.target_sample),\n ):\n while True:\n sd.sleep(1)\n", "path": "src/so_vits_svc_fork/inference_main.py"}]}
| 2,583 | 295 |
gh_patches_debug_7274
|
rasdani/github-patches
|
git_diff
|
cupy__cupy-186
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make cupy.sort support arrays with rank two or more.
# Background
Arrays sorted with the `cupy.sort` operation have several properties such as dtype, rank, sorting axis and C/F-contiguousness. Currently, `cupy.sort` supports sorting only arrays of rank one, for implementation reasons; see #55.
# Problem
This issue is about making `cupy.sort` support sorting arrays of rank two or more, along the last axis, for C-contiguous arrays.
# Approach
**Rank two**
For an array with the rank of two,
```
[[4, 3]
[2, 1]]
```
treating the array as a flattened one, `[4, 3, 2, 1]`, and providing the following comparator (in pseudocode) to the underlying Thrust library:
```
if floor(i / 2) < floor(j / 2) then return true;
else if floor(i / 2) > floor(j / 2) then return false;
else return data[i] < data[j];
```
where `i` and `j` are array indices and `data[i]` denotes the `i`-th element of the flattened array `data`,
we get the C-contiguous array sorted along the last axis.
```
[[3, 4]
[1, 2]]
```
**Rank N**
Generalized to rank N with shape `(d_0, d_1, ..., d_n-1)`, the following comparator works:
```
if floor(i / d_n-1) < floor(j / d_n-1) then return true;
else if floor(i / d_n-1) > floor(j / d_n-1) then return false;
else return data[i] < data[j];
```
</issue>
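The comparator can be modelled in pure Python to sanity-check the index arithmetic before writing any Thrust code; the helper below is only a reference implementation of the idea, not the CUDA-side change the issue asks for:

```python
# Reference model of the proposed comparator: flat indices are ordered by their
# "row" (floor division by the last-axis length) first, and by value only
# within the same row, which sorts a C-contiguous array along the last axis.
import functools
import numpy as np

def sort_last_axis(a):
    data = a.ravel(order="C")
    d_last = a.shape[-1]

    def cmp(i, j):
        if i // d_last != j // d_last:            # different rows: keep row order
            return -1 if i // d_last < j // d_last else 1
        if data[i] != data[j]:                    # same row: compare values
            return -1 if data[i] < data[j] else 1
        return 0

    order = sorted(range(data.size), key=functools.cmp_to_key(cmp))
    return data[order].reshape(a.shape)

print(sort_last_axis(np.array([[4, 3], [2, 1]])))   # [[3 4]
                                                    #  [1 2]]
```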
<code>
[start of cupy/sorting/sort.py]
1 import cupy
2 import numpy
3
4 if cupy.cuda.thrust_enabled:
5 from cupy.cuda import thrust
6
7
8 def sort(a):
9 """Returns a sorted copy of an array with a stable sorting algorithm.
10
11 Args:
12 a (cupy.ndarray): Array to be sorted.
13
14 Returns:
15 cupy.ndarray: Array of the same type and shape as ``a``.
16
17 .. note::
18 For its implementation reason, ``cupy.sort`` currently supports only
19 arrays with their rank of one and does not support ``axis``, ``kind``
20 and ``order`` parameters that ``numpy.sort`` does support.
21
22 .. seealso:: :func:`numpy.sort`
23
24 """
25 ret = a.copy()
26 ret.sort()
27 return ret
28
29
30 def lexsort(keys):
31 """Perform an indirect sort using an array of keys.
32
33 Args:
34 keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped
35 arrays. The ``k`` different "rows" to be sorted. The last row is
36 the primary sort key.
37
38 Returns:
39 cupy.ndarray: Array of indices that sort the keys.
40
41 .. note::
42 For its implementation reason, ``cupy.lexsort`` currently supports only
43 keys with their rank of one or two and does not support ``axis``
44 parameter that ``numpy.lexsort`` supports.
45
46 .. seealso:: :func:`numpy.lexsort`
47
48 """
49
50 # TODO(takagi): Support axis argument.
51
52 if not cupy.cuda.thrust_enabled:
53 raise RuntimeError('Thrust is needed to use cupy.lexsort. Please '
54 'install CUDA Toolkit with Thrust then reinstall '
55 'CuPy after uninstalling it.')
56
57 if keys.ndim == ():
58 # as numpy.lexsort() raises
59 raise TypeError('need sequence of keys with len > 0 in lexsort')
60
61 if keys.ndim == 1:
62 return 0
63
64 # TODO(takagi): Support ranks of three or more.
65 if keys.ndim > 2:
66 raise NotImplementedError('Keys with the rank of three or more is not '
67 'supported in lexsort')
68
69 idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)
70 k = keys._shape[0]
71 n = keys._shape[1]
72 thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)
73
74 return idx_array
75
76
77 def argsort(a):
78 """Return the indices that would sort an array with a stable sorting.
79
80 Args:
81 a (cupy.ndarray): Array to sort.
82
83 Returns:
84 cupy.ndarray: Array of indices that sort ``a``.
85
86 .. note::
87 For its implementation reason, ``cupy.argsort`` currently supports only
88 arrays with their rank of one and does not support ``axis``, ``kind``
89 and ``order`` parameters that ``numpy.argsort`` supports.
90
91 .. seealso:: :func:`numpy.argsort`
92
93 """
94 return a.argsort()
95
96
97 # TODO(okuta): Implement msort
98
99
100 # TODO(okuta): Implement sort_complex
101
102
103 # TODO(okuta): Implement partition
104
105
106 # TODO(okuta): Implement argpartition
107
[end of cupy/sorting/sort.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cupy/sorting/sort.py b/cupy/sorting/sort.py
--- a/cupy/sorting/sort.py
+++ b/cupy/sorting/sort.py
@@ -15,9 +15,9 @@
cupy.ndarray: Array of the same type and shape as ``a``.
.. note::
- For its implementation reason, ``cupy.sort`` currently supports only
- arrays with their rank of one and does not support ``axis``, ``kind``
- and ``order`` parameters that ``numpy.sort`` does support.
+ For its implementation reason, ``cupy.sort`` currently does not support
+ ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does
+ support.
.. seealso:: :func:`numpy.sort`
|
{"golden_diff": "diff --git a/cupy/sorting/sort.py b/cupy/sorting/sort.py\n--- a/cupy/sorting/sort.py\n+++ b/cupy/sorting/sort.py\n@@ -15,9 +15,9 @@\n cupy.ndarray: Array of the same type and shape as ``a``.\n \n .. note::\n- For its implementation reason, ``cupy.sort`` currently supports only\n- arrays with their rank of one and does not support ``axis``, ``kind``\n- and ``order`` parameters that ``numpy.sort`` does support.\n+ For its implementation reason, ``cupy.sort`` currently does not support\n+ ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does\n+ support.\n \n .. seealso:: :func:`numpy.sort`\n", "issue": "Make cupy.sort support arrays with rank two or more.\n# Background\r\nArrays sorted with `cupy.sort` operation have some properties such as dtype, rank, sorting axis and C/F-contiguousness. Currently, `cupy.sort` supports sorting arrays only with the rank of one because of its implementation reason, see #55.\r\n\r\n# Problem\r\nThis issue addresses a problem that makes `cupy.sort` support sorting arrays with the rank of two or more, with the last axis and C-contiguousness.\r\n\r\n# Approach\r\n\r\n**Rank two**\r\n\r\nFor an array with the rank of two, \r\n\r\n```\r\n[[4, 3]\r\n [2, 1]]\r\n```\r\n\r\ntreating the array as flattened one, `[4, 3, 2 ,1]`, and providing the following comparator in pseudo code to underlying Thrust library:\r\n\r\n```\r\nif floor(i / 2) < floor(j / 2) then return true;\r\nelse if floor(i / 2) > floor(j / 2) then return false;\r\nelse return data[i] < data[j];\r\n```\r\n\r\nwhere `i` and `j` are array indices, and `data[i]` represents `i` th element of array `data`,\r\n\r\nwe get the C-contiguous array sorted with the last axis.\r\n\r\n```\r\n[[3, 4]\r\n [1, 2]]\r\n```\r\n\r\n**Rank N**\r\n\r\nGeneralized to the rank of N with shape `(d_0, d_1, ..., d_n-1)`, the following comparator works:\r\n\r\n```\r\nif floor(i / d_n-1) < floor(j / d_n-1) then return true;\r\nelse if floor(i / d_n-1) > floor(j / d_n-1) then return false;\r\nelse return data[i] < data[j];\r\n```\r\n\n", "before_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.sort`` does support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n ret = a.copy()\n ret.sort()\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. 
Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a):\n \"\"\"Return the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. note::\n For its implementation reason, ``cupy.argsort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.argsort`` supports.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort()\n\n\n# TODO(okuta): Implement msort\n\n\n# TODO(okuta): Implement sort_complex\n\n\n# TODO(okuta): Implement partition\n\n\n# TODO(okuta): Implement argpartition\n", "path": "cupy/sorting/sort.py"}]}
| 1,830 | 176 |
gh_patches_debug_28265
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-424
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support for retain_graph=True
**Is your feature request related to a problem? Please describe.**
Some models require retain_graph=True, but it's not possible to set it in the .backward() call inside of Trainer.__run_training_batch(...)
**Describe the solution you'd like**
Add train_graph member function the LightningModule have the trainer read this option and then pass it into the .backward() call.
**Describe alternatives you've considered**
Driving a version of Trainer to support retain_graph=True is tough because __run_training_batch and other functions are name-mangled.
</issue>
<code>
[start of pytorch_lightning/trainer/train_loop_mixin.py]
1 import numpy as np
2
3 try:
4 from apex import amp
5
6 APEX_AVAILABLE = True
7 except ImportError:
8 APEX_AVAILABLE = False
9
10
11 class TrainerTrainLoopMixin(object):
12
13 def train(self):
14 # run all epochs
15 for epoch_nb in range(self.current_epoch, self.max_nb_epochs):
16 # set seed for distributed sampler (enables shuffling for each epoch)
17 if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
18 self.get_train_dataloader().sampler.set_epoch(epoch_nb)
19
20 # get model
21 model = self.get_model()
22
23 # update training progress in trainer and model
24 model.current_epoch = epoch_nb
25 self.current_epoch = epoch_nb
26 self.total_batches = self.nb_training_batches + self.nb_val_batches
27 self.batch_loss_value = 0 # accumulated grads
28
29 # limit the number of batches to 1 in fast_dev_run
30 if self.fast_dev_run:
31 self.total_batches = 1
32
33 # init progress_bar when requested
34 if self.show_progress_bar:
35 nb_iterations = self.total_batches
36
37 # for iterable train loader, the progress bar never ends
38 if self.is_iterable_train_dataloader:
39 nb_iterations = float('inf')
40 self.progress_bar.reset(nb_iterations)
41
42 # changing gradient according accumulation_scheduler
43 self.accumulation_scheduler.on_epoch_begin(epoch_nb, self)
44
45 # -----------------
46 # RUN TNG EPOCH
47 # -----------------
48 self.run_training_epoch()
49
50 # update LR schedulers
51 if self.lr_schedulers is not None:
52 for lr_scheduler in self.lr_schedulers:
53 lr_scheduler.step(self.current_epoch)
54
55 # early stopping
56 met_min_epochs = epoch_nb > self.min_nb_epochs
57 if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):
58 should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb,
59 logs=self.callback_metrics)
60 # stop training
61 stop = should_stop and met_min_epochs
62 if stop:
63 return
64
65 if self.logger is not None:
66 self.logger.finalize("success")
67
68 def run_training_epoch(self):
69 # before epoch hook
70 if self.is_function_implemented('on_epoch_start'):
71 model = self.get_model()
72 model.on_epoch_start()
73
74 # run epoch
75 for batch_nb, batch in enumerate(self.get_train_dataloader()):
76 self.batch_nb = batch_nb
77
78 model = self.get_model()
79 model.global_step = self.global_step
80
81 # ---------------
82 # RUN TRAIN STEP
83 # ---------------
84 output = self.run_training_batch(batch, batch_nb)
85 batch_result, grad_norm_dic, batch_step_metrics = output
86
87 # when returning -1 from train_step, we end epoch early
88 early_stop_epoch = batch_result == -1
89
90 # ---------------
91 # RUN VAL STEP
92 # ---------------
93 is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0
94 can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
95 should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch)
96
97 # fast_dev_run always forces val checking after train batch
98 if self.fast_dev_run or should_check_val:
99 self.run_evaluation(test=self.testing)
100
101 # when logs should be saved
102 should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch
103 if should_save_log or self.fast_dev_run:
104 if self.proc_rank == 0 and self.logger is not None:
105 self.logger.save()
106
107 # when metrics should be logged
108 should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch
109 if should_log_metrics or self.fast_dev_run:
110 # logs user requested information to logger
111 self.log_metrics(batch_step_metrics, grad_norm_dic)
112
113 self.global_step += 1
114 self.total_batch_nb += 1
115
116 # end epoch early
117 # stop when the flag is changed or we've gone past the amount
118 # requested in the batches
119 if early_stop_epoch or self.fast_dev_run:
120 break
121
122 # stop epoch if we limited nb batches
123 met_batch_limit = batch_nb >= self.nb_training_batches
124 if met_batch_limit:
125 break
126
127 # epoch end hook
128 if self.is_function_implemented('on_epoch_end'):
129 model = self.get_model()
130 model.on_epoch_end()
131
132 def run_training_batch(self, batch, batch_nb):
133 # track grad norms
134 grad_norm_dic = {}
135
136 # track all metrics for callbacks
137 all_callback_metrics = []
138
139 # track metrics to log
140 all_log_metrics = []
141
142 if batch is None:
143 return 0, grad_norm_dic
144
145 # hook
146 if self.is_function_implemented('on_batch_start'):
147 model_ref = self.get_model()
148 response = model_ref.on_batch_start(batch)
149
150 if response == -1:
151 return -1, grad_norm_dic
152
153 if self.show_progress_bar:
154 self.progress_bar.update(1)
155
156 # call training_step once per optimizer
157 for opt_idx, optimizer in enumerate(self.optimizers):
158
159 # wrap the forward step in a closure so second order methods work
160 def optimizer_closure():
161 # forward pass
162 output = self.training_forward(batch, batch_nb, opt_idx)
163 closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output
164
165 # track metrics for callbacks
166 all_callback_metrics.append(callback_metrics)
167
168 # track progress bar metrics
169 self.add_tqdm_metrics(progress_bar_metrics)
170 all_log_metrics.append(log_metrics)
171
172 # accumulate loss
173 # (if accumulate_grad_batches = 1 no effect)
174 closure_loss = closure_loss / self.accumulate_grad_batches
175
176 # backward pass
177 if self.use_amp:
178 with amp.scale_loss(closure_loss, optimizer) as scaled_loss:
179 scaled_loss.backward()
180 else:
181 closure_loss.backward()
182
183 # insert after step hook
184 if self.is_function_implemented('on_after_backward'):
185 model_ref = self.get_model()
186 model_ref.on_after_backward()
187
188 return closure_loss
189
190 # calculate loss
191 loss = optimizer_closure()
192
193 # nan grads
194 if self.print_nan_grads:
195 self.print_nan_gradients()
196
197 # track total loss for logging (avoid mem leaks)
198 self.batch_loss_value += loss.item()
199
200 # gradient update with accumulated gradients
201 if (self.batch_nb + 1) % self.accumulate_grad_batches == 0:
202
203 # track gradient norms when requested
204 if batch_nb % self.row_log_interval == 0:
205 if self.track_grad_norm > 0:
206 model = self.get_model()
207 grad_norm_dic = model.grad_norm(self.track_grad_norm)
208
209 # clip gradients
210 self.clip_gradients()
211
212 # calls .step(), .zero_grad()
213 # override function to modify this behavior
214 model = self.get_model()
215 model.optimizer_step(self.current_epoch, batch_nb,
216 optimizer, opt_idx, optimizer_closure)
217
218 # calculate running loss for display
219 self.running_loss.append(self.batch_loss_value)
220 self.batch_loss_value = 0
221 self.avg_loss = np.mean(self.running_loss[-100:])
222
223 # update progress bar
224 if self.show_progress_bar:
225 # add model specific metrics
226 tqdm_metrics = self.training_tqdm_dict
227 self.progress_bar.set_postfix(**tqdm_metrics)
228
229 # activate batch end hook
230 if self.is_function_implemented('on_batch_end'):
231 model = self.get_model()
232 model.on_batch_end()
233
234 # collapse all metrics into one dict
235 all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
236
237 # track all metrics for callbacks
238 self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()}
239
240 return 0, grad_norm_dic, all_log_metrics
241
242 def training_forward(self, batch, batch_nb, opt_idx):
243 """
244 Handle forward for each training case (distributed, single gpu, etc...)
245 :param batch:
246 :param batch_nb:
247 :return:
248 """
249 # ---------------
250 # FORWARD
251 # ---------------
252 # enable not needing to add opt_idx to training_step
253 args = [batch, batch_nb]
254 if len(self.optimizers) > 1:
255 args.append(opt_idx)
256
257 if self.use_ddp or self.use_ddp2:
258 output = self.model(*args)
259 elif self.use_dp:
260 output = self.model(*args)
261 elif self.single_gpu:
262 gpu_id = 0
263 if type(self.data_parallel_device_ids) is list:
264 gpu_id = self.data_parallel_device_ids[0]
265 batch = self.transfer_batch_to_gpu(batch, gpu_id)
266 args[0] = batch
267 output = self.model.training_step(*args)
268
269 else:
270 output = self.model.training_step(*args)
271
272 # format and reduce outputs accordingly
273 output = self.process_output(output, train=True)
274 loss, progress_bar_metrics, log_metrics, callback_metrics = output
275 return loss, progress_bar_metrics, log_metrics, callback_metrics
276
[end of pytorch_lightning/trainer/train_loop_mixin.py]
[start of pytorch_lightning/root_module/hooks.py]
1 import torch
2
3
4 class ModelHooks(torch.nn.Module):
5
6 def on_sanity_check_start(self):
7 """
8 Called before starting evaluate
9 :return:
10 """
11 pass
12
13 def on_batch_start(self, batch):
14 pass
15
16 def on_batch_end(self):
17 pass
18
19 def on_epoch_start(self):
20 pass
21
22 def on_epoch_end(self):
23 pass
24
25 def on_pre_performance_check(self):
26 pass
27
28 def on_post_performance_check(self):
29 pass
30
31 def on_before_zero_grad(self, optimizer):
32 """
33 Called after optimizer.step() and before optimizer.zero_grad()
34
35 for optimizer in optimizers:
36 optimizer.step()
37 model.on_before_zero_grad(optimizer) # < ---- called here
38 optimizer.zero_grad
39
40 :param optimizer:
41 :return:
42 """
43 pass
44
45 def on_after_backward(self):
46 """
47 Called after loss.backward() and before optimizers do anything
48 :return:
49 """
50 pass
51
[end of pytorch_lightning/root_module/hooks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pytorch_lightning/root_module/hooks.py b/pytorch_lightning/root_module/hooks.py
--- a/pytorch_lightning/root_module/hooks.py
+++ b/pytorch_lightning/root_module/hooks.py
@@ -1,6 +1,14 @@
import torch
+try:
+ from apex import amp
+
+ APEX_AVAILABLE = True
+except ImportError:
+ APEX_AVAILABLE = False
+
+
class ModelHooks(torch.nn.Module):
def on_sanity_check_start(self):
@@ -48,3 +56,17 @@
:return:
"""
pass
+
+ def backward(self, use_amp, loss, optimizer):
+ """
+ Override backward with your own implementation if you need to
+ :param use_amp: Whether amp was requested or not
+ :param loss: Loss is already scaled by accumulated grads
+ :param optimizer: Current optimizer being used
+ :return:
+ """
+ if use_amp:
+ with amp.scale_loss(loss, optimizer) as scaled_loss:
+ scaled_loss.backward()
+ else:
+ loss.backward()
diff --git a/pytorch_lightning/trainer/train_loop_mixin.py b/pytorch_lightning/trainer/train_loop_mixin.py
--- a/pytorch_lightning/trainer/train_loop_mixin.py
+++ b/pytorch_lightning/trainer/train_loop_mixin.py
@@ -174,11 +174,9 @@
closure_loss = closure_loss / self.accumulate_grad_batches
# backward pass
- if self.use_amp:
- with amp.scale_loss(closure_loss, optimizer) as scaled_loss:
- scaled_loss.backward()
- else:
- closure_loss.backward()
+ # done in hook so user can overwrite if needed
+ model_ref = self.get_model()
+ model_ref.backward(self.use_amp, closure_loss, optimizer)
# insert after step hook
if self.is_function_implemented('on_after_backward'):
|
{"golden_diff": "diff --git a/pytorch_lightning/root_module/hooks.py b/pytorch_lightning/root_module/hooks.py\n--- a/pytorch_lightning/root_module/hooks.py\n+++ b/pytorch_lightning/root_module/hooks.py\n@@ -1,6 +1,14 @@\n import torch\n \n \n+try:\n+ from apex import amp\n+\n+ APEX_AVAILABLE = True\n+except ImportError:\n+ APEX_AVAILABLE = False\n+\n+\n class ModelHooks(torch.nn.Module):\n \n def on_sanity_check_start(self):\n@@ -48,3 +56,17 @@\n :return:\n \"\"\"\n pass\n+\n+ def backward(self, use_amp, loss, optimizer):\n+ \"\"\"\n+ Override backward with your own implementation if you need to\n+ :param use_amp: Whether amp was requested or not\n+ :param loss: Loss is already scaled by accumulated grads\n+ :param optimizer: Current optimizer being used\n+ :return:\n+ \"\"\"\n+ if use_amp:\n+ with amp.scale_loss(loss, optimizer) as scaled_loss:\n+ scaled_loss.backward()\n+ else:\n+ loss.backward()\ndiff --git a/pytorch_lightning/trainer/train_loop_mixin.py b/pytorch_lightning/trainer/train_loop_mixin.py\n--- a/pytorch_lightning/trainer/train_loop_mixin.py\n+++ b/pytorch_lightning/trainer/train_loop_mixin.py\n@@ -174,11 +174,9 @@\n closure_loss = closure_loss / self.accumulate_grad_batches\n \n # backward pass\n- if self.use_amp:\n- with amp.scale_loss(closure_loss, optimizer) as scaled_loss:\n- scaled_loss.backward()\n- else:\n- closure_loss.backward()\n+ # done in hook so user can overwrite if needed\n+ model_ref = self.get_model()\n+ model_ref.backward(self.use_amp, closure_loss, optimizer)\n \n # insert after step hook\n if self.is_function_implemented('on_after_backward'):\n", "issue": "Support for retain_graph=True\n**Is your feature request related to a problem? Please describe.**\r\nSome models require retain_graph=True, but it's not possible to set it in the .backward() call inside of Trainer.__run_training_batch(...)\r\n\r\n**Describe the solution you'd like**\r\nAdd train_graph member function the LightningModule have the trainer read this option and then pass it into the .backward() call.\r\n\r\n**Describe alternatives you've considered**\r\nDriving a version of Trainer to support retain_graph=True is tough because __run_training_batch and other functions are name-mangled.\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerTrainLoopMixin(object):\n\n def train(self):\n # run all epochs\n for epoch_nb in range(self.current_epoch, self.max_nb_epochs):\n # set seed for distributed sampler (enables shuffling for each epoch)\n if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):\n self.get_train_dataloader().sampler.set_epoch(epoch_nb)\n\n # get model\n model = self.get_model()\n\n # update training progress in trainer and model\n model.current_epoch = epoch_nb\n self.current_epoch = epoch_nb\n self.total_batches = self.nb_training_batches + self.nb_val_batches\n self.batch_loss_value = 0 # accumulated grads\n\n # limit the number of batches to 1 in fast_dev_run\n if self.fast_dev_run:\n self.total_batches = 1\n\n # init progress_bar when requested\n if self.show_progress_bar:\n nb_iterations = self.total_batches\n\n # for iterable train loader, the progress bar never ends\n if self.is_iterable_train_dataloader:\n nb_iterations = float('inf')\n self.progress_bar.reset(nb_iterations)\n\n # changing gradient according accumulation_scheduler\n self.accumulation_scheduler.on_epoch_begin(epoch_nb, self)\n\n # -----------------\n # 
RUN TNG EPOCH\n # -----------------\n self.run_training_epoch()\n\n # update LR schedulers\n if self.lr_schedulers is not None:\n for lr_scheduler in self.lr_schedulers:\n lr_scheduler.step(self.current_epoch)\n\n # early stopping\n met_min_epochs = epoch_nb > self.min_nb_epochs\n if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):\n should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb,\n logs=self.callback_metrics)\n # stop training\n stop = should_stop and met_min_epochs\n if stop:\n return\n\n if self.logger is not None:\n self.logger.finalize(\"success\")\n\n def run_training_epoch(self):\n # before epoch hook\n if self.is_function_implemented('on_epoch_start'):\n model = self.get_model()\n model.on_epoch_start()\n\n # run epoch\n for batch_nb, batch in enumerate(self.get_train_dataloader()):\n self.batch_nb = batch_nb\n\n model = self.get_model()\n model.global_step = self.global_step\n\n # ---------------\n # RUN TRAIN STEP\n # ---------------\n output = self.run_training_batch(batch, batch_nb)\n batch_result, grad_norm_dic, batch_step_metrics = output\n\n # when returning -1 from train_step, we end epoch early\n early_stop_epoch = batch_result == -1\n\n # ---------------\n # RUN VAL STEP\n # ---------------\n is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0\n can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0\n should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch)\n\n # fast_dev_run always forces val checking after train batch\n if self.fast_dev_run or should_check_val:\n self.run_evaluation(test=self.testing)\n\n # when logs should be saved\n should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch\n if should_save_log or self.fast_dev_run:\n if self.proc_rank == 0 and self.logger is not None:\n self.logger.save()\n\n # when metrics should be logged\n should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch\n if should_log_metrics or self.fast_dev_run:\n # logs user requested information to logger\n self.log_metrics(batch_step_metrics, grad_norm_dic)\n\n self.global_step += 1\n self.total_batch_nb += 1\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if early_stop_epoch or self.fast_dev_run:\n break\n\n # stop epoch if we limited nb batches\n met_batch_limit = batch_nb >= self.nb_training_batches\n if met_batch_limit:\n break\n\n # epoch end hook\n if self.is_function_implemented('on_epoch_end'):\n model = self.get_model()\n model.on_epoch_end()\n\n def run_training_batch(self, batch, batch_nb):\n # track grad norms\n grad_norm_dic = {}\n\n # track all metrics for callbacks\n all_callback_metrics = []\n\n # track metrics to log\n all_log_metrics = []\n\n if batch is None:\n return 0, grad_norm_dic\n\n # hook\n if self.is_function_implemented('on_batch_start'):\n model_ref = self.get_model()\n response = model_ref.on_batch_start(batch)\n\n if response == -1:\n return -1, grad_norm_dic\n\n if self.show_progress_bar:\n self.progress_bar.update(1)\n\n # call training_step once per optimizer\n for opt_idx, optimizer in enumerate(self.optimizers):\n\n # wrap the forward step in a closure so second order methods work\n def optimizer_closure():\n # forward pass\n output = self.training_forward(batch, batch_nb, opt_idx)\n closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output\n\n # track metrics for callbacks\n 
all_callback_metrics.append(callback_metrics)\n\n # track progress bar metrics\n self.add_tqdm_metrics(progress_bar_metrics)\n all_log_metrics.append(log_metrics)\n\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n closure_loss = closure_loss / self.accumulate_grad_batches\n\n # backward pass\n if self.use_amp:\n with amp.scale_loss(closure_loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n closure_loss.backward()\n\n # insert after step hook\n if self.is_function_implemented('on_after_backward'):\n model_ref = self.get_model()\n model_ref.on_after_backward()\n\n return closure_loss\n\n # calculate loss\n loss = optimizer_closure()\n\n # nan grads\n if self.print_nan_grads:\n self.print_nan_gradients()\n\n # track total loss for logging (avoid mem leaks)\n self.batch_loss_value += loss.item()\n\n # gradient update with accumulated gradients\n if (self.batch_nb + 1) % self.accumulate_grad_batches == 0:\n\n # track gradient norms when requested\n if batch_nb % self.row_log_interval == 0:\n if self.track_grad_norm > 0:\n model = self.get_model()\n grad_norm_dic = model.grad_norm(self.track_grad_norm)\n\n # clip gradients\n self.clip_gradients()\n\n # calls .step(), .zero_grad()\n # override function to modify this behavior\n model = self.get_model()\n model.optimizer_step(self.current_epoch, batch_nb,\n optimizer, opt_idx, optimizer_closure)\n\n # calculate running loss for display\n self.running_loss.append(self.batch_loss_value)\n self.batch_loss_value = 0\n self.avg_loss = np.mean(self.running_loss[-100:])\n\n # update progress bar\n if self.show_progress_bar:\n # add model specific metrics\n tqdm_metrics = self.training_tqdm_dict\n self.progress_bar.set_postfix(**tqdm_metrics)\n\n # activate batch end hook\n if self.is_function_implemented('on_batch_end'):\n model = self.get_model()\n model.on_batch_end()\n\n # collapse all metrics into one dict\n all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}\n\n # track all metrics for callbacks\n self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()}\n\n return 0, grad_norm_dic, all_log_metrics\n\n def training_forward(self, batch, batch_nb, opt_idx):\n \"\"\"\n Handle forward for each training case (distributed, single gpu, etc...)\n :param batch:\n :param batch_nb:\n :return:\n \"\"\"\n # ---------------\n # FORWARD\n # ---------------\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_nb]\n if len(self.optimizers) > 1:\n args.append(opt_idx)\n\n if self.use_ddp or self.use_ddp2:\n output = self.model(*args)\n elif self.use_dp:\n output = self.model(*args)\n elif self.single_gpu:\n gpu_id = 0\n if type(self.data_parallel_device_ids) is list:\n gpu_id = self.data_parallel_device_ids[0]\n batch = self.transfer_batch_to_gpu(batch, gpu_id)\n args[0] = batch\n output = self.model.training_step(*args)\n\n else:\n output = self.model.training_step(*args)\n\n # format and reduce outputs accordingly\n output = self.process_output(output, train=True)\n loss, progress_bar_metrics, log_metrics, callback_metrics = output\n return loss, progress_bar_metrics, log_metrics, callback_metrics\n", "path": "pytorch_lightning/trainer/train_loop_mixin.py"}, {"content": "import torch\n\n\nclass ModelHooks(torch.nn.Module):\n\n def on_sanity_check_start(self):\n \"\"\"\n Called before starting evaluate\n :return:\n \"\"\"\n pass\n\n def on_batch_start(self, batch):\n pass\n\n def on_batch_end(self):\n pass\n\n def on_epoch_start(self):\n pass\n\n def 
on_epoch_end(self):\n pass\n\n def on_pre_performance_check(self):\n pass\n\n def on_post_performance_check(self):\n pass\n\n def on_before_zero_grad(self, optimizer):\n \"\"\"\n Called after optimizer.step() and before optimizer.zero_grad()\n\n for optimizer in optimizers:\n optimizer.step()\n model.on_before_zero_grad(optimizer) # < ---- called here\n optimizer.zero_grad\n\n :param optimizer:\n :return:\n \"\"\"\n pass\n\n def on_after_backward(self):\n \"\"\"\n Called after loss.backward() and before optimizers do anything\n :return:\n \"\"\"\n pass\n", "path": "pytorch_lightning/root_module/hooks.py"}]}
| 3,729 | 429 |
gh_patches_debug_34051
|
rasdani/github-patches
|
git_diff
|
Netflix__lemur-4595
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Session timeout can be reduced
Currently the JWT Session Token currently has an excessive session timeout. The length of the timeout can be reduced to lower the risk of an attacker gaining access.
If a user leaves their computer unattended, a nearby attacker could access the user’s computer and any open applications. Automatically logging a user out after an extended period of inactivity can limit the time that an attacker could make use of any hijacked sessions.
References:
[OWASP's Session Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html)
[OWASP's Page on Session Timeout](https://owasp.org/www-community/Session_Timeout)
---
The referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/)
</issue>
<code>
[start of lemur/auth/service.py]
1 """
2 .. module: lemur.auth.service
3 :platform: Unix
4 :synopsis: This module contains all of the authentication duties for
5 lemur
6 :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
7 :license: Apache, see LICENSE for more details.
8 .. moduleauthor:: Kevin Glisson <[email protected]>
9
10 """
11 import jwt
12 import json
13 import binascii
14
15 from functools import wraps
16 from datetime import datetime, timedelta
17
18 from flask import g, current_app, jsonify, request
19
20 from flask_restful import Resource
21 from flask_principal import identity_loaded, RoleNeed, UserNeed
22
23 from flask_principal import Identity, identity_changed
24
25 from cryptography.hazmat.backends import default_backend
26 from cryptography.hazmat.primitives import serialization
27 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
28
29 from lemur.users import service as user_service
30 from lemur.api_keys import service as api_key_service
31 from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed
32
33
34 def get_rsa_public_key(n, e):
35 """
36 Retrieve an RSA public key based on a module and exponent as provided by the JWKS format.
37
38 :param n:
39 :param e:
40 :return: a RSA Public Key in PEM format
41 """
42 n = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(n, "utf-8"))), 16)
43 e = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(e, "utf-8"))), 16)
44
45 pub = RSAPublicNumbers(e, n).public_key(default_backend())
46 return pub.public_bytes(
47 encoding=serialization.Encoding.PEM,
48 format=serialization.PublicFormat.SubjectPublicKeyInfo,
49 )
50
51
52 def create_token(user, aid=None, ttl=None):
53 """
54 Create a valid JWT for a given user/api key, this token is then used to authenticate
55 sessions until the token expires.
56
57 :param user:
58 :return:
59 """
60 expiration_delta = timedelta(
61 days=int(current_app.config.get("LEMUR_TOKEN_EXPIRATION", 1))
62 )
63 payload = {"iat": datetime.utcnow(), "exp": datetime.utcnow() + expiration_delta}
64
65 # Handle Just a User ID & User Object.
66 if isinstance(user, int):
67 payload["sub"] = user
68 else:
69 payload["sub"] = user.id
70 if aid is not None:
71 payload["aid"] = aid
72 # Custom TTLs are only supported on Access Keys.
73 if ttl is not None and aid is not None:
74 # Tokens that are forever until revoked.
75 if ttl == -1:
76 del payload["exp"]
77 else:
78 payload["exp"] = datetime.utcnow() + timedelta(days=ttl)
79 token = jwt.encode(payload, current_app.config["LEMUR_TOKEN_SECRET"])
80 return token
81
82
83 def login_required(f):
84 """
85 Validates the JWT and ensures that is has not expired and the user is still active.
86
87 :param f:
88 :return:
89 """
90
91 @wraps(f)
92 def decorated_function(*args, **kwargs):
93 if not request.headers.get("Authorization"):
94 response = jsonify(message="Missing authorization header")
95 response.status_code = 401
96 return response
97
98 try:
99 token = request.headers.get("Authorization").split()[1]
100 except Exception as e:
101 return dict(message="Token is invalid"), 403
102
103 try:
104 header_data = fetch_token_header(token)
105 payload = jwt.decode(token, current_app.config["LEMUR_TOKEN_SECRET"], algorithms=[header_data["alg"]])
106 except jwt.DecodeError:
107 return dict(message="Token is invalid"), 403
108 except jwt.ExpiredSignatureError:
109 return dict(message="Token has expired"), 403
110 except jwt.InvalidTokenError:
111 return dict(message="Token is invalid"), 403
112
113 if "aid" in payload:
114 access_key = api_key_service.get(payload["aid"])
115 if access_key.revoked:
116 return dict(message="Token has been revoked"), 403
117 if access_key.ttl != -1:
118 current_time = datetime.utcnow()
119 # API key uses days
120 expired_time = datetime.fromtimestamp(access_key.issued_at) + timedelta(days=access_key.ttl)
121 if current_time >= expired_time:
122 return dict(message="Token has expired"), 403
123 if access_key.application_name:
124 g.caller_application = access_key.application_name
125
126 user = user_service.get(payload["sub"])
127
128 if not user.active:
129 return dict(message="User is not currently active"), 403
130
131 g.current_user = user
132
133 if not g.current_user:
134 return dict(message="You are not logged in"), 403
135
136 # Tell Flask-Principal the identity changed
137 identity_changed.send(
138 current_app._get_current_object(), identity=Identity(g.current_user.id)
139 )
140
141 return f(*args, **kwargs)
142
143 return decorated_function
144
145
146 def fetch_token_header(token):
147 """
148 Fetch the header out of the JWT token.
149
150 :param token:
151 :return: :raise jwt.DecodeError:
152 """
153 token = token.encode("utf-8")
154 try:
155 signing_input, crypto_segment = token.rsplit(b".", 1)
156 header_segment, payload_segment = signing_input.split(b".", 1)
157 except ValueError:
158 raise jwt.DecodeError("Not enough segments")
159
160 try:
161 return json.loads(jwt.utils.base64url_decode(header_segment).decode("utf-8"))
162 except TypeError as e:
163 current_app.logger.exception(e)
164 raise jwt.DecodeError("Invalid header padding")
165
166
167 @identity_loaded.connect
168 def on_identity_loaded(sender, identity):
169 """
170 Sets the identity of a given option, assigns additional permissions based on
171 the role that the user is a part of.
172
173 :param sender:
174 :param identity:
175 """
176 # load the user
177 user = user_service.get(identity.id)
178
179 # add the UserNeed to the identity
180 identity.provides.add(UserNeed(identity.id))
181
182 # identity with the roles that the user provides
183 if hasattr(user, "roles"):
184 for role in user.roles:
185 identity.provides.add(RoleNeed(role.name))
186 identity.provides.add(RoleMemberNeed(role.id))
187
188 # apply ownership for authorities
189 if hasattr(user, "authorities"):
190 for authority in user.authorities:
191 identity.provides.add(AuthorityCreatorNeed(authority.id))
192
193 g.user = user
194
195
196 class AuthenticatedResource(Resource):
197 """
198 Inherited by all resources that need to be protected by authentication.
199 """
200
201 method_decorators = [login_required]
202
203 def __init__(self):
204 super(AuthenticatedResource, self).__init__()
205
[end of lemur/auth/service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lemur/auth/service.py b/lemur/auth/service.py
--- a/lemur/auth/service.py
+++ b/lemur/auth/service.py
@@ -8,27 +8,23 @@
.. moduleauthor:: Kevin Glisson <[email protected]>
"""
-import jwt
import json
-import binascii
-
-from functools import wraps
from datetime import datetime, timedelta
+from functools import wraps
-from flask import g, current_app, jsonify, request
-
-from flask_restful import Resource
-from flask_principal import identity_loaded, RoleNeed, UserNeed
-
-from flask_principal import Identity, identity_changed
-
+import binascii
+import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
+from flask import g, current_app, jsonify, request
+from flask_principal import Identity, identity_changed
+from flask_principal import identity_loaded, RoleNeed, UserNeed
+from flask_restful import Resource
-from lemur.users import service as user_service
from lemur.api_keys import service as api_key_service
from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed
+from lemur.users import service as user_service
def get_rsa_public_key(n, e):
@@ -57,9 +53,21 @@
:param user:
:return:
"""
- expiration_delta = timedelta(
- days=int(current_app.config.get("LEMUR_TOKEN_EXPIRATION", 1))
- )
+ expiration_delta = timedelta(days=1)
+ custom_expiry = current_app.config.get("LEMUR_TOKEN_EXPIRATION")
+ if custom_expiry:
+ if isinstance(custom_expiry, str) and custom_expiry.endswith("m"):
+ expiration_delta = timedelta(
+ minutes=int(custom_expiry.rstrip("m"))
+ )
+ elif isinstance(custom_expiry, str) and custom_expiry.endswith("h"):
+ expiration_delta = timedelta(
+ hours=int(custom_expiry.rstrip("h"))
+ )
+ else:
+ expiration_delta = timedelta(
+ days=int(custom_expiry)
+ )
payload = {"iat": datetime.utcnow(), "exp": datetime.utcnow() + expiration_delta}
# Handle Just a User ID & User Object.
|
{"golden_diff": "diff --git a/lemur/auth/service.py b/lemur/auth/service.py\n--- a/lemur/auth/service.py\n+++ b/lemur/auth/service.py\n@@ -8,27 +8,23 @@\n .. moduleauthor:: Kevin Glisson <[email protected]>\n \n \"\"\"\n-import jwt\n import json\n-import binascii\n-\n-from functools import wraps\n from datetime import datetime, timedelta\n+from functools import wraps\n \n-from flask import g, current_app, jsonify, request\n-\n-from flask_restful import Resource\n-from flask_principal import identity_loaded, RoleNeed, UserNeed\n-\n-from flask_principal import Identity, identity_changed\n-\n+import binascii\n+import jwt\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import serialization\n from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n+from flask import g, current_app, jsonify, request\n+from flask_principal import Identity, identity_changed\n+from flask_principal import identity_loaded, RoleNeed, UserNeed\n+from flask_restful import Resource\n \n-from lemur.users import service as user_service\n from lemur.api_keys import service as api_key_service\n from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed\n+from lemur.users import service as user_service\n \n \n def get_rsa_public_key(n, e):\n@@ -57,9 +53,21 @@\n :param user:\n :return:\n \"\"\"\n- expiration_delta = timedelta(\n- days=int(current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\", 1))\n- )\n+ expiration_delta = timedelta(days=1)\n+ custom_expiry = current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\")\n+ if custom_expiry:\n+ if isinstance(custom_expiry, str) and custom_expiry.endswith(\"m\"):\n+ expiration_delta = timedelta(\n+ minutes=int(custom_expiry.rstrip(\"m\"))\n+ )\n+ elif isinstance(custom_expiry, str) and custom_expiry.endswith(\"h\"):\n+ expiration_delta = timedelta(\n+ hours=int(custom_expiry.rstrip(\"h\"))\n+ )\n+ else:\n+ expiration_delta = timedelta(\n+ days=int(custom_expiry)\n+ )\n payload = {\"iat\": datetime.utcnow(), \"exp\": datetime.utcnow() + expiration_delta}\n \n # Handle Just a User ID & User Object.\n", "issue": "Session timeout can be reduced\nCurrently the JWT Session Token currently has an excessive session timeout. The length of the timeout can be reduced to lower the risk of an attacker gaining access.\r\n\r\nIf a user leaves their computer unattended, a nearby attacker could access the user\u2019s computer and any open applications. Automatically logging a user out after an extended period of inactivity can limit the time that an attacker could make use of any hijacked sessions.\r\n\r\nReferences:\r\n[OWASP's Session Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html)\r\n[OWASP's Page on Session Timeout](https://owasp.org/www-community/Session_Timeout)\r\n\r\n---\r\n\r\nThe referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/)\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.auth.service\n :platform: Unix\n :synopsis: This module contains all of the authentication duties for\n lemur\n :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport jwt\nimport json\nimport binascii\n\nfrom functools import wraps\nfrom datetime import datetime, timedelta\n\nfrom flask import g, current_app, jsonify, request\n\nfrom flask_restful import Resource\nfrom flask_principal import identity_loaded, RoleNeed, UserNeed\n\nfrom flask_principal import Identity, identity_changed\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n\nfrom lemur.users import service as user_service\nfrom lemur.api_keys import service as api_key_service\nfrom lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed\n\n\ndef get_rsa_public_key(n, e):\n \"\"\"\n Retrieve an RSA public key based on a module and exponent as provided by the JWKS format.\n\n :param n:\n :param e:\n :return: a RSA Public Key in PEM format\n \"\"\"\n n = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(n, \"utf-8\"))), 16)\n e = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(e, \"utf-8\"))), 16)\n\n pub = RSAPublicNumbers(e, n).public_key(default_backend())\n return pub.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n\n\ndef create_token(user, aid=None, ttl=None):\n \"\"\"\n Create a valid JWT for a given user/api key, this token is then used to authenticate\n sessions until the token expires.\n\n :param user:\n :return:\n \"\"\"\n expiration_delta = timedelta(\n days=int(current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\", 1))\n )\n payload = {\"iat\": datetime.utcnow(), \"exp\": datetime.utcnow() + expiration_delta}\n\n # Handle Just a User ID & User Object.\n if isinstance(user, int):\n payload[\"sub\"] = user\n else:\n payload[\"sub\"] = user.id\n if aid is not None:\n payload[\"aid\"] = aid\n # Custom TTLs are only supported on Access Keys.\n if ttl is not None and aid is not None:\n # Tokens that are forever until revoked.\n if ttl == -1:\n del payload[\"exp\"]\n else:\n payload[\"exp\"] = datetime.utcnow() + timedelta(days=ttl)\n token = jwt.encode(payload, current_app.config[\"LEMUR_TOKEN_SECRET\"])\n return token\n\n\ndef login_required(f):\n \"\"\"\n Validates the JWT and ensures that is has not expired and the user is still active.\n\n :param f:\n :return:\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not request.headers.get(\"Authorization\"):\n response = jsonify(message=\"Missing authorization header\")\n response.status_code = 401\n return response\n\n try:\n token = request.headers.get(\"Authorization\").split()[1]\n except Exception as e:\n return dict(message=\"Token is invalid\"), 403\n\n try:\n header_data = fetch_token_header(token)\n payload = jwt.decode(token, current_app.config[\"LEMUR_TOKEN_SECRET\"], algorithms=[header_data[\"alg\"]])\n except jwt.DecodeError:\n return dict(message=\"Token is invalid\"), 403\n except jwt.ExpiredSignatureError:\n return dict(message=\"Token has expired\"), 403\n except jwt.InvalidTokenError:\n return dict(message=\"Token is invalid\"), 403\n\n if \"aid\" in payload:\n access_key = api_key_service.get(payload[\"aid\"])\n if access_key.revoked:\n return dict(message=\"Token has been revoked\"), 403\n if access_key.ttl != -1:\n current_time = datetime.utcnow()\n # API key uses days\n expired_time = datetime.fromtimestamp(access_key.issued_at) + timedelta(days=access_key.ttl)\n if current_time >= expired_time:\n return 
dict(message=\"Token has expired\"), 403\n if access_key.application_name:\n g.caller_application = access_key.application_name\n\n user = user_service.get(payload[\"sub\"])\n\n if not user.active:\n return dict(message=\"User is not currently active\"), 403\n\n g.current_user = user\n\n if not g.current_user:\n return dict(message=\"You are not logged in\"), 403\n\n # Tell Flask-Principal the identity changed\n identity_changed.send(\n current_app._get_current_object(), identity=Identity(g.current_user.id)\n )\n\n return f(*args, **kwargs)\n\n return decorated_function\n\n\ndef fetch_token_header(token):\n \"\"\"\n Fetch the header out of the JWT token.\n\n :param token:\n :return: :raise jwt.DecodeError:\n \"\"\"\n token = token.encode(\"utf-8\")\n try:\n signing_input, crypto_segment = token.rsplit(b\".\", 1)\n header_segment, payload_segment = signing_input.split(b\".\", 1)\n except ValueError:\n raise jwt.DecodeError(\"Not enough segments\")\n\n try:\n return json.loads(jwt.utils.base64url_decode(header_segment).decode(\"utf-8\"))\n except TypeError as e:\n current_app.logger.exception(e)\n raise jwt.DecodeError(\"Invalid header padding\")\n\n\n@identity_loaded.connect\ndef on_identity_loaded(sender, identity):\n \"\"\"\n Sets the identity of a given option, assigns additional permissions based on\n the role that the user is a part of.\n\n :param sender:\n :param identity:\n \"\"\"\n # load the user\n user = user_service.get(identity.id)\n\n # add the UserNeed to the identity\n identity.provides.add(UserNeed(identity.id))\n\n # identity with the roles that the user provides\n if hasattr(user, \"roles\"):\n for role in user.roles:\n identity.provides.add(RoleNeed(role.name))\n identity.provides.add(RoleMemberNeed(role.id))\n\n # apply ownership for authorities\n if hasattr(user, \"authorities\"):\n for authority in user.authorities:\n identity.provides.add(AuthorityCreatorNeed(authority.id))\n\n g.user = user\n\n\nclass AuthenticatedResource(Resource):\n \"\"\"\n Inherited by all resources that need to be protected by authentication.\n \"\"\"\n\n method_decorators = [login_required]\n\n def __init__(self):\n super(AuthenticatedResource, self).__init__()\n", "path": "lemur/auth/service.py"}]}
| 2,705 | 495 |
gh_patches_debug_14378
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-1649
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pearson Correlation Coefficient raises error when 2D tensor but single task
## 🐛 Bug
I have a regression based modelling repository where the predictions can be multi-output or single-output based on configuration. My network outputs `[n_samples, n_tasks]` where `n_task` varies according to the task. If `n_task` is 1 then trying, `torchmetrics.functional.pearson_corrcoef(predictions, targets)` gives the error,
```bash
ValueError: Expected argument `num_outputs` to match the second dimension of input, but got 1 and 1
```
Changing the output shape for a single task specifically just so as to fit the metric function does not seem like a good solution. I think a simple change should be able to fix it.
My current workout around,
```python
import torchmetrics.functional as Fm
# predictions are [n, 1] for single task/output
Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])
```
There are other metrics that handle this,
```python
metrics = {
"mse": Fm.mean_squared_error(predictions, targets, squared=True),
"rmse": Fm.mean_squared_error(predictions, targets, squared=False),
"mae": Fm.mean_absolute_error(predictions, targets),
"r2": Fm.r2_score(predictions, targets, multioutput="raw_values"),
"mape": Fm.mean_absolute_percentage_error(predictions, targets),
# TODO: Raise issue on torchmetrics
"pcc": (
Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else
Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])
),
}
```
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior...
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<details>
<summary>Code sample</summary>
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
</details>
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):
- Python & PyTorch Version (e.g., 1.0):
- Any other relevant information such as OS (e.g., Linux):
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of src/torchmetrics/functional/regression/utils.py]
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from torch import Tensor
15
16
17 def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:
18 """Check that predictions and target have the correct shape, else raise error."""
19 if preds.ndim > 2 or target.ndim > 2:
20 raise ValueError(
21 f"Expected both predictions and target to be either 1- or 2-dimensional tensors,"
22 f" but got {target.ndim} and {preds.ndim}."
23 )
24 if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):
25 raise ValueError(
26 f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}"
27 f" and {preds.shape[1]}."
28 )
29
[end of src/torchmetrics/functional/regression/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/torchmetrics/functional/regression/utils.py b/src/torchmetrics/functional/regression/utils.py
--- a/src/torchmetrics/functional/regression/utils.py
+++ b/src/torchmetrics/functional/regression/utils.py
@@ -21,7 +21,9 @@
f"Expected both predictions and target to be either 1- or 2-dimensional tensors,"
f" but got {target.ndim} and {preds.ndim}."
)
- if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):
+ cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)
+ cond2 = num_outputs > 1 and num_outputs != preds.shape[1]
+ if cond1 or cond2:
raise ValueError(
f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}"
f" and {preds.shape[1]}."
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/utils.py b/src/torchmetrics/functional/regression/utils.py\n--- a/src/torchmetrics/functional/regression/utils.py\n+++ b/src/torchmetrics/functional/regression/utils.py\n@@ -21,7 +21,9 @@\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n- if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):\n+ cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)\n+ cond2 = num_outputs > 1 and num_outputs != preds.shape[1]\n+ if cond1 or cond2:\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n", "issue": "Pearson Correlation Coefficient raises error when 2D tensor but single task\n## \ud83d\udc1b Bug\r\n\r\nI have a regression based modelling repository where the predictions can be multi-output or single-output based on configuration. My network outputs `[n_samples, n_tasks]` where `n_task` varies according to the task. If `n_task` is 1 then trying, `torchmetrics.functional.pearson_corrcoef(predictions, targets)` gives the error,\r\n\r\n```bash\r\nValueError: Expected argument `num_outputs` to match the second dimension of input, but got 1 and 1\r\n```\r\n\r\nChanging the output shape for a single task specifically just so as to fit the metric function does not seem like a good solution. I think a simple change should be able to fix it.\r\nMy current workout around,\r\n```python\r\nimport torchmetrics.functional as Fm\r\n\r\n# predictions are [n, 1] for single task/output\r\nFm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])\r\n```\r\n\r\nThere are other metrics that handle this,\r\n```python\r\n metrics = {\r\n \"mse\": Fm.mean_squared_error(predictions, targets, squared=True),\r\n \"rmse\": Fm.mean_squared_error(predictions, targets, squared=False),\r\n \"mae\": Fm.mean_absolute_error(predictions, targets),\r\n \"r2\": Fm.r2_score(predictions, targets, multioutput=\"raw_values\"),\r\n \"mape\": Fm.mean_absolute_percentage_error(predictions, targets),\r\n # TODO: Raise issue on torchmetrics\r\n \"pcc\": (\r\n Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else\r\n Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])\r\n ),\r\n }\r\n```\r\n\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):\r\n- Python & PyTorch Version (e.g., 1.0):\r\n- Any other relevant information such as OS (e.g., Linux):\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torch import Tensor\n\n\ndef _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:\n \"\"\"Check that predictions and target have the correct shape, else raise error.\"\"\"\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n )\n", "path": "src/torchmetrics/functional/regression/utils.py"}]}
| 1,469 | 231 |
gh_patches_debug_27807
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-2214
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.5 deprecation FutureWarning in Nilearn 0.6.0
Python 3.5 will be EOL'd in September 2020. I will add a FutureWarning before release of Nilearn 0.6.0 stable, and we can drop support for it for Nilearn 0.8.0 stable.
@GaelVaroquaux
</issue>
<code>
[start of nilearn/__init__.py]
1 """
2 Machine Learning module for NeuroImaging in python
3 --------------------------------------------------
4
5 Documentation is available in the docstrings and online at
6 http://nilearn.github.io.
7
8 Contents
9 --------
10 Nilearn aims at simplifying the use of the scikit-learn package in the context of
11 neuroimaging. It provides specific input/output functions, algorithms and
12 visualization tools.
13
14 Submodules
15 ---------
16 datasets --- Utilities to download NeuroImaging datasets
17 decoding --- Decoding tools and algorithms
18 decomposition --- Includes a subject level variant of the ICA
19 algorithm called Canonical ICA
20 connectome --- Set of tools for computing functional connectivity matrices
21 and for sparse multi-subjects learning of Gaussian graphical models
22 image --- Set of functions defining mathematical operations
23 working on Niimg-like objects
24 input_data --- includes scikit-learn tranformers and tools to
25 preprocess neuro-imaging data
26 masking --- Utilities to compute and operate on brain masks
27 mass_univariate --- Defines a Massively Univariate Linear Model
28 estimated with OLS and permutation test
29 plotting --- Plotting code for nilearn
30 region --- Set of functions for extracting region-defined
31 signals, clustering methods, connected regions extraction
32 signal --- Set of preprocessing functions for time series
33 """
34
35 import gzip
36 import sys
37 import warnings
38 import os
39
40 from distutils.version import LooseVersion
41
42 from .version import _check_module_dependencies, __version__
43
44 # Workaround issue discovered in intel-openmp 2019.5:
45 # https://github.com/ContinuumIO/anaconda-issues/issues/11294
46 #
47 # see also https://github.com/scikit-learn/scikit-learn/pull/15020
48 os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
49
50 def _py2_deprecation_warning():
51 py2_warning = ('Python2 support is deprecated and will be removed in '
52 'the next release. Consider switching to Python 3.6 or 3.7.'
53 )
54 warnings.filterwarnings('once', message=py2_warning)
55 warnings.warn(message=py2_warning,
56 category=DeprecationWarning,
57 stacklevel=3,
58 )
59
60 def _py34_deprecation_warning():
61 py34_warning = ('Python 3.4 support is deprecated and will be removed in '
62 'the next release. Consider switching to Python 3.6 or 3.7.'
63 )
64 warnings.filterwarnings('once', message=py34_warning)
65 warnings.warn(message=py34_warning,
66 category=DeprecationWarning,
67 stacklevel=3,
68 )
69
70
71 def _python_deprecation_warnings():
72 if sys.version_info.major == 2:
73 _py2_deprecation_warning()
74 elif sys.version_info.major == 3 and sys.version_info.minor == 4:
75 _py34_deprecation_warning()
76
77
78 _check_module_dependencies()
79 _python_deprecation_warnings()
80
81 # Temporary work around to address formatting issues in doc tests
82 # with NumPy 1.14. NumPy had made more consistent str/repr formatting
83 # of numpy arrays. Hence we print the options to old versions.
84 import numpy as np
85 if LooseVersion(np.__version__) >= LooseVersion("1.14"):
86 # See issue #1600 in nilearn for reason to add try and except
87 try:
88 from ._utils.testing import is_nose_running
89 if is_nose_running():
90 np.set_printoptions(legacy='1.13')
91 except ImportError:
92 pass
93
94 # Monkey-patch gzip to have faster reads on large gzip files
95 if hasattr(gzip.GzipFile, 'max_read_chunk'):
96 gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb
97
98 # Boolean controlling the default globbing technique when using check_niimg
99 # and the os.path.expanduser usage in CacheMixin.
100 # Default value it True, set it to False to completely deactivate this
101 # behavior.
102 EXPAND_PATH_WILDCARDS = True
103
104 # Boolean controlling whether the joblib caches should be
105 # flushed if the version of certain modules changes (eg nibabel, as it
106 # does not respect the backward compatibility in some of its internal
107 # structures
108 # This is used in nilearn._utils.cache_mixin
109 CHECK_CACHE_VERSION = True
110
111 # list all submodules available in nilearn and version
112 __all__ = ['datasets', 'decoding', 'decomposition', 'connectome',
113 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',
114 'region', 'signal', 'surface', 'parcellations', '__version__']
115
116
[end of nilearn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nilearn/__init__.py b/nilearn/__init__.py
--- a/nilearn/__init__.py
+++ b/nilearn/__init__.py
@@ -47,32 +47,21 @@
# see also https://github.com/scikit-learn/scikit-learn/pull/15020
os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
-def _py2_deprecation_warning():
- py2_warning = ('Python2 support is deprecated and will be removed in '
- 'the next release. Consider switching to Python 3.6 or 3.7.'
- )
- warnings.filterwarnings('once', message=py2_warning)
- warnings.warn(message=py2_warning,
- category=DeprecationWarning,
- stacklevel=3,
- )
-def _py34_deprecation_warning():
- py34_warning = ('Python 3.4 support is deprecated and will be removed in '
- 'the next release. Consider switching to Python 3.6 or 3.7.'
- )
- warnings.filterwarnings('once', message=py34_warning)
- warnings.warn(message=py34_warning,
- category=DeprecationWarning,
+def _py35_deprecation_warning():
+ py35_warning = ('Python 3.5 support is deprecated and will be removed in '
+ 'a future release. Consider switching to Python 3.6 or 3.7'
+ )
+ warnings.filterwarnings('once', message=py35_warning)
+ warnings.warn(message=py35_warning,
+ category=FutureWarning,
stacklevel=3,
)
def _python_deprecation_warnings():
- if sys.version_info.major == 2:
- _py2_deprecation_warning()
- elif sys.version_info.major == 3 and sys.version_info.minor == 4:
- _py34_deprecation_warning()
+ if sys.version_info.major == 3 and sys.version_info.minor == 5:
+ _py35_deprecation_warning()
_check_module_dependencies()
|
{"golden_diff": "diff --git a/nilearn/__init__.py b/nilearn/__init__.py\n--- a/nilearn/__init__.py\n+++ b/nilearn/__init__.py\n@@ -47,32 +47,21 @@\n # see also https://github.com/scikit-learn/scikit-learn/pull/15020\n os.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n \n-def _py2_deprecation_warning():\n- py2_warning = ('Python2 support is deprecated and will be removed in '\n- 'the next release. Consider switching to Python 3.6 or 3.7.'\n- )\n- warnings.filterwarnings('once', message=py2_warning)\n- warnings.warn(message=py2_warning,\n- category=DeprecationWarning,\n- stacklevel=3,\n- )\n \n-def _py34_deprecation_warning():\n- py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n- 'the next release. Consider switching to Python 3.6 or 3.7.'\n- )\n- warnings.filterwarnings('once', message=py34_warning)\n- warnings.warn(message=py34_warning,\n- category=DeprecationWarning,\n+def _py35_deprecation_warning():\n+ py35_warning = ('Python 3.5 support is deprecated and will be removed in '\n+ 'a future release. Consider switching to Python 3.6 or 3.7'\n+ )\n+ warnings.filterwarnings('once', message=py35_warning)\n+ warnings.warn(message=py35_warning,\n+ category=FutureWarning,\n stacklevel=3,\n )\n \n \n def _python_deprecation_warnings():\n- if sys.version_info.major == 2:\n- _py2_deprecation_warning()\n- elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n- _py34_deprecation_warning()\n+ if sys.version_info.major == 3 and sys.version_info.minor == 5:\n+ _py35_deprecation_warning()\n \n \n _check_module_dependencies()\n", "issue": "Python 3.5 deprecation FutureWarning in Nilearn 0.6.0\nPython 3.5 will be EOL'd in September 2020. I will add a FutureWarning before release of Nilearn 0.6.0 stable, and we can drop support for it for Nilearn 0.8.0 stable.\r\n@GaelVaroquaux \n", "before_files": [{"content": "\"\"\"\nMachine Learning module for NeuroImaging in python\n--------------------------------------------------\n\nDocumentation is available in the docstrings and online at\nhttp://nilearn.github.io.\n\nContents\n--------\nNilearn aims at simplifying the use of the scikit-learn package in the context of\nneuroimaging. 
It provides specific input/output functions, algorithms and\nvisualization tools.\n\nSubmodules\n---------\ndatasets --- Utilities to download NeuroImaging datasets\ndecoding --- Decoding tools and algorithms\ndecomposition --- Includes a subject level variant of the ICA\n algorithm called Canonical ICA\nconnectome --- Set of tools for computing functional connectivity matrices\n and for sparse multi-subjects learning of Gaussian graphical models\nimage --- Set of functions defining mathematical operations\n working on Niimg-like objects\ninput_data --- includes scikit-learn tranformers and tools to\n preprocess neuro-imaging data\nmasking --- Utilities to compute and operate on brain masks\nmass_univariate --- Defines a Massively Univariate Linear Model\n estimated with OLS and permutation test\nplotting --- Plotting code for nilearn\nregion --- Set of functions for extracting region-defined\n signals, clustering methods, connected regions extraction\nsignal --- Set of preprocessing functions for time series\n\"\"\"\n\nimport gzip\nimport sys\nimport warnings\nimport os\n\nfrom distutils.version import LooseVersion\n\nfrom .version import _check_module_dependencies, __version__\n\n# Workaround issue discovered in intel-openmp 2019.5:\n# https://github.com/ContinuumIO/anaconda-issues/issues/11294\n#\n# see also https://github.com/scikit-learn/scikit-learn/pull/15020\nos.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n\ndef _py2_deprecation_warning():\n py2_warning = ('Python2 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py2_warning)\n warnings.warn(message=py2_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\ndef _py34_deprecation_warning():\n py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py34_warning)\n warnings.warn(message=py34_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef _python_deprecation_warnings():\n if sys.version_info.major == 2:\n _py2_deprecation_warning()\n elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n _py34_deprecation_warning()\n\n\n_check_module_dependencies()\n_python_deprecation_warnings()\n\n# Temporary work around to address formatting issues in doc tests\n# with NumPy 1.14. NumPy had made more consistent str/repr formatting\n# of numpy arrays. 
Hence we print the options to old versions.\nimport numpy as np\nif LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n # See issue #1600 in nilearn for reason to add try and except\n try:\n from ._utils.testing import is_nose_running\n if is_nose_running():\n np.set_printoptions(legacy='1.13')\n except ImportError:\n pass\n\n# Monkey-patch gzip to have faster reads on large gzip files\nif hasattr(gzip.GzipFile, 'max_read_chunk'):\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n\n# Boolean controlling the default globbing technique when using check_niimg\n# and the os.path.expanduser usage in CacheMixin.\n# Default value it True, set it to False to completely deactivate this\n# behavior.\nEXPAND_PATH_WILDCARDS = True\n\n# Boolean controlling whether the joblib caches should be\n# flushed if the version of certain modules changes (eg nibabel, as it\n# does not respect the backward compatibility in some of its internal\n# structures\n# This is used in nilearn._utils.cache_mixin\nCHECK_CACHE_VERSION = True\n\n# list all submodules available in nilearn and version\n__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',\n 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',\n 'region', 'signal', 'surface', 'parcellations', '__version__']\n\n", "path": "nilearn/__init__.py"}]}
| 1,877 | 468 |
gh_patches_debug_350
|
rasdani/github-patches
|
git_diff
|
scikit-image__scikit-image-1124
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NameError on del version when init has ImportError
In `__init__.py`, `del version` gives `NameError` when `ImportError` happens.
```
try:
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
del version
```
should be
```
try:
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
else:
del version
```
</issue>
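A small standalone demonstration of the failure mode described above; `no_such_module` is a placeholder standing in for skimage's generated `version` module and is expected not to exist:

```
# Broken pattern: del runs unconditionally and masks the original ImportError.
try:
    from no_such_module import version as __version__
except ImportError:
    __version__ = "unbuilt-dev"
try:
    del version
except NameError as exc:
    print("unconditional del fails:", exc)  # the name was never bound

# Fixed pattern: the else clause only runs when the import succeeded,
# so the clean-up del is skipped on the ImportError path.
try:
    from no_such_module import version as __version__
except ImportError:
    __version__ = "unbuilt-dev"
else:
    del version

print(__version__)  # -> unbuilt-dev, with no NameError
```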
<code>
[start of skimage/__init__.py]
1 """Image Processing SciKit (Toolbox for SciPy)
2
3 ``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image
4 processing and computer vision.
5
6 The main package of ``skimage`` only provides a few utilities for converting
7 between image data types; for most features, you need to import one of the
8 following subpackages:
9
10 Subpackages
11 -----------
12 color
13 Color space conversion.
14 data
15 Test images and example data.
16 draw
17 Drawing primitives (lines, text, etc.) that operate on NumPy arrays.
18 exposure
19 Image intensity adjustment, e.g., histogram equalization, etc.
20 feature
21 Feature detection and extraction, e.g., texture analysis corners, etc.
22 filter
23 Sharpening, edge finding, rank filters, thresholding, etc.
24 graph
25 Graph-theoretic operations, e.g., shortest paths.
26 io
27 Reading, saving, and displaying images and video.
28 measure
29 Measurement of image properties, e.g., similarity and contours.
30 morphology
31 Morphological operations, e.g., opening or skeletonization.
32 novice
33 Simplified interface for teaching purposes.
34 restoration
35 Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.
36 segmentation
37 Partitioning an image into multiple regions.
38 transform
39 Geometric and other transforms, e.g., rotation or the Radon transform.
40 util
41 Generic utilities.
42 viewer
43 A simple graphical user interface for visualizing results and exploring
44 parameters.
45
46 Utility Functions
47 -----------------
48 img_as_float
49 Convert an image to floating point format, with values in [0, 1].
50 img_as_uint
51 Convert an image to unsigned integer format, with values in [0, 65535].
52 img_as_int
53 Convert an image to signed integer format, with values in [-32768, 32767].
54 img_as_ubyte
55 Convert an image to unsigned byte format, with values in [0, 255].
56
57 """
58
59 import os.path as _osp
60 import imp as _imp
61 import functools as _functools
62 import warnings as _warnings
63 from skimage._shared.utils import deprecated as _deprecated
64
65 pkg_dir = _osp.abspath(_osp.dirname(__file__))
66 data_dir = _osp.join(pkg_dir, 'data')
67
68 try:
69 from .version import version as __version__
70 except ImportError:
71 __version__ = "unbuilt-dev"
72 del version
73
74
75 try:
76 _imp.find_module('nose')
77 except ImportError:
78 def _test(verbose=False):
79 """This would run all unit tests, but nose couldn't be
80 imported so the test suite can not run.
81 """
82 raise ImportError("Could not load nose. Unit tests not available.")
83
84 def _doctest(verbose=False):
85 """This would run all doc tests, but nose couldn't be
86 imported so the test suite can not run.
87 """
88 raise ImportError("Could not load nose. Doctests not available.")
89 else:
90 def _test(doctest=False, verbose=False):
91 """Run all unit tests."""
92 import nose
93 args = ['', pkg_dir, '--exe', '--ignore-files=^_test']
94 if verbose:
95 args.extend(['-v', '-s'])
96 if doctest:
97 args.extend(['--with-doctest', '--ignore-files=^\.',
98 '--ignore-files=^setup\.py$$', '--ignore-files=test'])
99 # Make sure warnings do not break the doc tests
100 with _warnings.catch_warnings():
101 _warnings.simplefilter("ignore")
102 success = nose.run('skimage', argv=args)
103 else:
104 success = nose.run('skimage', argv=args)
105 # Return sys.exit code
106 if success:
107 return 0
108 else:
109 return 1
110
111
112 # do not use `test` as function name as this leads to a recursion problem with
113 # the nose test suite
114 test = _test
115 test_verbose = _functools.partial(test, verbose=True)
116 test_verbose.__doc__ = test.__doc__
117 doctest = _functools.partial(test, doctest=True)
118 doctest.__doc__ = doctest.__doc__
119 doctest_verbose = _functools.partial(test, doctest=True, verbose=True)
120 doctest_verbose.__doc__ = doctest.__doc__
121
122
123 class _Log(Warning):
124 pass
125
126
127 class _FakeLog(object):
128 def __init__(self, name):
129 """
130 Parameters
131 ----------
132 name : str
133 Name of the log.
134 repeat : bool
135 Whether to print repeating messages more than once (False by
136 default).
137 """
138 self._name = name
139
140 warnings.simplefilter("always", _Log)
141
142 self._warnings = _warnings
143
144 def _warn(self, msg, wtype):
145 self._warnings.warn('%s: %s' % (wtype, msg), _Log)
146
147 def debug(self, msg):
148 self._warn(msg, 'DEBUG')
149
150 def info(self, msg):
151 self._warn(msg, 'INFO')
152
153 def warning(self, msg):
154 self._warn(msg, 'WARNING')
155
156 warn = warning
157
158 def error(self, msg):
159 self._warn(msg, 'ERROR')
160
161 def critical(self, msg):
162 self._warn(msg, 'CRITICAL')
163
164 def addHandler(*args):
165 pass
166
167 def setLevel(*args):
168 pass
169
170
171 from .util.dtype import *
172
[end of skimage/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skimage/__init__.py b/skimage/__init__.py
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -69,7 +69,8 @@
from .version import version as __version__
except ImportError:
__version__ = "unbuilt-dev"
-del version
+else:
+ del version
try:
|
{"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -69,7 +69,8 @@\n from .version import version as __version__\n except ImportError:\n __version__ = \"unbuilt-dev\"\n-del version\n+else:\n+ del version\n \n \n try:\n", "issue": "NameError on del version when init has ImportError\nIn `__init__.py`, `del version` gives `NameError` when `ImportError` happens.\n\n```\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\ndel version\n```\n\nshould be\n\n```\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\nelse:\n del version\n```\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilter\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\nimport imp as _imp\nimport functools as _functools\nimport warnings as _warnings\nfrom skimage._shared.utils import deprecated as _deprecated\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\ndel version\n\n\ntry:\n _imp.find_module('nose')\nexcept ImportError:\n def _test(verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. Unit tests not available.\")\n\n def _doctest(verbose=False):\n \"\"\"This would run all doc tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Doctests not available.\")\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with _warnings.catch_warnings():\n _warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = _functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = _functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = _functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\nclass _Log(Warning):\n pass\n\n\nclass _FakeLog(object):\n def __init__(self, name):\n \"\"\"\n Parameters\n ----------\n name : str\n Name of the log.\n repeat : bool\n Whether to print repeating messages more than once (False by\n default).\n \"\"\"\n self._name = name\n\n warnings.simplefilter(\"always\", _Log)\n\n self._warnings = _warnings\n\n def _warn(self, msg, wtype):\n self._warnings.warn('%s: %s' % (wtype, msg), _Log)\n\n def debug(self, msg):\n self._warn(msg, 'DEBUG')\n\n def info(self, msg):\n self._warn(msg, 'INFO')\n\n def warning(self, msg):\n self._warn(msg, 'WARNING')\n\n warn = warning\n\n def error(self, msg):\n self._warn(msg, 'ERROR')\n\n def critical(self, msg):\n self._warn(msg, 'CRITICAL')\n\n def addHandler(*args):\n pass\n\n def setLevel(*args):\n pass\n\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]}
| 2,210 | 89 |
gh_patches_debug_21874
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-3459
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No man page with pip install
### Checklist
- [ ] This is a bug report.
- [x] This is a feature request.
- [ ] This is a plugin (improvement) request.
- [x] I have read the contribution guidelines.
### Description
When installing streamlink with pip, no man page gets installed
### Expected / Actual behavior
a man page gets installed during installation of streamlink with pip
### Reproduction steps / Explicit stream URLs to test
1. ``pip install --user streamlink``
2. ``man streamlink``
3. ``No manual entry for streamlink``
4. I get the same results when using ``pip install streamlink``
### Logs
```
[cli][debug] OS: Linux-4.13.0-43-generic-x86_64-with-Ubuntu-17.10-artful
[cli][debug] Python: 3.6.3
[cli][debug] Streamlink: 0.12.1
[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)
usage: streamlink [OPTIONS] <URL> [STREAM]
```
</issue>
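One common way to ship a pre-built man page from setup.py is setuptools' `data_files`. The sketch below is illustrative only: the package name is a placeholder, and `docs/_build/man/streamlink.1` is assumed to be a man page already generated by the docs build (e.g. `sphinx-build -b man`).

```
# Hedged sketch: install a pre-built man page alongside the package.
from os import path

from setuptools import setup

data_files = []
man_page = "docs/_build/man/streamlink.1"  # built documentation artifact
if path.exists(man_page):
    # Installed relative to the prefix, e.g. <prefix>/share/man/man1/
    data_files.append(("share/man/man1", [man_page]))

setup(
    name="example-package",
    version="0.0.0",
    data_files=data_files,
)
```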
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import codecs
3 from os import environ, path
4 from sys import argv, path as sys_path
5
6 from setuptools import find_packages, setup
7
8 import versioneer
9
10
11 deps = [
12 "requests>=2.21.0,<3.0",
13 "isodate",
14 "websocket-client",
15 # Support for SOCKS proxies
16 "PySocks!=1.5.7,>=1.5.6",
17 ]
18
19 # for encrypted streams
20 if environ.get("STREAMLINK_USE_PYCRYPTO"):
21 deps.append("pycrypto")
22 else:
23 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6
24 deps.append("pycryptodome>=3.4.3,<4")
25
26 # for localization
27 if environ.get("STREAMLINK_USE_PYCOUNTRY"):
28 deps.append("pycountry")
29 else:
30 deps.append("iso-639")
31 deps.append("iso3166")
32
33 # When we build an egg for the Win32 bootstrap we don"t want dependency
34 # information built into it.
35 if environ.get("NO_DEPS"):
36 deps = []
37
38 this_directory = path.abspath(path.dirname(__file__))
39 srcdir = path.join(this_directory, "src/")
40 sys_path.insert(0, srcdir)
41
42 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f:
43 long_description = f.read()
44
45
46 def is_wheel_for_windows():
47 if "bdist_wheel" in argv:
48 names = ["win32", "win-amd64", "cygwin"]
49 length = len(argv)
50 for pos in range(argv.index("bdist_wheel") + 1, length):
51 if argv[pos] == "--plat-name" and pos + 1 < length:
52 return argv[pos + 1] in names
53 elif argv[pos][:12] == "--plat-name=":
54 return argv[pos][12:] in names
55 return False
56
57
58 entry_points = {
59 "console_scripts": ["streamlink=streamlink_cli.main:main"]
60 }
61
62 if is_wheel_for_windows():
63 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
64
65
66 setup(name="streamlink",
67 version=versioneer.get_version(),
68 cmdclass=versioneer.get_cmdclass(),
69 description="Streamlink is a command-line utility that extracts streams "
70 "from various services and pipes them into a video player of "
71 "choice.",
72 long_description=long_description,
73 long_description_content_type="text/markdown",
74 url="https://github.com/streamlink/streamlink",
75 project_urls={
76 "Documentation": "https://streamlink.github.io/",
77 "Tracker": "https://github.com/streamlink/streamlink/issues",
78 "Source": "https://github.com/streamlink/streamlink",
79 "Funding": "https://opencollective.com/streamlink"
80 },
81 author="Streamlink",
82 # temp until we have a mailing list / global email
83 author_email="[email protected]",
84 license="Simplified BSD",
85 packages=find_packages("src"),
86 package_dir={"": "src"},
87 entry_points=entry_points,
88 install_requires=deps,
89 test_suite="tests",
90 python_requires=">=3.6, <4",
91 classifiers=["Development Status :: 5 - Production/Stable",
92 "License :: OSI Approved :: BSD License",
93 "Environment :: Console",
94 "Intended Audience :: End Users/Desktop",
95 "Operating System :: POSIX",
96 "Operating System :: Microsoft :: Windows",
97 "Operating System :: MacOS",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3 :: Only",
100 "Programming Language :: Python :: 3.6",
101 "Programming Language :: Python :: 3.7",
102 "Programming Language :: Python :: 3.8",
103 "Programming Language :: Python :: 3.9",
104 "Topic :: Internet :: WWW/HTTP",
105 "Topic :: Multimedia :: Sound/Audio",
106 "Topic :: Multimedia :: Video",
107 "Topic :: Utilities"])
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,7 @@
import versioneer
+data_files = []
deps = [
"requests>=2.21.0,<3.0",
"isodate",
@@ -63,6 +64,19 @@
entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
+additional_files = [
+ ("share/man/man1", ["docs/_build/man/streamlink.1"])
+]
+
+for destdir, srcfiles in additional_files:
+ files = []
+ for srcfile in srcfiles:
+ if path.exists(srcfile):
+ files.append(srcfile)
+ if files:
+ data_files.append((destdir, files))
+
+
setup(name="streamlink",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
@@ -85,6 +99,7 @@
packages=find_packages("src"),
package_dir={"": "src"},
entry_points=entry_points,
+ data_files=data_files,
install_requires=deps,
test_suite="tests",
python_requires=">=3.6, <4",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,6 +8,7 @@\n import versioneer\n \n \n+data_files = []\n deps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n@@ -63,6 +64,19 @@\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n \n \n+additional_files = [\n+ (\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n+]\n+\n+for destdir, srcfiles in additional_files:\n+ files = []\n+ for srcfile in srcfiles:\n+ if path.exists(srcfile):\n+ files.append(srcfile)\n+ if files:\n+ data_files.append((destdir, files))\n+\n+\n setup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n@@ -85,6 +99,7 @@\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n+ data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n", "issue": "No man page with pip install\n### Checklist\r\n\r\n- [ ] This is a bug report.\r\n- [x] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [x] I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\nWhen installing streamlink with pip, no man page gets installed\r\n\r\n### Expected / Actual behavior\r\n\r\na man page gets installed during installation of streamlink with pip\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n1. ``pip install --user streamlink``\r\n2. ``man streamlink``\r\n3. ``No manual entry for streamlink``\r\n4. I get the same results when using ``pip install streamlink``\r\n\r\n### Logs\r\n\r\n```\r\n[cli][debug] OS: Linux-4.13.0-43-generic-x86_64-with-Ubuntu-17.10-artful\r\n[cli][debug] Python: 3.6.3\r\n[cli][debug] Streamlink: 0.12.1\r\n[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)\r\nusage: streamlink [OPTIONS] <URL> [STREAM]\r\n\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] 
= [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
| 1,904 | 272 |
gh_patches_debug_6747
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-2770
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Circular import in target file accidentally triggers 'No module named ... found'
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
While developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following:
```
ImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py)
```
In this case my module I pass to the sanic server is `app:app`, from within `/api`.
### Code snippet
_No response_
### Expected Behavior
I had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up down the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import.
It would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly set up module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their helper functions. As a result there should be a different call that finds the file (an `ImportError` here hints at an incorrectly set up module) from the one that loads it (user error).
### How do you run Sanic?
Sanic CLI
### Operating System
Windows (Docker, Python:3.11)
### Sanic Version
23.3
### Additional context
_No response_
</issue>
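A rough sketch of the split the report suggests: resolve the module first (a missing spec really does mean "no module named ..."), then execute it separately so that errors raised by the module's own code, such as a circular import, keep their original message. The function name and error text below are illustrative, not Sanic's API.

```
# Illustrative only: separate "locate the module" from "execute it".
import importlib.util
import sys


def load_app_module(module_name: str):
    spec = importlib.util.find_spec(module_name)
    if spec is None or spec.loader is None:
        # Genuine "no module named ..." situation.
        raise SystemExit(f"No module named {module_name} found.")
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    try:
        spec.loader.exec_module(module)
    except ImportError as exc:
        # The module exists but failed while importing (e.g. a circular
        # import); surface the original message instead of swallowing it.
        raise SystemExit(f"Failed to import {module_name}: {exc}") from exc
    return module
```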
<code>
[start of sanic/cli/app.py]
1 import os
2 import shutil
3 import sys
4
5 from argparse import Namespace
6 from functools import partial
7 from textwrap import indent
8 from typing import List, Union
9
10 from sanic.app import Sanic
11 from sanic.application.logo import get_logo
12 from sanic.cli.arguments import Group
13 from sanic.cli.base import SanicArgumentParser, SanicHelpFormatter
14 from sanic.cli.inspector import make_inspector_parser
15 from sanic.cli.inspector_client import InspectorClient
16 from sanic.log import error_logger
17 from sanic.worker.loader import AppLoader
18
19
20 class SanicCLI:
21 DESCRIPTION = indent(
22 f"""
23 {get_logo(True)}
24
25 To start running a Sanic application, provide a path to the module, where
26 app is a Sanic() instance in the global scope:
27
28 $ sanic path.to.server:app
29
30 If the Sanic instance variable is called 'app', you can leave off the last
31 part, and only provide a path to the module where the instance is:
32
33 $ sanic path.to.server
34
35 Or, a path to a callable that returns a Sanic() instance:
36
37 $ sanic path.to.factory:create_app
38
39 Or, a path to a directory to run as a simple HTTP server:
40
41 $ sanic ./path/to/static
42 """,
43 prefix=" ",
44 )
45
46 def __init__(self) -> None:
47 width = shutil.get_terminal_size().columns
48 self.parser = SanicArgumentParser(
49 prog="sanic",
50 description=self.DESCRIPTION,
51 formatter_class=lambda prog: SanicHelpFormatter(
52 prog,
53 max_help_position=36 if width > 96 else 24,
54 indent_increment=4,
55 width=None,
56 ),
57 )
58 self.parser._positionals.title = "Required\n========\n Positional"
59 self.parser._optionals.title = "Optional\n========\n General"
60 self.main_process = (
61 os.environ.get("SANIC_RELOADER_PROCESS", "") != "true"
62 )
63 self.args: Namespace = Namespace()
64 self.groups: List[Group] = []
65 self.inspecting = False
66
67 def attach(self):
68 if len(sys.argv) > 1 and sys.argv[1] == "inspect":
69 self.inspecting = True
70 self.parser.description = get_logo(True)
71 make_inspector_parser(self.parser)
72 return
73
74 for group in Group._registry:
75 instance = group.create(self.parser)
76 instance.attach()
77 self.groups.append(instance)
78
79 def run(self, parse_args=None):
80 if self.inspecting:
81 self._inspector()
82 return
83
84 legacy_version = False
85 if not parse_args:
86 # This is to provide backwards compat -v to display version
87 legacy_version = len(sys.argv) == 2 and sys.argv[-1] == "-v"
88 parse_args = ["--version"] if legacy_version else None
89 elif parse_args == ["-v"]:
90 parse_args = ["--version"]
91
92 if not legacy_version:
93 parsed, unknown = self.parser.parse_known_args(args=parse_args)
94 if unknown and parsed.factory:
95 for arg in unknown:
96 if arg.startswith("--"):
97 self.parser.add_argument(arg.split("=")[0])
98
99 self.args = self.parser.parse_args(args=parse_args)
100 self._precheck()
101 app_loader = AppLoader(
102 self.args.target, self.args.factory, self.args.simple, self.args
103 )
104
105 try:
106 app = self._get_app(app_loader)
107 kwargs = self._build_run_kwargs()
108 except ValueError as e:
109 error_logger.exception(f"Failed to run app: {e}")
110 else:
111 for http_version in self.args.http:
112 app.prepare(**kwargs, version=http_version)
113 if self.args.single:
114 serve = Sanic.serve_single
115 else:
116 serve = partial(Sanic.serve, app_loader=app_loader)
117 serve(app)
118
119 def _inspector(self):
120 args = sys.argv[2:]
121 self.args, unknown = self.parser.parse_known_args(args=args)
122 if unknown:
123 for arg in unknown:
124 if arg.startswith("--"):
125 try:
126 key, value = arg.split("=")
127 key = key.lstrip("-")
128 except ValueError:
129 value = False if arg.startswith("--no-") else True
130 key = (
131 arg.replace("--no-", "")
132 .lstrip("-")
133 .replace("-", "_")
134 )
135 setattr(self.args, key, value)
136
137 kwargs = {**self.args.__dict__}
138 host = kwargs.pop("host")
139 port = kwargs.pop("port")
140 secure = kwargs.pop("secure")
141 raw = kwargs.pop("raw")
142 action = kwargs.pop("action") or "info"
143 api_key = kwargs.pop("api_key")
144 positional = kwargs.pop("positional", None)
145 if action == "<custom>" and positional:
146 action = positional[0]
147 if len(positional) > 1:
148 kwargs["args"] = positional[1:]
149 InspectorClient(host, port, secure, raw, api_key).do(action, **kwargs)
150
151 def _precheck(self):
152 # Custom TLS mismatch handling for better diagnostics
153 if self.main_process and (
154 # one of cert/key missing
155 bool(self.args.cert) != bool(self.args.key)
156 # new and old style self.args used together
157 or self.args.tls
158 and self.args.cert
159 # strict host checking without certs would always fail
160 or self.args.tlshost
161 and not self.args.tls
162 and not self.args.cert
163 ):
164 self.parser.print_usage(sys.stderr)
165 message = (
166 "TLS certificates must be specified by either of:\n"
167 " --cert certdir/fullchain.pem --key certdir/privkey.pem\n"
168 " --tls certdir (equivalent to the above)"
169 )
170 error_logger.error(message)
171 sys.exit(1)
172
173 def _get_app(self, app_loader: AppLoader):
174 try:
175 app = app_loader.load()
176 except ImportError as e:
177 if app_loader.module_name.startswith(e.name): # type: ignore
178 error_logger.error(
179 f"No module named {e.name} found.\n"
180 " Example File: project/sanic_server.py -> app\n"
181 " Example Module: project.sanic_server.app"
182 )
183 sys.exit(1)
184 else:
185 raise e
186 return app
187
188 def _build_run_kwargs(self):
189 for group in self.groups:
190 group.prepare(self.args)
191 ssl: Union[None, dict, str, list] = []
192 if self.args.tlshost:
193 ssl.append(None)
194 if self.args.cert is not None or self.args.key is not None:
195 ssl.append(dict(cert=self.args.cert, key=self.args.key))
196 if self.args.tls:
197 ssl += self.args.tls
198 if not ssl:
199 ssl = None
200 elif len(ssl) == 1 and ssl[0] is not None:
201 # Use only one cert, no TLSSelector.
202 ssl = ssl[0]
203
204 kwargs = {
205 "access_log": self.args.access_log,
206 "coffee": self.args.coffee,
207 "debug": self.args.debug,
208 "fast": self.args.fast,
209 "host": self.args.host,
210 "motd": self.args.motd,
211 "noisy_exceptions": self.args.noisy_exceptions,
212 "port": self.args.port,
213 "ssl": ssl,
214 "unix": self.args.unix,
215 "verbosity": self.args.verbosity or 0,
216 "workers": self.args.workers,
217 "auto_tls": self.args.auto_tls,
218 "single_process": self.args.single,
219 }
220
221 for maybe_arg in ("auto_reload", "dev"):
222 if getattr(self.args, maybe_arg, False):
223 kwargs[maybe_arg] = True
224
225 if self.args.path:
226 kwargs["auto_reload"] = True
227 kwargs["reload_dir"] = self.args.path
228
229 return kwargs
230
[end of sanic/cli/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sanic/cli/app.py b/sanic/cli/app.py
--- a/sanic/cli/app.py
+++ b/sanic/cli/app.py
@@ -180,6 +180,10 @@
" Example File: project/sanic_server.py -> app\n"
" Example Module: project.sanic_server.app"
)
+ error_logger.error(
+ "\nThe error below might have caused the above one:\n"
+ f"{e.msg}"
+ )
sys.exit(1)
else:
raise e
|
{"golden_diff": "diff --git a/sanic/cli/app.py b/sanic/cli/app.py\n--- a/sanic/cli/app.py\n+++ b/sanic/cli/app.py\n@@ -180,6 +180,10 @@\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n )\n+ error_logger.error(\n+ \"\\nThe error below might have caused the above one:\\n\"\n+ f\"{e.msg}\"\n+ )\n sys.exit(1)\n else:\n raise e\n", "issue": "Circular import in target file accidentally triggers 'No module named ... found'\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nWhile developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following:\r\n```\r\nImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py)\r\n```\r\nIn this case my module I pass to the sanic server is `app:app`, from within `/api`.\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nI had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import.\r\n\r\nIt would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly setup module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their help functions. As a result there should be a different call which finds the file (an `ImportError` here hints at an incorrectly setup module), than the one which loads it (user error).\n\n### How do you run Sanic?\n\nSanic CLI\n\n### Operating System\n\nWindows (Docker, Python:3.11)\n\n### Sanic Version\n\n23.3\n\n### Additional context\n\n_No response_\nCircular import in target file accidentally triggers 'No module named ... found'\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nWhile developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following:\r\n```\r\nImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py)\r\n```\r\nIn this case my module I pass to the sanic server is `app:app`, from within `/api`.\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nI had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import.\r\n\r\nIt would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly setup module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their help functions. 
As a result there should be a different call which finds the file (an `ImportError` here hints at an incorrectly setup module), than the one which loads it (user error).\n\n### How do you run Sanic?\n\nSanic CLI\n\n### Operating System\n\nWindows (Docker, Python:3.11)\n\n### Sanic Version\n\n23.3\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import os\nimport shutil\nimport sys\n\nfrom argparse import Namespace\nfrom functools import partial\nfrom textwrap import indent\nfrom typing import List, Union\n\nfrom sanic.app import Sanic\nfrom sanic.application.logo import get_logo\nfrom sanic.cli.arguments import Group\nfrom sanic.cli.base import SanicArgumentParser, SanicHelpFormatter\nfrom sanic.cli.inspector import make_inspector_parser\nfrom sanic.cli.inspector_client import InspectorClient\nfrom sanic.log import error_logger\nfrom sanic.worker.loader import AppLoader\n\n\nclass SanicCLI:\n DESCRIPTION = indent(\n f\"\"\"\n{get_logo(True)}\n\nTo start running a Sanic application, provide a path to the module, where\napp is a Sanic() instance in the global scope:\n\n $ sanic path.to.server:app\n\nIf the Sanic instance variable is called 'app', you can leave off the last\npart, and only provide a path to the module where the instance is:\n\n $ sanic path.to.server\n\nOr, a path to a callable that returns a Sanic() instance:\n\n $ sanic path.to.factory:create_app\n\nOr, a path to a directory to run as a simple HTTP server:\n\n $ sanic ./path/to/static\n\"\"\",\n prefix=\" \",\n )\n\n def __init__(self) -> None:\n width = shutil.get_terminal_size().columns\n self.parser = SanicArgumentParser(\n prog=\"sanic\",\n description=self.DESCRIPTION,\n formatter_class=lambda prog: SanicHelpFormatter(\n prog,\n max_help_position=36 if width > 96 else 24,\n indent_increment=4,\n width=None,\n ),\n )\n self.parser._positionals.title = \"Required\\n========\\n Positional\"\n self.parser._optionals.title = \"Optional\\n========\\n General\"\n self.main_process = (\n os.environ.get(\"SANIC_RELOADER_PROCESS\", \"\") != \"true\"\n )\n self.args: Namespace = Namespace()\n self.groups: List[Group] = []\n self.inspecting = False\n\n def attach(self):\n if len(sys.argv) > 1 and sys.argv[1] == \"inspect\":\n self.inspecting = True\n self.parser.description = get_logo(True)\n make_inspector_parser(self.parser)\n return\n\n for group in Group._registry:\n instance = group.create(self.parser)\n instance.attach()\n self.groups.append(instance)\n\n def run(self, parse_args=None):\n if self.inspecting:\n self._inspector()\n return\n\n legacy_version = False\n if not parse_args:\n # This is to provide backwards compat -v to display version\n legacy_version = len(sys.argv) == 2 and sys.argv[-1] == \"-v\"\n parse_args = [\"--version\"] if legacy_version else None\n elif parse_args == [\"-v\"]:\n parse_args = [\"--version\"]\n\n if not legacy_version:\n parsed, unknown = self.parser.parse_known_args(args=parse_args)\n if unknown and parsed.factory:\n for arg in unknown:\n if arg.startswith(\"--\"):\n self.parser.add_argument(arg.split(\"=\")[0])\n\n self.args = self.parser.parse_args(args=parse_args)\n self._precheck()\n app_loader = AppLoader(\n self.args.target, self.args.factory, self.args.simple, self.args\n )\n\n try:\n app = self._get_app(app_loader)\n kwargs = self._build_run_kwargs()\n except ValueError as e:\n error_logger.exception(f\"Failed to run app: {e}\")\n else:\n for http_version in self.args.http:\n app.prepare(**kwargs, version=http_version)\n if self.args.single:\n serve = 
Sanic.serve_single\n else:\n serve = partial(Sanic.serve, app_loader=app_loader)\n serve(app)\n\n def _inspector(self):\n args = sys.argv[2:]\n self.args, unknown = self.parser.parse_known_args(args=args)\n if unknown:\n for arg in unknown:\n if arg.startswith(\"--\"):\n try:\n key, value = arg.split(\"=\")\n key = key.lstrip(\"-\")\n except ValueError:\n value = False if arg.startswith(\"--no-\") else True\n key = (\n arg.replace(\"--no-\", \"\")\n .lstrip(\"-\")\n .replace(\"-\", \"_\")\n )\n setattr(self.args, key, value)\n\n kwargs = {**self.args.__dict__}\n host = kwargs.pop(\"host\")\n port = kwargs.pop(\"port\")\n secure = kwargs.pop(\"secure\")\n raw = kwargs.pop(\"raw\")\n action = kwargs.pop(\"action\") or \"info\"\n api_key = kwargs.pop(\"api_key\")\n positional = kwargs.pop(\"positional\", None)\n if action == \"<custom>\" and positional:\n action = positional[0]\n if len(positional) > 1:\n kwargs[\"args\"] = positional[1:]\n InspectorClient(host, port, secure, raw, api_key).do(action, **kwargs)\n\n def _precheck(self):\n # Custom TLS mismatch handling for better diagnostics\n if self.main_process and (\n # one of cert/key missing\n bool(self.args.cert) != bool(self.args.key)\n # new and old style self.args used together\n or self.args.tls\n and self.args.cert\n # strict host checking without certs would always fail\n or self.args.tlshost\n and not self.args.tls\n and not self.args.cert\n ):\n self.parser.print_usage(sys.stderr)\n message = (\n \"TLS certificates must be specified by either of:\\n\"\n \" --cert certdir/fullchain.pem --key certdir/privkey.pem\\n\"\n \" --tls certdir (equivalent to the above)\"\n )\n error_logger.error(message)\n sys.exit(1)\n\n def _get_app(self, app_loader: AppLoader):\n try:\n app = app_loader.load()\n except ImportError as e:\n if app_loader.module_name.startswith(e.name): # type: ignore\n error_logger.error(\n f\"No module named {e.name} found.\\n\"\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n )\n sys.exit(1)\n else:\n raise e\n return app\n\n def _build_run_kwargs(self):\n for group in self.groups:\n group.prepare(self.args)\n ssl: Union[None, dict, str, list] = []\n if self.args.tlshost:\n ssl.append(None)\n if self.args.cert is not None or self.args.key is not None:\n ssl.append(dict(cert=self.args.cert, key=self.args.key))\n if self.args.tls:\n ssl += self.args.tls\n if not ssl:\n ssl = None\n elif len(ssl) == 1 and ssl[0] is not None:\n # Use only one cert, no TLSSelector.\n ssl = ssl[0]\n\n kwargs = {\n \"access_log\": self.args.access_log,\n \"coffee\": self.args.coffee,\n \"debug\": self.args.debug,\n \"fast\": self.args.fast,\n \"host\": self.args.host,\n \"motd\": self.args.motd,\n \"noisy_exceptions\": self.args.noisy_exceptions,\n \"port\": self.args.port,\n \"ssl\": ssl,\n \"unix\": self.args.unix,\n \"verbosity\": self.args.verbosity or 0,\n \"workers\": self.args.workers,\n \"auto_tls\": self.args.auto_tls,\n \"single_process\": self.args.single,\n }\n\n for maybe_arg in (\"auto_reload\", \"dev\"):\n if getattr(self.args, maybe_arg, False):\n kwargs[maybe_arg] = True\n\n if self.args.path:\n kwargs[\"auto_reload\"] = True\n kwargs[\"reload_dir\"] = self.args.path\n\n return kwargs\n", "path": "sanic/cli/app.py"}]}
| 3,527 | 120 |
gh_patches_debug_25782
|
rasdani/github-patches
|
git_diff
|
archlinux__archinstall-1851
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect paths used for steps in keyfile creation
From issue #1828 opened by @Reflux0301
### keyfile path
https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L190-L191
❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code>
✔️ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code>
### mkdir
https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L201
❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code>
✔️ <code><i>target_path</i>/etc/cryptsetup-keys.d</code>
### chmod
https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L206
❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code>
✔️ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code>
### crypttab
https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L209
❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code>
✔️ <code>/etc/cryptsetup-keys.d/_name_.key</code>
### References
- https://wiki.archlinux.org/title/Dm-crypt/System_configuration#Unlocking_with_a_keyfile
- https://wiki.archlinux.org/title/Dm-crypt/Device_encryption#passphrase
</issue>
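As a minimal, hedged sketch of the corrected path handling described above (illustrative only; `target_path` and `mapper_name` here are assumed values, not the project's real ones), the key-file paths could be derived like this:

```
import tempfile
from pathlib import Path

# Illustrative stand-ins; the real installer derives these at runtime.
target_path = Path(tempfile.mkdtemp())   # mount point of the system being installed
mapper_name = "cryptroot"

# Path of the key file as seen from inside the installed system
# (this is what the crypttab entry should reference).
kf_path = Path(f"/etc/cryptsetup-keys.d/{mapper_name}.key")

# The same file addressed from the live environment, rooted under target_path.
key_file = target_path / kf_path.relative_to(kf_path.root)

# mkdir must target the parent directory; chmod must target the key file itself.
key_file.parent.mkdir(parents=True, exist_ok=True)
key_file.write_text("example-keyfile-contents\n")
key_file.chmod(0o400)

print(kf_path)   # /etc/cryptsetup-keys.d/cryptroot.key
print(key_file)  # <target_path>/etc/cryptsetup-keys.d/cryptroot.key
```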
<code>
[start of archinstall/lib/luks.py]
1 from __future__ import annotations
2
3 import shlex
4 import time
5 from dataclasses import dataclass
6 from pathlib import Path
7 from typing import Optional, List
8
9 from . import disk
10 from .general import SysCommand, generate_password, SysCommandWorker
11 from .output import info, debug
12 from .exceptions import SysCallError, DiskError
13 from .storage import storage
14
15
16 @dataclass
17 class Luks2:
18 luks_dev_path: Path
19 mapper_name: Optional[str] = None
20 password: Optional[str] = None
21 key_file: Optional[Path] = None
22 auto_unmount: bool = False
23
24 # will be set internally after unlocking the device
25 _mapper_dev: Optional[Path] = None
26
27 @property
28 def mapper_dev(self) -> Optional[Path]:
29 if self.mapper_name:
30 return Path(f'/dev/mapper/{self.mapper_name}')
31 return None
32
33 def __post_init__(self):
34 if self.luks_dev_path is None:
35 raise ValueError('Partition must have a path set')
36
37 def __enter__(self):
38 self.unlock(self.key_file)
39
40 def __exit__(self, *args: str, **kwargs: str):
41 if self.auto_unmount:
42 self.lock()
43
44 def _default_key_file(self) -> Path:
45 return Path(f'/tmp/{self.luks_dev_path.name}.disk_pw')
46
47 def _password_bytes(self) -> bytes:
48 if not self.password:
49 raise ValueError('Password for luks2 device was not specified')
50
51 if isinstance(self.password, bytes):
52 return self.password
53 else:
54 return bytes(self.password, 'UTF-8')
55
56 def encrypt(
57 self,
58 key_size: int = 512,
59 hash_type: str = 'sha512',
60 iter_time: int = 10000,
61 key_file: Optional[Path] = None
62 ) -> Path:
63 info(f'Luks2 encrypting: {self.luks_dev_path}')
64
65 byte_password = self._password_bytes()
66
67 if not key_file:
68 if self.key_file:
69 key_file = self.key_file
70 else:
71 key_file = self._default_key_file()
72
73 with open(key_file, 'wb') as fh:
74 fh.write(byte_password)
75
76 cryptsetup_args = shlex.join([
77 '/usr/bin/cryptsetup',
78 '--batch-mode',
79 '--verbose',
80 '--type', 'luks2',
81 '--pbkdf', 'argon2id',
82 '--hash', hash_type,
83 '--key-size', str(key_size),
84 '--iter-time', str(iter_time),
85 '--key-file', str(key_file),
86 '--use-urandom',
87 'luksFormat', str(self.luks_dev_path),
88 ])
89
90 # Retry formatting the volume because archinstall can some times be too quick
91 # which generates a "Device /dev/sdX does not exist or access denied." between
92 # setting up partitions and us trying to encrypt it.
93 for retry_attempt in range(storage['DISK_RETRY_ATTEMPTS']):
94 try:
95 SysCommand(cryptsetup_args)
96 break
97 except SysCallError as err:
98 time.sleep(storage['DISK_TIMEOUTS'])
99
100 if retry_attempt != storage['DISK_RETRY_ATTEMPTS'] - 1:
101 continue
102
103 if err.exit_code == 1:
104 info(f'luks2 partition currently in use: {self.luks_dev_path}')
105 info('Attempting to unmount, crypt-close and trying encryption again')
106
107 self.lock()
108 # Then try again to set up the crypt-device
109 SysCommand(cryptsetup_args)
110 else:
111 raise DiskError(f'Could not encrypt volume "{self.luks_dev_path}": {err}')
112
113 return key_file
114
115 def _get_luks_uuid(self) -> str:
116 command = f'/usr/bin/cryptsetup luksUUID {self.luks_dev_path}'
117
118 try:
119 return SysCommand(command).decode().strip() # type: ignore
120 except SysCallError as err:
121 info(f'Unable to get UUID for Luks device: {self.luks_dev_path}')
122 raise err
123
124 def is_unlocked(self) -> bool:
125 return self.mapper_name is not None and Path(f'/dev/mapper/{self.mapper_name}').exists()
126
127 def unlock(self, key_file: Optional[Path] = None):
128 """
129 Unlocks the luks device, an optional key file location for unlocking can be specified,
130 otherwise a default location for the key file will be used.
131
132 :param key_file: An alternative key file
133 :type key_file: Path
134 """
135 debug(f'Unlocking luks2 device: {self.luks_dev_path}')
136
137 if not self.mapper_name:
138 raise ValueError('mapper name missing')
139
140 byte_password = self._password_bytes()
141
142 if not key_file:
143 if self.key_file:
144 key_file = self.key_file
145 else:
146 key_file = self._default_key_file()
147
148 with open(key_file, 'wb') as fh:
149 fh.write(byte_password)
150
151 wait_timer = time.time()
152 while Path(self.luks_dev_path).exists() is False and time.time() - wait_timer < 10:
153 time.sleep(0.025)
154
155 SysCommand(f'/usr/bin/cryptsetup open {self.luks_dev_path} {self.mapper_name} --key-file {key_file} --type luks2')
156
157 if not self.mapper_dev or not self.mapper_dev.is_symlink():
158 raise DiskError(f'Failed to open luks2 device: {self.luks_dev_path}')
159
160 def lock(self):
161 disk.device_handler.umount(self.luks_dev_path)
162
163 # Get crypt-information about the device by doing a reverse lookup starting with the partition path
164 # For instance: /dev/sda
165 disk.device_handler.partprobe(self.luks_dev_path)
166 lsblk_info = disk.get_lsblk_info(self.luks_dev_path)
167
168 # For each child (sub-partition/sub-device)
169 for child in lsblk_info.children:
170 # Unmount the child location
171 for mountpoint in child.mountpoints:
172 debug(f'Unmounting {mountpoint}')
173 disk.device_handler.umount(mountpoint, recursive=True)
174
175 # And close it if possible.
176 debug(f"Closing crypt device {child.name}")
177 SysCommand(f"cryptsetup close {child.name}")
178
179 self._mapper_dev = None
180
181 def create_keyfile(self, target_path: Path, override: bool = False):
182 """
183 Routine to create keyfiles, so it can be moved elsewhere
184 """
185 if self.mapper_name is None:
186 raise ValueError('Mapper name must be provided')
187
188 # Once we store the key as ../xyzloop.key systemd-cryptsetup can
189 # automatically load this key if we name the device to "xyzloop"
190 key_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name
191 key_file = key_file_path / '.key'
192 crypttab_path = target_path / 'etc/crypttab'
193
194 if key_file.exists():
195 if not override:
196 info(f'Key file {key_file} already exists, keeping existing')
197 return
198 else:
199 info(f'Key file {key_file} already exists, overriding')
200
201 key_file_path.mkdir(parents=True, exist_ok=True)
202
203 with open(key_file, "w") as keyfile:
204 keyfile.write(generate_password(length=512))
205
206 key_file_path.chmod(0o400)
207
208 self._add_key(key_file)
209 self._crypttab(crypttab_path, key_file, options=["luks", "key-slot=1"])
210
211 def _add_key(self, key_file: Path):
212 info(f'Adding additional key-file {key_file}')
213
214 command = f'/usr/bin/cryptsetup -q -v luksAddKey {self.luks_dev_path} {key_file}'
215 worker = SysCommandWorker(command, environment_vars={'LC_ALL': 'C'})
216 pw_injected = False
217
218 while worker.is_alive():
219 if b'Enter any existing passphrase' in worker and pw_injected is False:
220 worker.write(self._password_bytes())
221 pw_injected = True
222
223 if worker.exit_code != 0:
224 raise DiskError(f'Could not add encryption key {key_file} to {self.luks_dev_path}: {worker.decode()}')
225
226 def _crypttab(
227 self,
228 crypttab_path: Path,
229 key_file: Path,
230 options: List[str]
231 ) -> None:
232 info(f'Adding crypttab entry for key {key_file}')
233
234 with open(crypttab_path, 'a') as crypttab:
235 opt = ','.join(options)
236 uuid = self._get_luks_uuid()
237 row = f"{self.mapper_name} UUID={uuid} {key_file} {opt}\n"
238 crypttab.write(row)
239
[end of archinstall/lib/luks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py
--- a/archinstall/lib/luks.py
+++ b/archinstall/lib/luks.py
@@ -187,8 +187,8 @@
# Once we store the key as ../xyzloop.key systemd-cryptsetup can
# automatically load this key if we name the device to "xyzloop"
- key_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name
- key_file = key_file_path / '.key'
+ kf_path = Path(f'/etc/cryptsetup-keys.d/{self.mapper_name}.key')
+ key_file = target_path / kf_path.relative_to(kf_path.root)
crypttab_path = target_path / 'etc/crypttab'
if key_file.exists():
@@ -198,15 +198,15 @@
else:
info(f'Key file {key_file} already exists, overriding')
- key_file_path.mkdir(parents=True, exist_ok=True)
+ key_file.parent.mkdir(parents=True, exist_ok=True)
with open(key_file, "w") as keyfile:
keyfile.write(generate_password(length=512))
- key_file_path.chmod(0o400)
+ key_file.chmod(0o400)
self._add_key(key_file)
- self._crypttab(crypttab_path, key_file, options=["luks", "key-slot=1"])
+ self._crypttab(crypttab_path, kf_path, options=["luks", "key-slot=1"])
def _add_key(self, key_file: Path):
info(f'Adding additional key-file {key_file}')
|
{"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -187,8 +187,8 @@\n \n \t\t# Once we store the key as ../xyzloop.key systemd-cryptsetup can\n \t\t# automatically load this key if we name the device to \"xyzloop\"\n-\t\tkey_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name\n-\t\tkey_file = key_file_path / '.key'\n+\t\tkf_path = Path(f'/etc/cryptsetup-keys.d/{self.mapper_name}.key')\n+\t\tkey_file = target_path / kf_path.relative_to(kf_path.root)\n \t\tcrypttab_path = target_path / 'etc/crypttab'\n \n \t\tif key_file.exists():\n@@ -198,15 +198,15 @@\n \t\t\telse:\n \t\t\t\tinfo(f'Key file {key_file} already exists, overriding')\n \n-\t\tkey_file_path.mkdir(parents=True, exist_ok=True)\n+\t\tkey_file.parent.mkdir(parents=True, exist_ok=True)\n \n \t\twith open(key_file, \"w\") as keyfile:\n \t\t\tkeyfile.write(generate_password(length=512))\n \n-\t\tkey_file_path.chmod(0o400)\n+\t\tkey_file.chmod(0o400)\n \n \t\tself._add_key(key_file)\n-\t\tself._crypttab(crypttab_path, key_file, options=[\"luks\", \"key-slot=1\"])\n+\t\tself._crypttab(crypttab_path, kf_path, options=[\"luks\", \"key-slot=1\"])\n \n \tdef _add_key(self, key_file: Path):\n \t\tinfo(f'Adding additional key-file {key_file}')\n", "issue": "Incorrect paths used for steps in keyfile creation\nFrom issue #1828 opened by @Reflux0301\r\n\r\n### keyfile path\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L190-L191\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code>\r\n\u2714\ufe0f <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code>\r\n\r\n### mkdir\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L201\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code>\r\n\u2714\ufe0f <code><i>target_path</i>/etc/cryptsetup-keys.d</code>\r\n### chmod\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L206\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code>\r\n\u2714\ufe0f <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code>\r\n\r\n### crypttab\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L209\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code>\r\n\u2714\ufe0f <code>/etc/cryptsetup-keys.d/_name_.key</code>\r\n\r\n### References\r\n\r\n- https://wiki.archlinux.org/title/Dm-crypt/System_configuration#Unlocking_with_a_keyfile\r\n- https://wiki.archlinux.org/title/Dm-crypt/Device_encryption#passphrase\n", "before_files": [{"content": "from __future__ import annotations\n\nimport shlex\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional, List\n\nfrom . 
import disk\nfrom .general import SysCommand, generate_password, SysCommandWorker\nfrom .output import info, debug\nfrom .exceptions import SysCallError, DiskError\nfrom .storage import storage\n\n\n@dataclass\nclass Luks2:\n\tluks_dev_path: Path\n\tmapper_name: Optional[str] = None\n\tpassword: Optional[str] = None\n\tkey_file: Optional[Path] = None\n\tauto_unmount: bool = False\n\n\t# will be set internally after unlocking the device\n\t_mapper_dev: Optional[Path] = None\n\n\t@property\n\tdef mapper_dev(self) -> Optional[Path]:\n\t\tif self.mapper_name:\n\t\t\treturn Path(f'/dev/mapper/{self.mapper_name}')\n\t\treturn None\n\n\tdef __post_init__(self):\n\t\tif self.luks_dev_path is None:\n\t\t\traise ValueError('Partition must have a path set')\n\n\tdef __enter__(self):\n\t\tself.unlock(self.key_file)\n\n\tdef __exit__(self, *args: str, **kwargs: str):\n\t\tif self.auto_unmount:\n\t\t\tself.lock()\n\n\tdef _default_key_file(self) -> Path:\n\t\treturn Path(f'/tmp/{self.luks_dev_path.name}.disk_pw')\n\n\tdef _password_bytes(self) -> bytes:\n\t\tif not self.password:\n\t\t\traise ValueError('Password for luks2 device was not specified')\n\n\t\tif isinstance(self.password, bytes):\n\t\t\treturn self.password\n\t\telse:\n\t\t\treturn bytes(self.password, 'UTF-8')\n\n\tdef encrypt(\n\t\tself,\n\t\tkey_size: int = 512,\n\t\thash_type: str = 'sha512',\n\t\titer_time: int = 10000,\n\t\tkey_file: Optional[Path] = None\n\t) -> Path:\n\t\tinfo(f'Luks2 encrypting: {self.luks_dev_path}')\n\n\t\tbyte_password = self._password_bytes()\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = self._default_key_file()\n\n\t\t\t\twith open(key_file, 'wb') as fh:\n\t\t\t\t\tfh.write(byte_password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2id',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', str(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', str(self.luks_dev_path),\n\t\t])\n\n\t\t# Retry formatting the volume because archinstall can some times be too quick\n\t\t# which generates a \"Device /dev/sdX does not exist or access denied.\" between\n\t\t# setting up partitions and us trying to encrypt it.\n\t\tfor retry_attempt in range(storage['DISK_RETRY_ATTEMPTS']):\n\t\t\ttry:\n\t\t\t\tSysCommand(cryptsetup_args)\n\t\t\t\tbreak\n\t\t\texcept SysCallError as err:\n\t\t\t\ttime.sleep(storage['DISK_TIMEOUTS'])\n\n\t\t\t\tif retry_attempt != storage['DISK_RETRY_ATTEMPTS'] - 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif err.exit_code == 1:\n\t\t\t\t\tinfo(f'luks2 partition currently in use: {self.luks_dev_path}')\n\t\t\t\t\tinfo('Attempting to unmount, crypt-close and trying encryption again')\n\n\t\t\t\t\tself.lock()\n\t\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\t\tSysCommand(cryptsetup_args)\n\t\t\t\telse:\n\t\t\t\t\traise DiskError(f'Could not encrypt volume \"{self.luks_dev_path}\": {err}')\n\n\t\treturn key_file\n\n\tdef _get_luks_uuid(self) -> str:\n\t\tcommand = f'/usr/bin/cryptsetup luksUUID {self.luks_dev_path}'\n\n\t\ttry:\n\t\t\treturn SysCommand(command).decode().strip() # type: ignore\n\t\texcept SysCallError as err:\n\t\t\tinfo(f'Unable to get UUID for Luks device: {self.luks_dev_path}')\n\t\t\traise err\n\n\tdef is_unlocked(self) -> bool:\n\t\treturn self.mapper_name is not None and 
Path(f'/dev/mapper/{self.mapper_name}').exists()\n\n\tdef unlock(self, key_file: Optional[Path] = None):\n\t\t\"\"\"\n\t\tUnlocks the luks device, an optional key file location for unlocking can be specified,\n\t\totherwise a default location for the key file will be used.\n\n\t\t:param key_file: An alternative key file\n\t\t:type key_file: Path\n\t\t\"\"\"\n\t\tdebug(f'Unlocking luks2 device: {self.luks_dev_path}')\n\n\t\tif not self.mapper_name:\n\t\t\traise ValueError('mapper name missing')\n\n\t\tbyte_password = self._password_bytes()\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = self._default_key_file()\n\n\t\t\t\twith open(key_file, 'wb') as fh:\n\t\t\t\t\tfh.write(byte_password)\n\n\t\twait_timer = time.time()\n\t\twhile Path(self.luks_dev_path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tSysCommand(f'/usr/bin/cryptsetup open {self.luks_dev_path} {self.mapper_name} --key-file {key_file} --type luks2')\n\n\t\tif not self.mapper_dev or not self.mapper_dev.is_symlink():\n\t\t\traise DiskError(f'Failed to open luks2 device: {self.luks_dev_path}')\n\n\tdef lock(self):\n\t\tdisk.device_handler.umount(self.luks_dev_path)\n\n\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t# For instance: /dev/sda\n\t\tdisk.device_handler.partprobe(self.luks_dev_path)\n\t\tlsblk_info = disk.get_lsblk_info(self.luks_dev_path)\n\n\t\t# For each child (sub-partition/sub-device)\n\t\tfor child in lsblk_info.children:\n\t\t\t# Unmount the child location\n\t\t\tfor mountpoint in child.mountpoints:\n\t\t\t\tdebug(f'Unmounting {mountpoint}')\n\t\t\t\tdisk.device_handler.umount(mountpoint, recursive=True)\n\n\t\t\t# And close it if possible.\n\t\t\tdebug(f\"Closing crypt device {child.name}\")\n\t\t\tSysCommand(f\"cryptsetup close {child.name}\")\n\n\t\tself._mapper_dev = None\n\n\tdef create_keyfile(self, target_path: Path, override: bool = False):\n\t\t\"\"\"\n\t\tRoutine to create keyfiles, so it can be moved elsewhere\n\t\t\"\"\"\n\t\tif self.mapper_name is None:\n\t\t\traise ValueError('Mapper name must be provided')\n\n\t\t# Once we store the key as ../xyzloop.key systemd-cryptsetup can\n\t\t# automatically load this key if we name the device to \"xyzloop\"\n\t\tkey_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name\n\t\tkey_file = key_file_path / '.key'\n\t\tcrypttab_path = target_path / 'etc/crypttab'\n\n\t\tif key_file.exists():\n\t\t\tif not override:\n\t\t\t\tinfo(f'Key file {key_file} already exists, keeping existing')\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tinfo(f'Key file {key_file} already exists, overriding')\n\n\t\tkey_file_path.mkdir(parents=True, exist_ok=True)\n\n\t\twith open(key_file, \"w\") as keyfile:\n\t\t\tkeyfile.write(generate_password(length=512))\n\n\t\tkey_file_path.chmod(0o400)\n\n\t\tself._add_key(key_file)\n\t\tself._crypttab(crypttab_path, key_file, options=[\"luks\", \"key-slot=1\"])\n\n\tdef _add_key(self, key_file: Path):\n\t\tinfo(f'Adding additional key-file {key_file}')\n\n\t\tcommand = f'/usr/bin/cryptsetup -q -v luksAddKey {self.luks_dev_path} {key_file}'\n\t\tworker = SysCommandWorker(command, environment_vars={'LC_ALL': 'C'})\n\t\tpw_injected = False\n\n\t\twhile worker.is_alive():\n\t\t\tif b'Enter any existing passphrase' in worker and pw_injected is False:\n\t\t\t\tworker.write(self._password_bytes())\n\t\t\t\tpw_injected = True\n\n\t\tif worker.exit_code != 0:\n\t\t\traise 
DiskError(f'Could not add encryption key {key_file} to {self.luks_dev_path}: {worker.decode()}')\n\n\tdef _crypttab(\n\t\tself,\n\t\tcrypttab_path: Path,\n\t\tkey_file: Path,\n\t\toptions: List[str]\n\t) -> None:\n\t\tinfo(f'Adding crypttab entry for key {key_file}')\n\n\t\twith open(crypttab_path, 'a') as crypttab:\n\t\t\topt = ','.join(options)\n\t\t\tuuid = self._get_luks_uuid()\n\t\t\trow = f\"{self.mapper_name} UUID={uuid} {key_file} {opt}\\n\"\n\t\t\tcrypttab.write(row)\n", "path": "archinstall/lib/luks.py"}]}
| 3,661 | 388 |
gh_patches_debug_34582
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-14903
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User Dashboard can become permanently broken when misconfiguring DEFAULT_DASHBOARD
### Deployment Type
Self-hosted
### NetBox Version
v3.7.0
### Python Version
3.9
### Steps to Reproduce
1. User johndoe modifies their dashboard over the GUI
2. Administrator changes DEFAULT_DASHBOARD in configuration.py with a broken configuration (Assigned a wrong value in a bookmarks widget)
3. Bookmark widget looks fine without logging in since it won't load any data
4. After johndoe logs in, they reset their dashboard
5. Site crashes with a Field Error exception since the dashboard is misconfigured
6. johndoe can't reset their dashboard anymore, even after the error in DEFAULT_DASHBOARD is corrected
### Expected Behavior
When resetting the dashboard, I would expect it to now always follow the changes of DEFAULT_DASHBOARD.
### Observed Behavior
Resetting the dashboard seems to copy the current state of DEFAULT_DASHBOARD. If that state is broken, a user cannot reset it anymore.
Only the main page with the dashboard crashes.
</issue>
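To make the failure mode concrete, here is a rough, self-contained sketch (the function and widget names are invented for illustration and are not NetBox's real API) of how guarding the per-user layout with a fallback keeps one broken configuration from making the page unrenderable:

```
# Hypothetical stand-ins for the real dashboard machinery, for illustration only.
DEFAULT_DASHBOARD = [{"widget": "extras.BookmarksWidget", "width": 4, "height": 5}]

def build_layout(config):
    # Pretend this raises when a widget was saved with an invalid configuration,
    # much like the Field Error described in the report.
    for widget in config:
        if "widget" not in widget:
            raise ValueError(f"invalid widget definition: {widget!r}")
    return [w["widget"] for w in config]

def get_layout_or_default(user_config):
    # Guarding the user-specific layout means a broken saved copy can no longer
    # lock the user out of the dashboard page.
    try:
        return build_layout(user_config)
    except Exception:
        return build_layout(DEFAULT_DASHBOARD)

print(get_layout_or_default([{"width": 4, "height": 5}]))  # falls back to the default
```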
<code>
[start of netbox/extras/dashboard/utils.py]
1 import uuid
2
3 from django.conf import settings
4 from django.core.exceptions import ObjectDoesNotExist
5
6 from netbox.registry import registry
7 from extras.constants import DEFAULT_DASHBOARD
8
9 __all__ = (
10 'get_dashboard',
11 'get_default_dashboard',
12 'get_widget_class',
13 'register_widget',
14 )
15
16
17 def register_widget(cls):
18 """
19 Decorator for registering a DashboardWidget class.
20 """
21 app_label = cls.__module__.split('.', maxsplit=1)[0]
22 label = f'{app_label}.{cls.__name__}'
23 registry['widgets'][label] = cls
24
25 return cls
26
27
28 def get_widget_class(name):
29 """
30 Return a registered DashboardWidget class identified by its name.
31 """
32 try:
33 return registry['widgets'][name]
34 except KeyError:
35 raise ValueError(f"Unregistered widget class: {name}")
36
37
38 def get_dashboard(user):
39 """
40 Return the Dashboard for a given User if one exists, or generate a default dashboard.
41 """
42 if user.is_anonymous:
43 dashboard = get_default_dashboard()
44 else:
45 try:
46 dashboard = user.dashboard
47 except ObjectDoesNotExist:
48 # Create a dashboard for this user
49 dashboard = get_default_dashboard()
50 dashboard.user = user
51 dashboard.save()
52
53 return dashboard
54
55
56 def get_default_dashboard():
57 from extras.models import Dashboard
58
59 dashboard = Dashboard()
60 default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD
61
62 for widget in default_config:
63 id = str(uuid.uuid4())
64 dashboard.layout.append({
65 'id': id,
66 'w': widget['width'],
67 'h': widget['height'],
68 'x': widget.get('x'),
69 'y': widget.get('y'),
70 })
71 dashboard.config[id] = {
72 'class': widget['widget'],
73 'title': widget.get('title'),
74 'color': widget.get('color'),
75 'config': widget.get('config', {}),
76 }
77
78 return dashboard
79
[end of netbox/extras/dashboard/utils.py]
[start of netbox/netbox/views/misc.py]
1 import re
2 from collections import namedtuple
3
4 from django.conf import settings
5 from django.contrib.contenttypes.models import ContentType
6 from django.core.cache import cache
7 from django.shortcuts import redirect, render
8 from django.views.generic import View
9 from django_tables2 import RequestConfig
10 from packaging import version
11
12 from extras.dashboard.utils import get_dashboard
13 from netbox.forms import SearchForm
14 from netbox.search import LookupTypes
15 from netbox.search.backends import search_backend
16 from netbox.tables import SearchTable
17 from utilities.htmx import is_htmx
18 from utilities.paginator import EnhancedPaginator, get_paginate_count
19
20 __all__ = (
21 'HomeView',
22 'SearchView',
23 )
24
25 Link = namedtuple('Link', ('label', 'viewname', 'permission', 'count'))
26
27
28 class HomeView(View):
29 template_name = 'home.html'
30
31 def get(self, request):
32 if settings.LOGIN_REQUIRED and not request.user.is_authenticated:
33 return redirect('login')
34
35 # Construct the user's custom dashboard layout
36 dashboard = get_dashboard(request.user).get_layout()
37
38 # Check whether a new release is available. (Only for staff/superusers.)
39 new_release = None
40 if request.user.is_staff or request.user.is_superuser:
41 latest_release = cache.get('latest_release')
42 if latest_release:
43 release_version, release_url = latest_release
44 if release_version > version.parse(settings.VERSION):
45 new_release = {
46 'version': str(release_version),
47 'url': release_url,
48 }
49
50 return render(request, self.template_name, {
51 'dashboard': dashboard,
52 'new_release': new_release,
53 })
54
55
56 class SearchView(View):
57
58 def get(self, request):
59 results = []
60 highlight = None
61
62 # Initialize search form
63 form = SearchForm(request.GET) if 'q' in request.GET else SearchForm()
64
65 if form.is_valid():
66
67 # Restrict results by object type
68 object_types = []
69 for obj_type in form.cleaned_data['obj_types']:
70 app_label, model_name = obj_type.split('.')
71 object_types.append(ContentType.objects.get_by_natural_key(app_label, model_name))
72
73 lookup = form.cleaned_data['lookup'] or LookupTypes.PARTIAL
74 results = search_backend.search(
75 form.cleaned_data['q'],
76 user=request.user,
77 object_types=object_types,
78 lookup=lookup
79 )
80
81 # If performing a regex search, pass the highlight value as a compiled pattern
82 if form.cleaned_data['lookup'] == LookupTypes.REGEX:
83 try:
84 highlight = re.compile(f"({form.cleaned_data['q']})", flags=re.IGNORECASE)
85 except re.error:
86 pass
87 elif form.cleaned_data['lookup'] != LookupTypes.EXACT:
88 highlight = form.cleaned_data['q']
89
90 table = SearchTable(results, highlight=highlight)
91
92 # Paginate the table results
93 RequestConfig(request, {
94 'paginator_class': EnhancedPaginator,
95 'per_page': get_paginate_count(request)
96 }).configure(table)
97
98 # If this is an HTMX request, return only the rendered table HTML
99 if is_htmx(request):
100 return render(request, 'htmx/table.html', {
101 'table': table,
102 })
103
104 return render(request, 'search.html', {
105 'form': form,
106 'table': table,
107 })
108
[end of netbox/netbox/views/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/extras/dashboard/utils.py b/netbox/extras/dashboard/utils.py
--- a/netbox/extras/dashboard/utils.py
+++ b/netbox/extras/dashboard/utils.py
@@ -53,13 +53,13 @@
return dashboard
-def get_default_dashboard():
+def get_default_dashboard(config=None):
from extras.models import Dashboard
dashboard = Dashboard()
- default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD
+ config = config or settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD
- for widget in default_config:
+ for widget in config:
id = str(uuid.uuid4())
dashboard.layout.append({
'id': id,
diff --git a/netbox/netbox/views/misc.py b/netbox/netbox/views/misc.py
--- a/netbox/netbox/views/misc.py
+++ b/netbox/netbox/views/misc.py
@@ -2,14 +2,17 @@
from collections import namedtuple
from django.conf import settings
+from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.shortcuts import redirect, render
+from django.utils.translation import gettext_lazy as _
from django.views.generic import View
from django_tables2 import RequestConfig
from packaging import version
-from extras.dashboard.utils import get_dashboard
+from extras.constants import DEFAULT_DASHBOARD
+from extras.dashboard.utils import get_dashboard, get_default_dashboard
from netbox.forms import SearchForm
from netbox.search import LookupTypes
from netbox.search.backends import search_backend
@@ -33,7 +36,13 @@
return redirect('login')
# Construct the user's custom dashboard layout
- dashboard = get_dashboard(request.user).get_layout()
+ try:
+ dashboard = get_dashboard(request.user).get_layout()
+ except Exception:
+ messages.error(request, _(
+ "There was an error loading the dashboard configuration. A default dashboard is in use."
+ ))
+ dashboard = get_default_dashboard(config=DEFAULT_DASHBOARD).get_layout()
# Check whether a new release is available. (Only for staff/superusers.)
new_release = None
|
{"golden_diff": "diff --git a/netbox/extras/dashboard/utils.py b/netbox/extras/dashboard/utils.py\n--- a/netbox/extras/dashboard/utils.py\n+++ b/netbox/extras/dashboard/utils.py\n@@ -53,13 +53,13 @@\n return dashboard\n \n \n-def get_default_dashboard():\n+def get_default_dashboard(config=None):\n from extras.models import Dashboard\n \n dashboard = Dashboard()\n- default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n+ config = config or settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n \n- for widget in default_config:\n+ for widget in config:\n id = str(uuid.uuid4())\n dashboard.layout.append({\n 'id': id,\ndiff --git a/netbox/netbox/views/misc.py b/netbox/netbox/views/misc.py\n--- a/netbox/netbox/views/misc.py\n+++ b/netbox/netbox/views/misc.py\n@@ -2,14 +2,17 @@\n from collections import namedtuple\n \n from django.conf import settings\n+from django.contrib import messages\n from django.contrib.contenttypes.models import ContentType\n from django.core.cache import cache\n from django.shortcuts import redirect, render\n+from django.utils.translation import gettext_lazy as _\n from django.views.generic import View\n from django_tables2 import RequestConfig\n from packaging import version\n \n-from extras.dashboard.utils import get_dashboard\n+from extras.constants import DEFAULT_DASHBOARD\n+from extras.dashboard.utils import get_dashboard, get_default_dashboard\n from netbox.forms import SearchForm\n from netbox.search import LookupTypes\n from netbox.search.backends import search_backend\n@@ -33,7 +36,13 @@\n return redirect('login')\n \n # Construct the user's custom dashboard layout\n- dashboard = get_dashboard(request.user).get_layout()\n+ try:\n+ dashboard = get_dashboard(request.user).get_layout()\n+ except Exception:\n+ messages.error(request, _(\n+ \"There was an error loading the dashboard configuration. A default dashboard is in use.\"\n+ ))\n+ dashboard = get_default_dashboard(config=DEFAULT_DASHBOARD).get_layout()\n \n # Check whether a new release is available. (Only for staff/superusers.)\n new_release = None\n", "issue": "User Dashboard can become permanently broken when misconfiguring DEFAULT_DASHBOARD\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.0\n\n### Python Version\n\n3.9\n\n### Steps to Reproduce\n\n1. User johndoe modifies their dashboard over the GUI\r\n2. Administrator changes DEFAULT_DASHBOARD in configuration.py with a broken configuration (Assigned a wrong value in a bookmarks widget)\r\n3. Bookmark widget looks fine without logging in since it won't load any data\r\n4. After johndoe logs in, they reset their dashboard\r\n5. Site crashes with an Field Error Exception since the dashboard is misconfigured\r\n6. johndoe can't reset their dashboard anymore, even after the error in DEFAULT_DASHBOARD is corrected\n\n### Expected Behavior\n\nWhen resetting the dashboard, I would expect it to now always follow the changes of DEFAULT_DASHBOARD.\n\n### Observed Behavior\n\nResetting the dashboard seems to copy the current state of DEFAULT_DASHBOARD. 
If that state is broken, a user cannot reset it anymore.\r\nOnly the main page with the dashboard crashes.\n", "before_files": [{"content": "import uuid\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom netbox.registry import registry\nfrom extras.constants import DEFAULT_DASHBOARD\n\n__all__ = (\n 'get_dashboard',\n 'get_default_dashboard',\n 'get_widget_class',\n 'register_widget',\n)\n\n\ndef register_widget(cls):\n \"\"\"\n Decorator for registering a DashboardWidget class.\n \"\"\"\n app_label = cls.__module__.split('.', maxsplit=1)[0]\n label = f'{app_label}.{cls.__name__}'\n registry['widgets'][label] = cls\n\n return cls\n\n\ndef get_widget_class(name):\n \"\"\"\n Return a registered DashboardWidget class identified by its name.\n \"\"\"\n try:\n return registry['widgets'][name]\n except KeyError:\n raise ValueError(f\"Unregistered widget class: {name}\")\n\n\ndef get_dashboard(user):\n \"\"\"\n Return the Dashboard for a given User if one exists, or generate a default dashboard.\n \"\"\"\n if user.is_anonymous:\n dashboard = get_default_dashboard()\n else:\n try:\n dashboard = user.dashboard\n except ObjectDoesNotExist:\n # Create a dashboard for this user\n dashboard = get_default_dashboard()\n dashboard.user = user\n dashboard.save()\n\n return dashboard\n\n\ndef get_default_dashboard():\n from extras.models import Dashboard\n\n dashboard = Dashboard()\n default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n\n for widget in default_config:\n id = str(uuid.uuid4())\n dashboard.layout.append({\n 'id': id,\n 'w': widget['width'],\n 'h': widget['height'],\n 'x': widget.get('x'),\n 'y': widget.get('y'),\n })\n dashboard.config[id] = {\n 'class': widget['widget'],\n 'title': widget.get('title'),\n 'color': widget.get('color'),\n 'config': widget.get('config', {}),\n }\n\n return dashboard\n", "path": "netbox/extras/dashboard/utils.py"}, {"content": "import re\nfrom collections import namedtuple\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.cache import cache\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import View\nfrom django_tables2 import RequestConfig\nfrom packaging import version\n\nfrom extras.dashboard.utils import get_dashboard\nfrom netbox.forms import SearchForm\nfrom netbox.search import LookupTypes\nfrom netbox.search.backends import search_backend\nfrom netbox.tables import SearchTable\nfrom utilities.htmx import is_htmx\nfrom utilities.paginator import EnhancedPaginator, get_paginate_count\n\n__all__ = (\n 'HomeView',\n 'SearchView',\n)\n\nLink = namedtuple('Link', ('label', 'viewname', 'permission', 'count'))\n\n\nclass HomeView(View):\n template_name = 'home.html'\n\n def get(self, request):\n if settings.LOGIN_REQUIRED and not request.user.is_authenticated:\n return redirect('login')\n\n # Construct the user's custom dashboard layout\n dashboard = get_dashboard(request.user).get_layout()\n\n # Check whether a new release is available. 
(Only for staff/superusers.)\n new_release = None\n if request.user.is_staff or request.user.is_superuser:\n latest_release = cache.get('latest_release')\n if latest_release:\n release_version, release_url = latest_release\n if release_version > version.parse(settings.VERSION):\n new_release = {\n 'version': str(release_version),\n 'url': release_url,\n }\n\n return render(request, self.template_name, {\n 'dashboard': dashboard,\n 'new_release': new_release,\n })\n\n\nclass SearchView(View):\n\n def get(self, request):\n results = []\n highlight = None\n\n # Initialize search form\n form = SearchForm(request.GET) if 'q' in request.GET else SearchForm()\n\n if form.is_valid():\n\n # Restrict results by object type\n object_types = []\n for obj_type in form.cleaned_data['obj_types']:\n app_label, model_name = obj_type.split('.')\n object_types.append(ContentType.objects.get_by_natural_key(app_label, model_name))\n\n lookup = form.cleaned_data['lookup'] or LookupTypes.PARTIAL\n results = search_backend.search(\n form.cleaned_data['q'],\n user=request.user,\n object_types=object_types,\n lookup=lookup\n )\n\n # If performing a regex search, pass the highlight value as a compiled pattern\n if form.cleaned_data['lookup'] == LookupTypes.REGEX:\n try:\n highlight = re.compile(f\"({form.cleaned_data['q']})\", flags=re.IGNORECASE)\n except re.error:\n pass\n elif form.cleaned_data['lookup'] != LookupTypes.EXACT:\n highlight = form.cleaned_data['q']\n\n table = SearchTable(results, highlight=highlight)\n\n # Paginate the table results\n RequestConfig(request, {\n 'paginator_class': EnhancedPaginator,\n 'per_page': get_paginate_count(request)\n }).configure(table)\n\n # If this is an HTMX request, return only the rendered table HTML\n if is_htmx(request):\n return render(request, 'htmx/table.html', {\n 'table': table,\n })\n\n return render(request, 'search.html', {\n 'form': form,\n 'table': table,\n })\n", "path": "netbox/netbox/views/misc.py"}]}
| 2,283 | 460 |
gh_patches_debug_38759
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-2501
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update django-filter to 1.0
## Details
Sorry for deleting the issue template: This is about technical debt :) It may not be immediately critical, but the advice from the author of django-filter is that it's worth it.
django-filter 1.0 has changes that are backwards incompatible. The release notes are here:
http://django-filter.readthedocs.io/en/latest/migration.html
It means, amongst other things, that everywhere `Filter` object instances are iterated on, we have to [add the `.qs` method](http://django-filter.readthedocs.io/en/latest/migration.html#queryset-methods-are-no-longer-proxied).
Pin django-filter
The new 1.0 series is incompatible, and I've opened #2498 for this purpose.
Meanwhile, as the current master is broken because of this, the version should be pinned - I guess it's sort of bad practice to use the `master` branch anyways, and I'm thinking it's possibly also an outdated decision now.
This fixes #2495 and #2490
</issue>
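As a rough aid for the migration chore described above (a toy helper written for this note, not part of the repository), one can scan the code for the pre-1.0 patterns: `lookup_type=` keyword arguments that become `lookup_expr=`, and direct iteration over a `FilterSet` that now has to go through `.qs`:

```
import re

# Pre-1.0 django-filter usages and the corresponding migration advice.
OLD_PATTERNS = {
    r"lookup_type\s*=": "rename the keyword argument to lookup_expr=",
    r"for\s+\w+\s+in\s+\w*filter\w*\s*:": "iterate over <filterset>.qs instead of the FilterSet itself",
}

sample = '''
name = django_filters.CharFilter(label="Name", name="name", lookup_type="icontains")
for project in project_filter:
    print(project)
'''

for lineno, line in enumerate(sample.splitlines(), start=1):
    for pattern, advice in OLD_PATTERNS.items():
        if re.search(pattern, line):
            print(f"line {lineno}: {advice}: {line.strip()}")
```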
<code>
[start of readthedocs/builds/filters.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 import django_filters
4
5 from readthedocs.builds import constants
6 from readthedocs.builds.models import Build, Version
7
8
9 ANY_REPO = (
10 ('', _('Any')),
11 )
12
13 BUILD_TYPES = ANY_REPO + constants.BUILD_TYPES
14
15
16 class VersionSlugFilter(django_filters.FilterSet):
17
18 class Meta:
19 model = Version
20 fields = {
21 'identifier': ['icontains'],
22 'slug': ['icontains'],
23 }
24
25
26 class VersionFilter(django_filters.FilterSet):
27 project = django_filters.CharFilter(name='project__slug')
28 # Allow filtering on slug= or version=
29 slug = django_filters.CharFilter(label=_("Name"), name='slug',
30 lookup_type='exact')
31 version = django_filters.CharFilter(label=_("Version"), name='slug',
32 lookup_type='exact')
33
34 class Meta:
35 model = Version
36 fields = ['project', 'slug', 'version']
37
38
39 class BuildFilter(django_filters.FilterSet):
40 date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_type='range')
41 type = django_filters.ChoiceFilter(label=_("Build Type"),
42 choices=BUILD_TYPES)
43
44 class Meta:
45 model = Build
46 fields = ['type', 'date', 'success']
47
[end of readthedocs/builds/filters.py]
[start of readthedocs/projects/filters.py]
1 """Project query filters"""
2
3 from django.utils.translation import ugettext_lazy as _
4
5 import django_filters
6
7 from readthedocs.projects import constants
8 from readthedocs.projects.models import Project, Domain
9
10 ANY_REPO = (
11 ('', _('Any')),
12 )
13
14 REPO_CHOICES = ANY_REPO + constants.REPO_CHOICES
15
16
17 def sort_slug(queryset, query):
18 """Fuzzy filter for slug fields
19
20 Returns sorted queryset where slug approximates ``query``
21 """
22 queryset = queryset.filter(slug__icontains=query)
23 ret = []
24 ret.extend([q.pk for q in queryset
25 if q.slug == query])
26 ret.extend([q.pk for q in queryset
27 if q.slug.startswith(query) and q.pk not in ret])
28 ret.extend([q.pk for q in queryset
29 if q.slug.endswith(query) and q.pk not in ret])
30 ret.extend([q.pk for q in queryset
31 if q.pk not in ret])
32
33 # Create a QS preserving ordering
34 clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i)
35 for i, pk in enumerate(ret)])
36 ordering = 'CASE %s END' % clauses
37 ret_queryset = Project.objects.filter(pk__in=ret).extra(
38 select={'ordering': ordering}, order_by=('ordering',))
39 return ret_queryset
40
41
42 class ProjectFilter(django_filters.FilterSet):
43
44 """Project filter for filter views"""
45
46 name = django_filters.CharFilter(label=_("Name"), name='name',
47 lookup_type='icontains')
48 slug = django_filters.CharFilter(label=_("Slug"), name='slug',
49 lookup_type='icontains')
50 pub_date = django_filters.DateRangeFilter(label=_("Created Date"),
51 name="pub_date")
52 repo = django_filters.CharFilter(label=_("Repository URL"), name='repo',
53 lookup_type='icontains')
54 repo_type = django_filters.ChoiceFilter(
55 label=_("Repository Type"),
56 name='repo',
57 lookup_type='icontains',
58 choices=REPO_CHOICES,
59 )
60
61 class Meta:
62 model = Project
63 fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type']
64
65
66 class DomainFilter(django_filters.FilterSet):
67 project = django_filters.CharFilter(label=_("Project"), name='project__slug',
68 lookup_type='exact')
69
70 class Meta:
71 model = Domain
72 fields = ['domain', 'project', 'canonical']
73
[end of readthedocs/projects/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/readthedocs/builds/filters.py b/readthedocs/builds/filters.py
--- a/readthedocs/builds/filters.py
+++ b/readthedocs/builds/filters.py
@@ -27,9 +27,9 @@
project = django_filters.CharFilter(name='project__slug')
# Allow filtering on slug= or version=
slug = django_filters.CharFilter(label=_("Name"), name='slug',
- lookup_type='exact')
+ lookup_expr='exact')
version = django_filters.CharFilter(label=_("Version"), name='slug',
- lookup_type='exact')
+ lookup_expr='exact')
class Meta:
model = Version
@@ -37,7 +37,7 @@
class BuildFilter(django_filters.FilterSet):
- date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_type='range')
+ date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_expr='range')
type = django_filters.ChoiceFilter(label=_("Build Type"),
choices=BUILD_TYPES)
diff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py
--- a/readthedocs/projects/filters.py
+++ b/readthedocs/projects/filters.py
@@ -44,17 +44,17 @@
"""Project filter for filter views"""
name = django_filters.CharFilter(label=_("Name"), name='name',
- lookup_type='icontains')
+ lookup_expr='icontains')
slug = django_filters.CharFilter(label=_("Slug"), name='slug',
- lookup_type='icontains')
+ lookup_expr='icontains')
pub_date = django_filters.DateRangeFilter(label=_("Created Date"),
name="pub_date")
repo = django_filters.CharFilter(label=_("Repository URL"), name='repo',
- lookup_type='icontains')
+ lookup_expr='icontains')
repo_type = django_filters.ChoiceFilter(
label=_("Repository Type"),
name='repo',
- lookup_type='icontains',
+ lookup_expr='icontains',
choices=REPO_CHOICES,
)
@@ -65,7 +65,7 @@
class DomainFilter(django_filters.FilterSet):
project = django_filters.CharFilter(label=_("Project"), name='project__slug',
- lookup_type='exact')
+ lookup_expr='exact')
class Meta:
model = Domain
|
{"golden_diff": "diff --git a/readthedocs/builds/filters.py b/readthedocs/builds/filters.py\n--- a/readthedocs/builds/filters.py\n+++ b/readthedocs/builds/filters.py\n@@ -27,9 +27,9 @@\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n \n class Meta:\n model = Version\n@@ -37,7 +37,7 @@\n \n \n class BuildFilter(django_filters.FilterSet):\n- date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_type='range')\n+ date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_expr='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n \ndiff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py\n--- a/readthedocs/projects/filters.py\n+++ b/readthedocs/projects/filters.py\n@@ -44,17 +44,17 @@\n \"\"\"Project filter for filter views\"\"\"\n \n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n repo_type = django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n- lookup_type='icontains',\n+ lookup_expr='icontains',\n choices=REPO_CHOICES,\n )\n \n@@ -65,7 +65,7 @@\n \n class DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n \n class Meta:\n model = Domain\n", "issue": "Update django-filter to 1.0\n## Details\r\n\r\nSorry for deleting the issue template: This is about technical debt :) It may not be immediately critical, but the advice from the author of django-filter is that it's worth it.\r\n\r\ndjango-filter 1.0 has changes that are backwards incompatible. 
The release notes are here:\r\n\r\nhttp://django-filter.readthedocs.io/en/latest/migration.html\r\n\r\nIt means, amongst other this, that all where `Filter` object instances are iterated on, we have to [add the `.qs` method](http://django-filter.readthedocs.io/en/latest/migration.html#queryset-methods-are-no-longer-proxied).\nPin django-filter\nThe new 1.0 series is incompatible, and I've opened #2498 for this purpose.\r\n\r\nMeanwhile, as the current master is broken because of this, the version should be pinned - I guess it's sort of bad practice to use the `master` branch anyways, am thinking it's possibly also an outdated decision now.\r\n\r\nThis fixes #2495 and #2490\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.builds import constants\nfrom readthedocs.builds.models import Build, Version\n\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nBUILD_TYPES = ANY_REPO + constants.BUILD_TYPES\n\n\nclass VersionSlugFilter(django_filters.FilterSet):\n\n class Meta:\n model = Version\n fields = {\n 'identifier': ['icontains'],\n 'slug': ['icontains'],\n }\n\n\nclass VersionFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n lookup_type='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n lookup_type='exact')\n\n class Meta:\n model = Version\n fields = ['project', 'slug', 'version']\n\n\nclass BuildFilter(django_filters.FilterSet):\n date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_type='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n\n class Meta:\n model = Build\n fields = ['type', 'date', 'success']\n", "path": "readthedocs/builds/filters.py"}, {"content": "\"\"\"Project query filters\"\"\"\n\nfrom django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, Domain\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nREPO_CHOICES = ANY_REPO + constants.REPO_CHOICES\n\n\ndef sort_slug(queryset, query):\n \"\"\"Fuzzy filter for slug fields\n\n Returns sorted queryset where slug approximates ``query``\n \"\"\"\n queryset = queryset.filter(slug__icontains=query)\n ret = []\n ret.extend([q.pk for q in queryset\n if q.slug == query])\n ret.extend([q.pk for q in queryset\n if q.slug.startswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.slug.endswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.pk not in ret])\n\n # Create a QS preserving ordering\n clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i)\n for i, pk in enumerate(ret)])\n ordering = 'CASE %s END' % clauses\n ret_queryset = Project.objects.filter(pk__in=ret).extra(\n select={'ordering': ordering}, order_by=('ordering',))\n return ret_queryset\n\n\nclass ProjectFilter(django_filters.FilterSet):\n\n \"\"\"Project filter for filter views\"\"\"\n\n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n lookup_type='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n lookup_type='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n lookup_type='icontains')\n repo_type 
= django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n lookup_type='icontains',\n choices=REPO_CHOICES,\n )\n\n class Meta:\n model = Project\n fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type']\n\n\nclass DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n lookup_type='exact')\n\n class Meta:\n model = Domain\n fields = ['domain', 'project', 'canonical']\n", "path": "readthedocs/projects/filters.py"}]}
| 1,801 | 514 |
gh_patches_debug_11048
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-2986
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KFP 0.2.1 SDK client broken when inverse proxy host is used
This bug was introduced in #2896
Under the hood, `subprocess.check_output()` returns bytes instead of a string. However, in `kfp_server_api/configuration.py` it was assumed to be a string, which caused an error like the following:
```
~/.local/lib/python3.5/site-packages/kfp_server_api/configuration.py in get_api_key_with_prefix(self, identifier)
203 if (self.api_key.get(identifier) and
204 self.api_key_prefix.get(identifier)):
--> 205 return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501
206 elif self.api_key.get(identifier):
207 return self.api_key[identifier]
TypeError: Can't convert 'bytes' object to str implicitly
```
This impacts some CUJs involving the TFX CLI and CAIP notebook experiences. I will send out a fix today.
</issue>
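A small standalone sketch of the type mismatch described above (an illustration only, not the actual KFP fix): `check_output()` yields `bytes`, so the value must be decoded before it is concatenated with `str` prefixes.

```
import subprocess
import sys

# check_output() returns bytes by default.
raw = subprocess.check_output([sys.executable, "-c", "print('fake-access-token')"]).rstrip()
print(type(raw))  # <class 'bytes'>

try:
    header = "Bearer " + raw  # mirrors api_key_prefix[identifier] + ' ' + api_key[identifier]
except TypeError as exc:
    print(f"TypeError, as in the report: {exc}")

# Decoding the output (or passing text=True to check_output) avoids the mismatch.
token = raw.decode("utf-8")
print("Bearer " + token)
```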
<code>
[start of sdk/python/kfp/_auth.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 import os
17 import subprocess
18 import google.auth
19 import google.auth.app_engine
20 import google.auth.compute_engine.credentials
21 import google.auth.iam
22 from google.auth.transport.requests import Request
23 import google.oauth2.credentials
24 import google.oauth2.service_account
25 import requests_toolbelt.adapters.appengine
26 from webbrowser import open_new_tab
27 import requests
28 import json
29
30 IAM_SCOPE = 'https://www.googleapis.com/auth/iam'
31 OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
32 LOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')
33
34 def get_gcp_access_token():
35 """Get and return GCP access token for the current Application Default
36 Credentials. If not set, returns None. For more information, see
37 https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token
38 """
39 args = ['gcloud', 'auth', 'print-access-token']
40 return subprocess.check_output(args).rstrip()
41
42 def get_auth_token(client_id, other_client_id, other_client_secret):
43 """Gets auth token from default service account or user account."""
44 if os.path.exists(LOCAL_KFP_CREDENTIAL):
45 # fetch IAP auth token using the locally stored credentials.
46 with open(LOCAL_KFP_CREDENTIAL, 'r') as f:
47 credentials = json.load(f)
48 if client_id in credentials:
49 return id_token_from_refresh_token(credentials[client_id]['other_client_id'],
50 credentials[client_id]['other_client_secret'],
51 credentials[client_id]['refresh_token'],
52 client_id)
53 if other_client_id is None or other_client_secret is None:
54 # fetch IAP auth token: service accounts
55 token = get_auth_token_from_sa(client_id)
56 else:
57 # fetch IAP auth token: user account
58 # Obtain the ID token for provided Client ID with user accounts.
59 # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token
60 refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)
61 credentials = {}
62 if os.path.exists(LOCAL_KFP_CREDENTIAL):
63 with open(LOCAL_KFP_CREDENTIAL, 'r') as f:
64 credentials = json.load(f)
65 credentials[client_id] = {}
66 credentials[client_id]['other_client_id'] = other_client_id
67 credentials[client_id]['other_client_secret'] = other_client_secret
68 credentials[client_id]['refresh_token'] = refresh_token
69 #TODO: handle the case when the refresh_token expires.
70 # which only happens if the refresh_token is not used once for six months.
71 if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):
72 os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))
73 with open(LOCAL_KFP_CREDENTIAL, 'w') as f:
74 json.dump(credentials, f)
75 token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)
76 return token
77
78 def get_auth_token_from_sa(client_id):
79 """Gets auth token from default service account.
80
81 If no service account credential is found, returns None.
82 """
83 service_account_credentials = get_service_account_credentials(client_id)
84 if service_account_credentials:
85 return get_google_open_id_connect_token(service_account_credentials)
86 return None
87
88 def get_service_account_credentials(client_id):
89 # Figure out what environment we're running in and get some preliminary
90 # information about the service account.
91 bootstrap_credentials, _ = google.auth.default(
92 scopes=[IAM_SCOPE])
93 if isinstance(bootstrap_credentials,
94 google.oauth2.credentials.Credentials):
95 logging.info('Found OAuth2 credentials and skip SA auth.')
96 return None
97 elif isinstance(bootstrap_credentials,
98 google.auth.app_engine.Credentials):
99 requests_toolbelt.adapters.appengine.monkeypatch()
100
101 # For service account's using the Compute Engine metadata service,
102 # service_account_email isn't available until refresh is called.
103 bootstrap_credentials.refresh(Request())
104 signer_email = bootstrap_credentials.service_account_email
105 if isinstance(bootstrap_credentials,
106 google.auth.compute_engine.credentials.Credentials):
107 # Since the Compute Engine metadata service doesn't expose the service
108 # account key, we use the IAM signBlob API to sign instead.
109 # In order for this to work:
110 #
111 # 1. Your VM needs the https://www.googleapis.com/auth/iam scope.
112 # You can specify this specific scope when creating a VM
113 # through the API or gcloud. When using Cloud Console,
114 # you'll need to specify the "full access to all Cloud APIs"
115 # scope. A VM's scopes can only be specified at creation time.
116 #
117 # 2. The VM's default service account needs the "Service Account Actor"
118 # role. This can be found under the "Project" category in Cloud
119 # Console, or roles/iam.serviceAccountActor in gcloud.
120 signer = google.auth.iam.Signer(
121 Request(), bootstrap_credentials, signer_email)
122 else:
123 # A Signer object can sign a JWT using the service account's key.
124 signer = bootstrap_credentials.signer
125
126 # Construct OAuth 2.0 service account credentials using the signer
127 # and email acquired from the bootstrap credentials.
128 return google.oauth2.service_account.Credentials(
129 signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={
130 'target_audience': client_id
131 })
132
133 def get_google_open_id_connect_token(service_account_credentials):
134 """Get an OpenID Connect token issued by Google for the service account.
135 This function:
136 1. Generates a JWT signed with the service account's private key
137 containing a special "target_audience" claim.
138 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1
139 has a target_audience claim, that endpoint will respond with
140 an OpenID Connect token for the service account -- in other words,
141 a JWT signed by *Google*. The aud claim in this JWT will be
142 set to the value from the target_audience claim in #1.
143 For more information, see
144 https://developers.google.com/identity/protocols/OAuth2ServiceAccount .
145 The HTTP/REST example on that page describes the JWT structure and
146 demonstrates how to call the token endpoint. (The example on that page
147 shows how to get an OAuth2 access token; this code is using a
148 modified version of it to get an OpenID Connect token.)
149 """
150
151 service_account_jwt = (
152 service_account_credentials._make_authorization_grant_assertion())
153 request = google.auth.transport.requests.Request()
154 body = {
155 'assertion': service_account_jwt,
156 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,
157 }
158 token_response = google.oauth2._client._token_endpoint_request(
159 request, OAUTH_TOKEN_URI, body)
160 return token_response['id_token']
161
162 def get_refresh_token_from_client_id(client_id, client_secret):
163 """Obtain the ID token for provided Client ID with user accounts.
164 Flow: get authorization code -> exchange for refresh token -> obtain and return ID token
165 """
166 auth_code = get_auth_code(client_id)
167 return get_refresh_token_from_code(auth_code, client_id, client_secret)
168
169 def get_auth_code(client_id):
170 auth_url = "https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob"%client_id
171 print(auth_url)
172 open_new_tab(auth_url)
173 return input("Authorization code: ")
174
175 def get_refresh_token_from_code(auth_code, client_id, client_secret):
176 payload = {"code": auth_code, "client_id": client_id, "client_secret": client_secret,
177 "redirect_uri": "urn:ietf:wg:oauth:2.0:oob", "grant_type": "authorization_code"}
178 res = requests.post(OAUTH_TOKEN_URI, data=payload)
179 return (str(json.loads(res.text)[u"refresh_token"]))
180
181 def id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):
182 payload = {"client_id": client_id, "client_secret": client_secret,
183 "refresh_token": refresh_token, "grant_type": "refresh_token",
184 "audience": audience}
185 res = requests.post(OAUTH_TOKEN_URI, data=payload)
186 return (str(json.loads(res.text)[u"id_token"]))
187
[end of sdk/python/kfp/_auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/kfp/_auth.py b/sdk/python/kfp/_auth.py
--- a/sdk/python/kfp/_auth.py
+++ b/sdk/python/kfp/_auth.py
@@ -37,7 +37,8 @@
https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token
"""
args = ['gcloud', 'auth', 'print-access-token']
- return subprocess.check_output(args).rstrip()
+ # Casting to string to accommodate API server request schema.
+ return subprocess.check_output(args).rstrip().decode("utf-8")
def get_auth_token(client_id, other_client_id, other_client_secret):
"""Gets auth token from default service account or user account."""
|
{"golden_diff": "diff --git a/sdk/python/kfp/_auth.py b/sdk/python/kfp/_auth.py\n--- a/sdk/python/kfp/_auth.py\n+++ b/sdk/python/kfp/_auth.py\n@@ -37,7 +37,8 @@\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n args = ['gcloud', 'auth', 'print-access-token']\n- return subprocess.check_output(args).rstrip()\n+ # Casting to string to accommodate API server request schema.\n+ return subprocess.check_output(args).rstrip().decode(\"utf-8\")\n \n def get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n", "issue": "KFP 0.2.1 SDK client broken when inverse proxy host is used\nThis bug is introduced in #2896 \r\n\r\nUnder the hood `subprocess.check_output()` return bytes instead of string. However in `kfp_server_api/configuration.py` it was assumed to be string, which caused error like the following:\r\n\r\n```\r\n~/.local/lib/python3.5/site-packages/kfp_server_api/configuration.py in get_api_key_with_prefix(self, identifier)\r\n 203 if (self.api_key.get(identifier) and\r\n 204 self.api_key_prefix.get(identifier)):\r\n--> 205 return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501\r\n 206 elif self.api_key.get(identifier):\r\n 207 return self.api_key[identifier]\r\n\r\nTypeError: Can't convert 'bytes' object to str implicitly\r\n```\r\nThis impacts some CUJs involving TFX CLI and CAIP notebook experiences. Will send out a fix today.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport subprocess\nimport google.auth\nimport google.auth.app_engine\nimport google.auth.compute_engine.credentials\nimport google.auth.iam\nfrom google.auth.transport.requests import Request\nimport google.oauth2.credentials\nimport google.oauth2.service_account\nimport requests_toolbelt.adapters.appengine\nfrom webbrowser import open_new_tab\nimport requests\nimport json\n\nIAM_SCOPE = 'https://www.googleapis.com/auth/iam'\nOAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'\nLOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')\n\ndef get_gcp_access_token():\n \"\"\"Get and return GCP access token for the current Application Default\n Credentials. If not set, returns None. 
For more information, see\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n args = ['gcloud', 'auth', 'print-access-token']\n return subprocess.check_output(args).rstrip()\n\ndef get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n # fetch IAP auth token using the locally stored credentials.\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n if client_id in credentials:\n return id_token_from_refresh_token(credentials[client_id]['other_client_id'],\n credentials[client_id]['other_client_secret'],\n credentials[client_id]['refresh_token'],\n client_id)\n if other_client_id is None or other_client_secret is None:\n # fetch IAP auth token: service accounts\n token = get_auth_token_from_sa(client_id)\n else:\n # fetch IAP auth token: user account\n # Obtain the ID token for provided Client ID with user accounts.\n # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)\n credentials = {}\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n credentials[client_id] = {}\n credentials[client_id]['other_client_id'] = other_client_id\n credentials[client_id]['other_client_secret'] = other_client_secret\n credentials[client_id]['refresh_token'] = refresh_token\n #TODO: handle the case when the refresh_token expires.\n # which only happens if the refresh_token is not used once for six months.\n if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):\n os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))\n with open(LOCAL_KFP_CREDENTIAL, 'w') as f:\n json.dump(credentials, f)\n token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)\n return token\n\ndef get_auth_token_from_sa(client_id):\n \"\"\"Gets auth token from default service account.\n\n If no service account credential is found, returns None.\n \"\"\"\n service_account_credentials = get_service_account_credentials(client_id)\n if service_account_credentials:\n return get_google_open_id_connect_token(service_account_credentials)\n return None\n\ndef get_service_account_credentials(client_id):\n # Figure out what environment we're running in and get some preliminary\n # information about the service account.\n bootstrap_credentials, _ = google.auth.default(\n scopes=[IAM_SCOPE])\n if isinstance(bootstrap_credentials,\n google.oauth2.credentials.Credentials):\n logging.info('Found OAuth2 credentials and skip SA auth.')\n return None\n elif isinstance(bootstrap_credentials,\n google.auth.app_engine.Credentials):\n requests_toolbelt.adapters.appengine.monkeypatch()\n\n # For service account's using the Compute Engine metadata service,\n # service_account_email isn't available until refresh is called.\n bootstrap_credentials.refresh(Request())\n signer_email = bootstrap_credentials.service_account_email\n if isinstance(bootstrap_credentials,\n google.auth.compute_engine.credentials.Credentials):\n # Since the Compute Engine metadata service doesn't expose the service\n # account key, we use the IAM signBlob API to sign instead.\n # In order for this to work:\n #\n # 1. 
Your VM needs the https://www.googleapis.com/auth/iam scope.\n # You can specify this specific scope when creating a VM\n # through the API or gcloud. When using Cloud Console,\n # you'll need to specify the \"full access to all Cloud APIs\"\n # scope. A VM's scopes can only be specified at creation time.\n #\n # 2. The VM's default service account needs the \"Service Account Actor\"\n # role. This can be found under the \"Project\" category in Cloud\n # Console, or roles/iam.serviceAccountActor in gcloud.\n signer = google.auth.iam.Signer(\n Request(), bootstrap_credentials, signer_email)\n else:\n # A Signer object can sign a JWT using the service account's key.\n signer = bootstrap_credentials.signer\n\n # Construct OAuth 2.0 service account credentials using the signer\n # and email acquired from the bootstrap credentials.\n return google.oauth2.service_account.Credentials(\n signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={\n 'target_audience': client_id\n })\n\ndef get_google_open_id_connect_token(service_account_credentials):\n \"\"\"Get an OpenID Connect token issued by Google for the service account.\n This function:\n 1. Generates a JWT signed with the service account's private key\n containing a special \"target_audience\" claim.\n 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1\n has a target_audience claim, that endpoint will respond with\n an OpenID Connect token for the service account -- in other words,\n a JWT signed by *Google*. The aud claim in this JWT will be\n set to the value from the target_audience claim in #1.\n For more information, see\n https://developers.google.com/identity/protocols/OAuth2ServiceAccount .\n The HTTP/REST example on that page describes the JWT structure and\n demonstrates how to call the token endpoint. 
(The example on that page\n shows how to get an OAuth2 access token; this code is using a\n modified version of it to get an OpenID Connect token.)\n \"\"\"\n\n service_account_jwt = (\n service_account_credentials._make_authorization_grant_assertion())\n request = google.auth.transport.requests.Request()\n body = {\n 'assertion': service_account_jwt,\n 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,\n }\n token_response = google.oauth2._client._token_endpoint_request(\n request, OAUTH_TOKEN_URI, body)\n return token_response['id_token']\n\ndef get_refresh_token_from_client_id(client_id, client_secret):\n \"\"\"Obtain the ID token for provided Client ID with user accounts.\n Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n \"\"\"\n auth_code = get_auth_code(client_id)\n return get_refresh_token_from_code(auth_code, client_id, client_secret)\n\ndef get_auth_code(client_id):\n auth_url = \"https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob\"%client_id\n print(auth_url)\n open_new_tab(auth_url)\n return input(\"Authorization code: \")\n\ndef get_refresh_token_from_code(auth_code, client_id, client_secret):\n payload = {\"code\": auth_code, \"client_id\": client_id, \"client_secret\": client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\", \"grant_type\": \"authorization_code\"}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"refresh_token\"]))\n\ndef id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):\n payload = {\"client_id\": client_id, \"client_secret\": client_secret,\n \"refresh_token\": refresh_token, \"grant_type\": \"refresh_token\",\n \"audience\": audience}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"id_token\"]))\n", "path": "sdk/python/kfp/_auth.py"}]}
| 3,166 | 154 |
gh_patches_debug_25884
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-2089
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: bing engine crashes sometimes
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
Repository: https://github.com/searxng/searxng
Branch: master
Version: 2023.01.08-4e735b28
**How did you install SearXNG?**
Docker
**What happened?**
Bing searches sometimes don't work.
**How To Reproduce**
`!bi certbot` (does not work at all for me)
`!bi current events` (works ~50% of the time)
**Expected behavior**
Search works
**Screenshots & Logs**
```
2023-01-08 13:07:27,885 WARNING:searx.engines.bing: ErrorContext('searx/engines/bing.py', 89, "link = eval_xpath(result, './/h2/a')[0]", 'IndexError', None, ()) False
2023-01-08 13:07:27,885 ERROR:searx.engines.bing: exception : list index out of range
Traceback (most recent call last):
File "/usr/local/searxng/searx/search/processors/online.py", line 160, in search
search_results = self._search_basic(query, params)
File "/usr/local/searxng/searx/search/processors/online.py", line 148, in _search_basic
return self.engine.response(response)
File "/usr/local/searxng/searx/engines/bing.py", line 89, in response
link = eval_xpath(result, './/h2/a')[0]
IndexError: list index out of range
```
**Additional context**
Many searches *do* work.
Reproducible on some*, but not most, public instances running `2023.01.08-4e735b28`. At least using the specific repro searches above.
\* https://search.rhscz.eu , https://xo.wtf , out of the 7 running the current version
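A hypothetical snippet (plain lxml rather than searx's `eval_xpath` helpers) showing how a result tile without an `.//h2/a` link triggers the crash, and the obvious guard:
```python
from lxml import html

# Made-up markup: the second tile has no .//h2/a link (the traceback above
# shows such tiles exist), so blindly indexing [0] raises IndexError.
doc = html.fromstring(
    '<ol id="b_results">'
    '<li class="b_algo"><h2><a href="https://example.org">ok</a></h2></li>'
    '<li class="b_algo"><p>no link in this tile</p></li>'
    '</ol>'
)
for result in doc.xpath('//li[contains(@class, "b_algo")]'):
    links = result.xpath('.//h2/a')
    if not links:  # skip linkless tiles instead of crashing
        continue
    print(links[0].get('href'))
```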
</issue>
<code>
[start of searx/engines/bing.py]
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 # lint: pylint
3 """Bing (Web)
4
5 - https://github.com/searx/searx/issues/2019#issuecomment-648227442
6 """
7 # pylint: disable=too-many-branches
8
9 import re
10 from urllib.parse import urlencode, urlparse, parse_qs
11 from lxml import html
12 from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language
13 from searx.network import multi_requests, Request
14
15 about = {
16 "website": 'https://www.bing.com',
17 "wikidata_id": 'Q182496',
18 "official_api_documentation": 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api',
19 "use_official_api": False,
20 "require_api_key": False,
21 "results": 'HTML',
22 }
23
24 # engine dependent config
25 categories = ['general', 'web']
26 paging = True
27 time_range_support = False
28 safesearch = False
29 send_accept_language_header = True
30 supported_languages_url = 'https://www.bing.com/account/general'
31 language_aliases = {}
32
33 # search-url
34 base_url = 'https://www.bing.com/'
35
36 # initial query: https://www.bing.com/search?q=foo&search=&form=QBLH
37 inital_query = 'search?{query}&search=&form=QBLH'
38
39 # following queries: https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE
40 page_query = 'search?{query}&search=&first={offset}&FORM=PERE'
41
42
43 def _get_offset_from_pageno(pageno):
44 return (pageno - 1) * 10 + 1
45
46
47 def request(query, params):
48
49 offset = _get_offset_from_pageno(params.get('pageno', 1))
50
51 # logger.debug("params['pageno'] --> %s", params.get('pageno'))
52 # logger.debug(" offset --> %s", offset)
53
54 search_string = page_query
55 if offset == 1:
56 search_string = inital_query
57
58 if params['language'] == 'all':
59 lang = 'EN'
60 else:
61 lang = match_language(params['language'], supported_languages, language_aliases)
62
63 query = 'language:{} {}'.format(lang.split('-')[0].upper(), query)
64
65 search_path = search_string.format(query=urlencode({'q': query}), offset=offset)
66
67 if offset > 1:
68 referer = base_url + inital_query.format(query=urlencode({'q': query}))
69 params['headers']['Referer'] = referer
70 logger.debug("headers.Referer --> %s", referer)
71
72 params['url'] = base_url + search_path
73 params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
74 return params
75
76
77 def response(resp):
78 results = []
79 result_len = 0
80
81 dom = html.fromstring(resp.text)
82
83 # parse results again if nothing is found yet
84
85 url_to_resolve = []
86 url_to_resolve_index = []
87 for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, "b_algo")]')):
88
89 link = eval_xpath(result, './/h2/a')[0]
90 url = link.attrib.get('href')
91 title = extract_text(link)
92
93 # Make sure that the element is free of <a href> links and <span class='algoSlug_icon'>
94 content = eval_xpath(result, '(.//p)[1]')
95 for p in content:
96 for e in p.xpath('.//a'):
97 e.getparent().remove(e)
98 for e in p.xpath('.//span[@class="algoSlug_icon"]'):
99 e.getparent().remove(e)
100 content = extract_text(content)
101
102 # get the real URL either using the URL shown to user or following the Bing URL
103 if url.startswith('https://www.bing.com/ck/a?'):
104 url_cite = extract_text(eval_xpath(result, './/div[@class="b_attribution"]/cite'))
105 # Bing can shorten the URL either at the end or in the middle of the string
106 if (
107 url_cite.startswith('https://')
108 and '…' not in url_cite
109 and '...' not in url_cite
110 and '›' not in url_cite
111 ):
112 # no need for an additional HTTP request
113 url = url_cite
114 else:
115 # resolve the URL with an additional HTTP request
116 url_to_resolve.append(url.replace('&ntb=1', '&ntb=F'))
117 url_to_resolve_index.append(i)
118 url = None # remove the result if the HTTP Bing redirect raise an exception
119
120 # append result
121 results.append({'url': url, 'title': title, 'content': content})
122
123 # resolve all Bing redirections in parallel
124 request_list = [
125 Request.get(u, allow_redirects=False, headers=resp.search_params['headers']) for u in url_to_resolve
126 ]
127 response_list = multi_requests(request_list)
128 for i, redirect_response in enumerate(response_list):
129 if not isinstance(redirect_response, Exception):
130 results[url_to_resolve_index[i]]['url'] = redirect_response.headers['location']
131
132 # get number_of_results
133 try:
134 result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
135 if "-" in result_len_container:
136
137 # Remove the part "from-to" for paginated request ...
138 result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :]
139
140 result_len_container = re.sub('[^0-9]', '', result_len_container)
141
142 if len(result_len_container) > 0:
143 result_len = int(result_len_container)
144
145 except Exception as e: # pylint: disable=broad-except
146 logger.debug('result error :\n%s', e)
147
148 if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
149 return []
150
151 results.append({'number_of_results': result_len})
152 return results
153
154
155 # get supported languages from their site
156 def _fetch_supported_languages(resp):
157
158 lang_tags = set()
159
160 dom = html.fromstring(resp.text)
161 lang_links = eval_xpath(dom, '//div[@id="language-section"]//li')
162
163 for _li in lang_links:
164
165 href = eval_xpath(_li, './/@href')[0]
166 (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)
167 query = parse_qs(query, keep_blank_values=True)
168
169 # fmt: off
170 setlang = query.get('setlang', [None, ])[0]
171 # example: 'mn-Cyrl-MN' --> '['mn', 'Cyrl-MN']
172 lang, nation = (setlang.split('-', maxsplit=1) + [None,])[:2] # fmt: skip
173 # fmt: on
174
175 tag = lang + '-' + nation if nation else lang
176 lang_tags.add(tag)
177
178 return list(lang_tags)
179
[end of searx/engines/bing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -9,7 +9,7 @@
import re
from urllib.parse import urlencode, urlparse, parse_qs
from lxml import html
-from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language
+from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language, eval_xpath_getindex
from searx.network import multi_requests, Request
about = {
@@ -84,9 +84,12 @@
url_to_resolve = []
url_to_resolve_index = []
- for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, "b_algo")]')):
+ i = 0
+ for result in eval_xpath_list(dom, '//ol[@id="b_results"]/li[contains(@class, "b_algo")]'):
- link = eval_xpath(result, './/h2/a')[0]
+ link = eval_xpath_getindex(result, './/h2/a', 0, None)
+ if link is None:
+ continue
url = link.attrib.get('href')
title = extract_text(link)
@@ -119,6 +122,8 @@
# append result
results.append({'url': url, 'title': title, 'content': content})
+ # increment result pointer for the next iteration in this loop
+ i += 1
# resolve all Bing redirections in parallel
request_list = [
|
{"golden_diff": "diff --git a/searx/engines/bing.py b/searx/engines/bing.py\n--- a/searx/engines/bing.py\n+++ b/searx/engines/bing.py\n@@ -9,7 +9,7 @@\n import re\n from urllib.parse import urlencode, urlparse, parse_qs\n from lxml import html\n-from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language\n+from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language, eval_xpath_getindex\n from searx.network import multi_requests, Request\n \n about = {\n@@ -84,9 +84,12 @@\n \n url_to_resolve = []\n url_to_resolve_index = []\n- for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, \"b_algo\")]')):\n+ i = 0\n+ for result in eval_xpath_list(dom, '//ol[@id=\"b_results\"]/li[contains(@class, \"b_algo\")]'):\n \n- link = eval_xpath(result, './/h2/a')[0]\n+ link = eval_xpath_getindex(result, './/h2/a', 0, None)\n+ if link is None:\n+ continue\n url = link.attrib.get('href')\n title = extract_text(link)\n \n@@ -119,6 +122,8 @@\n \n # append result\n results.append({'url': url, 'title': title, 'content': content})\n+ # increment result pointer for the next iteration in this loop\n+ i += 1\n \n # resolve all Bing redirections in parallel\n request_list = [\n", "issue": "Bug: bing engine crashes sometimes\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2023.01.08-4e735b28\r\n\r\n**How did you install SearXNG?**\r\nDocker\r\n**What happened?**\r\nBing searches sometimes don't work.\r\n\r\n**How To Reproduce**\r\n`!bi certbot` (does not work at all for me) \r\n`!bi current events` (works ~50% of the time)\r\n\r\n**Expected behavior**\r\nSearch works\r\n\r\n**Screenshots & Logs**\r\n```\r\n2023-01-08 13:07:27,885 WARNING:searx.engines.bing: ErrorContext('searx/engines/bing.py', 89, \"link = eval_xpath(result, './/h2/a')[0]\", 'IndexError', None, ()) False\r\n2023-01-08 13:07:27,885 ERROR:searx.engines.bing: exception : list index out of range\r\nTraceback (most recent call last):\r\n File \"/usr/local/searxng/searx/search/processors/online.py\", line 160, in search\r\n search_results = self._search_basic(query, params)\r\n File \"/usr/local/searxng/searx/search/processors/online.py\", line 148, in _search_basic\r\n return self.engine.response(response)\r\n File \"/usr/local/searxng/searx/engines/bing.py\", line 89, in response\r\n link = eval_xpath(result, './/h2/a')[0]\r\nIndexError: list index out of range\r\n```\r\n\r\n**Additional context**\r\nMany searches *do* work.\r\n\r\nReproducible on some*, but not most, public instances running `2023.01.08-4e735b28`. 
At least using the specific repro searches above.\r\n\r\n\\* https://search.rhscz.eu , https://xo.wtf , out of the 7 running the current version\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bing (Web)\n\n- https://github.com/searx/searx/issues/2019#issuecomment-648227442\n\"\"\"\n# pylint: disable=too-many-branches\n\nimport re\nfrom urllib.parse import urlencode, urlparse, parse_qs\nfrom lxml import html\nfrom searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language\nfrom searx.network import multi_requests, Request\n\nabout = {\n \"website\": 'https://www.bing.com',\n \"wikidata_id\": 'Q182496',\n \"official_api_documentation\": 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = False\nsafesearch = False\nsend_accept_language_header = True\nsupported_languages_url = 'https://www.bing.com/account/general'\nlanguage_aliases = {}\n\n# search-url\nbase_url = 'https://www.bing.com/'\n\n# initial query: https://www.bing.com/search?q=foo&search=&form=QBLH\ninital_query = 'search?{query}&search=&form=QBLH'\n\n# following queries: https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE\npage_query = 'search?{query}&search=&first={offset}&FORM=PERE'\n\n\ndef _get_offset_from_pageno(pageno):\n return (pageno - 1) * 10 + 1\n\n\ndef request(query, params):\n\n offset = _get_offset_from_pageno(params.get('pageno', 1))\n\n # logger.debug(\"params['pageno'] --> %s\", params.get('pageno'))\n # logger.debug(\" offset --> %s\", offset)\n\n search_string = page_query\n if offset == 1:\n search_string = inital_query\n\n if params['language'] == 'all':\n lang = 'EN'\n else:\n lang = match_language(params['language'], supported_languages, language_aliases)\n\n query = 'language:{} {}'.format(lang.split('-')[0].upper(), query)\n\n search_path = search_string.format(query=urlencode({'q': query}), offset=offset)\n\n if offset > 1:\n referer = base_url + inital_query.format(query=urlencode({'q': query}))\n params['headers']['Referer'] = referer\n logger.debug(\"headers.Referer --> %s\", referer)\n\n params['url'] = base_url + search_path\n params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n return params\n\n\ndef response(resp):\n results = []\n result_len = 0\n\n dom = html.fromstring(resp.text)\n\n # parse results again if nothing is found yet\n\n url_to_resolve = []\n url_to_resolve_index = []\n for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, \"b_algo\")]')):\n\n link = eval_xpath(result, './/h2/a')[0]\n url = link.attrib.get('href')\n title = extract_text(link)\n\n # Make sure that the element is free of <a href> links and <span class='algoSlug_icon'>\n content = eval_xpath(result, '(.//p)[1]')\n for p in content:\n for e in p.xpath('.//a'):\n e.getparent().remove(e)\n for e in p.xpath('.//span[@class=\"algoSlug_icon\"]'):\n e.getparent().remove(e)\n content = extract_text(content)\n\n # get the real URL either using the URL shown to user or following the Bing URL\n if url.startswith('https://www.bing.com/ck/a?'):\n url_cite = extract_text(eval_xpath(result, './/div[@class=\"b_attribution\"]/cite'))\n # Bing can shorten the URL either at the end or in the middle of the string\n if (\n url_cite.startswith('https://')\n and '\u2026' not in url_cite\n and 
'...' not in url_cite\n and '\u203a' not in url_cite\n ):\n # no need for an additional HTTP request\n url = url_cite\n else:\n # resolve the URL with an additional HTTP request\n url_to_resolve.append(url.replace('&ntb=1', '&ntb=F'))\n url_to_resolve_index.append(i)\n url = None # remove the result if the HTTP Bing redirect raise an exception\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n # resolve all Bing redirections in parallel\n request_list = [\n Request.get(u, allow_redirects=False, headers=resp.search_params['headers']) for u in url_to_resolve\n ]\n response_list = multi_requests(request_list)\n for i, redirect_response in enumerate(response_list):\n if not isinstance(redirect_response, Exception):\n results[url_to_resolve_index[i]]['url'] = redirect_response.headers['location']\n\n # get number_of_results\n try:\n result_len_container = \"\".join(eval_xpath(dom, '//span[@class=\"sb_count\"]//text()'))\n if \"-\" in result_len_container:\n\n # Remove the part \"from-to\" for paginated request ...\n result_len_container = result_len_container[result_len_container.find(\"-\") * 2 + 2 :]\n\n result_len_container = re.sub('[^0-9]', '', result_len_container)\n\n if len(result_len_container) > 0:\n result_len = int(result_len_container)\n\n except Exception as e: # pylint: disable=broad-except\n logger.debug('result error :\\n%s', e)\n\n if result_len and _get_offset_from_pageno(resp.search_params.get(\"pageno\", 0)) > result_len:\n return []\n\n results.append({'number_of_results': result_len})\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n\n lang_tags = set()\n\n dom = html.fromstring(resp.text)\n lang_links = eval_xpath(dom, '//div[@id=\"language-section\"]//li')\n\n for _li in lang_links:\n\n href = eval_xpath(_li, './/@href')[0]\n (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)\n query = parse_qs(query, keep_blank_values=True)\n\n # fmt: off\n setlang = query.get('setlang', [None, ])[0]\n # example: 'mn-Cyrl-MN' --> '['mn', 'Cyrl-MN']\n lang, nation = (setlang.split('-', maxsplit=1) + [None,])[:2] # fmt: skip\n # fmt: on\n\n tag = lang + '-' + nation if nation else lang\n lang_tags.add(tag)\n\n return list(lang_tags)\n", "path": "searx/engines/bing.py"}]}
| 3,051 | 361 |
gh_patches_debug_39315
|
rasdani/github-patches
|
git_diff
|
pypa__pipenv-2641
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pipenv shell does not work when running Git for Windows' bash on Windows
##### Issue description
Running `pipenv shell` on Windows from Git for Windows' bash does not activate the virtualenv successfully. It appears the actual activate script with the correct values is not being used.
##### Expected result
I would successfully be in the virtualenv
##### Actual result
The `VIRTUAL_ENV` and `PATH` variables are in the `C:` notation instead of the `/c/` notation, which would have worked.
```
declare -x VIRTUAL_ENV="C:\\Users\\user\\.virtualenvs\\project-OJ2s3Ey8"
declare -x PATH="C:/Users/user/.virtualenvs/project-OJ2s3Ey8/Scripts:/c/Users/user/bin:/mingw64/bin:/usr/local/bin:/usr/bin:/usr/bin:/mingw64/bin:/usr/bin:/c/Users/andy/bin:/c/Python37/Scripts:/c/Python37" etc...
```
Sourcing the virtualenv's activate script manually does work, even though Git for Windows uses `msys` and not `cygwin`. I did not test cygwin.
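A sketch of the kind of conversion that would make the exported values usable from MSYS bash (the helper name is illustrative, not pipenv's API):
```python
# Sketch only: turn C:\... into the /c/... form that MSYS bash resolves.
def to_msys_path(win_path):
    drive, rest = win_path[0], win_path[2:]
    return '/{0}{1}'.format(drive.lower(), rest.replace('\\', '/'))

print(to_msys_path('C:\\Users\\user\\.virtualenvs\\project-OJ2s3Ey8'))
# /c/Users/user/.virtualenvs/project-OJ2s3Ey8
```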
##### Steps to replicate
1. `pipenv install sphinx`
1. `echo $$`
1. `pipenv shell`
1. `echo $$`
1. `pip freeze`
1. `echo $PATH`
This shows that the pip environment is the original Python environment, not the virtualenv. The different PID verifies that a new shell is indeed being run, but the virtualenv is not activating.
Also tried
1. `PIPENV_EMULATOR=bash pipenv shell`
Identical results
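For reference, a sketch of how a wrapper could tell Git for Windows' bash apart from a plain POSIX bash: it runs on MSYS and exports the `MSYSTEM` environment variable (typically `MINGW64`), so checking for that variable is one usable signal that drive-letter paths need the `/c/...` treatment.
```python
import os

# Sketch only: detect an MSYS (Git for Windows) bash session.
if os.environ.get('MSYSTEM'):
    print('MSYS bash detected:', os.environ['MSYSTEM'])
else:
    print('plain shell')
```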
<details><summary>$ pipenv --support</summary>
Pipenv version: `'2018.7.1'`
Pipenv location: `'c:\\python37\\lib\\site-packages\\pipenv'`
Python location: `'c:\\python37\\python.exe'`
Other Python installations in `PATH`:
- `3.7`: `C:\Python37\python.exe`
- `3.7.0`: `C:\Python37\python.exe`
- `3.7.0`: `C:\Windows\py.exe`
PEP 508 Information:
```
{'implementation_name': 'cpython',
'implementation_version': '3.7.0',
'os_name': 'nt',
'platform_machine': 'AMD64',
'platform_python_implementation': 'CPython',
'platform_release': '10',
'platform_system': 'Windows',
'platform_version': '10.0.17134',
'python_full_version': '3.7.0',
'python_version': '3.7',
'sys_platform': 'win32'}
```
</issue>
<code>
[start of pipenv/shells.py]
1 import collections
2 import contextlib
3 import os
4 import signal
5 import subprocess
6 import sys
7
8 from ._compat import get_terminal_size, Path
9 from .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR
10 from .utils import temp_environ
11 from .vendor import shellingham
12
13
14 ShellDetectionFailure = shellingham.ShellDetectionFailure
15
16
17 def _build_info(value):
18 return (os.path.splitext(os.path.basename(value))[0], value)
19
20
21 def detect_info():
22 if PIPENV_SHELL_EXPLICIT:
23 return _build_info(PIPENV_SHELL_EXPLICIT)
24 try:
25 return shellingham.detect_shell()
26 except (shellingham.ShellDetectionFailure, TypeError):
27 if PIPENV_SHELL:
28 return _build_info(PIPENV_SHELL)
29 raise ShellDetectionFailure
30
31
32 def _get_activate_script(venv):
33 """Returns the string to activate a virtualenv.
34
35 This is POSIX-only at the moment since the compat (pexpect-based) shell
36 does not work elsewhere anyway.
37 """
38 # Suffix and source command for other shells.
39 # Support for fish shell.
40 if PIPENV_SHELL and "fish" in PIPENV_SHELL:
41 suffix = ".fish"
42 command = "source"
43 # Support for csh shell.
44 elif PIPENV_SHELL and "csh" in PIPENV_SHELL:
45 suffix = ".csh"
46 command = "source"
47 else:
48 suffix = ""
49 command = "."
50 # Escape any spaces located within the virtualenv path to allow
51 # for proper activation.
52 venv_location = str(venv).replace(" ", r"\ ")
53 # The leading space can make history cleaner in some shells.
54 return " {2} {0}/bin/activate{1}".format(venv_location, suffix, command)
55
56
57 def _handover(cmd, args):
58 args = [cmd] + args
59 if os.name != "nt":
60 os.execvp(cmd, args)
61 else:
62 sys.exit(subprocess.call(args, shell=True, universal_newlines=True))
63
64
65 class Shell(object):
66 def __init__(self, cmd):
67 self.cmd = cmd
68 self.args = []
69
70 def __repr__(self):
71 return '{type}(cmd={cmd!r})'.format(
72 type=type(self).__name__,
73 cmd=self.cmd,
74 )
75
76 @contextlib.contextmanager
77 def inject_path(self, venv):
78 with temp_environ():
79 os.environ["PATH"] = "{0}{1}{2}".format(
80 os.pathsep.join(str(p.parent) for p in _iter_python(venv)),
81 os.pathsep,
82 os.environ["PATH"],
83 )
84 yield
85
86 def fork(self, venv, cwd, args):
87 # FIXME: This isn't necessarily the correct prompt. We should read the
88 # actual prompt by peeking into the activation script.
89 name = os.path.basename(venv)
90 os.environ["VIRTUAL_ENV"] = str(venv)
91 if "PROMPT" in os.environ:
92 os.environ["PROMPT"] = "({0}) {1}".format(name, os.environ["PROMPT"])
93 if "PS1" in os.environ:
94 os.environ["PS1"] = "({0}) {1}".format(name, os.environ["PS1"])
95 with self.inject_path(venv):
96 os.chdir(cwd)
97 _handover(self.cmd, self.args + list(args))
98
99 def fork_compat(self, venv, cwd, args):
100 from .vendor import pexpect
101
102 # Grab current terminal dimensions to replace the hardcoded default
103 # dimensions of pexpect.
104 dims = get_terminal_size()
105 with temp_environ():
106 c = pexpect.spawn(self.cmd, ["-i"], dimensions=(dims.lines, dims.columns))
107 c.sendline(_get_activate_script(venv))
108 if args:
109 c.sendline(" ".join(args))
110
111 # Handler for terminal resizing events
112 # Must be defined here to have the shell process in its context, since
113 # we can't pass it as an argument
114 def sigwinch_passthrough(sig, data):
115 dims = get_terminal_size()
116 c.setwinsize(dims.lines, dims.columns)
117
118 signal.signal(signal.SIGWINCH, sigwinch_passthrough)
119
120 # Interact with the new shell.
121 c.interact(escape_character=None)
122 c.close()
123 sys.exit(c.exitstatus)
124
125
126 POSSIBLE_ENV_PYTHON = [Path("bin", "python"), Path("Scripts", "python.exe")]
127
128
129 def _iter_python(venv):
130 for path in POSSIBLE_ENV_PYTHON:
131 full_path = Path(venv, path)
132 if full_path.is_file():
133 yield full_path
134
135
136 class Bash(Shell):
137 # The usual PATH injection technique does not work with Bash.
138 # https://github.com/berdario/pew/issues/58#issuecomment-102182346
139 @contextlib.contextmanager
140 def inject_path(self, venv):
141 from ._compat import NamedTemporaryFile
142
143 bashrc_path = Path.home().joinpath(".bashrc")
144 with NamedTemporaryFile("w+") as rcfile:
145 if bashrc_path.is_file():
146 base_rc_src = 'source "{0}"\n'.format(bashrc_path.as_posix())
147 rcfile.write(base_rc_src)
148
149 export_path = 'export PATH="{0}:$PATH"\n'.format(
150 ":".join(python.parent.as_posix() for python in _iter_python(venv))
151 )
152 rcfile.write(export_path)
153 rcfile.flush()
154 self.args.extend(["--rcfile", rcfile.name])
155 yield
156
157
158 class CmderEmulatedShell(Shell):
159 def fork(self, venv, cwd, args):
160 if cwd:
161 os.environ["CMDER_START"] = cwd
162 super(CmderEmulatedShell, self).fork(venv, cwd, args)
163
164
165 class CmderCommandPrompt(CmderEmulatedShell):
166 def fork(self, venv, cwd, args):
167 rc = os.path.expandvars("%CMDER_ROOT%\\vendor\\init.bat")
168 if os.path.exists(rc):
169 self.args.extend(["/k", rc])
170 super(CmderCommandPrompt, self).fork(venv, cwd, args)
171
172
173 class CmderPowershell(Shell):
174 def fork(self, venv, cwd, args):
175 rc = os.path.expandvars("%CMDER_ROOT%\\vendor\\profile.ps1")
176 if os.path.exists(rc):
177 self.args.extend(
178 [
179 "-ExecutionPolicy",
180 "Bypass",
181 "-NoLogo",
182 "-NoProfile",
183 "-NoExit",
184 "-Command",
185 "Invoke-Expression '. ''{0}'''".format(rc),
186 ]
187 )
188 super(CmderPowershell, self).fork(venv, cwd, args)
189
190
191 # Two dimensional dict. First is the shell type, second is the emulator type.
192 # Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell.
193 SHELL_LOOKUP = collections.defaultdict(
194 lambda: collections.defaultdict(lambda: Shell),
195 {
196 "bash": collections.defaultdict(lambda: Bash),
197 "cmd": collections.defaultdict(lambda: Shell, {"cmder": CmderCommandPrompt}),
198 "powershell": collections.defaultdict(
199 lambda: Shell, {"cmder": CmderPowershell}
200 ),
201 "pwsh": collections.defaultdict(lambda: Shell, {"cmder": CmderPowershell}),
202 },
203 )
204
205
206 def _detect_emulator():
207 if os.environ.get("CMDER_ROOT"):
208 return "cmder"
209 return ""
210
211
212 def choose_shell():
213 emulator = PIPENV_EMULATOR.lower() or _detect_emulator()
214 type_, command = detect_info()
215 return SHELL_LOOKUP[type_][emulator](command)
216
[end of pipenv/shells.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pipenv/shells.py b/pipenv/shells.py
--- a/pipenv/shells.py
+++ b/pipenv/shells.py
@@ -134,6 +134,9 @@
class Bash(Shell):
+ def _format_path(self, python):
+ return python.parent.as_posix()
+
# The usual PATH injection technique does not work with Bash.
# https://github.com/berdario/pew/issues/58#issuecomment-102182346
@contextlib.contextmanager
@@ -146,15 +149,25 @@
base_rc_src = 'source "{0}"\n'.format(bashrc_path.as_posix())
rcfile.write(base_rc_src)
- export_path = 'export PATH="{0}:$PATH"\n'.format(
- ":".join(python.parent.as_posix() for python in _iter_python(venv))
- )
+ export_path = 'export PATH="{0}:$PATH"\n'.format(":".join(
+ self._format_path(python)
+ for python in _iter_python(venv)
+ ))
rcfile.write(export_path)
rcfile.flush()
self.args.extend(["--rcfile", rcfile.name])
yield
+class MsysBash(Bash):
+ def _format_path(self, python):
+ s = super(MsysBash, self)._format_path(python)
+ if not python.drive:
+ return s
+ # Convert "C:/something" to "/c/something".
+ return '/{drive}{path}'.format(drive=s[0].lower(), path=s[2:])
+
+
class CmderEmulatedShell(Shell):
def fork(self, venv, cwd, args):
if cwd:
@@ -193,23 +206,37 @@
SHELL_LOOKUP = collections.defaultdict(
lambda: collections.defaultdict(lambda: Shell),
{
- "bash": collections.defaultdict(lambda: Bash),
- "cmd": collections.defaultdict(lambda: Shell, {"cmder": CmderCommandPrompt}),
+ "bash": collections.defaultdict(
+ lambda: Bash, {"msys": MsysBash},
+ ),
+ "cmd": collections.defaultdict(
+ lambda: Shell, {"cmder": CmderCommandPrompt},
+ ),
"powershell": collections.defaultdict(
- lambda: Shell, {"cmder": CmderPowershell}
+ lambda: Shell, {"cmder": CmderPowershell},
+ ),
+ "pwsh": collections.defaultdict(
+ lambda: Shell, {"cmder": CmderPowershell},
),
- "pwsh": collections.defaultdict(lambda: Shell, {"cmder": CmderPowershell}),
},
)
def _detect_emulator():
+ keys = []
if os.environ.get("CMDER_ROOT"):
- return "cmder"
- return ""
+ keys.append("cmder")
+ if os.environ.get("MSYSTEM"):
+ keys.append("msys")
+ return ",".join(keys)
def choose_shell():
emulator = PIPENV_EMULATOR.lower() or _detect_emulator()
type_, command = detect_info()
- return SHELL_LOOKUP[type_][emulator](command)
+ shell_types = SHELL_LOOKUP[type_]
+ for key in emulator.split(","):
+ key = key.strip().lower()
+ if key in shell_types:
+ return shell_types[key](command)
+ return shell_types[""](command)
|
{"golden_diff": "diff --git a/pipenv/shells.py b/pipenv/shells.py\n--- a/pipenv/shells.py\n+++ b/pipenv/shells.py\n@@ -134,6 +134,9 @@\n \n \n class Bash(Shell):\n+ def _format_path(self, python):\n+ return python.parent.as_posix()\n+\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n@@ -146,15 +149,25 @@\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n \n- export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\n- \":\".join(python.parent.as_posix() for python in _iter_python(venv))\n- )\n+ export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\":\".join(\n+ self._format_path(python)\n+ for python in _iter_python(venv)\n+ ))\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n \n \n+class MsysBash(Bash):\n+ def _format_path(self, python):\n+ s = super(MsysBash, self)._format_path(python)\n+ if not python.drive:\n+ return s\n+ # Convert \"C:/something\" to \"/c/something\".\n+ return '/{drive}{path}'.format(drive=s[0].lower(), path=s[2:])\n+\n+\n class CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n@@ -193,23 +206,37 @@\n SHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n- \"bash\": collections.defaultdict(lambda: Bash),\n- \"cmd\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderCommandPrompt}),\n+ \"bash\": collections.defaultdict(\n+ lambda: Bash, {\"msys\": MsysBash},\n+ ),\n+ \"cmd\": collections.defaultdict(\n+ lambda: Shell, {\"cmder\": CmderCommandPrompt},\n+ ),\n \"powershell\": collections.defaultdict(\n- lambda: Shell, {\"cmder\": CmderPowershell}\n+ lambda: Shell, {\"cmder\": CmderPowershell},\n+ ),\n+ \"pwsh\": collections.defaultdict(\n+ lambda: Shell, {\"cmder\": CmderPowershell},\n ),\n- \"pwsh\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderPowershell}),\n },\n )\n \n \n def _detect_emulator():\n+ keys = []\n if os.environ.get(\"CMDER_ROOT\"):\n- return \"cmder\"\n- return \"\"\n+ keys.append(\"cmder\")\n+ if os.environ.get(\"MSYSTEM\"):\n+ keys.append(\"msys\")\n+ return \",\".join(keys)\n \n \n def choose_shell():\n emulator = PIPENV_EMULATOR.lower() or _detect_emulator()\n type_, command = detect_info()\n- return SHELL_LOOKUP[type_][emulator](command)\n+ shell_types = SHELL_LOOKUP[type_]\n+ for key in emulator.split(\",\"):\n+ key = key.strip().lower()\n+ if key in shell_types:\n+ return shell_types[key](command)\n+ return shell_types[\"\"](command)\n", "issue": "pipenv shell does not work when running Git for Windows' bash on Windows\n##### Issue description\r\n\r\nRunning `pipenv shell` on windows when running bash in Git for Windows does not activate the virtual env successfully. 
It appears the actual activate script with the correct values is not being used.\r\n\r\n##### Expected result\r\n\r\nI would successfully be in the virtualenv\r\n\r\n##### Actual result\r\n\r\nThe `VIRTUAL_ENV` and `PATH` variables are in the `C:` notation instead of `/c/` notation which would have worked.\r\n\r\n```\r\ndeclare -x VIRTUAL_ENV=\"C:\\\\Users\\\\user\\\\.virtualenvs\\\\project-OJ2s3Ey8\"\r\ndeclare -x PATH=\"C:/Users/user/.virtualenvs/project-OJ2s3Ey8/Scripts:/c/Users/user/bin:/mingw64/bin:/usr/local/bin:/usr/bin:/usr/bin:/mingw64/bin:/usr/bin:/c/Users/andy/bin:/c/Python37/Scripts:/c/Python37\" etc...\r\n```\r\n\r\nSourcing the virtualenv activate variable does work, even though Git for Windows uses `msys` and not `cygwin`. Did not test cygwin\r\n\r\n##### Steps to replicate\r\n\r\n1. `pipenv install sphinx`\r\n1. `echo $$`\r\n1. `pipenv shell`\r\n1. `echo $$`\r\n1. `pip freeze`\r\n1. `echo $PATH`\r\n\r\nShows that the pip environment is the original python environment, not the virtualenv. The different pid verifies that a new shell is indeed being run, but the virtualenv is not activating\r\n\r\nAlso tried\r\n\r\n1. `PIPENV_EMULATOR=bash pipenv shell`\r\n\r\nIdentical results\r\n\r\n<details><summary>$ pipenv --support</summary>\r\n\r\nPipenv version: `'2018.7.1'`\r\n\r\nPipenv location: `'c:\\\\python37\\\\lib\\\\site-packages\\\\pipenv'`\r\n\r\nPython location: `'c:\\\\python37\\\\python.exe'`\r\n\r\nOther Python installations in `PATH`:\r\n\r\n - `3.7`: `C:\\Python37\\python.exe`\r\n\r\n - `3.7.0`: `C:\\Python37\\python.exe`\r\n - `3.7.0`: `C:\\Windows\\py.exe`\r\n\r\nPEP 508 Information:\r\n\r\n```\r\n{'implementation_name': 'cpython',\r\n 'implementation_version': '3.7.0',\r\n 'os_name': 'nt',\r\n 'platform_machine': 'AMD64',\r\n 'platform_python_implementation': 'CPython',\r\n 'platform_release': '10',\r\n 'platform_system': 'Windows',\r\n 'platform_version': '10.0.17134',\r\n 'python_full_version': '3.7.0',\r\n 'python_version': '3.7',\r\n 'sys_platform': 'win32'}\r\n```\n", "before_files": [{"content": "import collections\nimport contextlib\nimport os\nimport signal\nimport subprocess\nimport sys\n\nfrom ._compat import get_terminal_size, Path\nfrom .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR\nfrom .utils import temp_environ\nfrom .vendor import shellingham\n\n\nShellDetectionFailure = shellingham.ShellDetectionFailure\n\n\ndef _build_info(value):\n return (os.path.splitext(os.path.basename(value))[0], value)\n\n\ndef detect_info():\n if PIPENV_SHELL_EXPLICIT:\n return _build_info(PIPENV_SHELL_EXPLICIT)\n try:\n return shellingham.detect_shell()\n except (shellingham.ShellDetectionFailure, TypeError):\n if PIPENV_SHELL:\n return _build_info(PIPENV_SHELL)\n raise ShellDetectionFailure\n\n\ndef _get_activate_script(venv):\n \"\"\"Returns the string to activate a virtualenv.\n\n This is POSIX-only at the moment since the compat (pexpect-based) shell\n does not work elsewhere anyway.\n \"\"\"\n # Suffix and source command for other shells.\n # Support for fish shell.\n if PIPENV_SHELL and \"fish\" in PIPENV_SHELL:\n suffix = \".fish\"\n command = \"source\"\n # Support for csh shell.\n elif PIPENV_SHELL and \"csh\" in PIPENV_SHELL:\n suffix = \".csh\"\n command = \"source\"\n else:\n suffix = \"\"\n command = \".\"\n # Escape any spaces located within the virtualenv path to allow\n # for proper activation.\n venv_location = str(venv).replace(\" \", r\"\\ \")\n # The leading space can make history cleaner in some shells.\n return \" {2} 
{0}/bin/activate{1}\".format(venv_location, suffix, command)\n\n\ndef _handover(cmd, args):\n args = [cmd] + args\n if os.name != \"nt\":\n os.execvp(cmd, args)\n else:\n sys.exit(subprocess.call(args, shell=True, universal_newlines=True))\n\n\nclass Shell(object):\n def __init__(self, cmd):\n self.cmd = cmd\n self.args = []\n\n def __repr__(self):\n return '{type}(cmd={cmd!r})'.format(\n type=type(self).__name__,\n cmd=self.cmd,\n )\n\n @contextlib.contextmanager\n def inject_path(self, venv):\n with temp_environ():\n os.environ[\"PATH\"] = \"{0}{1}{2}\".format(\n os.pathsep.join(str(p.parent) for p in _iter_python(venv)),\n os.pathsep,\n os.environ[\"PATH\"],\n )\n yield\n\n def fork(self, venv, cwd, args):\n # FIXME: This isn't necessarily the correct prompt. We should read the\n # actual prompt by peeking into the activation script.\n name = os.path.basename(venv)\n os.environ[\"VIRTUAL_ENV\"] = str(venv)\n if \"PROMPT\" in os.environ:\n os.environ[\"PROMPT\"] = \"({0}) {1}\".format(name, os.environ[\"PROMPT\"])\n if \"PS1\" in os.environ:\n os.environ[\"PS1\"] = \"({0}) {1}\".format(name, os.environ[\"PS1\"])\n with self.inject_path(venv):\n os.chdir(cwd)\n _handover(self.cmd, self.args + list(args))\n\n def fork_compat(self, venv, cwd, args):\n from .vendor import pexpect\n\n # Grab current terminal dimensions to replace the hardcoded default\n # dimensions of pexpect.\n dims = get_terminal_size()\n with temp_environ():\n c = pexpect.spawn(self.cmd, [\"-i\"], dimensions=(dims.lines, dims.columns))\n c.sendline(_get_activate_script(venv))\n if args:\n c.sendline(\" \".join(args))\n\n # Handler for terminal resizing events\n # Must be defined here to have the shell process in its context, since\n # we can't pass it as an argument\n def sigwinch_passthrough(sig, data):\n dims = get_terminal_size()\n c.setwinsize(dims.lines, dims.columns)\n\n signal.signal(signal.SIGWINCH, sigwinch_passthrough)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n sys.exit(c.exitstatus)\n\n\nPOSSIBLE_ENV_PYTHON = [Path(\"bin\", \"python\"), Path(\"Scripts\", \"python.exe\")]\n\n\ndef _iter_python(venv):\n for path in POSSIBLE_ENV_PYTHON:\n full_path = Path(venv, path)\n if full_path.is_file():\n yield full_path\n\n\nclass Bash(Shell):\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n def inject_path(self, venv):\n from ._compat import NamedTemporaryFile\n\n bashrc_path = Path.home().joinpath(\".bashrc\")\n with NamedTemporaryFile(\"w+\") as rcfile:\n if bashrc_path.is_file():\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n\n export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\n \":\".join(python.parent.as_posix() for python in _iter_python(venv))\n )\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n\n\nclass CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n os.environ[\"CMDER_START\"] = cwd\n super(CmderEmulatedShell, self).fork(venv, cwd, args)\n\n\nclass CmderCommandPrompt(CmderEmulatedShell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\init.bat\")\n if os.path.exists(rc):\n self.args.extend([\"/k\", rc])\n super(CmderCommandPrompt, self).fork(venv, cwd, args)\n\n\nclass CmderPowershell(Shell):\n def fork(self, venv, cwd, args):\n rc = 
os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\profile.ps1\")\n if os.path.exists(rc):\n self.args.extend(\n [\n \"-ExecutionPolicy\",\n \"Bypass\",\n \"-NoLogo\",\n \"-NoProfile\",\n \"-NoExit\",\n \"-Command\",\n \"Invoke-Expression '. ''{0}'''\".format(rc),\n ]\n )\n super(CmderPowershell, self).fork(venv, cwd, args)\n\n\n# Two dimensional dict. First is the shell type, second is the emulator type.\n# Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell.\nSHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n \"bash\": collections.defaultdict(lambda: Bash),\n \"cmd\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderCommandPrompt}),\n \"powershell\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderPowershell}\n ),\n \"pwsh\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderPowershell}),\n },\n)\n\n\ndef _detect_emulator():\n if os.environ.get(\"CMDER_ROOT\"):\n return \"cmder\"\n return \"\"\n\n\ndef choose_shell():\n emulator = PIPENV_EMULATOR.lower() or _detect_emulator()\n type_, command = detect_info()\n return SHELL_LOOKUP[type_][emulator](command)\n", "path": "pipenv/shells.py"}]}
| 3,406 | 797 |
gh_patches_debug_20195
|
rasdani/github-patches
|
git_diff
|
kivy__python-for-android-1723
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Psycopg2 error after the apk installation.

I got this error while debugging the Android APK. I attribute it to Buildozer because I specified the psycopg2 library in the buildozer.spec requirements, which means that requirement is not working.
How do I fix it? I know there is a recipe for psycopg2 here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/psycopg2/__init__.py
How can I add this recipe to my project so that buildozer compiles it successfully?
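A minimal sketch of one way this can be wired up — assuming the bundled python-for-android recipe is sufficient, and using an illustrative `./p4a-recipes` path only for the case where a locally patched copy of the recipe is needed:
```ini
# buildozer.spec (fragment, illustrative)
requirements = python3,kivy,psycopg2
# Only needed if buildozer should pick up a locally modified recipe directory:
p4a.local_recipes = ./p4a-recipes
```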
</issue>
<code>
[start of pythonforandroid/recipes/psycopg2/__init__.py]
1 from pythonforandroid.recipe import PythonRecipe
2 from pythonforandroid.toolchain import current_directory, shprint
3 import sh
4
5
6 class Psycopg2Recipe(PythonRecipe):
7 """
8 Requires `libpq-dev` system dependency e.g. for `pg_config` binary.
9 """
10 version = 'latest'
11 url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'
12 depends = ['libpq']
13 site_packages_name = 'psycopg2'
14 call_hostpython_via_targetpython = False
15
16 def prebuild_arch(self, arch):
17 libdir = self.ctx.get_libs_dir(arch.arch)
18 with current_directory(self.get_build_dir(arch.arch)):
19 # pg_config_helper will return the system installed libpq, but we
20 # need the one we just cross-compiled
21 shprint(sh.sed, '-i',
22 "s|pg_config_helper.query(.libdir.)|'{}'|".format(libdir),
23 'setup.py')
24
25 def get_recipe_env(self, arch):
26 env = super(Psycopg2Recipe, self).get_recipe_env(arch)
27 env['LDFLAGS'] = "{} -L{}".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch))
28 env['EXTRA_CFLAGS'] = "--host linux-armv"
29 return env
30
31 def install_python_package(self, arch, name=None, env=None, is_dir=True):
32 '''Automate the installation of a Python package (or a cython
33 package where the cython components are pre-built).'''
34 if env is None:
35 env = self.get_recipe_env(arch)
36
37 with current_directory(self.get_build_dir(arch.arch)):
38 hostpython = sh.Command(self.ctx.hostpython)
39
40 shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',
41 _env=env)
42 shprint(hostpython, 'setup.py', 'install', '-O2',
43 '--root={}'.format(self.ctx.get_python_install_dir()),
44 '--install-lib=lib/python2.7/site-packages', _env=env)
45
46
47 recipe = Psycopg2Recipe()
48
[end of pythonforandroid/recipes/psycopg2/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pythonforandroid/recipes/psycopg2/__init__.py b/pythonforandroid/recipes/psycopg2/__init__.py
--- a/pythonforandroid/recipes/psycopg2/__init__.py
+++ b/pythonforandroid/recipes/psycopg2/__init__.py
@@ -6,6 +6,9 @@
class Psycopg2Recipe(PythonRecipe):
"""
Requires `libpq-dev` system dependency e.g. for `pg_config` binary.
+ If you get `nl_langinfo` symbol runtime error, make sure you're running on
+ `ANDROID_API` (`ndk-api`) >= 26, see:
+ https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557
"""
version = 'latest'
url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'
@@ -41,7 +44,7 @@
_env=env)
shprint(hostpython, 'setup.py', 'install', '-O2',
'--root={}'.format(self.ctx.get_python_install_dir()),
- '--install-lib=lib/python2.7/site-packages', _env=env)
+ '--install-lib=.', _env=env)
recipe = Psycopg2Recipe()
|
{"golden_diff": "diff --git a/pythonforandroid/recipes/psycopg2/__init__.py b/pythonforandroid/recipes/psycopg2/__init__.py\n--- a/pythonforandroid/recipes/psycopg2/__init__.py\n+++ b/pythonforandroid/recipes/psycopg2/__init__.py\n@@ -6,6 +6,9 @@\n class Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. for `pg_config` binary.\n+ If you get `nl_langinfo` symbol runtime error, make sure you're running on\n+ `ANDROID_API` (`ndk-api`) >= 26, see:\n+ https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n@@ -41,7 +44,7 @@\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n- '--install-lib=lib/python2.7/site-packages', _env=env)\n+ '--install-lib=.', _env=env)\n \n \n recipe = Psycopg2Recipe()\n", "issue": "Psycopg2 error after the apk installation.\n\r\n\r\nI got this error while debugging the android apk. I associate this to Buildozer because I specified into the buildozer.spec requirements the psycopg2 library. It means that is not working.\r\n\r\nHow do I fix it? I know that is a recipe for psycopg2 here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/psycopg2/__init__.py\r\n\r\nHow can I add this recipe to my project, to buildozer compile it successfully ?\n", "before_files": [{"content": "from pythonforandroid.recipe import PythonRecipe\nfrom pythonforandroid.toolchain import current_directory, shprint\nimport sh\n\n\nclass Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. for `pg_config` binary.\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n depends = ['libpq']\n site_packages_name = 'psycopg2'\n call_hostpython_via_targetpython = False\n\n def prebuild_arch(self, arch):\n libdir = self.ctx.get_libs_dir(arch.arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # pg_config_helper will return the system installed libpq, but we\n # need the one we just cross-compiled\n shprint(sh.sed, '-i',\n \"s|pg_config_helper.query(.libdir.)|'{}'|\".format(libdir),\n 'setup.py')\n\n def get_recipe_env(self, arch):\n env = super(Psycopg2Recipe, self).get_recipe_env(arch)\n env['LDFLAGS'] = \"{} -L{}\".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch))\n env['EXTRA_CFLAGS'] = \"--host linux-armv\"\n return env\n\n def install_python_package(self, arch, name=None, env=None, is_dir=True):\n '''Automate the installation of a Python package (or a cython\n package where the cython components are pre-built).'''\n if env is None:\n env = self.get_recipe_env(arch)\n\n with current_directory(self.get_build_dir(arch.arch)):\n hostpython = sh.Command(self.ctx.hostpython)\n\n shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n '--install-lib=lib/python2.7/site-packages', _env=env)\n\n\nrecipe = Psycopg2Recipe()\n", "path": "pythonforandroid/recipes/psycopg2/__init__.py"}]}
| 1,276 | 294 |
gh_patches_debug_7498
|
rasdani/github-patches
|
git_diff
|
beeware__toga-1751
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
iOS app not showing content with Rubicon ObjC 0.4.4
### Describe the bug
When running an iOS app with Toga 0.3.0dev39 and Rubicon 0.4.4, the app isn't successfully started, and the main app content is never displayed. When the app runs, you'll see the following in the log:
```
2023-01-24 12:14:13.871494+0800 Hello World[94057:4239245] Running app module: helloworld
2023-01-24 12:14:14.399629+0800 Hello World[94057:4239245] /Users/rkm/Library/Developer/CoreSimulator/Devices/84FC86CA-1D89-46EF-9349-29DDCF840143/data/Containers/Bundle/Application/7038F3CE-2212-4C60-9067-1978A80DEC8D/Hello World.app/app_packages/toga_iOS/app.py:95: DeprecationWarning: There is no current event loop
2023-01-24 12:14:14.399738+0800 Hello World[94057:4239245] self.loop = asyncio.get_event_loop()
```
This is a warning, not an error; the app will continue to run.
### Steps to reproduce
1. Run `examples/tutorial0` on iOS
2. See error
The app won't crash, but the app window will remain black.
### Expected behavior
The app should run and window content should be displayed.
### Screenshots
_No response_
### Environment
- Operating System: iOS
- Python version: All
- Software versions:
- Briefcase: All
- Toga: <=0.3.0.dev39
- Rubicon-objc 0.4.4
### Logs
N/A
### Additional context
The error has been caused because Toga-iOS 0.3.0.dev39 [included a shim](https://github.com/beeware/toga/blob/v0.3.0.dev39/src/iOS/src/toga_iOS/app.py#L112) that reproduced the implementation of `run_forever_cooperatively()`. This was done when the iOS implementation was originally created, with the expectation that this shim would be replaced with the actual call once Rubicon 0.3 was released. This didn't happen, but the old shim continued to work as it matched the implementation in Rubicon.
However, Rubicon 0.4.4 altered the implementation of `run_forever_cooperatively()`. As a result, the shim in Toga-iOS 0.3.0.dev39 no longer does everything it needs to in order to start the app.
The issue has already been [fixed in the main branch](https://github.com/beeware/toga/blob/main/iOS/src/toga_iOS/app.py#L117) - the shim has been replaced with the actual call to `run_forever_cooperatively()`.
Two workarounds exist:
1. Use the `main` branch of Toga in your app.
2. Block the use of rubicon-objc 0.4.4. If you add `rubicon-objc!=0.4.4` to the requires list in your iOS configuration, this will prevent toga-iOS from using the new version.
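A minimal sketch of workaround 2 for a Briefcase-packaged app — the app name `helloworld` is an illustrative assumption; only the `rubicon-objc!=0.4.4` pin comes from the workaround above:
```toml
# pyproject.toml (fragment, illustrative)
[tool.briefcase.app.helloworld.iOS]
requires = [
    # ... existing iOS requirements ...
    "rubicon-objc!=0.4.4",
]
```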
</issue>
<code>
[start of iOS/setup.py]
1 #!/usr/bin/env python
2 import re
3
4 from setuptools import setup
5
6 # Version handline needs to be programatic because
7 # we can't import toga_iOS to compute the version;
8 # and to support versioned subpackage dependencies
9 with open("src/toga_iOS/__init__.py", encoding="utf8") as version_file:
10 version_match = re.search(
11 r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M
12 )
13 if version_match:
14 version = version_match.group(1)
15 else:
16 raise RuntimeError("Unable to find version string.")
17
18 setup(
19 version=version,
20 install_requires=[
21 "rubicon-objc>=0.4.4",
22 f"toga-core=={version}",
23 ],
24 )
25
[end of iOS/setup.py]
[start of cocoa/setup.py]
1 #!/usr/bin/env python
2 import re
3
4 from setuptools import setup
5
6 # Version handline needs to be programatic because
7 # we can't import toga_cocoa to compute the version;
8 # and to support versioned subpackage dependencies
9 with open("src/toga_cocoa/__init__.py", encoding="utf8") as version_file:
10 version_match = re.search(
11 r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M
12 )
13 if version_match:
14 version = version_match.group(1)
15 else:
16 raise RuntimeError("Unable to find version string.")
17
18 setup(
19 version=version,
20 install_requires=[
21 "rubicon-objc>=0.4.4",
22 f"toga-core=={version}",
23 ],
24 )
25
[end of cocoa/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cocoa/setup.py b/cocoa/setup.py
--- a/cocoa/setup.py
+++ b/cocoa/setup.py
@@ -18,7 +18,7 @@
setup(
version=version,
install_requires=[
- "rubicon-objc>=0.4.4",
- f"toga-core=={version}",
+ "rubicon-objc >= 0.4.5rc1, < 0.5.0",
+ f"toga-core == {version}",
],
)
diff --git a/iOS/setup.py b/iOS/setup.py
--- a/iOS/setup.py
+++ b/iOS/setup.py
@@ -18,7 +18,7 @@
setup(
version=version,
install_requires=[
- "rubicon-objc>=0.4.4",
- f"toga-core=={version}",
+ "rubicon-objc >= 0.4.5rc1, < 0.5.0",
+ f"toga-core == {version}",
],
)
|
{"golden_diff": "diff --git a/cocoa/setup.py b/cocoa/setup.py\n--- a/cocoa/setup.py\n+++ b/cocoa/setup.py\n@@ -18,7 +18,7 @@\n setup(\n version=version,\n install_requires=[\n- \"rubicon-objc>=0.4.4\",\n- f\"toga-core=={version}\",\n+ \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n+ f\"toga-core == {version}\",\n ],\n )\ndiff --git a/iOS/setup.py b/iOS/setup.py\n--- a/iOS/setup.py\n+++ b/iOS/setup.py\n@@ -18,7 +18,7 @@\n setup(\n version=version,\n install_requires=[\n- \"rubicon-objc>=0.4.4\",\n- f\"toga-core=={version}\",\n+ \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n+ f\"toga-core == {version}\",\n ],\n )\n", "issue": "iOS app not showing content with Rubicon ObjC 0.4.4\n### Describe the bug\r\n\r\nWhen running an iOS app with Toga 0.3.0dev39 and Rubicon 0.4.4, the app isn't successfully started, and the main app content is never displayed. When the app runs, you'll see the following in the log:\r\n \r\n```\r\n2023-01-24 12:14:13.871494+0800 Hello World[94057:4239245] Running app module: helloworld\r\n2023-01-24 12:14:14.399629+0800 Hello World[94057:4239245] /Users/rkm/Library/Developer/CoreSimulator/Devices/84FC86CA-1D89-46EF-9349-29DDCF840143/data/Containers/Bundle/Application/7038F3CE-2212-4C60-9067-1978A80DEC8D/Hello World.app/app_packages/toga_iOS/app.py:95: DeprecationWarning: There is no current event loop\r\n2023-01-24 12:14:14.399738+0800 Hello World[94057:4239245] self.loop = asyncio.get_event_loop()\r\n```\r\n\r\nThis is a warning, not an error; the app will continue to run.\r\n\r\n### Steps to reproduce\r\n\r\n1. Run `examples/tutorial0` on iOS\r\n2. See error\r\n\r\nThe app won't crash; but the app window will remain black.\r\n\r\n### Expected behavior\r\n\r\nThe app should run and window content should be displayed.\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n- Operating System: iOS\r\n- Python version: All\r\n- Software versions:\r\n - Briefcase: All\r\n - Toga: <=0.3.0.dev39\r\n - Rubicon-objc 0.4.4\r\n\r\n\r\n### Logs\r\n\r\nN/A\r\n\r\n### Additional context\r\n\r\nThe error has been caused because Toga-iOS 0.3.0.dev39 [included a shim](https://github.com/beeware/toga/blob/v0.3.0.dev39/src/iOS/src/toga_iOS/app.py#L112) that reproduced the implementation of `run_forever_cooperatively()`. This was done when the iOS implementation was originally created, with the expectation that this shim would be replaced with the actual call once Rubicon 0.3 was released. This didn't happen, but the old shim continued to work as it matched the implementation in Rubicon.\r\n\r\nHowever, Rubicon 0.4.4 altered the implementation of `run_forever_cooperatively()`. As a result, the shim in Toga-iOS 0.3.0.dev39 no longer does everything it needs to in order to start the app. \r\n\r\nThe issue has already been [fixed in the main branch](https://github.com/beeware/toga/blob/main/iOS/src/toga_iOS/app.py#L117) - the shim has been replaced with the actual call to `run_forever_cooperatively()`.\r\n\r\nTwo workarounds exist:\r\n1. Use the `main` branch of Toga in your app.\r\n2. Block the use of rubicon-objc 0.4.4. 
If you add `rubicon-objc!=0.4.4` to the requires list in your iOS configuration, this will prevent toga-iOS from using the new version.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_iOS to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_iOS/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc>=0.4.4\",\n f\"toga-core=={version}\",\n ],\n)\n", "path": "iOS/setup.py"}, {"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_cocoa to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_cocoa/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc>=0.4.4\",\n f\"toga-core=={version}\",\n ],\n)\n", "path": "cocoa/setup.py"}]}
| 1,780 | 230 |
gh_patches_debug_10540
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1670
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove `pyhf.simplemodels.hepdata_like` from public API for v0.7.0
### Summary
[`pyhf.simplemodels.hepdata_like`](https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.simplemodels.hepdata_like.html#pyhf.simplemodels.hepdata_like) has been scheduled for removal from the public API in release `v0.7.0`. As `v0.7.0` will be the next release (and hopefully soon) this should get removed now.
### Additional Information
`pyhf.simplemodels.hepdata_like` has been deprecated since `v0.6.2`.
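For anyone migrating ahead of the removal, a minimal before/after sketch — the argument values are taken from the docstring example in the module below, and the keyword names match the two signatures shown there:
```python
import pyhf

# Deprecated (scheduled for removal in v0.7.0):
# model = pyhf.simplemodels.hepdata_like(
#     signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
# )

# Drop-in replacement:
model = pyhf.simplemodels.uncorrelated_background(
    signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
)
```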
### Code of Conduct
- [X] I agree to follow the Code of Conduct
</issue>
<code>
[start of src/pyhf/simplemodels.py]
1 from warnings import warn
2
3 from pyhf import Model
4
5 __all__ = ["correlated_background", "uncorrelated_background"]
6
7
8 def __dir__():
9 return __all__
10
11
12 def correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None):
13 r"""
14 Construct a simple single channel :class:`~pyhf.pdf.Model` with a
15 :class:`~pyhf.modifiers.histosys` modifier representing a background
16 with a fully correlated bin-by-bin uncertainty.
17
18 Args:
19 signal (:obj:`list`): The data in the signal sample.
20 bkg (:obj:`list`): The data in the background sample.
21 bkg_up (:obj:`list`): The background sample under an upward variation
22 corresponding to :math:`\alpha=+1`.
23 bkg_down (:obj:`list`): The background sample under a downward variation
24 corresponding to :math:`\alpha=-1`.
25 batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.
26
27 Returns:
28 ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.
29
30 Example:
31 >>> import pyhf
32 >>> pyhf.set_backend("numpy")
33 >>> model = pyhf.simplemodels.correlated_background(
34 ... signal=[12.0, 11.0],
35 ... bkg=[50.0, 52.0],
36 ... bkg_up=[45.0, 57.0],
37 ... bkg_down=[55.0, 47.0],
38 ... )
39 >>> model.schema
40 'model.json'
41 >>> model.config.channels
42 ['single_channel']
43 >>> model.config.samples
44 ['background', 'signal']
45 >>> model.config.parameters
46 ['correlated_bkg_uncertainty', 'mu']
47 >>> model.expected_data(model.config.suggested_init())
48 array([62., 63., 0.])
49
50 """
51 spec = {
52 "channels": [
53 {
54 "name": "single_channel",
55 "samples": [
56 {
57 "name": "signal",
58 "data": signal,
59 "modifiers": [
60 {"name": "mu", "type": "normfactor", "data": None}
61 ],
62 },
63 {
64 "name": "background",
65 "data": bkg,
66 "modifiers": [
67 {
68 "name": "correlated_bkg_uncertainty",
69 "type": "histosys",
70 "data": {"hi_data": bkg_up, "lo_data": bkg_down},
71 }
72 ],
73 },
74 ],
75 }
76 ]
77 }
78 return Model(spec, batch_size=batch_size)
79
80
81 def uncorrelated_background(signal, bkg, bkg_uncertainty, batch_size=None):
82 """
83 Construct a simple single channel :class:`~pyhf.pdf.Model` with a
84 :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated
85 background uncertainty.
86
87 Example:
88 >>> import pyhf
89 >>> pyhf.set_backend("numpy")
90 >>> model = pyhf.simplemodels.uncorrelated_background(
91 ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]
92 ... )
93 >>> model.schema
94 'model.json'
95 >>> model.config.channels
96 ['singlechannel']
97 >>> model.config.samples
98 ['background', 'signal']
99 >>> model.config.parameters
100 ['mu', 'uncorr_bkguncrt']
101 >>> model.expected_data(model.config.suggested_init())
102 array([ 62. , 63. , 277.77777778, 55.18367347])
103
104 Args:
105 signal (:obj:`list`): The data in the signal sample
106 bkg (:obj:`list`): The data in the background sample
107 bkg_uncertainty (:obj:`list`): The statistical uncertainty on the background sample counts
108 batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute
109
110 Returns:
111 ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema
112
113 """
114 spec = {
115 'channels': [
116 {
117 'name': 'singlechannel',
118 'samples': [
119 {
120 'name': 'signal',
121 'data': signal,
122 'modifiers': [
123 {'name': 'mu', 'type': 'normfactor', 'data': None}
124 ],
125 },
126 {
127 'name': 'background',
128 'data': bkg,
129 'modifiers': [
130 {
131 'name': 'uncorr_bkguncrt',
132 'type': 'shapesys',
133 'data': bkg_uncertainty,
134 }
135 ],
136 },
137 ],
138 }
139 ]
140 }
141 return Model(spec, batch_size=batch_size)
142
143
144 # Deprecated APIs
145 def _deprecated_api_warning(
146 deprecated_api, new_api, deprecated_release, remove_release
147 ):
148 warn(
149 f"{deprecated_api} is deprecated in favor of {new_api} as of pyhf v{deprecated_release} and will be removed in release {remove_release}."
150 + f" Please use {new_api}.",
151 DeprecationWarning,
152 stacklevel=3, # Raise to user level
153 )
154
155
156 def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):
157 """
158 .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`
159 instead.
160
161 .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in
162 ``pyhf`` ``v0.7.0``.
163 """
164 _deprecated_api_warning(
165 "pyhf.simplemodels.hepdata_like",
166 "pyhf.simplemodels.uncorrelated_background",
167 "0.6.2",
168 "0.7.0",
169 )
170 return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)
171
[end of src/pyhf/simplemodels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py
--- a/src/pyhf/simplemodels.py
+++ b/src/pyhf/simplemodels.py
@@ -151,20 +151,3 @@
DeprecationWarning,
stacklevel=3, # Raise to user level
)
-
-
-def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):
- """
- .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`
- instead.
-
- .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in
- ``pyhf`` ``v0.7.0``.
- """
- _deprecated_api_warning(
- "pyhf.simplemodels.hepdata_like",
- "pyhf.simplemodels.uncorrelated_background",
- "0.6.2",
- "0.7.0",
- )
- return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)
|
{"golden_diff": "diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py\n--- a/src/pyhf/simplemodels.py\n+++ b/src/pyhf/simplemodels.py\n@@ -151,20 +151,3 @@\n DeprecationWarning,\n stacklevel=3, # Raise to user level\n )\n-\n-\n-def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n- \"\"\"\n- .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`\n- instead.\n-\n- .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in\n- ``pyhf`` ``v0.7.0``.\n- \"\"\"\n- _deprecated_api_warning(\n- \"pyhf.simplemodels.hepdata_like\",\n- \"pyhf.simplemodels.uncorrelated_background\",\n- \"0.6.2\",\n- \"0.7.0\",\n- )\n- return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)\n", "issue": "Remove `pyhf.simplemodels.hepdata_like` from public API for v0.7.0\n### Summary\n\n[`pyhf.simplemodels.hepdata_like`](https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.simplemodels.hepdata_like.html#pyhf.simplemodels.hepdata_like) has been scheduled for removal from the public API in release `v0.7.0`. As `v0.7.0` will be the next release (and hopefully soon) this should get removed now.\n\n### Additional Information\n\n`pyhf.simplemodels.hepdata_like` has been deprecated since `v0.6.2`.\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "from warnings import warn\n\nfrom pyhf import Model\n\n__all__ = [\"correlated_background\", \"uncorrelated_background\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None):\n r\"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.histosys` modifier representing a background\n with a fully correlated bin-by-bin uncertainty.\n\n Args:\n signal (:obj:`list`): The data in the signal sample.\n bkg (:obj:`list`): The data in the background sample.\n bkg_up (:obj:`list`): The background sample under an upward variation\n corresponding to :math:`\\alpha=+1`.\n bkg_down (:obj:`list`): The background sample under a downward variation\n corresponding to :math:`\\alpha=-1`.\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.correlated_background(\n ... signal=[12.0, 11.0],\n ... bkg=[50.0, 52.0],\n ... bkg_up=[45.0, 57.0],\n ... bkg_down=[55.0, 47.0],\n ... 
)\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['single_channel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['correlated_bkg_uncertainty', 'mu']\n >>> model.expected_data(model.config.suggested_init())\n array([62., 63., 0.])\n\n \"\"\"\n spec = {\n \"channels\": [\n {\n \"name\": \"single_channel\",\n \"samples\": [\n {\n \"name\": \"signal\",\n \"data\": signal,\n \"modifiers\": [\n {\"name\": \"mu\", \"type\": \"normfactor\", \"data\": None}\n ],\n },\n {\n \"name\": \"background\",\n \"data\": bkg,\n \"modifiers\": [\n {\n \"name\": \"correlated_bkg_uncertainty\",\n \"type\": \"histosys\",\n \"data\": {\"hi_data\": bkg_up, \"lo_data\": bkg_down},\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n\n\ndef uncorrelated_background(signal, bkg, bkg_uncertainty, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n background uncertainty.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.uncorrelated_background(\n ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['singlechannel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['mu', 'uncorr_bkguncrt']\n >>> model.expected_data(model.config.suggested_init())\n array([ 62. , 63. , 277.77777778, 55.18367347])\n\n Args:\n signal (:obj:`list`): The data in the signal sample\n bkg (:obj:`list`): The data in the background sample\n bkg_uncertainty (:obj:`list`): The statistical uncertainty on the background sample counts\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n\n \"\"\"\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncertainty,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n\n\n# Deprecated APIs\ndef _deprecated_api_warning(\n deprecated_api, new_api, deprecated_release, remove_release\n):\n warn(\n f\"{deprecated_api} is deprecated in favor of {new_api} as of pyhf v{deprecated_release} and will be removed in release {remove_release}.\"\n + f\" Please use {new_api}.\",\n DeprecationWarning,\n stacklevel=3, # Raise to user level\n )\n\n\ndef hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n \"\"\"\n .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`\n instead.\n\n .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in\n ``pyhf`` ``v0.7.0``.\n \"\"\"\n _deprecated_api_warning(\n \"pyhf.simplemodels.hepdata_like\",\n \"pyhf.simplemodels.uncorrelated_background\",\n \"0.6.2\",\n \"0.7.0\",\n )\n return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)\n", "path": "src/pyhf/simplemodels.py"}]}
| 2,440 | 237 |
gh_patches_debug_5408
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2982
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use official clamav docker image for Mailu clamav image
With https://github.com/Cisco-Talos/clamav now having official Docker support (https://hub.docker.com/r/clamav/clamav), it might be worth considering referring to, or preferring, that container in the future.
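A hypothetical compose override illustrating the suggestion — the service name, volume path, and image tag are assumptions for the sketch, not taken from Mailu's actual templates:
```yaml
# docker-compose.override.yml (illustrative)
services:
  antivirus:
    image: clamav/clamav:latest
    volumes:
      - ./filter/clamav:/var/lib/clamav  # the official image keeps its signature database here
```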
</issue>
<code>
[start of optional/clamav/start.py]
1 #!/usr/bin/env python3
2
3 import os
4 import logging as logger
5 import sys
6 from socrate import system
7
8 system.set_env(log_filters=r'SelfCheck: Database status OK\.$')
9
10 # Bootstrap the database if clamav is running for the first time
11 if not os.path.isfile("/data/main.cvd"):
12 logger.info("Starting primary virus DB download")
13 os.system("freshclam")
14
15 # Run the update daemon
16 logger.info("Starting the update daemon")
17 os.system("freshclam -d -c 6")
18
19 # Run clamav
20 logger.info("Starting clamav")
21 os.system("clamd")
22
[end of optional/clamav/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optional/clamav/start.py b/optional/clamav/start.py
deleted file mode 100755
--- a/optional/clamav/start.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import logging as logger
-import sys
-from socrate import system
-
-system.set_env(log_filters=r'SelfCheck: Database status OK\.$')
-
-# Bootstrap the database if clamav is running for the first time
-if not os.path.isfile("/data/main.cvd"):
- logger.info("Starting primary virus DB download")
- os.system("freshclam")
-
-# Run the update daemon
-logger.info("Starting the update daemon")
-os.system("freshclam -d -c 6")
-
-# Run clamav
-logger.info("Starting clamav")
-os.system("clamd")
|
{"golden_diff": "diff --git a/optional/clamav/start.py b/optional/clamav/start.py\ndeleted file mode 100755\n--- a/optional/clamav/start.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-#!/usr/bin/env python3\n-\n-import os\n-import logging as logger\n-import sys\n-from socrate import system\n-\n-system.set_env(log_filters=r'SelfCheck: Database status OK\\.$')\n-\n-# Bootstrap the database if clamav is running for the first time\n-if not os.path.isfile(\"/data/main.cvd\"):\n- logger.info(\"Starting primary virus DB download\")\n- os.system(\"freshclam\")\n-\n-# Run the update daemon\n-logger.info(\"Starting the update daemon\")\n-os.system(\"freshclam -d -c 6\")\n-\n-# Run clamav\n-logger.info(\"Starting clamav\")\n-os.system(\"clamd\")\n", "issue": "Use official clamav docker image for Mailu clamav image\nWith https://github.com/Cisco-Talos/clamav having official docker support https://hub.docker.com/r/clamav/clamav it might be worth considering referring or preferring that container in the future?\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport logging as logger\nimport sys\nfrom socrate import system\n\nsystem.set_env(log_filters=r'SelfCheck: Database status OK\\.$')\n\n# Bootstrap the database if clamav is running for the first time\nif not os.path.isfile(\"/data/main.cvd\"):\n logger.info(\"Starting primary virus DB download\")\n os.system(\"freshclam\")\n\n# Run the update daemon\nlogger.info(\"Starting the update daemon\")\nos.system(\"freshclam -d -c 6\")\n\n# Run clamav\nlogger.info(\"Starting clamav\")\nos.system(\"clamd\")\n", "path": "optional/clamav/start.py"}]}
| 763 | 199 |
gh_patches_debug_7255
|
rasdani/github-patches
|
git_diff
|
kubeflow__pipelines-6703
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Upgrade cloudpickle to > 2.0.0
Hello,
One of the features of pipelines is Step Caching (https://www.kubeflow.org/docs/components/pipelines/caching/) to avoid running the costly computations again and again.
The key for caching is:
```
message CacheKey {
map<string, ArtifactNameList> inputArtifactNames = 1;
map<string, Value> inputParameters = 2;
map<string, RuntimeArtifact> outputArtifactsSpec = 3;
map<string, string> outputParametersSpec=4;
ContainerSpec containerSpec=5;
}
```
When using the option `use_code_pickling` from https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/kfp/components/_python_op.py#L516
the pickle of the function gets embedded in the `ContainerSpec` (and hence becomes part of the key).
So far, all good.
However, the pickle is generated with `cloudpickle`, which leads to non-deterministic pickles every time you run the pipeline. As you can imagine, this makes the caching feature useless because it will invalidate the cache every time it is run.
This non-determinism was removed from `cloudpickle` with the following commit:
https://github.com/cloudpipe/cloudpickle/pull/428 and released as part of the `2.0.0` release:
https://github.com/cloudpipe/cloudpickle/releases/tag/v2.0.0
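A hedged illustration (not KFP code) of what the report above implies: the component function's pickle bytes feed into the `ContainerSpec`, so one can compare a digest of `cloudpickle.dumps(...)` across two pipeline compilations — per the report it can change between runs with cloudpickle < 2.0.0 and should be stable with >= 2.0.0:
```python
import hashlib

import cloudpickle


def my_component(x: float) -> float:
    # stand-in for a pickled pipeline component
    return x * 2.0


# Digest of the serialized function; if this changes between compilations,
# the ContainerSpec (and therefore the cache key) changes with it.
digest = hashlib.sha256(cloudpickle.dumps(my_component)).hexdigest()
print(digest)
```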
Currently, `kfp` bounds cloudpickle to less than v2.0.0 here:
https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/setup.py#L37
Would it be possible to make a new `kfp` release with upgraded cloudpickle? Without this cloudpickle version, step caching is currently impossible to use (or at the mercy of dictionary insertion order of cloudpickle).
Thanks!
</issue>
<code>
[start of sdk/python/setup.py]
1 # Copyright 2018 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import re
17
18 from setuptools import setup
19
20 NAME = 'kfp'
21 #VERSION = .... Change the version in kfp/__init__.py
22
23 # NOTICE, after any updates to the following, ./requirements.in should be updated
24 # accordingly.
25 REQUIRES = [
26 'absl-py>=0.9,<=0.11',
27 'PyYAML>=5.3,<6',
28 # `Blob.from_string` was introduced in google-cloud-storage 1.20.0
29 # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200
30 'google-cloud-storage>=1.20.0,<2',
31 'kubernetes>=8.0.0,<19',
32 # google-api-python-client v2 doesn't work for private dicovery by default:
33 # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235
34 'google-api-python-client>=1.7.8,<2',
35 'google-auth>=1.6.1,<2',
36 'requests-toolbelt>=0.8.0,<1',
37 'cloudpickle>=1.3.0,<2',
38 # Update the upper version whenever a new major version of the
39 # kfp-server-api package is released.
40 # Update the lower version when kfp sdk depends on new apis/fields in
41 # kfp-server-api.
42 # Note, please also update ./requirements.in
43 'kfp-server-api>=1.1.2,<2.0.0',
44 'jsonschema>=3.0.1,<4',
45 'tabulate>=0.8.6,<1',
46 'click>=7.1.2,<9',
47 'Deprecated>=1.2.7,<2',
48 'strip-hints>=0.1.8,<1',
49 'docstring-parser>=0.7.3,<1',
50 'kfp-pipeline-spec>=0.1.10,<0.2.0',
51 'fire>=0.3.1,<1',
52 'protobuf>=3.13.0,<4',
53 'uritemplate>=3.0.1,<4',
54 'pydantic>=1.8.2,<2',
55 # Standard library backports
56 'dataclasses;python_version<"3.7"',
57 'typing-extensions>=3.7.4,<4;python_version<"3.9"',
58 ]
59
60 TESTS_REQUIRE = [
61 'frozendict',
62 ]
63
64
65 def find_version(*file_path_parts):
66 here = os.path.abspath(os.path.dirname(__file__))
67 with open(os.path.join(here, *file_path_parts), 'r') as fp:
68 version_file_text = fp.read()
69
70 version_match = re.search(
71 r"^__version__ = ['\"]([^'\"]*)['\"]",
72 version_file_text,
73 re.M,
74 )
75 if version_match:
76 return version_match.group(1)
77
78 raise RuntimeError('Unable to find version string.')
79
80
81 setup(
82 name=NAME,
83 version=find_version('kfp', '__init__.py'),
84 description='KubeFlow Pipelines SDK',
85 author='The Kubeflow Authors',
86 url="https://github.com/kubeflow/pipelines",
87 project_urls={
88 "Documentation": "https://kubeflow-pipelines.readthedocs.io/en/stable/",
89 "Bug Tracker": "https://github.com/kubeflow/pipelines/issues",
90 "Source": "https://github.com/kubeflow/pipelines/tree/master/sdk",
91 "Changelog": "https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md",
92 },
93 install_requires=REQUIRES,
94 tests_require=TESTS_REQUIRE,
95 packages=[
96 'kfp',
97 'kfp.auth',
98 'kfp.cli',
99 'kfp.cli.diagnose_me',
100 'kfp.compiler',
101 'kfp.components',
102 'kfp.components.structures',
103 'kfp.containers',
104 'kfp.dsl',
105 'kfp.dsl.extensions',
106 'kfp.notebook',
107 'kfp.v2',
108 'kfp.v2.compiler',
109 'kfp.v2.components',
110 'kfp.v2.components.types',
111 'kfp.v2.components.experimental',
112 'kfp.v2.dsl',
113 'kfp.v2.google.client',
114 'kfp.v2.google.experimental',
115 ],
116 classifiers=[
117 'Intended Audience :: Developers',
118 'Intended Audience :: Education',
119 'Intended Audience :: Science/Research',
120 'License :: OSI Approved :: Apache Software License',
121 'Programming Language :: Python :: 3',
122 'Programming Language :: Python :: 3.6',
123 'Programming Language :: Python :: 3.7',
124 'Programming Language :: Python :: 3.8',
125 'Programming Language :: Python :: 3.9',
126 'Topic :: Scientific/Engineering',
127 'Topic :: Scientific/Engineering :: Artificial Intelligence',
128 'Topic :: Software Development',
129 'Topic :: Software Development :: Libraries',
130 'Topic :: Software Development :: Libraries :: Python Modules',
131 ],
132 python_requires='>=3.6.1',
133 include_package_data=True,
134 entry_points={
135 'console_scripts': [
136 'dsl-compile = kfp.compiler.main:main',
137 'dsl-compile-v2 = kfp.v2.compiler.main:main',
138 'kfp=kfp.__main__:main'
139 ]
140 })
141
[end of sdk/python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sdk/python/setup.py b/sdk/python/setup.py
--- a/sdk/python/setup.py
+++ b/sdk/python/setup.py
@@ -34,7 +34,7 @@
'google-api-python-client>=1.7.8,<2',
'google-auth>=1.6.1,<2',
'requests-toolbelt>=0.8.0,<1',
- 'cloudpickle>=1.3.0,<2',
+ 'cloudpickle>=2.0.0,<3',
# Update the upper version whenever a new major version of the
# kfp-server-api package is released.
# Update the lower version when kfp sdk depends on new apis/fields in
|
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -34,7 +34,7 @@\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n- 'cloudpickle>=1.3.0,<2',\n+ 'cloudpickle>=2.0.0,<3',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n", "issue": "Upgrade cloudpickle to > 2.0.0\nHello,\r\n\r\nOne of the features of pipelines is Step Caching (https://www.kubeflow.org/docs/components/pipelines/caching/) to avoid running the costly computations again and again. \r\n\r\nThe key for caching is:\r\n\r\n```\r\nmessage CacheKey {\r\n map<string, ArtifactNameList> inputArtifactNames = 1;\r\n map<string, Value> inputParameters = 2;\r\n map<string, RuntimeArtifact> outputArtifactsSpec = 3;\r\n map<string, string> outputParametersSpec=4;\r\n ContainerSpec containerSpec=5;\r\n}\r\n```\r\n\r\nWhen using the option `use_code_pickling` from https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/kfp/components/_python_op.py#L516\r\n\r\nthe pickle of the function gets embedded in the `ContainerSpec` (and hence becomes part of the key). \r\n\r\nSo far, all good. \r\n\r\nHowever, the pickle is generated with `cloudpickle` which leads to non deterministic pickles every time you run the pipeline. As you can imagine, this makes caching feature useless because it will invalidate the cache every time it is run.\r\n\r\nThis non determinism was removed from `cloudpickle` with the following commit:\r\nhttps://github.com/cloudpipe/cloudpickle/pull/428 and released as part of `2.0.0` release:\r\nhttps://github.com/cloudpipe/cloudpickle/releases/tag/v2.0.0\r\n\r\nCurrently, `kfp` has bounded cloudpickle to less than v2.0.0 here:\r\nhttps://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/setup.py#L37\r\n\r\n\r\nWould it be possible to make a new `kfp` release with upgraded cloudpickle? Without this cloudpickle version, step caching is currently impossible to use (or at the mercy of dictionary insertion order of cloudpickle). \r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # google-api-python-client v2 doesn't work for private dicovery by default:\n # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=1.3.0,<2',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.10,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nTESTS_REQUIRE = [\n 'frozendict',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\": \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\": \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\": \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\": \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.components.experimental',\n 'kfp.v2.dsl',\n 'kfp.v2.google.client',\n 'kfp.v2.google.experimental',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n 
include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]}
| 2,620 | 152 |
gh_patches_debug_32883
|
rasdani/github-patches
|
git_diff
|
marshmallow-code__webargs-555
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FalconParser should ideally support falcon's native media decoding
Falcon has a native media handling mechanism which can decode an incoming request body based on the `Content-Type` header, exposing the resulting dictionary of key-value pairs as a cached property, `req.media`. I've written my own FalconParser subclass that (very naively) uses this, but it seems like something that might be worth supporting out of the box.
```python
def parse_json(self, req, name, field):
"""
Pull a JSON body value from the request.
uses falcon's native req.media
"""
json_data = self._cache.get("json_data")
if json_data is None:
self._cache["json_data"] = json_data = req.media
return core.get_value(json_data, name, field, allow_many_nested=True)
```
This could probably be improved upon; since the `media` property is already cached on the request object, we could just access `req.media` directly without caching on the parser. (Not sure if this impacts other things that might use that cache, though; I haven't dug deep enough to fully understand that implication.) Also, since `media` was added in 1.3, if webargs still wanted to support older versions of falcon we could add a check for it and fall back to the existing behavior.
Maybe something like:
```python
def parse_json(self, req, name, field):
"""Pull a JSON body value from the request.
.. note::
The request stream will be read and left at EOF.
"""
json_data = req.media if hasattr(req, 'media') else self._cache.get("json_data")
if json_data is None:
self._cache["json_data"] = json_data = parse_json_body(req)
return core.get_value(json_data, name, field, allow_many_nested=True)
```
</issue>
<code>
[start of src/webargs/falconparser.py]
1 """Falcon request argument parsing module.
2 """
3 import falcon
4 from falcon.util.uri import parse_query_string
5
6 from webargs import core
7 from webargs.multidictproxy import MultiDictProxy
8
9 HTTP_422 = "422 Unprocessable Entity"
10
11 # Mapping of int status codes to string status
12 status_map = {422: HTTP_422}
13
14
15 # Collect all exceptions from falcon.status_codes
16 def _find_exceptions():
17 for name in filter(lambda n: n.startswith("HTTP"), dir(falcon.status_codes)):
18 status = getattr(falcon.status_codes, name)
19 status_code = int(status.split(" ")[0])
20 status_map[status_code] = status
21
22
23 _find_exceptions()
24 del _find_exceptions
25
26
27 def is_json_request(req):
28 content_type = req.get_header("Content-Type")
29 return content_type and core.is_json(content_type)
30
31
32 # NOTE: Adapted from falcon.request.Request._parse_form_urlencoded
33 def parse_form_body(req):
34 if (
35 req.content_type is not None
36 and "application/x-www-form-urlencoded" in req.content_type
37 ):
38 body = req.stream.read(req.content_length or 0)
39 try:
40 body = body.decode("ascii")
41 except UnicodeDecodeError:
42 body = None
43 req.log_error(
44 "Non-ASCII characters found in form body "
45 "with Content-Type of "
46 "application/x-www-form-urlencoded. Body "
47 "will be ignored."
48 )
49
50 if body:
51 return parse_query_string(body, keep_blank=req.options.keep_blank_qs_values)
52
53 return core.missing
54
55
56 class HTTPError(falcon.HTTPError):
57 """HTTPError that stores a dictionary of validation error messages."""
58
59 def __init__(self, status, errors, *args, **kwargs):
60 self.errors = errors
61 super().__init__(status, *args, **kwargs)
62
63 def to_dict(self, *args, **kwargs):
64 """Override `falcon.HTTPError` to include error messages in responses."""
65 ret = super().to_dict(*args, **kwargs)
66 if self.errors is not None:
67 ret["errors"] = self.errors
68 return ret
69
70
71 class FalconParser(core.Parser):
72 """Falcon request argument parser."""
73
74 # Note on the use of MultiDictProxy throughout:
75 # Falcon parses query strings and form values into ordinary dicts, but with
76 # the values listified where appropriate
77 # it is still therefore necessary in these cases to wrap them in
78 # MultiDictProxy because we need to use the schema to determine when single
79 # values should be wrapped in lists due to the type of the destination
80 # field
81
82 def load_querystring(self, req, schema):
83 """Return query params from the request as a MultiDictProxy."""
84 return MultiDictProxy(req.params, schema)
85
86 def load_form(self, req, schema):
87 """Return form values from the request as a MultiDictProxy
88
89 .. note::
90
91 The request stream will be read and left at EOF.
92 """
93 form = parse_form_body(req)
94 if form is core.missing:
95 return form
96 return MultiDictProxy(form, schema)
97
98 def _raw_load_json(self, req):
99 """Return a json payload from the request for the core parser's load_json
100
101 Checks the input mimetype and may return 'missing' if the mimetype is
102 non-json, even if the request body is parseable as json."""
103 if not is_json_request(req) or req.content_length in (None, 0):
104 return core.missing
105 body = req.stream.read(req.content_length)
106 if body:
107 return core.parse_json(body)
108 return core.missing
109
110 def load_headers(self, req, schema):
111 """Return headers from the request."""
112 # Falcon only exposes headers as a dict (not multidict)
113 return req.headers
114
115 def load_cookies(self, req, schema):
116 """Return cookies from the request."""
117 # Cookies are expressed in Falcon as a dict, but the possibility of
118 # multiple values for a cookie is preserved internally -- if desired in
119 # the future, webargs could add a MultiDict type for Cookies here built
120 # from (req, schema), but Falcon does not provide one out of the box
121 return req.cookies
122
123 def get_request_from_view_args(self, view, args, kwargs):
124 """Get request from a resource method's arguments. Assumes that
125 request is the second argument.
126 """
127 req = args[1]
128 if not isinstance(req, falcon.Request):
129 raise TypeError("Argument is not a falcon.Request")
130 return req
131
132 def load_files(self, req, schema):
133 raise NotImplementedError(
134 f"Parsing files not yet supported by {self.__class__.__name__}"
135 )
136
137 def handle_error(self, error, req, schema, *, error_status_code, error_headers):
138 """Handles errors during parsing."""
139 status = status_map.get(error_status_code or self.DEFAULT_VALIDATION_STATUS)
140 if status is None:
141 raise LookupError(f"Status code {error_status_code} not supported")
142 raise HTTPError(status, errors=error.messages, headers=error_headers)
143
144 def _handle_invalid_json_error(self, error, req, *args, **kwargs):
145 status = status_map[400]
146 messages = {"json": ["Invalid JSON body."]}
147 raise HTTPError(status, errors=messages)
148
149
150 parser = FalconParser()
151 use_args = parser.use_args
152 use_kwargs = parser.use_kwargs
153
[end of src/webargs/falconparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/webargs/falconparser.py b/src/webargs/falconparser.py
--- a/src/webargs/falconparser.py
+++ b/src/webargs/falconparser.py
@@ -3,6 +3,8 @@
import falcon
from falcon.util.uri import parse_query_string
+import marshmallow as ma
+
from webargs import core
from webargs.multidictproxy import MultiDictProxy
@@ -69,7 +71,21 @@
class FalconParser(core.Parser):
- """Falcon request argument parser."""
+ """Falcon request argument parser.
+
+ Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for
+ details on the media location."""
+
+ # by default, Falcon will use the 'media' location to load data
+ #
+ # this effectively looks the same as loading JSON data by default, but if
+ # you add a handler for a different media type to Falcon, webargs will
+ # automatically pick up on that capability
+ DEFAULT_LOCATION = "media"
+ DEFAULT_UNKNOWN_BY_LOCATION = dict(
+ media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION
+ )
+ __location_map__ = dict(media="load_media", **core.Parser.__location_map__)
# Note on the use of MultiDictProxy throughout:
# Falcon parses query strings and form values into ordinary dicts, but with
@@ -95,6 +111,25 @@
return form
return MultiDictProxy(form, schema)
+ def load_media(self, req, schema):
+ """Return data unpacked and parsed by one of Falcon's media handlers.
+ By default, Falcon only handles JSON payloads.
+
+ To configure additional media handlers, see the
+ `Falcon documentation on media types`__.
+
+ .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html
+ __ FalconMedia_
+
+ .. note::
+
+ The request stream will be read and left at EOF.
+ """
+ # if there is no body, return missing instead of erroring
+ if req.content_length in (None, 0):
+ return core.missing
+ return req.media
+
def _raw_load_json(self, req):
"""Return a json payload from the request for the core parser's load_json
|
{"golden_diff": "diff --git a/src/webargs/falconparser.py b/src/webargs/falconparser.py\n--- a/src/webargs/falconparser.py\n+++ b/src/webargs/falconparser.py\n@@ -3,6 +3,8 @@\n import falcon\n from falcon.util.uri import parse_query_string\n \n+import marshmallow as ma\n+\n from webargs import core\n from webargs.multidictproxy import MultiDictProxy\n \n@@ -69,7 +71,21 @@\n \n \n class FalconParser(core.Parser):\n- \"\"\"Falcon request argument parser.\"\"\"\n+ \"\"\"Falcon request argument parser.\n+\n+ Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for\n+ details on the media location.\"\"\"\n+\n+ # by default, Falcon will use the 'media' location to load data\n+ #\n+ # this effectively looks the same as loading JSON data by default, but if\n+ # you add a handler for a different media type to Falcon, webargs will\n+ # automatically pick up on that capability\n+ DEFAULT_LOCATION = \"media\"\n+ DEFAULT_UNKNOWN_BY_LOCATION = dict(\n+ media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION\n+ )\n+ __location_map__ = dict(media=\"load_media\", **core.Parser.__location_map__)\n \n # Note on the use of MultiDictProxy throughout:\n # Falcon parses query strings and form values into ordinary dicts, but with\n@@ -95,6 +111,25 @@\n return form\n return MultiDictProxy(form, schema)\n \n+ def load_media(self, req, schema):\n+ \"\"\"Return data unpacked and parsed by one of Falcon's media handlers.\n+ By default, Falcon only handles JSON payloads.\n+\n+ To configure additional media handlers, see the\n+ `Falcon documentation on media types`__.\n+\n+ .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html\n+ __ FalconMedia_\n+\n+ .. note::\n+\n+ The request stream will be read and left at EOF.\n+ \"\"\"\n+ # if there is no body, return missing instead of erroring\n+ if req.content_length in (None, 0):\n+ return core.missing\n+ return req.media\n+\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n", "issue": "FalconParser should ideally support falcon's native media decoding\nFalcon has a native media handling mechanism which can decode an incoming request body based on the `Content-Type` header and adding the dictionary of resulting key-value pairs as a cached property `req.media`. I've written my own FalconParser subclass that (very naively) uses this, but it seems like something that might be worth supporting out of the box.\r\n\r\n```python\r\n def parse_json(self, req, name, field):\r\n \"\"\"\r\n Pull a JSON body value from the request.\r\n uses falcon's native req.media\r\n \"\"\"\r\n json_data = self._cache.get(\"json_data\")\r\n if json_data is None:\r\n self._cache[\"json_data\"] = json_data = req.media\r\n return core.get_value(json_data, name, field, allow_many_nested=True)\r\n```\r\n\r\nThis could probably be improved upon; since the `media` property is already cached on the request object, we could just access `req.media` directly without caching on the parser. (Not sure if this impacts other things that might use that cache, though; I haven't dug deep enough to fully understand that implication.) Also, since `media` was added in 1.3, if webargs still wanted to support older versions of falcon we could add a check for it and fall back to the existing behavior.\r\n\r\nMaybe something like:\r\n\r\n```python\r\n def parse_json(self, req, name, field):\r\n \"\"\"Pull a JSON body value from the request.\r\n .. 
note::\r\n The request stream will be read and left at EOF.\r\n \"\"\"\r\n json_data = req.media if hasattr(req, 'media') else self._cache.get(\"json_data\")\r\n if json_data is None:\r\n self._cache[\"json_data\"] = json_data = parse_json_body(req)\r\n return core.get_value(json_data, name, field, allow_many_nested=True)\r\n```\n", "before_files": [{"content": "\"\"\"Falcon request argument parsing module.\n\"\"\"\nimport falcon\nfrom falcon.util.uri import parse_query_string\n\nfrom webargs import core\nfrom webargs.multidictproxy import MultiDictProxy\n\nHTTP_422 = \"422 Unprocessable Entity\"\n\n# Mapping of int status codes to string status\nstatus_map = {422: HTTP_422}\n\n\n# Collect all exceptions from falcon.status_codes\ndef _find_exceptions():\n for name in filter(lambda n: n.startswith(\"HTTP\"), dir(falcon.status_codes)):\n status = getattr(falcon.status_codes, name)\n status_code = int(status.split(\" \")[0])\n status_map[status_code] = status\n\n\n_find_exceptions()\ndel _find_exceptions\n\n\ndef is_json_request(req):\n content_type = req.get_header(\"Content-Type\")\n return content_type and core.is_json(content_type)\n\n\n# NOTE: Adapted from falcon.request.Request._parse_form_urlencoded\ndef parse_form_body(req):\n if (\n req.content_type is not None\n and \"application/x-www-form-urlencoded\" in req.content_type\n ):\n body = req.stream.read(req.content_length or 0)\n try:\n body = body.decode(\"ascii\")\n except UnicodeDecodeError:\n body = None\n req.log_error(\n \"Non-ASCII characters found in form body \"\n \"with Content-Type of \"\n \"application/x-www-form-urlencoded. Body \"\n \"will be ignored.\"\n )\n\n if body:\n return parse_query_string(body, keep_blank=req.options.keep_blank_qs_values)\n\n return core.missing\n\n\nclass HTTPError(falcon.HTTPError):\n \"\"\"HTTPError that stores a dictionary of validation error messages.\"\"\"\n\n def __init__(self, status, errors, *args, **kwargs):\n self.errors = errors\n super().__init__(status, *args, **kwargs)\n\n def to_dict(self, *args, **kwargs):\n \"\"\"Override `falcon.HTTPError` to include error messages in responses.\"\"\"\n ret = super().to_dict(*args, **kwargs)\n if self.errors is not None:\n ret[\"errors\"] = self.errors\n return ret\n\n\nclass FalconParser(core.Parser):\n \"\"\"Falcon request argument parser.\"\"\"\n\n # Note on the use of MultiDictProxy throughout:\n # Falcon parses query strings and form values into ordinary dicts, but with\n # the values listified where appropriate\n # it is still therefore necessary in these cases to wrap them in\n # MultiDictProxy because we need to use the schema to determine when single\n # values should be wrapped in lists due to the type of the destination\n # field\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.params, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy\n\n .. 
note::\n\n The request stream will be read and left at EOF.\n \"\"\"\n form = parse_form_body(req)\n if form is core.missing:\n return form\n return MultiDictProxy(form, schema)\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req) or req.content_length in (None, 0):\n return core.missing\n body = req.stream.read(req.content_length)\n if body:\n return core.parse_json(body)\n return core.missing\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request.\"\"\"\n # Falcon only exposes headers as a dict (not multidict)\n return req.headers\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n # Cookies are expressed in Falcon as a dict, but the possibility of\n # multiple values for a cookie is preserved internally -- if desired in\n # the future, webargs could add a MultiDict type for Cookies here built\n # from (req, schema), but Falcon does not provide one out of the box\n return req.cookies\n\n def get_request_from_view_args(self, view, args, kwargs):\n \"\"\"Get request from a resource method's arguments. Assumes that\n request is the second argument.\n \"\"\"\n req = args[1]\n if not isinstance(req, falcon.Request):\n raise TypeError(\"Argument is not a falcon.Request\")\n return req\n\n def load_files(self, req, schema):\n raise NotImplementedError(\n f\"Parsing files not yet supported by {self.__class__.__name__}\"\n )\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing.\"\"\"\n status = status_map.get(error_status_code or self.DEFAULT_VALIDATION_STATUS)\n if status is None:\n raise LookupError(f\"Status code {error_status_code} not supported\")\n raise HTTPError(status, errors=error.messages, headers=error_headers)\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n status = status_map[400]\n messages = {\"json\": [\"Invalid JSON body.\"]}\n raise HTTPError(status, errors=messages)\n\n\nparser = FalconParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/falconparser.py"}]}
| 2,474 | 531 |
gh_patches_debug_17978
|
rasdani/github-patches
|
git_diff
|
cython__cython-4952
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make the cythonize command line more useful
A couple of the more common build options, such as `--numpy`, `--include_path`, `--libraries`, `--cflags`, and `--ldflags`, should be added to the `cythonize` command line so that it can be used easily for the majority of build tasks. 
Setting up a `setup.py` script for every `pyx` file I need to write is really tedious, which put me off Cython for years before I finally tried it for the first time (and that was in an IPython notebook with the cython magic, so I still don't know how to write `setup.py`). I am sure that many beginners to Cython are, like me, scared off by the complicated build process and never get the chance to actually try it. Please make it more accessible.
The `%%cython` magic in IPython has much saner defaults and more useful command-line options than the `cythonize` script.
</issue>
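For reference, the per-module boilerplate the issue describes is a build script along these lines (a minimal sketch; `example.pyx` is only a placeholder name):

```python
# Minimal setup.py that currently has to accompany a single example.pyx
from setuptools import setup

from Cython.Build import cythonize

setup(ext_modules=cythonize("example.pyx"))
```

It is then built with `python setup.py build_ext --inplace`; the request is for the `cythonize` command line itself to cover the common cases (include paths, libraries, compiler and linker flags) without a separate script.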
<code>
[start of Cython/Build/Cythonize.py]
1 #!/usr/bin/env python
2
3 from __future__ import absolute_import
4
5 import os
6 import shutil
7 import tempfile
8 from distutils.core import setup
9
10 from .Dependencies import cythonize, extended_iglob
11 from ..Utils import is_package_dir
12 from ..Compiler import Options
13
14 try:
15 import multiprocessing
16 parallel_compiles = int(multiprocessing.cpu_count() * 1.5)
17 except ImportError:
18 multiprocessing = None
19 parallel_compiles = 0
20
21
22 class _FakePool(object):
23 def map_async(self, func, args):
24 try:
25 from itertools import imap
26 except ImportError:
27 imap=map
28 for _ in imap(func, args):
29 pass
30
31 def close(self):
32 pass
33
34 def terminate(self):
35 pass
36
37 def join(self):
38 pass
39
40
41 def find_package_base(path):
42 base_dir, package_path = os.path.split(path)
43 while is_package_dir(base_dir):
44 base_dir, parent = os.path.split(base_dir)
45 package_path = '%s/%s' % (parent, package_path)
46 return base_dir, package_path
47
48
49 def cython_compile(path_pattern, options):
50 pool = None
51 all_paths = map(os.path.abspath, extended_iglob(path_pattern))
52 try:
53 for path in all_paths:
54 if options.build_inplace:
55 base_dir = path
56 while not os.path.isdir(base_dir) or is_package_dir(base_dir):
57 base_dir = os.path.dirname(base_dir)
58 else:
59 base_dir = None
60
61 if os.path.isdir(path):
62 # recursively compiling a package
63 paths = [os.path.join(path, '**', '*.{py,pyx}')]
64 else:
65 # assume it's a file(-like thing)
66 paths = [path]
67
68 ext_modules = cythonize(
69 paths,
70 nthreads=options.parallel,
71 exclude_failures=options.keep_going,
72 exclude=options.excludes,
73 compiler_directives=options.directives,
74 compile_time_env=options.compile_time_env,
75 force=options.force,
76 quiet=options.quiet,
77 depfile=options.depfile,
78 **options.options)
79
80 if ext_modules and options.build:
81 if len(ext_modules) > 1 and options.parallel > 1:
82 if pool is None:
83 try:
84 pool = multiprocessing.Pool(options.parallel)
85 except OSError:
86 pool = _FakePool()
87 pool.map_async(run_distutils, [
88 (base_dir, [ext]) for ext in ext_modules])
89 else:
90 run_distutils((base_dir, ext_modules))
91 except:
92 if pool is not None:
93 pool.terminate()
94 raise
95 else:
96 if pool is not None:
97 pool.close()
98 pool.join()
99
100
101 def run_distutils(args):
102 base_dir, ext_modules = args
103 script_args = ['build_ext', '-i']
104 cwd = os.getcwd()
105 temp_dir = None
106 try:
107 if base_dir:
108 os.chdir(base_dir)
109 temp_dir = tempfile.mkdtemp(dir=base_dir)
110 script_args.extend(['--build-temp', temp_dir])
111 setup(
112 script_name='setup.py',
113 script_args=script_args,
114 ext_modules=ext_modules,
115 )
116 finally:
117 if base_dir:
118 os.chdir(cwd)
119 if temp_dir and os.path.isdir(temp_dir):
120 shutil.rmtree(temp_dir)
121
122
123 def create_args_parser():
124 from argparse import ArgumentParser
125 from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction
126
127 parser = ArgumentParser()
128
129 parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
130 dest='directives', default={}, type=str,
131 action=ParseDirectivesAction,
132 help='set a compiler directive')
133 parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',
134 dest='compile_time_env', default={}, type=str,
135 action=ParseCompileTimeEnvAction,
136 help='set a compile time environment variable')
137 parser.add_argument('-s', '--option', metavar='NAME=VALUE',
138 dest='options', default={}, type=str,
139 action=ParseOptionsAction,
140 help='set a cythonize option')
141 parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,
142 help='use Python 2 syntax mode by default')
143 parser.add_argument('-3', dest='language_level', action='store_const', const=3,
144 help='use Python 3 syntax mode by default')
145 parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',
146 help='use Python 3 syntax mode by default')
147 parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',
148 help='Produce a colorized HTML version of the source.')
149 parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',
150 help='Produce a colorized HTML version of the source '
151 'which includes entire generated C/C++-code.')
152 parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',
153 action='append', default=[],
154 help='exclude certain file patterns from the compilation')
155
156 parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,
157 help='build extension modules using distutils')
158 parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,
159 help='build extension modules in place using distutils (implies -b)')
160 parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',
161 type=int, default=parallel_compiles,
162 help=('run builds in N parallel jobs (default: %d)' %
163 parallel_compiles or 1))
164 parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,
165 help='force recompilation')
166 parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,
167 help='be less verbose during compilation')
168
169 parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,
170 help='increase Python compatibility by ignoring some compile time errors')
171 parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,
172 help='compile as much as possible, ignore compilation failures')
173 parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,
174 help='strip docstrings')
175 parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')
176 parser.add_argument('sources', nargs='*')
177 return parser
178
179
180 def parse_args_raw(parser, args):
181 options, unknown = parser.parse_known_args(args)
182 sources = options.sources
183 # if positional arguments were interspersed
184 # some of them are in unknown
185 for option in unknown:
186 if option.startswith('-'):
187 parser.error("unknown option "+option)
188 else:
189 sources.append(option)
190 del options.sources
191 return (options, sources)
192
193
194 def parse_args(args):
195 parser = create_args_parser()
196 options, args = parse_args_raw(parser, args)
197
198 if not args:
199 parser.error("no source files provided")
200 if options.build_inplace:
201 options.build = True
202 if multiprocessing is None:
203 options.parallel = 0
204 if options.language_level:
205 assert options.language_level in (2, 3, '3str')
206 options.options['language_level'] = options.language_level
207
208 if options.lenient:
209 # increase Python compatibility by ignoring compile time errors
210 Options.error_on_unknown_names = False
211 Options.error_on_uninitialized = False
212
213 if options.annotate:
214 Options.annotate = options.annotate
215
216 if options.no_docstrings:
217 Options.docstrings = False
218
219 return options, args
220
221
222 def main(args=None):
223 options, paths = parse_args(args)
224
225 for path in paths:
226 cython_compile(path, options)
227
228
229 if __name__ == '__main__':
230 main()
231
[end of Cython/Build/Cythonize.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py
--- a/Cython/Build/Cythonize.py
+++ b/Cython/Build/Cythonize.py
@@ -121,10 +121,18 @@
def create_args_parser():
- from argparse import ArgumentParser
+ from argparse import ArgumentParser, RawDescriptionHelpFormatter
from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction
- parser = ArgumentParser()
+ parser = ArgumentParser(
+ formatter_class=RawDescriptionHelpFormatter,
+ epilog="""\
+Environment variables:
+ CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
+ of modification times and changes.
+ Environment variables accepted by setuptools are supported to configure the C compiler and build:
+ https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
+ )
parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',
dest='directives', default={}, type=str,
|
{"golden_diff": "diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py\n--- a/Cython/Build/Cythonize.py\n+++ b/Cython/Build/Cythonize.py\n@@ -121,10 +121,18 @@\n \n \n def create_args_parser():\n- from argparse import ArgumentParser\n+ from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n \n- parser = ArgumentParser()\n+ parser = ArgumentParser(\n+ formatter_class=RawDescriptionHelpFormatter,\n+ epilog=\"\"\"\\\n+Environment variables:\n+ CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless\n+ of modification times and changes.\n+ Environment variables accepted by setuptools are supported to configure the C compiler and build:\n+ https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options\"\"\"\n+ )\n \n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n", "issue": "Make the cythonize command line more useful\nA couple of more common build options, such as `--numpy`, `--include_path`, `--libraries`, `--cflags`, `--ldflags` should be added to `cythonize` command line so that it can be used easily for the majority of building tasks. \r\n\r\nSetting up a `setup.py` script for every `pyx` file I need to write is really, really tedious, which put me off from cython for years before I finally tried it for the first time (and that was in IPython notebook with cython magic, so I still don't know how to write `setup.py`). I am sure that many beginners to cython are just scared of the complicated build process like me, and never have the chance to actually try it. Please make it more accessible.\r\n\r\nThe `%%cython` magic in IPython has much better sane defaults and useful command line options than the `cythonize` script.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\nimport tempfile\nfrom distutils.core import setup\n\nfrom .Dependencies import cythonize, extended_iglob\nfrom ..Utils import is_package_dir\nfrom ..Compiler import Options\n\ntry:\n import multiprocessing\n parallel_compiles = int(multiprocessing.cpu_count() * 1.5)\nexcept ImportError:\n multiprocessing = None\n parallel_compiles = 0\n\n\nclass _FakePool(object):\n def map_async(self, func, args):\n try:\n from itertools import imap\n except ImportError:\n imap=map\n for _ in imap(func, args):\n pass\n\n def close(self):\n pass\n\n def terminate(self):\n pass\n\n def join(self):\n pass\n\n\ndef find_package_base(path):\n base_dir, package_path = os.path.split(path)\n while is_package_dir(base_dir):\n base_dir, parent = os.path.split(base_dir)\n package_path = '%s/%s' % (parent, package_path)\n return base_dir, package_path\n\n\ndef cython_compile(path_pattern, options):\n pool = None\n all_paths = map(os.path.abspath, extended_iglob(path_pattern))\n try:\n for path in all_paths:\n if options.build_inplace:\n base_dir = path\n while not os.path.isdir(base_dir) or is_package_dir(base_dir):\n base_dir = os.path.dirname(base_dir)\n else:\n base_dir = None\n\n if os.path.isdir(path):\n # recursively compiling a package\n paths = [os.path.join(path, '**', '*.{py,pyx}')]\n else:\n # assume it's a file(-like thing)\n paths = [path]\n\n ext_modules = cythonize(\n paths,\n nthreads=options.parallel,\n exclude_failures=options.keep_going,\n exclude=options.excludes,\n compiler_directives=options.directives,\n 
compile_time_env=options.compile_time_env,\n force=options.force,\n quiet=options.quiet,\n depfile=options.depfile,\n **options.options)\n\n if ext_modules and options.build:\n if len(ext_modules) > 1 and options.parallel > 1:\n if pool is None:\n try:\n pool = multiprocessing.Pool(options.parallel)\n except OSError:\n pool = _FakePool()\n pool.map_async(run_distutils, [\n (base_dir, [ext]) for ext in ext_modules])\n else:\n run_distutils((base_dir, ext_modules))\n except:\n if pool is not None:\n pool.terminate()\n raise\n else:\n if pool is not None:\n pool.close()\n pool.join()\n\n\ndef run_distutils(args):\n base_dir, ext_modules = args\n script_args = ['build_ext', '-i']\n cwd = os.getcwd()\n temp_dir = None\n try:\n if base_dir:\n os.chdir(base_dir)\n temp_dir = tempfile.mkdtemp(dir=base_dir)\n script_args.extend(['--build-temp', temp_dir])\n setup(\n script_name='setup.py',\n script_args=script_args,\n ext_modules=ext_modules,\n )\n finally:\n if base_dir:\n os.chdir(cwd)\n if temp_dir and os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n\n\ndef create_args_parser():\n from argparse import ArgumentParser\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n\n parser = ArgumentParser()\n\n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n action=ParseDirectivesAction,\n help='set a compiler directive')\n parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',\n dest='compile_time_env', default={}, type=str,\n action=ParseCompileTimeEnvAction,\n help='set a compile time environment variable')\n parser.add_argument('-s', '--option', metavar='NAME=VALUE',\n dest='options', default={}, type=str,\n action=ParseOptionsAction,\n help='set a cythonize option')\n parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,\n help='use Python 2 syntax mode by default')\n parser.add_argument('-3', dest='language_level', action='store_const', const=3,\n help='use Python 3 syntax mode by default')\n parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',\n help='use Python 3 syntax mode by default')\n parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',\n help='Produce a colorized HTML version of the source.')\n parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',\n help='Produce a colorized HTML version of the source '\n 'which includes entire generated C/C++-code.')\n parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',\n action='append', default=[],\n help='exclude certain file patterns from the compilation')\n\n parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,\n help='build extension modules using distutils')\n parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,\n help='build extension modules in place using distutils (implies -b)')\n parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',\n type=int, default=parallel_compiles,\n help=('run builds in N parallel jobs (default: %d)' %\n parallel_compiles or 1))\n parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,\n help='force recompilation')\n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,\n help='be less verbose during compilation')\n\n parser.add_argument('--lenient', 
dest='lenient', action='store_true', default=None,\n help='increase Python compatibility by ignoring some compile time errors')\n parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,\n help='compile as much as possible, ignore compilation failures')\n parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,\n help='strip docstrings')\n parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')\n parser.add_argument('sources', nargs='*')\n return parser\n\n\ndef parse_args_raw(parser, args):\n options, unknown = parser.parse_known_args(args)\n sources = options.sources\n # if positional arguments were interspersed\n # some of them are in unknown\n for option in unknown:\n if option.startswith('-'):\n parser.error(\"unknown option \"+option)\n else:\n sources.append(option)\n del options.sources\n return (options, sources)\n\n\ndef parse_args(args):\n parser = create_args_parser()\n options, args = parse_args_raw(parser, args)\n\n if not args:\n parser.error(\"no source files provided\")\n if options.build_inplace:\n options.build = True\n if multiprocessing is None:\n options.parallel = 0\n if options.language_level:\n assert options.language_level in (2, 3, '3str')\n options.options['language_level'] = options.language_level\n\n if options.lenient:\n # increase Python compatibility by ignoring compile time errors\n Options.error_on_unknown_names = False\n Options.error_on_uninitialized = False\n\n if options.annotate:\n Options.annotate = options.annotate\n\n if options.no_docstrings:\n Options.docstrings = False\n\n return options, args\n\n\ndef main(args=None):\n options, paths = parse_args(args)\n\n for path in paths:\n cython_compile(path, options)\n\n\nif __name__ == '__main__':\n main()\n", "path": "Cython/Build/Cythonize.py"}]}
| 3,090 | 249 |
gh_patches_debug_27959
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-2009
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pattern_create & run features
It is annoying to need multiple versions of gdb to complete some of my work. I don't understand why this feature hasn't been brought over yet, the way gdb-peda has implemented it. Reversing takes long enough; this would make our lives a little bit easier.
I would like to add the pattern_create feature to pwndbg, as well as run support, so that I can quickly create cyclic values and then run the output (run < payload-100.txt) to check the registers in under 10 seconds without restarting the program.
</issue>
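The requested workflow could look roughly like the sketch below, which uses the same `pwnlib` helper that pwndbg's existing `cyclic` command is built on (the `write_pattern` helper and its defaults are illustrative assumptions, not existing pwndbg API):

```python
# Sketch: generate a cyclic pattern and save it to a file so it can be fed
# to the debugged program with `run < payload-100.txt` inside GDB.
import string

from pwnlib.util.cyclic import cyclic


def write_pattern(count, filename, alphabet=string.ascii_lowercase.encode(), length=4):
    sequence = cyclic(count, alphabet, length)  # same call the command below uses
    with open(filename, "wb") as f:
        f.write(sequence)
    return sequence


write_pattern(100, "payload-100.txt")
```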
<code>
[start of pwndbg/commands/cyclic.py]
1 from __future__ import annotations
2
3 import argparse
4 import string
5
6 import gdb
7 from pwnlib.util.cyclic import cyclic
8 from pwnlib.util.cyclic import cyclic_find
9
10 import pwndbg.commands
11 import pwndbg.gdblib.arch
12 from pwndbg.color import message
13
14 parser = argparse.ArgumentParser(description="Cyclic pattern creator/finder.")
15
16 parser.add_argument(
17 "-a",
18 "--alphabet",
19 metavar="charset",
20 default=string.ascii_lowercase,
21 type=str.encode,
22 help="The alphabet to use in the cyclic pattern",
23 )
24
25 parser.add_argument(
26 "-n",
27 "--length",
28 metavar="length",
29 type=int,
30 help="Size of the unique subsequences (defaults to the pointer size for the current arch)",
31 )
32
33 group = parser.add_mutually_exclusive_group(required=False)
34 group.add_argument(
35 "-l",
36 "-o",
37 "--offset",
38 "--lookup",
39 dest="lookup",
40 metavar="lookup_value",
41 type=str,
42 help="Do a lookup instead of printing the sequence (accepts constant values as well as expressions)",
43 )
44
45 group.add_argument(
46 "count",
47 type=int,
48 nargs="?",
49 default=100,
50 help="Number of characters to print from the sequence (default: print the entire sequence)",
51 )
52
53
54 @pwndbg.commands.ArgparsedCommand(parser, command_name="cyclic")
55 def cyclic_cmd(alphabet, length, lookup, count=100) -> None:
56 if length:
57 # Convert from gdb.Value
58 length = int(length)
59 else:
60 length = pwndbg.gdblib.arch.ptrsize
61
62 if lookup:
63 lookup = pwndbg.commands.fix(lookup, sloppy=True)
64
65 if isinstance(lookup, (gdb.Value, int)):
66 lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian)
67 elif isinstance(lookup, str):
68 lookup = bytes(lookup, "utf-8")
69
70 if len(lookup) != length:
71 print(
72 message.error(
73 f"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)"
74 )
75 )
76 return
77
78 hexstr = "0x" + lookup.hex()
79 print(
80 message.notice(
81 f"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})"
82 )
83 )
84
85 if any(c not in alphabet for c in lookup):
86 print(message.error("Pattern contains characters not present in the alphabet"))
87 return
88
89 offset = cyclic_find(lookup, alphabet, length)
90
91 if offset == -1:
92 print(message.error("Given lookup pattern does not exist in the sequence"))
93 else:
94 print(message.success(f"Found at offset {offset}"))
95 else:
96 sequence = cyclic(int(count), alphabet, length)
97 print(sequence.decode())
98
[end of pwndbg/commands/cyclic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/commands/cyclic.py b/pwndbg/commands/cyclic.py
--- a/pwndbg/commands/cyclic.py
+++ b/pwndbg/commands/cyclic.py
@@ -30,6 +30,7 @@
help="Size of the unique subsequences (defaults to the pointer size for the current arch)",
)
+
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-l",
@@ -50,9 +51,17 @@
help="Number of characters to print from the sequence (default: print the entire sequence)",
)
+parser.add_argument(
+ "filename",
+ type=str,
+ help="Name (path) of the file to save the cyclic pattern to",
+ default="",
+ nargs="?",
+)
+
@pwndbg.commands.ArgparsedCommand(parser, command_name="cyclic")
-def cyclic_cmd(alphabet, length, lookup, count=100) -> None:
+def cyclic_cmd(alphabet, length, lookup, count=100, filename="") -> None:
if length:
# Convert from gdb.Value
length = int(length)
@@ -93,5 +102,12 @@
else:
print(message.success(f"Found at offset {offset}"))
else:
- sequence = cyclic(int(count), alphabet, length)
- print(sequence.decode())
+ count = int(count)
+ sequence = cyclic(count, alphabet, length)
+
+ if not filename:
+ print(sequence.decode())
+ else:
+ with open(filename, "wb") as f:
+ f.write(sequence)
+ print(f"Written a cyclic sequence of length {count} to file {filename}")
|
{"golden_diff": "diff --git a/pwndbg/commands/cyclic.py b/pwndbg/commands/cyclic.py\n--- a/pwndbg/commands/cyclic.py\n+++ b/pwndbg/commands/cyclic.py\n@@ -30,6 +30,7 @@\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n )\n \n+\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\n \"-l\",\n@@ -50,9 +51,17 @@\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n )\n \n+parser.add_argument(\n+ \"filename\",\n+ type=str,\n+ help=\"Name (path) of the file to save the cyclic pattern to\",\n+ default=\"\",\n+ nargs=\"?\",\n+)\n+\n \n @pwndbg.commands.ArgparsedCommand(parser, command_name=\"cyclic\")\n-def cyclic_cmd(alphabet, length, lookup, count=100) -> None:\n+def cyclic_cmd(alphabet, length, lookup, count=100, filename=\"\") -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n@@ -93,5 +102,12 @@\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n- sequence = cyclic(int(count), alphabet, length)\n- print(sequence.decode())\n+ count = int(count)\n+ sequence = cyclic(count, alphabet, length)\n+\n+ if not filename:\n+ print(sequence.decode())\n+ else:\n+ with open(filename, \"wb\") as f:\n+ f.write(sequence)\n+ print(f\"Written a cyclic sequence of length {count} to file {filename}\")\n", "issue": "pattern_create & run features\nIt is annoying to have to have multiple versions of gdb to complete some of my work. I don't understand why this feature hasn't been brought over yet like gdb-peda has implemented. Reversing takes long enough, this would make our lives a little bit easier.\r\n\r\nI would like to add the pattern_create feature into pwndbg. As well as run, so that I can quickly create our cyclic values and then run our output (run < payload-100.txt) so we can check the registers in under 10 seconds without restarting the program. 
\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport string\n\nimport gdb\nfrom pwnlib.util.cyclic import cyclic\nfrom pwnlib.util.cyclic import cyclic_find\n\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nfrom pwndbg.color import message\n\nparser = argparse.ArgumentParser(description=\"Cyclic pattern creator/finder.\")\n\nparser.add_argument(\n \"-a\",\n \"--alphabet\",\n metavar=\"charset\",\n default=string.ascii_lowercase,\n type=str.encode,\n help=\"The alphabet to use in the cyclic pattern\",\n)\n\nparser.add_argument(\n \"-n\",\n \"--length\",\n metavar=\"length\",\n type=int,\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n)\n\ngroup = parser.add_mutually_exclusive_group(required=False)\ngroup.add_argument(\n \"-l\",\n \"-o\",\n \"--offset\",\n \"--lookup\",\n dest=\"lookup\",\n metavar=\"lookup_value\",\n type=str,\n help=\"Do a lookup instead of printing the sequence (accepts constant values as well as expressions)\",\n)\n\ngroup.add_argument(\n \"count\",\n type=int,\n nargs=\"?\",\n default=100,\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n)\n\n\[email protected](parser, command_name=\"cyclic\")\ndef cyclic_cmd(alphabet, length, lookup, count=100) -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n else:\n length = pwndbg.gdblib.arch.ptrsize\n\n if lookup:\n lookup = pwndbg.commands.fix(lookup, sloppy=True)\n\n if isinstance(lookup, (gdb.Value, int)):\n lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian)\n elif isinstance(lookup, str):\n lookup = bytes(lookup, \"utf-8\")\n\n if len(lookup) != length:\n print(\n message.error(\n f\"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)\"\n )\n )\n return\n\n hexstr = \"0x\" + lookup.hex()\n print(\n message.notice(\n f\"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})\"\n )\n )\n\n if any(c not in alphabet for c in lookup):\n print(message.error(\"Pattern contains characters not present in the alphabet\"))\n return\n\n offset = cyclic_find(lookup, alphabet, length)\n\n if offset == -1:\n print(message.error(\"Given lookup pattern does not exist in the sequence\"))\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n sequence = cyclic(int(count), alphabet, length)\n print(sequence.decode())\n", "path": "pwndbg/commands/cyclic.py"}]}
| 1,469 | 378 |
gh_patches_debug_40208
|
rasdani/github-patches
|
git_diff
|
rlworkgroup__garage-1583
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RaySampler does not use all workers
In release 2019.10, [RaySampler](https://github.com/rlworkgroup/garage/blob/release-2019.10/src/garage/sampler/ray_sampler.py) sees only one of the workers as idle for all iterations after the first iteration. This can be seen with [examples/tf/trpo_swimmer_ray_sampler.py](https://github.com/rlworkgroup/garage/blob/release-2019.10/examples/tf/trpo_swimmer_ray_sampler.py).
To me it looks like `self._idle_worker_ids` is not correctly updated at the end of `obtain_samples()` [here](https://github.com/rlworkgroup/garage/blob/d6c993526c23c289ebc94635a78186d9bf197f32/src/garage/sampler/ray_sampler.py#L130). Only those workers which have returned with a result are put back in the idle pool. This is a problem because results from some workers are not processed at the end and these workers don't go back into the idle pool.
A quick fix (?) is to reset the list of idle worker ids to all worker ids at the start of `obtain_samples()`. This does result in all the workers being used for all iterations. Since the agent parameters are updated in the next iteration, the samples from the previously not-ready workers must be valid. However, I am not totally sure.
@avnishn @ryanjulian Appreciate it if the fix can be back-ported to 2019.10!
</issue>
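A minimal sketch of the quick fix suggested above, using the attribute names from `ray_sampler.py` shown below (this is only one possible fix, not the final patch):

```python
# Return every worker to the idle pool at the start of each sampling round,
# so that workers whose rollouts were still pending when the previous call
# returned are not silently dropped from the pool.
def obtain_samples(self, itr, num_samples):
    self._idle_worker_ids = list(range(self._num_workers))
    self._active_worker_ids = []
    # ... continue with the existing update/rollout loop unchanged ...
```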
<code>
[start of src/garage/sampler/ray_sampler.py]
1 """This is an implementation of an on policy batch sampler.
2
3 Uses a data parallel design.
4 Included is a sampler that deploys sampler workers.
5
6 The sampler workers must implement some type of set agent parameters
7 function, and a rollout function
8 """
9 from collections import defaultdict
10 import pickle
11
12 import numpy as np
13 import psutil
14 import ray
15
16 from garage.experiment import deterministic
17 from garage.misc.prog_bar_counter import ProgBarCounter
18 from garage.sampler.base import BaseSampler
19
20
21 class RaySampler(BaseSampler):
22 """Collects Policy Rollouts in a data parallel fashion.
23
24 Args:
25 algo (garage.np.algo.RLAlgorithm): A garage algo object
26 env (gym.Env): A gym/akro env object
27 seed (int): Random seed.
28         should_render (bool): Whether the sampler should render the trajectories.
29 num_processors (int): Number of processors to be used.
30 sampler_worker_cls (garage.sampler.ray_sampler.SamplerWorker):
31 If none, uses the default SamplerWorker class
32
33 """
34
35 def __init__(self,
36 algo,
37 env,
38 seed,
39 should_render=False,
40 num_processors=None,
41 sampler_worker_cls=None):
42 super().__init__(algo, env)
43 self._sampler_worker = ray.remote(SamplerWorker if sampler_worker_cls
44 is None else sampler_worker_cls)
45 self._seed = seed
46 deterministic.set_seed(self._seed)
47 self._max_path_length = self.algo.max_path_length
48 self._should_render = should_render
49 if not ray.is_initialized():
50 ray.init(log_to_driver=False)
51 self._num_workers = (num_processors if num_processors else
52 psutil.cpu_count(logical=False))
53 self._all_workers = defaultdict(None)
54 self._idle_worker_ids = list(range(self._num_workers))
55 self._active_worker_ids = []
56
57 def start_worker(self):
58 """Initialize a new ray worker."""
59 # Pickle the environment once, instead of once per worker.
60 env_pkl = pickle.dumps(self.env)
61 # We need to pickle the agent so that we can e.g. set up the TF Session
62         # in the worker *before* unpickling it.
63 agent_pkl = pickle.dumps(self.algo.policy)
64 for worker_id in range(self._num_workers):
65 self._all_workers[worker_id] = self._sampler_worker.remote(
66 worker_id, env_pkl, agent_pkl, self._seed,
67 self._max_path_length, self._should_render)
68
69 # pylint: disable=arguments-differ
70 def obtain_samples(self, itr, num_samples):
71 """Sample the policy for new trajectories.
72
73 Args:
74 itr (int): Iteration number.
75             num_samples (int): Number of steps the sampler should collect.
76
77 Returns:
78 list[dict]: Sample paths, each path with key
79 * observations: (numpy.ndarray)
80 * actions: (numpy.ndarray)
81 * rewards: (numpy.ndarray)
82 * agent_infos: (dict)
83 * env_infos: (dict)
84
85 """
86 _active_workers = []
87 self._active_worker_ids = []
88 pbar = ProgBarCounter(num_samples)
89 completed_samples = 0
90 traj = []
91 updating_workers = []
92
93 # update the policy params of each worker before sampling
94 # for the current iteration
95 curr_policy_params = self.algo.policy.get_param_values()
96 params_id = ray.put(curr_policy_params)
97 while self._idle_worker_ids:
98 worker_id = self._idle_worker_ids.pop()
99 worker = self._all_workers[worker_id]
100 updating_workers.append(worker.set_agent.remote(params_id))
101
102 while completed_samples < num_samples:
103 # if there are workers still being updated, check
104 # which ones are still updating and take the workers that
105 # are done updating, and start collecting trajectories on
106 # those workers.
107 if updating_workers:
108 updated, updating_workers = ray.wait(updating_workers,
109 num_returns=1,
110 timeout=0.1)
111 upd = [ray.get(up) for up in updated]
112 self._idle_worker_ids.extend(upd)
113
114 # if there are idle workers, use them to collect trajectories
115 # mark the newly busy workers as active
116 while self._idle_worker_ids:
117 idle_worker_id = self._idle_worker_ids.pop()
118 self._active_worker_ids.append(idle_worker_id)
119 worker = self._all_workers[idle_worker_id]
120 _active_workers.append(worker.rollout.remote())
121
122 # check which workers are done/not done collecting a sample
123 # if any are done, send them to process the collected trajectory
124 # if they are not, keep checking if they are done
125 ready, not_ready = ray.wait(_active_workers,
126 num_returns=1,
127 timeout=0.001)
128 _active_workers = not_ready
129 for result in ready:
130 trajectory, num_returned_samples = self._process_trajectory(
131 result)
132 completed_samples += num_returned_samples
133 pbar.inc(num_returned_samples)
134 traj.append(trajectory)
135 pbar.stop()
136 return traj
137
138 def shutdown_worker(self):
139 """Shuts down the worker."""
140 ray.shutdown()
141
142 def _process_trajectory(self, result):
143 """Collect trajectory from ray object store.
144
145 Converts that trajectory to garage friendly format.
146
147 Args:
148 result (obj): Ray object id of ready to be collected trajectory.
149
150 Returns:
151 dict: One trajectory, with keys
152 * observations: (numpy.ndarray)
153 * actions: (numpy.ndarray)
154 * rewards: (numpy.ndarray)
155 * agent_infos: (dict)
156 * env_infos: (dict)
157 int: Number of returned samples in the trajectory
158
159 """
160 trajectory = ray.get(result)
161 ready_worker_id = trajectory[0]
162 self._active_worker_ids.remove(ready_worker_id)
163 self._idle_worker_ids.append(ready_worker_id)
164 trajectory = dict(observations=np.asarray(trajectory[1]),
165 actions=np.asarray(trajectory[2]),
166 rewards=np.asarray(trajectory[3]),
167 agent_infos=trajectory[4],
168 env_infos=trajectory[5])
169 num_returned_samples = len(trajectory['observations'])
170 return trajectory, num_returned_samples
171
172
173 class SamplerWorker:
174 """Constructs a single sampler worker.
175
176     The worker can have its parameters updated, and sample its policy for
177 trajectories or rollouts.
178
179 Args:
180 worker_id (int): the id of the sampler_worker
181 env_pkl (bytes): A pickled gym or akro env object
182 agent_pkl (bytes): A pickled agent
183 seed (int): Random seed.
184 max_path_length (int): max trajectory length
185 should_render (bool): if true, renders trajectories after
186 sampling them
187
188 """
189
190 def __init__(self,
191 worker_id,
192 env_pkl,
193 agent_pkl,
194 seed,
195 max_path_length,
196 should_render=False):
197 self.worker_id = worker_id
198 self._env = pickle.loads(env_pkl)
199 self.agent = pickle.loads(agent_pkl)
200 self._seed = seed + self.worker_id
201 deterministic.set_seed(self._seed)
202 self._max_path_length = max_path_length
203 self._should_render = should_render
204 self.agent_updates = 0
205
206 def set_agent(self, flattened_params):
207 """Set the agent params.
208
209 Args:
210 flattened_params (list[np.ndarray]): model parameters
211
212 Returns:
213 int: Worker id of this sampler worker.
214
215 """
216 self.agent.set_param_values(flattened_params)
217 self.agent_updates += 1
218 return self.worker_id
219
220 def rollout(self):
221 """Sample a single rollout from the agent/policy.
222
223 The following value for the following keys will be a 2D array,
224 with the first dimension corresponding to the time dimension.
225
226 - observations
227 - actions
228 - rewards
229 - next_observations
230 - terminals
231 The next two elements will be lists of dictionaries, with
232 the index into the list being the index into the time
233 - agent_infos
234 - env_infos
235
236 Returns:
237 int: ID of this work
238 numpy.ndarray: observations
239 numpy.ndarray: actions
240 numpy.ndarray: rewards
241 dict[list]: agent info
242 dict[list]: environment info
243
244 """
245 observations = []
246 actions = []
247 rewards = []
248 agent_infos = defaultdict(list)
249 env_infos = defaultdict(list)
250 o = self._env.reset()
251 self.agent.reset()
252 next_o = None
253 path_length = 0
254 while path_length < self._max_path_length:
255 a, agent_info = self.agent.get_action(o)
256 next_o, r, d, env_info = self._env.step(a)
257 observations.append(o)
258 rewards.append(r)
259 actions.append(a)
260 for k, v in agent_info.items():
261 agent_infos[k].append(v)
262 for k, v in env_info.items():
263 env_infos[k].append(v)
264 path_length += 1
265 if d:
266 break
267 o = next_o
268 for k, v in agent_infos.items():
269 agent_infos[k] = np.asarray(v)
270 for k, v in env_infos.items():
271 env_infos[k] = np.asarray(v)
272 return self.worker_id,\
273 np.array(observations),\
274 np.array(actions),\
275 np.array(rewards),\
276 dict(agent_infos),\
277 dict(env_infos)
278
[end of src/garage/sampler/ray_sampler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/garage/sampler/ray_sampler.py b/src/garage/sampler/ray_sampler.py
--- a/src/garage/sampler/ray_sampler.py
+++ b/src/garage/sampler/ray_sampler.py
@@ -89,7 +89,7 @@
completed_samples = 0
traj = []
updating_workers = []
-
+ samples_to_be_collected = 0
# update the policy params of each worker before sampling
# for the current iteration
curr_policy_params = self.algo.policy.get_param_values()
@@ -98,7 +98,6 @@
worker_id = self._idle_worker_ids.pop()
worker = self._all_workers[worker_id]
updating_workers.append(worker.set_agent.remote(params_id))
-
while completed_samples < num_samples:
# if there are workers still being updated, check
# which ones are still updating and take the workers that
@@ -113,12 +112,20 @@
# if there are idle workers, use them to collect trajectories
# mark the newly busy workers as active
- while self._idle_worker_ids:
+ workers_to_use = int(
+ np.clip(
+ np.ceil(
+ (num_samples - completed_samples -
+ samples_to_be_collected) / self._max_path_length) -
+ len(self._active_worker_ids), 0, len(self._all_workers)))
+ workers_started = 0
+ while self._idle_worker_ids and workers_started < workers_to_use:
idle_worker_id = self._idle_worker_ids.pop()
+ workers_started += 1
self._active_worker_ids.append(idle_worker_id)
+ samples_to_be_collected += self._max_path_length
worker = self._all_workers[idle_worker_id]
_active_workers.append(worker.rollout.remote())
-
# check which workers are done/not done collecting a sample
# if any are done, send them to process the collected trajectory
# if they are not, keep checking if they are done
@@ -129,9 +136,12 @@
for result in ready:
trajectory, num_returned_samples = self._process_trajectory(
result)
+ samples_to_be_collected -= self._max_path_length
completed_samples += num_returned_samples
pbar.inc(num_returned_samples)
traj.append(trajectory)
+ self._idle_worker_ids = list(range(self._num_workers))
+ self._active_worker_ids = []
pbar.stop()
return traj
|
{"golden_diff": "diff --git a/src/garage/sampler/ray_sampler.py b/src/garage/sampler/ray_sampler.py\n--- a/src/garage/sampler/ray_sampler.py\n+++ b/src/garage/sampler/ray_sampler.py\n@@ -89,7 +89,7 @@\n completed_samples = 0\n traj = []\n updating_workers = []\n-\n+ samples_to_be_collected = 0\n # update the policy params of each worker before sampling\n # for the current iteration\n curr_policy_params = self.algo.policy.get_param_values()\n@@ -98,7 +98,6 @@\n worker_id = self._idle_worker_ids.pop()\n worker = self._all_workers[worker_id]\n updating_workers.append(worker.set_agent.remote(params_id))\n-\n while completed_samples < num_samples:\n # if there are workers still being updated, check\n # which ones are still updating and take the workers that\n@@ -113,12 +112,20 @@\n \n # if there are idle workers, use them to collect trajectories\n # mark the newly busy workers as active\n- while self._idle_worker_ids:\n+ workers_to_use = int(\n+ np.clip(\n+ np.ceil(\n+ (num_samples - completed_samples -\n+ samples_to_be_collected) / self._max_path_length) -\n+ len(self._active_worker_ids), 0, len(self._all_workers)))\n+ workers_started = 0\n+ while self._idle_worker_ids and workers_started < workers_to_use:\n idle_worker_id = self._idle_worker_ids.pop()\n+ workers_started += 1\n self._active_worker_ids.append(idle_worker_id)\n+ samples_to_be_collected += self._max_path_length\n worker = self._all_workers[idle_worker_id]\n _active_workers.append(worker.rollout.remote())\n-\n # check which workers are done/not done collecting a sample\n # if any are done, send them to process the collected trajectory\n # if they are not, keep checking if they are done\n@@ -129,9 +136,12 @@\n for result in ready:\n trajectory, num_returned_samples = self._process_trajectory(\n result)\n+ samples_to_be_collected -= self._max_path_length\n completed_samples += num_returned_samples\n pbar.inc(num_returned_samples)\n traj.append(trajectory)\n+ self._idle_worker_ids = list(range(self._num_workers))\n+ self._active_worker_ids = []\n pbar.stop()\n return traj\n", "issue": "RaySampler does not use all workers\nIn release 2019.10, [RaySampler](https://github.com/rlworkgroup/garage/blob/release-2019.10/src/garage/sampler/ray_sampler.py) sees only one of the workers as idle for all iterations after the first iteration. This can be seen with [examples/tf/trpo_swimmer_ray_sampler.py](https://github.com/rlworkgroup/garage/blob/release-2019.10/examples/tf/trpo_swimmer_ray_sampler.py). \r\n\r\nTo me it looks like `self._idle_worker_ids` is not correctly updated at the end of `obtain_samples()` [here](https://github.com/rlworkgroup/garage/blob/d6c993526c23c289ebc94635a78186d9bf197f32/src/garage/sampler/ray_sampler.py#L130). Only those workers which have returned with a result are put back in the idle pool. This is a problem because results from some workers are not processed at the end and these workers don't go back into the idle pool. \r\n\r\nA quick fix (?) is to reset the list of idle workers ids to all workers ids at the start of `obtain samples()`. This does result in all the workers being used for all iterations. Since the agent parameters are updated in the next iteration, the samples from the previously not ready workers must be valid. However, I am not totally sure. 
\r\n\r\n@avnishn @ryanjulian Appreciate it if the fix can be back-ported to 2019.10!\n", "before_files": [{"content": "\"\"\"This is an implementation of an on policy batch sampler.\n\nUses a data parallel design.\nIncluded is a sampler that deploys sampler workers.\n\nThe sampler workers must implement some type of set agent parameters\nfunction, and a rollout function\n\"\"\"\nfrom collections import defaultdict\nimport pickle\n\nimport numpy as np\nimport psutil\nimport ray\n\nfrom garage.experiment import deterministic\nfrom garage.misc.prog_bar_counter import ProgBarCounter\nfrom garage.sampler.base import BaseSampler\n\n\nclass RaySampler(BaseSampler):\n \"\"\"Collects Policy Rollouts in a data parallel fashion.\n\n Args:\n algo (garage.np.algo.RLAlgorithm): A garage algo object\n env (gym.Env): A gym/akro env object\n seed (int): Random seed.\n should_render (bool): If the sampler render the trajectories.\n num_processors (int): Number of processors to be used.\n sampler_worker_cls (garage.sampler.ray_sampler.SamplerWorker):\n If none, uses the default SamplerWorker class\n\n \"\"\"\n\n def __init__(self,\n algo,\n env,\n seed,\n should_render=False,\n num_processors=None,\n sampler_worker_cls=None):\n super().__init__(algo, env)\n self._sampler_worker = ray.remote(SamplerWorker if sampler_worker_cls\n is None else sampler_worker_cls)\n self._seed = seed\n deterministic.set_seed(self._seed)\n self._max_path_length = self.algo.max_path_length\n self._should_render = should_render\n if not ray.is_initialized():\n ray.init(log_to_driver=False)\n self._num_workers = (num_processors if num_processors else\n psutil.cpu_count(logical=False))\n self._all_workers = defaultdict(None)\n self._idle_worker_ids = list(range(self._num_workers))\n self._active_worker_ids = []\n\n def start_worker(self):\n \"\"\"Initialize a new ray worker.\"\"\"\n # Pickle the environment once, instead of once per worker.\n env_pkl = pickle.dumps(self.env)\n # We need to pickle the agent so that we can e.g. 
set up the TF Session\n # in the worker *before* unpicling it.\n agent_pkl = pickle.dumps(self.algo.policy)\n for worker_id in range(self._num_workers):\n self._all_workers[worker_id] = self._sampler_worker.remote(\n worker_id, env_pkl, agent_pkl, self._seed,\n self._max_path_length, self._should_render)\n\n # pylint: disable=arguments-differ\n def obtain_samples(self, itr, num_samples):\n \"\"\"Sample the policy for new trajectories.\n\n Args:\n itr (int): Iteration number.\n num_samples (int): Number of steps the the sampler should collect.\n\n Returns:\n list[dict]: Sample paths, each path with key\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n\n \"\"\"\n _active_workers = []\n self._active_worker_ids = []\n pbar = ProgBarCounter(num_samples)\n completed_samples = 0\n traj = []\n updating_workers = []\n\n # update the policy params of each worker before sampling\n # for the current iteration\n curr_policy_params = self.algo.policy.get_param_values()\n params_id = ray.put(curr_policy_params)\n while self._idle_worker_ids:\n worker_id = self._idle_worker_ids.pop()\n worker = self._all_workers[worker_id]\n updating_workers.append(worker.set_agent.remote(params_id))\n\n while completed_samples < num_samples:\n # if there are workers still being updated, check\n # which ones are still updating and take the workers that\n # are done updating, and start collecting trajectories on\n # those workers.\n if updating_workers:\n updated, updating_workers = ray.wait(updating_workers,\n num_returns=1,\n timeout=0.1)\n upd = [ray.get(up) for up in updated]\n self._idle_worker_ids.extend(upd)\n\n # if there are idle workers, use them to collect trajectories\n # mark the newly busy workers as active\n while self._idle_worker_ids:\n idle_worker_id = self._idle_worker_ids.pop()\n self._active_worker_ids.append(idle_worker_id)\n worker = self._all_workers[idle_worker_id]\n _active_workers.append(worker.rollout.remote())\n\n # check which workers are done/not done collecting a sample\n # if any are done, send them to process the collected trajectory\n # if they are not, keep checking if they are done\n ready, not_ready = ray.wait(_active_workers,\n num_returns=1,\n timeout=0.001)\n _active_workers = not_ready\n for result in ready:\n trajectory, num_returned_samples = self._process_trajectory(\n result)\n completed_samples += num_returned_samples\n pbar.inc(num_returned_samples)\n traj.append(trajectory)\n pbar.stop()\n return traj\n\n def shutdown_worker(self):\n \"\"\"Shuts down the worker.\"\"\"\n ray.shutdown()\n\n def _process_trajectory(self, result):\n \"\"\"Collect trajectory from ray object store.\n\n Converts that trajectory to garage friendly format.\n\n Args:\n result (obj): Ray object id of ready to be collected trajectory.\n\n Returns:\n dict: One trajectory, with keys\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n int: Number of returned samples in the trajectory\n\n \"\"\"\n trajectory = ray.get(result)\n ready_worker_id = trajectory[0]\n self._active_worker_ids.remove(ready_worker_id)\n self._idle_worker_ids.append(ready_worker_id)\n trajectory = dict(observations=np.asarray(trajectory[1]),\n actions=np.asarray(trajectory[2]),\n rewards=np.asarray(trajectory[3]),\n agent_infos=trajectory[4],\n env_infos=trajectory[5])\n num_returned_samples = len(trajectory['observations'])\n return trajectory, 
num_returned_samples\n\n\nclass SamplerWorker:\n \"\"\"Constructs a single sampler worker.\n\n The worker can have its parameters updated, and sampler its policy for\n trajectories or rollouts.\n\n Args:\n worker_id (int): the id of the sampler_worker\n env_pkl (bytes): A pickled gym or akro env object\n agent_pkl (bytes): A pickled agent\n seed (int): Random seed.\n max_path_length (int): max trajectory length\n should_render (bool): if true, renders trajectories after\n sampling them\n\n \"\"\"\n\n def __init__(self,\n worker_id,\n env_pkl,\n agent_pkl,\n seed,\n max_path_length,\n should_render=False):\n self.worker_id = worker_id\n self._env = pickle.loads(env_pkl)\n self.agent = pickle.loads(agent_pkl)\n self._seed = seed + self.worker_id\n deterministic.set_seed(self._seed)\n self._max_path_length = max_path_length\n self._should_render = should_render\n self.agent_updates = 0\n\n def set_agent(self, flattened_params):\n \"\"\"Set the agent params.\n\n Args:\n flattened_params (list[np.ndarray]): model parameters\n\n Returns:\n int: Worker id of this sampler worker.\n\n \"\"\"\n self.agent.set_param_values(flattened_params)\n self.agent_updates += 1\n return self.worker_id\n\n def rollout(self):\n \"\"\"Sample a single rollout from the agent/policy.\n\n The following value for the following keys will be a 2D array,\n with the first dimension corresponding to the time dimension.\n\n - observations\n - actions\n - rewards\n - next_observations\n - terminals\n The next two elements will be lists of dictionaries, with\n the index into the list being the index into the time\n - agent_infos\n - env_infos\n\n Returns:\n int: ID of this work\n numpy.ndarray: observations\n numpy.ndarray: actions\n numpy.ndarray: rewards\n dict[list]: agent info\n dict[list]: environment info\n\n \"\"\"\n observations = []\n actions = []\n rewards = []\n agent_infos = defaultdict(list)\n env_infos = defaultdict(list)\n o = self._env.reset()\n self.agent.reset()\n next_o = None\n path_length = 0\n while path_length < self._max_path_length:\n a, agent_info = self.agent.get_action(o)\n next_o, r, d, env_info = self._env.step(a)\n observations.append(o)\n rewards.append(r)\n actions.append(a)\n for k, v in agent_info.items():\n agent_infos[k].append(v)\n for k, v in env_info.items():\n env_infos[k].append(v)\n path_length += 1\n if d:\n break\n o = next_o\n for k, v in agent_infos.items():\n agent_infos[k] = np.asarray(v)\n for k, v in env_infos.items():\n env_infos[k] = np.asarray(v)\n return self.worker_id,\\\n np.array(observations),\\\n np.array(actions),\\\n np.array(rewards),\\\n dict(agent_infos),\\\n dict(env_infos)\n", "path": "src/garage/sampler/ray_sampler.py"}]}
| 3,672 | 557 |
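For context on the RaySampler record above, here is a standalone sketch of the scheduling arithmetic the accepted patch introduces: start only as many idle workers as the remaining sample budget needs, given rollouts already in flight, instead of launching every idle worker. Function and variable names are illustrative, not garage's API.

```python
import numpy as np


def workers_to_start(num_samples, completed_samples, samples_in_flight,
                     num_active_workers, max_path_length, num_workers):
    """How many idle workers to launch so that in-flight rollouts cover the
    remaining sample budget without oversubscribing the worker pool."""
    remaining = num_samples - completed_samples - samples_in_flight
    needed = np.ceil(remaining / max_path_length) - num_active_workers
    return int(np.clip(needed, 0, num_workers))


# 4000 samples requested, 1000 already collected, 2 rollouts of length 500
# in flight on 2 active workers, 8 workers total -> launch 2 more, not all 6.
print(workers_to_start(4000, 1000, 1000, 2, 500, 8))  # 2
```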
gh_patches_debug_9800
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-3898
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC]: A bug about usage of booster api in the document
### 📚 The doc issue
[zero_with_chunk](https://colossalai.org/docs/features/zero_with_chunk) and [booster_api](https://colossalai.org/docs/basics/booster_api) both use torch.optim.Adam as the optimizer in their booster-API code examples. However, passing this kind of optimizer to booster.boost triggers an AssertionError:

[tensor] fix some unittests
</issue>
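For reference, a minimal sketch of the usage the report points at, mirroring the corrected docs: with a Gemini-style plugin, the optimizer handed to `booster.boost` should be one of Colossal-AI's own optimizers such as `HybridAdam` rather than `torch.optim.Adam`. The model and criterion below are placeholders, the import paths follow the Colossal-AI releases of that period, and the snippet assumes a distributed launch context (e.g. started via `torchrun`).

```python
import colossalai
import torch.nn as nn
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

colossalai.launch_from_torch(config={})        # assumes torchrun-style env vars

model = nn.Linear(16, 16)                      # placeholder for a real model
criterion = nn.CrossEntropyLoss()              # placeholder loss
optimizer = HybridAdam(model.parameters())     # instead of torch.optim.Adam

plugin = GeminiPlugin()                        # constructor args omitted here
booster = Booster(plugin=plugin)
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)
```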
<code>
[start of colossalai/booster/booster.py]
1 import warnings
2 from contextlib import contextmanager
3 from typing import Callable, Iterator, List, Optional, Tuple, Union
4
5 import torch
6 import torch.nn as nn
7 from torch.optim import Optimizer
8 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
9 from torch.utils.data import DataLoader
10
11 from colossalai.checkpoint_io import GeneralCheckpointIO
12
13 from .accelerator import Accelerator
14 from .mixed_precision import MixedPrecision, mixed_precision_factory
15 from .plugin import Plugin
16
17 __all__ = ['Booster']
18
19
20 class Booster:
21 """
22 Booster is a high-level API for training neural networks. It provides a unified interface for
23 training with different precision, accelerator, and plugin.
24
25 Examples:
26 ```python
27 colossalai.launch(...)
28 plugin = GeminiPlugin(stage=3, ...)
29 booster = Booster(precision='fp16', plugin=plugin)
30
31 model = GPT2()
32 optimizer = Adam(model.parameters())
33 dataloader = Dataloader(Dataset)
34 lr_scheduler = LinearWarmupScheduler()
35 criterion = GPTLMLoss()
36
37 model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)
38
39 for epoch in range(max_epochs):
40 for input_ids, attention_mask in dataloader:
41 outputs = model(input_ids, attention_mask)
42 loss = criterion(outputs.logits, input_ids)
43 booster.backward(loss, optimizer)
44 optimizer.step()
45 lr_scheduler.step()
46 optimizer.zero_grad()
47 ```
48
49 Args:
50 device (str or torch.device): The device to run the training. Default: 'cuda'.
51 mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.
52 If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.
53 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.
54 plugin (Plugin): The plugin to run the training. Default: None.
55 """
56
57 def __init__(self,
58 device: str = 'cuda',
59 mixed_precision: Union[MixedPrecision, str] = None,
60 plugin: Optional[Plugin] = None) -> None:
61 if plugin is not None:
62 assert isinstance(
63 plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'
64 self.plugin = plugin
65
66 # set accelerator
67 if self.plugin and self.plugin.control_device():
68 self.accelerator = None
69 warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')
70 else:
71 self.accelerator = Accelerator(device)
72
73 # set precision
74 if self.plugin and self.plugin.control_precision():
75 warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')
76 self.mixed_precision = None
77 elif mixed_precision is None:
78 self.mixed_precision = None
79 else:
80 # validate and set precision
81 if isinstance(mixed_precision, str):
82 # the user will take the default arguments for amp training
83 self.mixed_precision = mixed_precision_factory(mixed_precision)
84 elif isinstance(mixed_precision, MixedPrecision):
85 # the user can customize the arguments by passing the precision object
86 self.mixed_precision = mixed_precision
87 else:
88 raise ValueError(
89 f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'
90 )
91
92 if self.plugin is not None and self.plugin.control_checkpoint_io():
93 self.checkpoint_io = self.plugin.get_checkpoint_io()
94 else:
95 self.checkpoint_io = GeneralCheckpointIO()
96
97 def boost(
98 self,
99 model: nn.Module,
100 optimizer: Optimizer,
101 criterion: Callable = None,
102 dataloader: DataLoader = None,
103 lr_scheduler: LRScheduler = None,
104 ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:
105 """
106 Boost the model, optimizer, criterion, lr_scheduler, and dataloader.
107
108 Args:
109 model (nn.Module): The model to be boosted.
110 optimizer (Optimizer): The optimizer to be boosted.
111 criterion (Callable): The criterion to be boosted.
112 dataloader (DataLoader): The dataloader to be boosted.
113 lr_scheduler (LRScheduler): The lr_scheduler to be boosted.
114 """
115 # TODO(FrankLeeeee): consider multi-model and multi-optimizer case
116 # TODO(FrankLeeeee): consider multi-dataloader case
117 # transform model for mixed precision
118 if self.plugin:
119 model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(
120 model, optimizer, criterion, dataloader, lr_scheduler)
121
122 if self.plugin and not self.plugin.control_device():
123 # transform model for accelerator
124 model = self.accelerator.configure(model)
125
126 if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):
127 # transform model for mixed precision
128 # when mixed_precision is specified and the plugin is not given or does not control the precision
129 model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)
130
131 return model, optimizer, criterion, dataloader, lr_scheduler
132
133 def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:
134 """Backward pass.
135
136 Args:
137 loss (torch.Tensor): The loss to be backpropagated.
138 optimizer (Optimizer): The optimizer to be updated.
139 """
140 # TODO: implement this method with plugin
141 optimizer.backward(loss)
142
143 def execute_pipeline(self,
144 data_iter: Iterator,
145 model: nn.Module,
146 criterion: Callable[[torch.Tensor], torch.Tensor],
147 optimizer: Optimizer,
148 return_loss: bool = True,
149 return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]:
150 # TODO: implement this method
151 # run pipeline forward backward pass
152 # return loss or outputs if needed
153 pass
154
155 def no_sync(self, model: nn.Module) -> contextmanager:
156 """Context manager to disable gradient synchronization across DP process groups.
157
158 Args:
159 model (nn.Module): The model to be disabled gradient synchronization.
160
161 Returns:
162 contextmanager: Context to disable gradient synchronization.
163 """
164 assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'
165 assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'
166 return self.plugin.no_sync(model)
167
168 def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):
169 """Load model from checkpoint.
170
171 Args:
172 model (nn.Module): A model boosted by Booster.
173 checkpoint (str): Path to the checkpoint. It must be a local path.
174 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
175 strict (bool, optional): whether to strictly enforce that the keys
176 in :attr:`state_dict` match the keys returned by this module's
177 :meth:`~torch.nn.Module.state_dict` function. Defaults to True.
178 """
179 self.checkpoint_io.load_model(model, checkpoint, strict)
180
181 def save_model(self,
182 model: nn.Module,
183 checkpoint: str,
184 prefix: str = None,
185 shard: bool = False,
186 size_per_shard: int = 1024):
187 """Save model to checkpoint.
188
189 Args:
190 model (nn.Module): A model boosted by Booster.
191 checkpoint (str): Path to the checkpoint. It must be a local path.
192 It is a file path if ``shard=False``. Otherwise, it is a directory path.
193 prefix (str, optional): A prefix added to parameter and buffer
194 names to compose the keys in state_dict. Defaults to None.
195 shard (bool, optional): Whether to save checkpoint a sharded way.
196 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
197 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
198 """
199 self.checkpoint_io.save_model(model, checkpoint=checkpoint, shard=shard, size_per_shard=size_per_shard)
200
201 def load_optimizer(self, optimizer: Optimizer, checkpoint: str):
202 """Load optimizer from checkpoint.
203
204 Args:
205 optimizer (Optimizer): An optimizer boosted by Booster.
206 checkpoint (str): Path to the checkpoint. It must be a local path.
207 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.
208 """
209 self.checkpoint_io.load_optimizer(optimizer, checkpoint)
210
211 def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):
212 """Save optimizer to checkpoint.
213 Warning: Saving sharded optimizer checkpoint is not supported yet.
214
215 Args:
216 optimizer (Optimizer): An optimizer boosted by Booster.
217 checkpoint (str): Path to the checkpoint. It must be a local path.
218 It is a file path if ``shard=False``. Otherwise, it is a directory path.
219 shard (bool, optional): Whether to save checkpoint a sharded way.
220 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.
221 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.
222 """
223 self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)
224
225 def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
226 """Save lr scheduler to checkpoint.
227
228 Args:
229 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
230 checkpoint (str): Path to the checkpoint. It must be a local file path.
231 """
232 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)
233
234 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
235 """Load lr scheduler from checkpoint.
236
237 Args:
238 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.
239 checkpoint (str): Path to the checkpoint. It must be a local file path.
240 """
241 self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)
242
[end of colossalai/booster/booster.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py
--- a/colossalai/booster/booster.py
+++ b/colossalai/booster/booster.py
@@ -25,11 +25,11 @@
Examples:
```python
colossalai.launch(...)
- plugin = GeminiPlugin(stage=3, ...)
+ plugin = GeminiPlugin(...)
booster = Booster(precision='fp16', plugin=plugin)
model = GPT2()
- optimizer = Adam(model.parameters())
+ optimizer = HybridAdam(model.parameters())
dataloader = Dataloader(Dataset)
lr_scheduler = LinearWarmupScheduler()
criterion = GPTLMLoss()
|
{"golden_diff": "diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py\n--- a/colossalai/booster/booster.py\n+++ b/colossalai/booster/booster.py\n@@ -25,11 +25,11 @@\n Examples:\n ```python\n colossalai.launch(...)\n- plugin = GeminiPlugin(stage=3, ...)\n+ plugin = GeminiPlugin(...)\n booster = Booster(precision='fp16', plugin=plugin)\n \n model = GPT2()\n- optimizer = Adam(model.parameters())\n+ optimizer = HybridAdam(model.parameters())\n dataloader = Dataloader(Dataset)\n lr_scheduler = LinearWarmupScheduler()\n criterion = GPTLMLoss()\n", "issue": "[DOC]: A bug about usage of booster api in the document\n### \ud83d\udcda The doc issue\n\n[zero_with_chunk](https://colossalai.org/docs/features/zero_with_chunk) and [booster_api](https://colossalai.org/docs/basics/booster_api) both used torch.optim.Adam as the type of optimizer in the code examples implemented with booster api. However, this will trigger an AssertionError when passing this kind of optimizer to booster.boost:\r\n\r\n\r\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n ```python\n colossalai.launch(...)\n plugin = GeminiPlugin(stage=3, ...)\n booster = Booster(precision='fp16', plugin=plugin)\n\n model = GPT2()\n optimizer = Adam(model.parameters())\n dataloader = Dataloader(Dataset)\n lr_scheduler = LinearWarmupScheduler()\n criterion = GPTLMLoss()\n\n model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n\n for epoch in range(max_epochs):\n for input_ids, attention_mask in dataloader:\n outputs = model(input_ids, attention_mask)\n loss = criterion(outputs.logits, input_ids)\n booster.backward(loss, optimizer)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n ```\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. 
Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> 
Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint=checkpoint, shard=shard, size_per_shard=size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}]}
| 3,620 | 168 |
gh_patches_debug_10745
|
rasdani/github-patches
|
git_diff
|
secdev__scapy-1682
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VXLAN Binding to IPv6 or IPv4 doesn't work
#### Brief description
When building VXLAN()/IP() or VXLAN()/IPv6(), the binding does not work properly: in both cases the payload after VXLAN is dissected as an Ethernet layer.
#### Environment
- Scapy version: `2.4.0`
- Python version: `3.5.2`
- Operating System: `Ubuntu 16.04`
#### How to reproduce
```python
>>> issue = Ether()/IP()/UDP()/VXLAN(flags=4, NextProtocol=1)/IP()
>>> issue.show2()
###[ Ethernet ]###
dst= ff:ff:ff:ff:ff:ff
src= 00:00:00:00:00:00
type= 0x800
###[ IP ]###
version= 4
ihl= 5
tos= 0x0
len= 56
id= 1
flags=
frag= 0
ttl= 64
proto= udp
chksum= 0x7cb2
src= 127.0.0.1
dst= 127.0.0.1
\options\
###[ UDP ]###
sport= 4789
dport= 4789
len= 36
chksum= 0xd838
###[ VXLAN ]###
flags= NextProtocol
reserved0= 0
NextProtocol= IPv4
vni= 0x0
reserved2= 0x0
###[ Ethernet ]###
dst= 45:00:00:14:00:01
src= 00:00:40:00:7c:e7
type= 0x7f00
###[ Raw ]###
load= '\x00\x01\x7f\x00\x00\x01'
```
#### Actual result
```python
###[ Ethernet ]###
dst= ff:ff:ff:ff:ff:ff
src= 00:00:00:00:00:00
type= 0x800
###[ IP ]###
version= 4
ihl= 5
tos= 0x0
len= 56
id= 1
flags=
frag= 0
ttl= 64
proto= udp
chksum= 0x7cb2
src= 127.0.0.1
dst= 127.0.0.1
\options\
###[ UDP ]###
sport= 4789
dport= 4789
len= 36
chksum= 0xd838
###[ VXLAN ]###
flags= NextProtocol
reserved0= 0
NextProtocol= IPv4
vni= 0x0
reserved2= 0x0
###[ Ethernet ]###
dst= 45:00:00:14:00:01
src= 00:00:40:00:7c:e7
type= 0x7f00
###[ Raw ]###
load= '\x00\x01\x7f\x00\x00\x01'
```
#### Expected result
```python
###[ Ethernet ]###
dst= ff:ff:ff:ff:ff:ff
src= 00:00:00:00:00:00
type= 0x800
###[ IP ]###
version= 4
ihl= 5
tos= 0x0
len= 56
id= 1
flags=
frag= 0
ttl= 64
proto= udp
chksum= 0x7cb2
src= 127.0.0.1
dst= 127.0.0.1
\options\
###[ UDP ]###
sport= 4789
dport= 4789
len= 36
chksum= 0xd838
###[ VXLAN ]###
flags= NextProtocol
reserved0= 0
NextProtocol= IPv4
vni= 0x0
reserved2= 0x0
###[ IP ]###
version= 4
ihl= 5
tos= 0x0
len= 20
id= 1
flags=
frag= 0
ttl= 64
proto= hopopt
chksum= 0x7ce7
src= 127.0.0.1
dst= 127.0.0.1
\options\
```
</issue>
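A short sketch of how the binding table needs to look for this packet to dissect correctly: drop the unconditional VXLAN→Ether guess (which shadows everything else) and register Ether only for the NextProtocol values that actually mean Ethernet, so NextProtocol 1/2 resolve to IP/IPv6. The runtime `split_layers`/`bind_layers` rebinding below is an interactive workaround sketch; the actual fix edits scapy/layers/vxlan.py directly.

```python
from scapy.all import Ether, IP, IPv6, UDP, bind_layers, split_layers
from scapy.layers.vxlan import VXLAN

# Remove the unconditional VXLAN -> Ether binding, then re-register Ether
# only where it belongs (NotDefined and Ethernet NextProtocol values).
split_layers(VXLAN, Ether)
bind_layers(VXLAN, Ether, NextProtocol=0)
bind_layers(VXLAN, IP, NextProtocol=1)
bind_layers(VXLAN, IPv6, NextProtocol=2)
bind_layers(VXLAN, Ether, NextProtocol=3)

pkt = Ether() / IP() / UDP() / VXLAN(flags=4, NextProtocol=1) / IP()
pkt.show2()  # the inner header should now dissect as IP, not Ethernet
```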
<code>
[start of scapy/layers/vxlan.py]
1 # This file is part of Scapy
2 # See http://www.secdev.org/projects/scapy for more information
3 # Copyright (C) Philippe Biondi <[email protected]>
4 # This program is published under a GPLv2 license
5
6 """
7 Virtual eXtensible Local Area Network (VXLAN)
8 - RFC 7348 -
9
10 A Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 Networks
11 http://tools.ietf.org/html/rfc7348
12 https://www.ietf.org/id/draft-ietf-nvo3-vxlan-gpe-02.txt
13
14 VXLAN Group Policy Option:
15 http://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
16 """
17
18 from scapy.packet import Packet, bind_layers
19 from scapy.layers.l2 import Ether
20 from scapy.layers.inet import IP, UDP
21 from scapy.layers.inet6 import IPv6
22 from scapy.fields import FlagsField, XByteField, ThreeBytesField, \
23 ConditionalField, ShortField, ByteEnumField, X3BytesField
24
25 _GP_FLAGS = ["R", "R", "R", "A", "R", "R", "D", "R"]
26
27
28 class VXLAN(Packet):
29 name = "VXLAN"
30
31 fields_desc = [
32 FlagsField("flags", 0x8, 8,
33 ['OAM', 'R', 'NextProtocol', 'Instance',
34 'V1', 'V2', 'R', 'G']),
35 ConditionalField(
36 ShortField("reserved0", 0),
37 lambda pkt: pkt.flags.NextProtocol,
38 ),
39 ConditionalField(
40 ByteEnumField('NextProtocol', 0,
41 {0: 'NotDefined',
42 1: 'IPv4',
43 2: 'IPv6',
44 3: 'Ethernet',
45 4: 'NSH'}),
46 lambda pkt: pkt.flags.NextProtocol,
47 ),
48 ConditionalField(
49 ThreeBytesField("reserved1", 0),
50 lambda pkt: (not pkt.flags.G) and (not pkt.flags.NextProtocol),
51 ),
52 ConditionalField(
53 FlagsField("gpflags", 0, 8, _GP_FLAGS),
54 lambda pkt: pkt.flags.G,
55 ),
56 ConditionalField(
57 ShortField("gpid", 0),
58 lambda pkt: pkt.flags.G,
59 ),
60 X3BytesField("vni", 0),
61 XByteField("reserved2", 0),
62 ]
63
64 # Use default linux implementation port
65 overload_fields = {
66 UDP: {'dport': 8472},
67 }
68
69 def mysummary(self):
70 if self.flags.G:
71 return self.sprintf("VXLAN (vni=%VXLAN.vni% gpid=%VXLAN.gpid%)")
72 else:
73 return self.sprintf("VXLAN (vni=%VXLAN.vni%)")
74
75
76 bind_layers(UDP, VXLAN, dport=4789) # RFC standard vxlan port
77 bind_layers(UDP, VXLAN, dport=4790) # RFC standard vxlan-gpe port
78 bind_layers(UDP, VXLAN, dport=6633) # New IANA assigned port for use with NSH
79 bind_layers(UDP, VXLAN, dport=8472) # Linux implementation port
80 bind_layers(UDP, VXLAN, dport=48879) # Cisco ACI
81 bind_layers(UDP, VXLAN, sport=4789)
82 bind_layers(UDP, VXLAN, sport=4790)
83 bind_layers(UDP, VXLAN, sport=6633)
84 bind_layers(UDP, VXLAN, sport=8472)
85 # By default, set both ports to the RFC standard
86 bind_layers(UDP, VXLAN, sport=4789, dport=4789)
87
88 bind_layers(VXLAN, Ether)
89 bind_layers(VXLAN, IP, NextProtocol=1)
90 bind_layers(VXLAN, IPv6, NextProtocol=2)
91 bind_layers(VXLAN, Ether, flags=4, NextProtocol=0)
92 bind_layers(VXLAN, IP, flags=4, NextProtocol=1)
93 bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)
94 bind_layers(VXLAN, Ether, flags=4, NextProtocol=3)
95
[end of scapy/layers/vxlan.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scapy/layers/vxlan.py b/scapy/layers/vxlan.py
--- a/scapy/layers/vxlan.py
+++ b/scapy/layers/vxlan.py
@@ -85,9 +85,10 @@
# By default, set both ports to the RFC standard
bind_layers(UDP, VXLAN, sport=4789, dport=4789)
-bind_layers(VXLAN, Ether)
+bind_layers(VXLAN, Ether, NextProtocol=0)
bind_layers(VXLAN, IP, NextProtocol=1)
bind_layers(VXLAN, IPv6, NextProtocol=2)
+bind_layers(VXLAN, Ether, NextProtocol=3)
bind_layers(VXLAN, Ether, flags=4, NextProtocol=0)
bind_layers(VXLAN, IP, flags=4, NextProtocol=1)
bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)
|
{"golden_diff": "diff --git a/scapy/layers/vxlan.py b/scapy/layers/vxlan.py\n--- a/scapy/layers/vxlan.py\n+++ b/scapy/layers/vxlan.py\n@@ -85,9 +85,10 @@\n # By default, set both ports to the RFC standard\n bind_layers(UDP, VXLAN, sport=4789, dport=4789)\n \n-bind_layers(VXLAN, Ether)\n+bind_layers(VXLAN, Ether, NextProtocol=0)\n bind_layers(VXLAN, IP, NextProtocol=1)\n bind_layers(VXLAN, IPv6, NextProtocol=2)\n+bind_layers(VXLAN, Ether, NextProtocol=3)\n bind_layers(VXLAN, Ether, flags=4, NextProtocol=0)\n bind_layers(VXLAN, IP, flags=4, NextProtocol=1)\n bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)\n", "issue": "VXLAN Binding to IPv6 or IPv4 doesn't work\n#### Brief description\r\nWhen doing a VXLAN()/IP or VXLAN()/IPv6 the binding does not work properly. Both IP layers are interpreted as Ethernet Layers.\r\n\r\n#### Environment\r\n\r\n- Scapy version: `2.4.0`\r\n- Python version: `3.5.2`\r\n- Operating System: `Ubuntu 16.04`\r\n\r\n#### How to reproduce\r\n```python\r\n>>> issue = Ether()/IP()/UDP()/VXLAN(flags=4, NextProtocol=1)/IP()\r\n>>> issue.show2()\r\n###[ Ethernet ]### \r\n dst= ff:ff:ff:ff:ff:ff\r\n src= 00:00:00:00:00:00\r\n type= 0x800\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 56\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= udp\r\n chksum= 0x7cb2\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n###[ UDP ]### \r\n sport= 4789\r\n dport= 4789\r\n len= 36\r\n chksum= 0xd838\r\n###[ VXLAN ]### \r\n flags= NextProtocol\r\n reserved0= 0\r\n NextProtocol= IPv4\r\n vni= 0x0\r\n reserved2= 0x0\r\n###[ Ethernet ]### \r\n dst= 45:00:00:14:00:01\r\n src= 00:00:40:00:7c:e7\r\n type= 0x7f00\r\n###[ Raw ]### \r\n load= '\\x00\\x01\\x7f\\x00\\x00\\x01'\r\n```\r\n\r\n#### Actual result\r\n```python\r\n###[ Ethernet ]### \r\n dst= ff:ff:ff:ff:ff:ff\r\n src= 00:00:00:00:00:00\r\n type= 0x800\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 56\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= udp\r\n chksum= 0x7cb2\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n###[ UDP ]### \r\n sport= 4789\r\n dport= 4789\r\n len= 36\r\n chksum= 0xd838\r\n###[ VXLAN ]### \r\n flags= NextProtocol\r\n reserved0= 0\r\n NextProtocol= IPv4\r\n vni= 0x0\r\n reserved2= 0x0\r\n###[ Ethernet ]### \r\n dst= 45:00:00:14:00:01\r\n src= 00:00:40:00:7c:e7\r\n type= 0x7f00\r\n###[ Raw ]### \r\n load= '\\x00\\x01\\x7f\\x00\\x00\\x01'\r\n```\r\n\r\n#### Expected result\r\n```python\r\n###[ Ethernet ]### \r\n dst= ff:ff:ff:ff:ff:ff\r\n src= 00:00:00:00:00:00\r\n type= 0x800\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 56\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= udp\r\n chksum= 0x7cb2\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n###[ UDP ]### \r\n sport= 4789\r\n dport= 4789\r\n len= 36\r\n chksum= 0xd838\r\n###[ VXLAN ]### \r\n flags= NextProtocol\r\n reserved0= 0\r\n NextProtocol= IPv4\r\n vni= 0x0\r\n reserved2= 0x0\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 20\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= hopopt\r\n chksum= 0x7ce7\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n```\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVirtual eXtensible Local Area Network (VXLAN)\n- RFC 7348 -\n\nA Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 
Networks\nhttp://tools.ietf.org/html/rfc7348\nhttps://www.ietf.org/id/draft-ietf-nvo3-vxlan-gpe-02.txt\n\nVXLAN Group Policy Option:\nhttp://tools.ietf.org/html/draft-smith-vxlan-group-policy-00\n\"\"\"\n\nfrom scapy.packet import Packet, bind_layers\nfrom scapy.layers.l2 import Ether\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.inet6 import IPv6\nfrom scapy.fields import FlagsField, XByteField, ThreeBytesField, \\\n ConditionalField, ShortField, ByteEnumField, X3BytesField\n\n_GP_FLAGS = [\"R\", \"R\", \"R\", \"A\", \"R\", \"R\", \"D\", \"R\"]\n\n\nclass VXLAN(Packet):\n name = \"VXLAN\"\n\n fields_desc = [\n FlagsField(\"flags\", 0x8, 8,\n ['OAM', 'R', 'NextProtocol', 'Instance',\n 'V1', 'V2', 'R', 'G']),\n ConditionalField(\n ShortField(\"reserved0\", 0),\n lambda pkt: pkt.flags.NextProtocol,\n ),\n ConditionalField(\n ByteEnumField('NextProtocol', 0,\n {0: 'NotDefined',\n 1: 'IPv4',\n 2: 'IPv6',\n 3: 'Ethernet',\n 4: 'NSH'}),\n lambda pkt: pkt.flags.NextProtocol,\n ),\n ConditionalField(\n ThreeBytesField(\"reserved1\", 0),\n lambda pkt: (not pkt.flags.G) and (not pkt.flags.NextProtocol),\n ),\n ConditionalField(\n FlagsField(\"gpflags\", 0, 8, _GP_FLAGS),\n lambda pkt: pkt.flags.G,\n ),\n ConditionalField(\n ShortField(\"gpid\", 0),\n lambda pkt: pkt.flags.G,\n ),\n X3BytesField(\"vni\", 0),\n XByteField(\"reserved2\", 0),\n ]\n\n # Use default linux implementation port\n overload_fields = {\n UDP: {'dport': 8472},\n }\n\n def mysummary(self):\n if self.flags.G:\n return self.sprintf(\"VXLAN (vni=%VXLAN.vni% gpid=%VXLAN.gpid%)\")\n else:\n return self.sprintf(\"VXLAN (vni=%VXLAN.vni%)\")\n\n\nbind_layers(UDP, VXLAN, dport=4789) # RFC standard vxlan port\nbind_layers(UDP, VXLAN, dport=4790) # RFC standard vxlan-gpe port\nbind_layers(UDP, VXLAN, dport=6633) # New IANA assigned port for use with NSH\nbind_layers(UDP, VXLAN, dport=8472) # Linux implementation port\nbind_layers(UDP, VXLAN, dport=48879) # Cisco ACI\nbind_layers(UDP, VXLAN, sport=4789)\nbind_layers(UDP, VXLAN, sport=4790)\nbind_layers(UDP, VXLAN, sport=6633)\nbind_layers(UDP, VXLAN, sport=8472)\n# By default, set both ports to the RFC standard\nbind_layers(UDP, VXLAN, sport=4789, dport=4789)\n\nbind_layers(VXLAN, Ether)\nbind_layers(VXLAN, IP, NextProtocol=1)\nbind_layers(VXLAN, IPv6, NextProtocol=2)\nbind_layers(VXLAN, Ether, flags=4, NextProtocol=0)\nbind_layers(VXLAN, IP, flags=4, NextProtocol=1)\nbind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)\nbind_layers(VXLAN, Ether, flags=4, NextProtocol=3)\n", "path": "scapy/layers/vxlan.py"}]}
| 2,846 | 208 |
gh_patches_debug_25728
|
rasdani/github-patches
|
git_diff
|
ydataai__ydata-profiling-1000
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
High correlation warning printed multiple times
I get the same warning "High correlation" with the same other column four times in the report.
Looks like a bug where the warning is accidentally generated multiple times or not de-duplicated properly.
Is it easy to spot the issue or reproduce? Or should I try to extract a standalone test case?
This is with pandas 1.3.0 and pandas-profiling 3.0.0.
<img width="572" alt="Screenshot 2021-09-05 at 18 54 44" src="https://user-images.githubusercontent.com/852409/132135015-45c0a273-763a-430e-b12f-d340e79b3ea7.png">
</issue>
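One way to read the symptom: `check_correlation_alerts` walks every correlation measure (Pearson, Spearman, Kendall, phi_k, ...) and emits a HIGH_CORRELATION alert per measure per column, so a column flagged by four measures shows up four times. A standalone sketch of the consolidation that removes the duplicates is below; names are illustrative, not pandas-profiling's internal API.

```python
def consolidate_high_correlation(correlated_mappings):
    """Merge {measure: {column: [correlated fields]}} into a single mapping so
    each column yields at most one HIGH_CORRELATION alert."""
    consolidated = {}
    for mapping in correlated_mappings.values():
        for col, fields in mapping.items():
            consolidated.setdefault(col, set()).update(fields)
    return {col: sorted(fields) for col, fields in consolidated.items()}


# The same column flagged by four measures collapses to one entry.
mappings = {
    "pearson": {"score": ["rating"]},
    "spearman": {"score": ["rating"]},
    "kendall": {"score": ["rating"]},
    "phi_k": {"score": ["rating", "stars"]},
}
print(consolidate_high_correlation(mappings))
# {'score': ['rating', 'stars']}
```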
<code>
[start of src/pandas_profiling/model/alerts.py]
1 """Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant
2 values, high correlations)."""
3 from enum import Enum, auto, unique
4 from typing import Any, Dict, List, Optional, Set
5
6 import numpy as np
7 import pandas as pd
8
9 from pandas_profiling.config import Settings
10 from pandas_profiling.model.correlations import perform_check_correlation
11
12
13 @unique
14 class AlertType(Enum):
15 """Alert types"""
16
17 CONSTANT = auto()
18 """This variable has a constant value."""
19
20 ZEROS = auto()
21 """This variable contains zeros."""
22
23 HIGH_CORRELATION = auto()
24 """This variable is highly correlated."""
25
26 HIGH_CARDINALITY = auto()
27 """This variable has a high cardinality."""
28
29 UNSUPPORTED = auto()
30 """This variable is unsupported."""
31
32 DUPLICATES = auto()
33 """This variable contains duplicates."""
34
35 SKEWED = auto()
36 """This variable is highly skewed."""
37
38 MISSING = auto()
39 """This variable contains missing values."""
40
41 INFINITE = auto()
42 """This variable contains infinite values."""
43
44 TYPE_DATE = auto()
45 """This variable is likely a datetime, but treated as categorical."""
46
47 UNIQUE = auto()
48 """This variable has unique values."""
49
50 CONSTANT_LENGTH = auto()
51 """This variable has a constant length"""
52
53 REJECTED = auto()
54 """Variables are rejected if we do not want to consider them for further analysis."""
55
56 UNIFORM = auto()
57 """The variable is uniformly distributed"""
58
59 EMPTY = auto()
60 """The DataFrame is empty"""
61
62
63 class Alert:
64 """An alert object (type, values, column)."""
65
66 _anchor_id: Optional[str] = None
67
68 def __init__(
69 self,
70 alert_type: AlertType,
71 values: Optional[Dict] = None,
72 column_name: Optional[str] = None,
73 fields: Optional[Set] = None,
74 ):
75 if values is None:
76 values = {}
77 if fields is None:
78 fields = set()
79
80 self.fields = fields
81 self.alert_type = alert_type
82 self.values = values
83 self.column_name = column_name
84
85 @property
86 def anchor_id(self) -> Optional[str]:
87 if self._anchor_id is None:
88 self._anchor_id = str(hash(self.column_name))
89 return self._anchor_id
90
91 def fmt(self) -> str:
92 # TODO: render in template
93 name = self.alert_type.name.replace("_", " ")
94 if name == "HIGH CORRELATION":
95 num = len(self.values["fields"])
96 title = ", ".join(self.values["fields"])
97 name = f'<abbr title="This variable has a high correlation with {num} fields: {title}">HIGH CORRELATION</abbr>'
98 return name
99
100 def __repr__(self):
101 alert_type = self.alert_type.name
102 column = self.column_name
103 return f"[{alert_type}] alert on column {column}"
104
105
106 def check_table_alerts(table: dict) -> List[Alert]:
107 """Checks the overall dataset for alerts.
108
109 Args:
110 table: Overall dataset statistics.
111
112 Returns:
113 A list of alerts.
114 """
115 alerts = []
116 if alert_value(table.get("n_duplicates", np.nan)):
117 alerts.append(
118 Alert(
119 alert_type=AlertType.DUPLICATES,
120 values=table,
121 fields={"n_duplicates"},
122 )
123 )
124 if table["n"] == 0:
125 alerts.append(
126 Alert(
127 alert_type=AlertType.EMPTY,
128 values=table,
129 fields={"n"},
130 )
131 )
132 return alerts
133
134
135 def numeric_alerts(config: Settings, summary: dict) -> List[Alert]:
136 alerts = []
137
138 # Skewness
139 if skewness_alert(summary["skewness"], config.vars.num.skewness_threshold):
140 alerts.append(
141 Alert(
142 alert_type=AlertType.SKEWED,
143 fields={"skewness"},
144 )
145 )
146
147 # Infinite values
148 if alert_value(summary["p_infinite"]):
149 alerts.append(
150 Alert(
151 alert_type=AlertType.INFINITE,
152 fields={"p_infinite", "n_infinite"},
153 )
154 )
155
156 # Zeros
157 if alert_value(summary["p_zeros"]):
158 alerts.append(
159 Alert(
160 alert_type=AlertType.ZEROS,
161 fields={"n_zeros", "p_zeros"},
162 )
163 )
164
165 if (
166 "chi_squared" in summary
167 and summary["chi_squared"]["pvalue"] > config.vars.num.chi_squared_threshold
168 ):
169 alerts.append(Alert(alert_type=AlertType.UNIFORM))
170
171 return alerts
172
173
174 def categorical_alerts(config: Settings, summary: dict) -> List[Alert]:
175 alerts = []
176
177 # High cardinality
178 if summary.get("n_distinct", np.nan) > config.vars.cat.cardinality_threshold:
179 alerts.append(
180 Alert(
181 alert_type=AlertType.HIGH_CARDINALITY,
182 fields={"n_distinct"},
183 )
184 )
185
186 if (
187 "chi_squared" in summary
188 and summary["chi_squared"]["pvalue"] > config.vars.cat.chi_squared_threshold
189 ):
190 alerts.append(Alert(alert_type=AlertType.UNIFORM))
191
192 if summary.get("date_warning"):
193 alerts.append(Alert(alert_type=AlertType.TYPE_DATE))
194
195 # Constant length
196 if "composition" in summary and summary["min_length"] == summary["max_length"]:
197 alerts.append(
198 Alert(
199 alert_type=AlertType.CONSTANT_LENGTH,
200 fields={"composition_min_length", "composition_max_length"},
201 )
202 )
203
204 return alerts
205
206
207 def generic_alerts(summary: dict) -> List[Alert]:
208 alerts = []
209
210 # Missing
211 if alert_value(summary["p_missing"]):
212 alerts.append(
213 Alert(
214 alert_type=AlertType.MISSING,
215 fields={"p_missing", "n_missing"},
216 )
217 )
218
219 return alerts
220
221
222 def supported_alerts(summary: dict) -> List[Alert]:
223 alerts = []
224
225 if summary.get("n_distinct", np.nan) == summary["n"]:
226 alerts.append(
227 Alert(
228 alert_type=AlertType.UNIQUE,
229 fields={"n_distinct", "p_distinct", "n_unique", "p_unique"},
230 )
231 )
232 if summary.get("n_distinct", np.nan) == 1:
233 summary["mode"] = summary["value_counts_without_nan"].index[0]
234 alerts.append(
235 Alert(
236 alert_type=AlertType.CONSTANT,
237 fields={"n_distinct"},
238 )
239 )
240 alerts.append(
241 Alert(
242 alert_type=AlertType.REJECTED,
243 fields=set(),
244 )
245 )
246 return alerts
247
248
249 def unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]:
250 alerts = [
251 Alert(
252 alert_type=AlertType.UNSUPPORTED,
253 fields=set(),
254 ),
255 Alert(
256 alert_type=AlertType.REJECTED,
257 fields=set(),
258 ),
259 ]
260 return alerts
261
262
263 def check_variable_alerts(config: Settings, col: str, description: dict) -> List[Alert]:
264 """Checks individual variables for alerts.
265
266 Args:
267 col: The column name that is checked.
268 description: The series description.
269
270 Returns:
271 A list of alerts.
272 """
273 alerts = []
274
275 alerts += generic_alerts(description)
276
277 if description["type"] == "Unsupported":
278 alerts += unsupported_alerts(description)
279 else:
280 alerts += supported_alerts(description)
281
282 if description["type"] == "Categorical":
283 alerts += categorical_alerts(config, description)
284 if description["type"] == "Numeric":
285 alerts += numeric_alerts(config, description)
286
287 for idx in range(len(alerts)):
288 alerts[idx].column_name = col
289 alerts[idx].values = description
290 return alerts
291
292
293 def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:
294 alerts = []
295
296 for corr, matrix in correlations.items():
297 if config.correlations[corr].warn_high_correlations:
298 threshold = config.correlations[corr].threshold
299 correlated_mapping = perform_check_correlation(matrix, threshold)
300 if len(correlated_mapping) > 0:
301 for k, v in correlated_mapping.items():
302 alerts.append(
303 Alert(
304 column_name=k,
305 alert_type=AlertType.HIGH_CORRELATION,
306 values={"corr": corr, "fields": v},
307 )
308 )
309 return alerts
310
311
312 def get_alerts(
313 config: Settings, table_stats: dict, series_description: dict, correlations: dict
314 ) -> List[Alert]:
315 alerts = check_table_alerts(table_stats)
316 for col, description in series_description.items():
317 alerts += check_variable_alerts(config, col, description)
318 alerts += check_correlation_alerts(config, correlations)
319 alerts.sort(key=lambda alert: str(alert.alert_type))
320 return alerts
321
322
323 def alert_value(value: float) -> bool:
324 return not np.isnan(value) and value > 0.01
325
326
327 def skewness_alert(v: float, threshold: int) -> bool:
328 return not np.isnan(v) and (v < (-1 * threshold) or v > threshold)
329
330
331 def type_date_alert(series: pd.Series) -> bool:
332 from dateutil.parser import ParserError, parse
333
334 try:
335 series.apply(parse)
336 except ParserError:
337 return False
338 else:
339 return True
340
[end of src/pandas_profiling/model/alerts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pandas_profiling/model/alerts.py b/src/pandas_profiling/model/alerts.py
--- a/src/pandas_profiling/model/alerts.py
+++ b/src/pandas_profiling/model/alerts.py
@@ -293,19 +293,24 @@
def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:
alerts = []
+ correlations_consolidated = {}
for corr, matrix in correlations.items():
if config.correlations[corr].warn_high_correlations:
threshold = config.correlations[corr].threshold
correlated_mapping = perform_check_correlation(matrix, threshold)
- if len(correlated_mapping) > 0:
- for k, v in correlated_mapping.items():
- alerts.append(
- Alert(
- column_name=k,
- alert_type=AlertType.HIGH_CORRELATION,
- values={"corr": corr, "fields": v},
- )
- )
+ for col, fields in correlated_mapping.items():
+ set(fields).update(set(correlated_mapping.get(col, [])))
+ correlations_consolidated[col] = fields
+
+ if len(correlations_consolidated) > 0:
+ for col, fields in correlations_consolidated.items():
+ alerts.append(
+ Alert(
+ column_name=col,
+ alert_type=AlertType.HIGH_CORRELATION,
+ values={"corr": 'Overall', "fields": fields},
+ )
+ )
return alerts
|
{"golden_diff": "diff --git a/src/pandas_profiling/model/alerts.py b/src/pandas_profiling/model/alerts.py\n--- a/src/pandas_profiling/model/alerts.py\n+++ b/src/pandas_profiling/model/alerts.py\n@@ -293,19 +293,24 @@\n def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:\n alerts = []\n \n+ correlations_consolidated = {}\n for corr, matrix in correlations.items():\n if config.correlations[corr].warn_high_correlations:\n threshold = config.correlations[corr].threshold\n correlated_mapping = perform_check_correlation(matrix, threshold)\n- if len(correlated_mapping) > 0:\n- for k, v in correlated_mapping.items():\n- alerts.append(\n- Alert(\n- column_name=k,\n- alert_type=AlertType.HIGH_CORRELATION,\n- values={\"corr\": corr, \"fields\": v},\n- )\n- )\n+ for col, fields in correlated_mapping.items():\n+ set(fields).update(set(correlated_mapping.get(col, [])))\n+ correlations_consolidated[col] = fields\n+\n+ if len(correlations_consolidated) > 0:\n+ for col, fields in correlations_consolidated.items():\n+ alerts.append(\n+ Alert(\n+ column_name=col,\n+ alert_type=AlertType.HIGH_CORRELATION,\n+ values={\"corr\": 'Overall', \"fields\": fields},\n+ )\n+ )\n return alerts\n", "issue": "High correlation warning printed multiple times\nI get the same warning \"High correlation\" with the same other column four times in the report.\r\n\r\nLooks like a bug where the warning is accidentally generated multiple times or not de-duplicated properly.\r\n\r\nIs it easy to spot the issue or reproduce? Or should I try to extract a standalone test case?\r\n\r\nThis is with pandas 1.3.0 and pandas-profiling 3.0.0.\r\n\r\n<img width=\"572\" alt=\"Screenshot 2021-09-05 at 18 54 44\" src=\"https://user-images.githubusercontent.com/852409/132135015-45c0a273-763a-430e-b12f-d340e79b3ea7.png\">\r\n\n", "before_files": [{"content": "\"\"\"Logic for alerting the user on possibly problematic patterns in the data (e.g. 
high number of zeros , constant\nvalues, high correlations).\"\"\"\nfrom enum import Enum, auto, unique\nfrom typing import Any, Dict, List, Optional, Set\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import perform_check_correlation\n\n\n@unique\nclass AlertType(Enum):\n \"\"\"Alert types\"\"\"\n\n CONSTANT = auto()\n \"\"\"This variable has a constant value.\"\"\"\n\n ZEROS = auto()\n \"\"\"This variable contains zeros.\"\"\"\n\n HIGH_CORRELATION = auto()\n \"\"\"This variable is highly correlated.\"\"\"\n\n HIGH_CARDINALITY = auto()\n \"\"\"This variable has a high cardinality.\"\"\"\n\n UNSUPPORTED = auto()\n \"\"\"This variable is unsupported.\"\"\"\n\n DUPLICATES = auto()\n \"\"\"This variable contains duplicates.\"\"\"\n\n SKEWED = auto()\n \"\"\"This variable is highly skewed.\"\"\"\n\n MISSING = auto()\n \"\"\"This variable contains missing values.\"\"\"\n\n INFINITE = auto()\n \"\"\"This variable contains infinite values.\"\"\"\n\n TYPE_DATE = auto()\n \"\"\"This variable is likely a datetime, but treated as categorical.\"\"\"\n\n UNIQUE = auto()\n \"\"\"This variable has unique values.\"\"\"\n\n CONSTANT_LENGTH = auto()\n \"\"\"This variable has a constant length\"\"\"\n\n REJECTED = auto()\n \"\"\"Variables are rejected if we do not want to consider them for further analysis.\"\"\"\n\n UNIFORM = auto()\n \"\"\"The variable is uniformly distributed\"\"\"\n\n EMPTY = auto()\n \"\"\"The DataFrame is empty\"\"\"\n\n\nclass Alert:\n \"\"\"An alert object (type, values, column).\"\"\"\n\n _anchor_id: Optional[str] = None\n\n def __init__(\n self,\n alert_type: AlertType,\n values: Optional[Dict] = None,\n column_name: Optional[str] = None,\n fields: Optional[Set] = None,\n ):\n if values is None:\n values = {}\n if fields is None:\n fields = set()\n\n self.fields = fields\n self.alert_type = alert_type\n self.values = values\n self.column_name = column_name\n\n @property\n def anchor_id(self) -> Optional[str]:\n if self._anchor_id is None:\n self._anchor_id = str(hash(self.column_name))\n return self._anchor_id\n\n def fmt(self) -> str:\n # TODO: render in template\n name = self.alert_type.name.replace(\"_\", \" \")\n if name == \"HIGH CORRELATION\":\n num = len(self.values[\"fields\"])\n title = \", \".join(self.values[\"fields\"])\n name = f'<abbr title=\"This variable has a high correlation with {num} fields: {title}\">HIGH CORRELATION</abbr>'\n return name\n\n def __repr__(self):\n alert_type = self.alert_type.name\n column = self.column_name\n return f\"[{alert_type}] alert on column {column}\"\n\n\ndef check_table_alerts(table: dict) -> List[Alert]:\n \"\"\"Checks the overall dataset for alerts.\n\n Args:\n table: Overall dataset statistics.\n\n Returns:\n A list of alerts.\n \"\"\"\n alerts = []\n if alert_value(table.get(\"n_duplicates\", np.nan)):\n alerts.append(\n Alert(\n alert_type=AlertType.DUPLICATES,\n values=table,\n fields={\"n_duplicates\"},\n )\n )\n if table[\"n\"] == 0:\n alerts.append(\n Alert(\n alert_type=AlertType.EMPTY,\n values=table,\n fields={\"n\"},\n )\n )\n return alerts\n\n\ndef numeric_alerts(config: Settings, summary: dict) -> List[Alert]:\n alerts = []\n\n # Skewness\n if skewness_alert(summary[\"skewness\"], config.vars.num.skewness_threshold):\n alerts.append(\n Alert(\n alert_type=AlertType.SKEWED,\n fields={\"skewness\"},\n )\n )\n\n # Infinite values\n if alert_value(summary[\"p_infinite\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.INFINITE,\n 
fields={\"p_infinite\", \"n_infinite\"},\n )\n )\n\n # Zeros\n if alert_value(summary[\"p_zeros\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.ZEROS,\n fields={\"n_zeros\", \"p_zeros\"},\n )\n )\n\n if (\n \"chi_squared\" in summary\n and summary[\"chi_squared\"][\"pvalue\"] > config.vars.num.chi_squared_threshold\n ):\n alerts.append(Alert(alert_type=AlertType.UNIFORM))\n\n return alerts\n\n\ndef categorical_alerts(config: Settings, summary: dict) -> List[Alert]:\n alerts = []\n\n # High cardinality\n if summary.get(\"n_distinct\", np.nan) > config.vars.cat.cardinality_threshold:\n alerts.append(\n Alert(\n alert_type=AlertType.HIGH_CARDINALITY,\n fields={\"n_distinct\"},\n )\n )\n\n if (\n \"chi_squared\" in summary\n and summary[\"chi_squared\"][\"pvalue\"] > config.vars.cat.chi_squared_threshold\n ):\n alerts.append(Alert(alert_type=AlertType.UNIFORM))\n\n if summary.get(\"date_warning\"):\n alerts.append(Alert(alert_type=AlertType.TYPE_DATE))\n\n # Constant length\n if \"composition\" in summary and summary[\"min_length\"] == summary[\"max_length\"]:\n alerts.append(\n Alert(\n alert_type=AlertType.CONSTANT_LENGTH,\n fields={\"composition_min_length\", \"composition_max_length\"},\n )\n )\n\n return alerts\n\n\ndef generic_alerts(summary: dict) -> List[Alert]:\n alerts = []\n\n # Missing\n if alert_value(summary[\"p_missing\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.MISSING,\n fields={\"p_missing\", \"n_missing\"},\n )\n )\n\n return alerts\n\n\ndef supported_alerts(summary: dict) -> List[Alert]:\n alerts = []\n\n if summary.get(\"n_distinct\", np.nan) == summary[\"n\"]:\n alerts.append(\n Alert(\n alert_type=AlertType.UNIQUE,\n fields={\"n_distinct\", \"p_distinct\", \"n_unique\", \"p_unique\"},\n )\n )\n if summary.get(\"n_distinct\", np.nan) == 1:\n summary[\"mode\"] = summary[\"value_counts_without_nan\"].index[0]\n alerts.append(\n Alert(\n alert_type=AlertType.CONSTANT,\n fields={\"n_distinct\"},\n )\n )\n alerts.append(\n Alert(\n alert_type=AlertType.REJECTED,\n fields=set(),\n )\n )\n return alerts\n\n\ndef unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]:\n alerts = [\n Alert(\n alert_type=AlertType.UNSUPPORTED,\n fields=set(),\n ),\n Alert(\n alert_type=AlertType.REJECTED,\n fields=set(),\n ),\n ]\n return alerts\n\n\ndef check_variable_alerts(config: Settings, col: str, description: dict) -> List[Alert]:\n \"\"\"Checks individual variables for alerts.\n\n Args:\n col: The column name that is checked.\n description: The series description.\n\n Returns:\n A list of alerts.\n \"\"\"\n alerts = []\n\n alerts += generic_alerts(description)\n\n if description[\"type\"] == \"Unsupported\":\n alerts += unsupported_alerts(description)\n else:\n alerts += supported_alerts(description)\n\n if description[\"type\"] == \"Categorical\":\n alerts += categorical_alerts(config, description)\n if description[\"type\"] == \"Numeric\":\n alerts += numeric_alerts(config, description)\n\n for idx in range(len(alerts)):\n alerts[idx].column_name = col\n alerts[idx].values = description\n return alerts\n\n\ndef check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:\n alerts = []\n\n for corr, matrix in correlations.items():\n if config.correlations[corr].warn_high_correlations:\n threshold = config.correlations[corr].threshold\n correlated_mapping = perform_check_correlation(matrix, threshold)\n if len(correlated_mapping) > 0:\n for k, v in correlated_mapping.items():\n alerts.append(\n Alert(\n column_name=k,\n 
alert_type=AlertType.HIGH_CORRELATION,\n values={\"corr\": corr, \"fields\": v},\n )\n )\n return alerts\n\n\ndef get_alerts(\n config: Settings, table_stats: dict, series_description: dict, correlations: dict\n) -> List[Alert]:\n alerts = check_table_alerts(table_stats)\n for col, description in series_description.items():\n alerts += check_variable_alerts(config, col, description)\n alerts += check_correlation_alerts(config, correlations)\n alerts.sort(key=lambda alert: str(alert.alert_type))\n return alerts\n\n\ndef alert_value(value: float) -> bool:\n return not np.isnan(value) and value > 0.01\n\n\ndef skewness_alert(v: float, threshold: int) -> bool:\n return not np.isnan(v) and (v < (-1 * threshold) or v > threshold)\n\n\ndef type_date_alert(series: pd.Series) -> bool:\n from dateutil.parser import ParserError, parse\n\n try:\n series.apply(parse)\n except ParserError:\n return False\n else:\n return True\n", "path": "src/pandas_profiling/model/alerts.py"}]}
| 3,692 | 329 |
gh_patches_debug_8720
|
rasdani/github-patches
|
git_diff
|
azavea__raster-vision-733
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow Map Tiles to be evaluated
I am trying to make rastervision work for a tiled map, so I run it on each tile I have, for example:

When I do, I get the error:
> rastervision_1 | Executing rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json
> rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Error: Command failed: rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json
> rastervision_1 | 2019-03-26 14:44:13.975033: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
> rastervision_1 | /usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
> rastervision_1 | fromlist, level)
> rastervision_1 | /usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned.
> rastervision_1 | s = DatasetReader(path, driver=driver, **kwargs)
> rastervision_1 | 2019-03-26 14:44:13:rastervision.task.task: INFO - Making predictions for scene
> rastervision_1 | Traceback (most recent call last):
> rastervision_1 | File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main
> rastervision_1 | "__main__", mod_spec)
> rastervision_1 | File "/usr/lib/python3.5/runpy.py", line 85, in _run_code
> rastervision_1 | exec(code, run_globals)
> rastervision_1 | File "/opt/src/rastervision/__main__.py", line 17, in <module>
> rastervision_1 | rv.main()
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__
> rastervision_1 | return self.main(*args, **kwargs)
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main
> rastervision_1 | rv = self.invoke(ctx)
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke
> rastervision_1 | return _process_result(sub_ctx.command.invoke(sub_ctx))
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke
> rastervision_1 | return ctx.invoke(self.callback, **ctx.params)
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke
> rastervision_1 | return callback(*args, **kwargs)
> rastervision_1 | File "/opt/src/rastervision/cli/main.py", line 240, in predict
> rastervision_1 | predictor.predict(image_uri, output_uri, export_config)
> rastervision_1 | File "/opt/src/rastervision/predictor.py", line 142, in predict
> rastervision_1 | labels = self.task.predict_scene(scene, self.tmp_dir)
> rastervision_1 | File "/opt/src/rastervision/task/task.py", line 192, in predict_scene
> rastervision_1 | predict_batch(batch_chips, batch_windows)
> rastervision_1 | File "/opt/src/rastervision/task/task.py", line 173, in predict_batch
> rastervision_1 | np.array(predict_chips), predict_windows, tmp_dir)
> rastervision_1 | File "/opt/src/rastervision/backend/tf_object_detection.py", line 736, in predict
> rastervision_1 | self.session)
> rastervision_1 | File "/opt/src/rastervision/backend/tf_object_detection.py", line 573, in compute_prediction
> rastervision_1 | [boxes, scores, class_ids], feed_dict={image_tensor: image_nps})
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 877, in run
> rastervision_1 | run_metadata_ptr)
> rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1076, in _run
> rastervision_1 | str(subfeed_t.get_shape())))
> rastervision_1 | ValueError: Cannot feed value of shape (2, 300, 300, 1) for Tensor 'image_tensor:0', which has shape '(?, ?, ?, 3)'
> rastervision_1 |
> rastervision_1 | at ChildProcess.exithandler (child_process.js:294:12)
> rastervision_1 | at ChildProcess.emit (events.js:189:13)
> rastervision_1 | at maybeClose (internal/child_process.js:970:16)
> rastervision_1 | at Process.ChildProcess._handle.onexit (internal/child_process.js:259:5)
> rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1)
> rastervision_1 | (node:7) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
I am successfull at running the same code on TIF files, and JPEG.
</issue>
<code>
[start of rastervision/data/raster_source/raster_source_config.py]
1 from abc import abstractmethod
2 from copy import deepcopy
3 import logging
4
5 import rastervision as rv
6 from rastervision.core.config import (Config, ConfigBuilder,
7 BundledConfigMixin)
8 from rastervision.data import (RasterTransformerConfig, StatsTransformerConfig)
9 from rastervision.protos.raster_source_pb2 \
10 import RasterSourceConfig as RasterSourceConfigMsg
11
12 log = logging.getLogger(__name__)
13
14
15 class RasterSourceConfig(BundledConfigMixin, Config):
16 deprecation_warnings = []
17
18 def __init__(self, source_type, transformers=None, channel_order=None):
19 if transformers is None:
20 transformers = []
21
22 self.source_type = source_type
23 self.transformers = transformers
24 self.channel_order = channel_order
25
26 def to_proto(self):
27 transformers = list(map(lambda c: c.to_proto(), self.transformers))
28 msg = RasterSourceConfigMsg(
29 source_type=self.source_type,
30 channel_order=self.channel_order,
31 transformers=transformers)
32 return msg
33
34 def save_bundle_files(self, bundle_dir):
35 new_transformers = []
36 files = []
37 for transformer in self.transformers:
38 new_transformer, t_files = transformer.save_bundle_files(
39 bundle_dir)
40 new_transformers.append(new_transformer)
41 files.extend(t_files)
42
43 new_config = self.to_builder() \
44 .with_transformers(new_transformers) \
45 .build()
46 return (new_config, files)
47
48 def load_bundle_files(self, bundle_dir):
49 new_transformers = []
50 for transformer in self.transformers:
51 new_transformer = transformer.load_bundle_files(bundle_dir)
52 new_transformers.append(new_transformer)
53 return self.to_builder() \
54 .with_transformers(new_transformers) \
55 .build()
56
57 @abstractmethod
58 def create_source(self, tmp_dir, crs_transformer, extent, class_map):
59 """Create the Raster Source for this configuration.
60 """
61 pass
62
63 def to_builder(self):
64 return rv._registry.get_config_builder(rv.RASTER_SOURCE,
65 self.source_type)(self)
66
67 @staticmethod
68 def check_deprecation(source_type):
69 # If source_type is deprecated and warning hasn't been shown yet, then warn.
70 if (source_type in rv.raster_source_deprecated_map and
71 source_type not in RasterSourceConfig.deprecation_warnings):
72 RasterSourceConfig.deprecation_warnings.append(source_type)
73 new_source_type = rv.raster_source_deprecated_map[source_type]
74 log.warn(
75 'RasterSource {} is deprecated. Please use {} instead.'.format(
76 source_type, new_source_type))
77
78 def builder(source_type):
79 RasterSourceConfig.check_deprecation(source_type)
80 return rv._registry.get_config_builder(rv.RASTER_SOURCE, source_type)()
81
82 @staticmethod
83 def from_proto(msg):
84 """Creates a TaskConfig from the specificed protobuf message
85 """
86 return rv._registry.get_config_builder(rv.RASTER_SOURCE, msg.source_type)() \
87 .from_proto(msg) \
88 .build()
89
90 @abstractmethod
91 def for_prediction(self, image_uri):
92 """Creates a new config with the image_uri."""
93 pass
94
95 @abstractmethod
96 def create_local(self, tmp_dir):
97 """Returns a new config with a local copy of the image data
98 if this image is remote.
99 """
100 pass
101
102 def create_transformers(self):
103 return list(map(lambda c: c.create_transformer(), self.transformers))
104
105 def update_for_command(self, command_type, experiment_config,
106 context=None):
107 for transformer in self.transformers:
108 transformer.update_for_command(command_type, experiment_config,
109 context)
110
111 def report_io(self, command_type, io_def):
112 for transformer in self.transformers:
113 transformer.report_io(command_type, io_def)
114
115
116 class RasterSourceConfigBuilder(ConfigBuilder):
117 def from_proto(self, msg):
118 transformers = list(
119 map(lambda m: RasterTransformerConfig.from_proto(m),
120 msg.transformers))
121
122 return self.with_channel_order(list(msg.channel_order)) \
123 .with_transformers(transformers)
124
125 def with_channel_order(self, channel_order):
126 """Defines the channel order for this raster source.
127
128 This defines the subset of channel indices and their order to use when extracting
129 chips from raw imagery.
130
131 Args:
132 channel_order: list of channel indices
133 """
134 b = deepcopy(self)
135 b.config['channel_order'] = channel_order
136 return b
137
138 def with_transformers(self, transformers):
139 """Transformers to be applied to the raster data.
140
141 Args:
142 transformers: A list of transformers to apply to the
143 raster data.
144
145 """
146 b = deepcopy(self)
147 b.config['transformers'] = list(transformers)
148 return b
149
150 def with_transformer(self, transformer):
151 """A transformer to be applied to the raster data.
152
153 Args:
154 transformer: A transformer to apply to the raster
155 data.
156
157 """
158 return self.with_transformers([transformer])
159
160 def with_stats_transformer(self):
161 """Add a stats transformer to the raster source."""
162 b = deepcopy(self)
163 transformers = b.config.get('transformers')
164 if transformers:
165 b.config['transformers'] = transformers.append(
166 StatsTransformerConfig())
167 else:
168 b.config['transformers'] = [StatsTransformerConfig()]
169 return b
170
[end of rastervision/data/raster_source/raster_source_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rastervision/data/raster_source/raster_source_config.py b/rastervision/data/raster_source/raster_source_config.py
--- a/rastervision/data/raster_source/raster_source_config.py
+++ b/rastervision/data/raster_source/raster_source_config.py
@@ -119,7 +119,10 @@
map(lambda m: RasterTransformerConfig.from_proto(m),
msg.transformers))
- return self.with_channel_order(list(msg.channel_order)) \
+ channel_order = list(msg.channel_order)
+ if len(channel_order) == 0:
+ channel_order = None
+ return self.with_channel_order(channel_order) \
.with_transformers(transformers)
def with_channel_order(self, channel_order):
|
{"golden_diff": "diff --git a/rastervision/data/raster_source/raster_source_config.py b/rastervision/data/raster_source/raster_source_config.py\n--- a/rastervision/data/raster_source/raster_source_config.py\n+++ b/rastervision/data/raster_source/raster_source_config.py\n@@ -119,7 +119,10 @@\n map(lambda m: RasterTransformerConfig.from_proto(m),\n msg.transformers))\n \n- return self.with_channel_order(list(msg.channel_order)) \\\n+ channel_order = list(msg.channel_order)\n+ if len(channel_order) == 0:\n+ channel_order = None\n+ return self.with_channel_order(channel_order) \\\n .with_transformers(transformers)\n \n def with_channel_order(self, channel_order):\n", "issue": "Allow Map Tiles to be evaluated\nI am trying to make rastervision work for a tiled map, so I run it on each tile I have, for example:\r\n\r\n\r\nWhen I do, I get the error:\r\n\r\n> rastervision_1 | Executing rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json\r\n> rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Error: Command failed: rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json\r\n> rastervision_1 | 2019-03-26 14:44:13.975033: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\r\n> rastervision_1 | /usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n> rastervision_1 | fromlist, level)\r\n> rastervision_1 | /usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. 
The identity matrix may be returned.\r\n> rastervision_1 | s = DatasetReader(path, driver=driver, **kwargs)\r\n> rastervision_1 | 2019-03-26 14:44:13:rastervision.task.task: INFO - Making predictions for scene\r\n> rastervision_1 | Traceback (most recent call last):\r\n> rastervision_1 | File \"/usr/lib/python3.5/runpy.py\", line 184, in _run_module_as_main\r\n> rastervision_1 | \"__main__\", mod_spec)\r\n> rastervision_1 | File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\r\n> rastervision_1 | exec(code, run_globals)\r\n> rastervision_1 | File \"/opt/src/rastervision/__main__.py\", line 17, in <module>\r\n> rastervision_1 | rv.main()\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 722, in __call__\r\n> rastervision_1 | return self.main(*args, **kwargs)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 697, in main\r\n> rastervision_1 | rv = self.invoke(ctx)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 1066, in invoke\r\n> rastervision_1 | return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 895, in invoke\r\n> rastervision_1 | return ctx.invoke(self.callback, **ctx.params)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 535, in invoke\r\n> rastervision_1 | return callback(*args, **kwargs)\r\n> rastervision_1 | File \"/opt/src/rastervision/cli/main.py\", line 240, in predict\r\n> rastervision_1 | predictor.predict(image_uri, output_uri, export_config)\r\n> rastervision_1 | File \"/opt/src/rastervision/predictor.py\", line 142, in predict\r\n> rastervision_1 | labels = self.task.predict_scene(scene, self.tmp_dir)\r\n> rastervision_1 | File \"/opt/src/rastervision/task/task.py\", line 192, in predict_scene\r\n> rastervision_1 | predict_batch(batch_chips, batch_windows)\r\n> rastervision_1 | File \"/opt/src/rastervision/task/task.py\", line 173, in predict_batch\r\n> rastervision_1 | np.array(predict_chips), predict_windows, tmp_dir)\r\n> rastervision_1 | File \"/opt/src/rastervision/backend/tf_object_detection.py\", line 736, in predict\r\n> rastervision_1 | self.session)\r\n> rastervision_1 | File \"/opt/src/rastervision/backend/tf_object_detection.py\", line 573, in compute_prediction\r\n> rastervision_1 | [boxes, scores, class_ids], feed_dict={image_tensor: image_nps})\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py\", line 877, in run\r\n> rastervision_1 | run_metadata_ptr)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py\", line 1076, in _run\r\n> rastervision_1 | str(subfeed_t.get_shape())))\r\n> rastervision_1 | ValueError: Cannot feed value of shape (2, 300, 300, 1) for Tensor 'image_tensor:0', which has shape '(?, ?, ?, 3)'\r\n> rastervision_1 | \r\n> rastervision_1 | at ChildProcess.exithandler (child_process.js:294:12)\r\n> rastervision_1 | at ChildProcess.emit (events.js:189:13)\r\n> rastervision_1 | at maybeClose (internal/child_process.js:970:16)\r\n> rastervision_1 | at Process.ChildProcess._handle.onexit (internal/child_process.js:259:5)\r\n> rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). 
(rejection id: 1)\r\n> rastervision_1 | (node:7) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.\r\n\r\nI am successfull at running the same code on TIF files, and JPEG.\n", "before_files": [{"content": "from abc import abstractmethod\nfrom copy import deepcopy\nimport logging\n\nimport rastervision as rv\nfrom rastervision.core.config import (Config, ConfigBuilder,\n BundledConfigMixin)\nfrom rastervision.data import (RasterTransformerConfig, StatsTransformerConfig)\nfrom rastervision.protos.raster_source_pb2 \\\n import RasterSourceConfig as RasterSourceConfigMsg\n\nlog = logging.getLogger(__name__)\n\n\nclass RasterSourceConfig(BundledConfigMixin, Config):\n deprecation_warnings = []\n\n def __init__(self, source_type, transformers=None, channel_order=None):\n if transformers is None:\n transformers = []\n\n self.source_type = source_type\n self.transformers = transformers\n self.channel_order = channel_order\n\n def to_proto(self):\n transformers = list(map(lambda c: c.to_proto(), self.transformers))\n msg = RasterSourceConfigMsg(\n source_type=self.source_type,\n channel_order=self.channel_order,\n transformers=transformers)\n return msg\n\n def save_bundle_files(self, bundle_dir):\n new_transformers = []\n files = []\n for transformer in self.transformers:\n new_transformer, t_files = transformer.save_bundle_files(\n bundle_dir)\n new_transformers.append(new_transformer)\n files.extend(t_files)\n\n new_config = self.to_builder() \\\n .with_transformers(new_transformers) \\\n .build()\n return (new_config, files)\n\n def load_bundle_files(self, bundle_dir):\n new_transformers = []\n for transformer in self.transformers:\n new_transformer = transformer.load_bundle_files(bundle_dir)\n new_transformers.append(new_transformer)\n return self.to_builder() \\\n .with_transformers(new_transformers) \\\n .build()\n\n @abstractmethod\n def create_source(self, tmp_dir, crs_transformer, extent, class_map):\n \"\"\"Create the Raster Source for this configuration.\n \"\"\"\n pass\n\n def to_builder(self):\n return rv._registry.get_config_builder(rv.RASTER_SOURCE,\n self.source_type)(self)\n\n @staticmethod\n def check_deprecation(source_type):\n # If source_type is deprecated and warning hasn't been shown yet, then warn.\n if (source_type in rv.raster_source_deprecated_map and\n source_type not in RasterSourceConfig.deprecation_warnings):\n RasterSourceConfig.deprecation_warnings.append(source_type)\n new_source_type = rv.raster_source_deprecated_map[source_type]\n log.warn(\n 'RasterSource {} is deprecated. 
Please use {} instead.'.format(\n source_type, new_source_type))\n\n def builder(source_type):\n RasterSourceConfig.check_deprecation(source_type)\n return rv._registry.get_config_builder(rv.RASTER_SOURCE, source_type)()\n\n @staticmethod\n def from_proto(msg):\n \"\"\"Creates a TaskConfig from the specificed protobuf message\n \"\"\"\n return rv._registry.get_config_builder(rv.RASTER_SOURCE, msg.source_type)() \\\n .from_proto(msg) \\\n .build()\n\n @abstractmethod\n def for_prediction(self, image_uri):\n \"\"\"Creates a new config with the image_uri.\"\"\"\n pass\n\n @abstractmethod\n def create_local(self, tmp_dir):\n \"\"\"Returns a new config with a local copy of the image data\n if this image is remote.\n \"\"\"\n pass\n\n def create_transformers(self):\n return list(map(lambda c: c.create_transformer(), self.transformers))\n\n def update_for_command(self, command_type, experiment_config,\n context=None):\n for transformer in self.transformers:\n transformer.update_for_command(command_type, experiment_config,\n context)\n\n def report_io(self, command_type, io_def):\n for transformer in self.transformers:\n transformer.report_io(command_type, io_def)\n\n\nclass RasterSourceConfigBuilder(ConfigBuilder):\n def from_proto(self, msg):\n transformers = list(\n map(lambda m: RasterTransformerConfig.from_proto(m),\n msg.transformers))\n\n return self.with_channel_order(list(msg.channel_order)) \\\n .with_transformers(transformers)\n\n def with_channel_order(self, channel_order):\n \"\"\"Defines the channel order for this raster source.\n\n This defines the subset of channel indices and their order to use when extracting\n chips from raw imagery.\n\n Args:\n channel_order: list of channel indices\n \"\"\"\n b = deepcopy(self)\n b.config['channel_order'] = channel_order\n return b\n\n def with_transformers(self, transformers):\n \"\"\"Transformers to be applied to the raster data.\n\n Args:\n transformers: A list of transformers to apply to the\n raster data.\n\n \"\"\"\n b = deepcopy(self)\n b.config['transformers'] = list(transformers)\n return b\n\n def with_transformer(self, transformer):\n \"\"\"A transformer to be applied to the raster data.\n\n Args:\n transformer: A transformer to apply to the raster\n data.\n\n \"\"\"\n return self.with_transformers([transformer])\n\n def with_stats_transformer(self):\n \"\"\"Add a stats transformer to the raster source.\"\"\"\n b = deepcopy(self)\n transformers = b.config.get('transformers')\n if transformers:\n b.config['transformers'] = transformers.append(\n StatsTransformerConfig())\n else:\n b.config['transformers'] = [StatsTransformerConfig()]\n return b\n", "path": "rastervision/data/raster_source/raster_source_config.py"}]}
| 3,792 | 169 |
gh_patches_debug_30017
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1022
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Empty auth dictionary should be valid
docker/compose#3265
</issue>
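For context, a hedged sketch (values are assumptions, not taken from the issue) of the configuration shape at stake: clients that delegate secrets to a credential store write registry entries as empty dictionaries, so the auth parsing below has to tolerate entries without an `auth` key.

```python
# Hypothetical parsed form of a ~/.docker/config.json written by such a client.
config = {
    'auths': {
        'https://index.docker.io/v1/': {},  # empty entry should still be accepted
    },
    'credsStore': 'osxkeychain',            # assumed store name, for illustration
}
```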
<code>
[start of docker/auth/auth.py]
1 # Copyright 2013 dotCloud inc.
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import base64
16 import json
17 import logging
18 import os
19
20 import six
21
22 from .. import errors
23
24 INDEX_NAME = 'docker.io'
25 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)
26 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
27 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
28
29 log = logging.getLogger(__name__)
30
31
32 def resolve_repository_name(repo_name):
33 if '://' in repo_name:
34 raise errors.InvalidRepository(
35 'Repository name cannot contain a scheme ({0})'.format(repo_name)
36 )
37
38 index_name, remote_name = split_repo_name(repo_name)
39 if index_name[0] == '-' or index_name[-1] == '-':
40 raise errors.InvalidRepository(
41 'Invalid index name ({0}). Cannot begin or end with a'
42 ' hyphen.'.format(index_name)
43 )
44 return resolve_index_name(index_name), remote_name
45
46
47 def resolve_index_name(index_name):
48 index_name = convert_to_hostname(index_name)
49 if index_name == 'index.' + INDEX_NAME:
50 index_name = INDEX_NAME
51 return index_name
52
53
54 def split_repo_name(repo_name):
55 parts = repo_name.split('/', 1)
56 if len(parts) == 1 or (
57 '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
58 ):
59 # This is a docker index repo (ex: username/foobar or ubuntu)
60 return INDEX_NAME, repo_name
61 return tuple(parts)
62
63
64 def resolve_authconfig(authconfig, registry=None):
65 """
66 Returns the authentication data from the given auth configuration for a
67 specific registry. As with the Docker client, legacy entries in the config
68 with full URLs are stripped down to hostnames before checking for a match.
69 Returns None if no match was found.
70 """
71 # Default to the public index server
72 registry = resolve_index_name(registry) if registry else INDEX_NAME
73 log.debug("Looking for auth entry for {0}".format(repr(registry)))
74
75 if registry in authconfig:
76 log.debug("Found {0}".format(repr(registry)))
77 return authconfig[registry]
78
79 for key, config in six.iteritems(authconfig):
80 if resolve_index_name(key) == registry:
81 log.debug("Found {0}".format(repr(key)))
82 return config
83
84 log.debug("No entry found")
85 return None
86
87
88 def convert_to_hostname(url):
89 return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
90
91
92 def decode_auth(auth):
93 if isinstance(auth, six.string_types):
94 auth = auth.encode('ascii')
95 s = base64.b64decode(auth)
96 login, pwd = s.split(b':', 1)
97 return login.decode('utf8'), pwd.decode('utf8')
98
99
100 def encode_header(auth):
101 auth_json = json.dumps(auth).encode('ascii')
102 return base64.urlsafe_b64encode(auth_json)
103
104
105 def parse_auth(entries, raise_on_error=False):
106 """
107 Parses authentication entries
108
109 Args:
110 entries: Dict of authentication entries.
111 raise_on_error: If set to true, an invalid format will raise
112 InvalidConfigFile
113
114 Returns:
115 Authentication registry.
116 """
117
118 conf = {}
119 for registry, entry in six.iteritems(entries):
120 if not (isinstance(entry, dict) and 'auth' in entry):
121 log.debug(
122 'Config entry for key {0} is not auth config'.format(registry)
123 )
124 # We sometimes fall back to parsing the whole config as if it was
125 # the auth config by itself, for legacy purposes. In that case, we
126 # fail silently and return an empty conf if any of the keys is not
127 # formatted properly.
128 if raise_on_error:
129 raise errors.InvalidConfigFile(
130 'Invalid configuration for registry {0}'.format(registry)
131 )
132 return {}
133 username, password = decode_auth(entry['auth'])
134 log.debug(
135 'Found entry (registry={0}, username={1})'
136 .format(repr(registry), repr(username))
137 )
138 conf[registry] = {
139 'username': username,
140 'password': password,
141 'email': entry.get('email'),
142 'serveraddress': registry,
143 }
144 return conf
145
146
147 def find_config_file(config_path=None):
148 environment_path = os.path.join(
149 os.environ.get('DOCKER_CONFIG'),
150 os.path.basename(DOCKER_CONFIG_FILENAME)
151 ) if os.environ.get('DOCKER_CONFIG') else None
152
153 paths = [
154 config_path, # 1
155 environment_path, # 2
156 os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3
157 os.path.join(
158 os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME
159 ) # 4
160 ]
161
162 for path in paths:
163 if path and os.path.exists(path):
164 return path
165 return None
166
167
168 def load_config(config_path=None):
169 """
170 Loads authentication data from a Docker configuration file in the given
171 root directory or if config_path is passed use given path.
172 Lookup priority:
173 explicit config_path parameter > DOCKER_CONFIG environment variable >
174 ~/.docker/config.json > ~/.dockercfg
175 """
176 config_file = find_config_file(config_path)
177
178 if not config_file:
179 log.debug("File doesn't exist")
180 return {}
181
182 try:
183 with open(config_file) as f:
184 data = json.load(f)
185 res = {}
186 if data.get('auths'):
187 log.debug("Found 'auths' section")
188 res.update(parse_auth(data['auths'], raise_on_error=True))
189 if data.get('HttpHeaders'):
190 log.debug("Found 'HttpHeaders' section")
191 res.update({'HttpHeaders': data['HttpHeaders']})
192 if res:
193 return res
194 else:
195 log.debug("Couldn't find 'auths' or 'HttpHeaders' sections")
196 f.seek(0)
197 return parse_auth(json.load(f))
198 except (IOError, KeyError, ValueError) as e:
199 # Likely missing new Docker config file or it's in an
200 # unknown format, continue to attempt to read old location
201 # and format.
202 log.debug(e)
203
204 log.debug("Attempting to parse legacy auth file format")
205 try:
206 data = []
207 with open(config_file) as f:
208 for line in f.readlines():
209 data.append(line.strip().split(' = ')[1])
210 if len(data) < 2:
211 # Not enough data
212 raise errors.InvalidConfigFile(
213 'Invalid or empty configuration file!'
214 )
215
216 username, password = decode_auth(data[0])
217 return {
218 INDEX_NAME: {
219 'username': username,
220 'password': password,
221 'email': data[1],
222 'serveraddress': INDEX_URL,
223 }
224 }
225 except Exception as e:
226 log.debug(e)
227 pass
228
229 log.debug("All parsing attempts failed - returning empty config")
230 return {}
231
[end of docker/auth/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/auth/auth.py b/docker/auth/auth.py
--- a/docker/auth/auth.py
+++ b/docker/auth/auth.py
@@ -117,7 +117,7 @@
conf = {}
for registry, entry in six.iteritems(entries):
- if not (isinstance(entry, dict) and 'auth' in entry):
+ if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(registry)
)
@@ -130,6 +130,16 @@
'Invalid configuration for registry {0}'.format(registry)
)
return {}
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug(
+ 'Auth data for {0} is absent. Client might be using a '
+ 'credentials store instead.'
+ )
+ return {}
+
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
@@ -189,6 +199,9 @@
if data.get('HttpHeaders'):
log.debug("Found 'HttpHeaders' section")
res.update({'HttpHeaders': data['HttpHeaders']})
+ if data.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': data['credsStore']})
if res:
return res
else:
|
{"golden_diff": "diff --git a/docker/auth/auth.py b/docker/auth/auth.py\n--- a/docker/auth/auth.py\n+++ b/docker/auth/auth.py\n@@ -117,7 +117,7 @@\n \n conf = {}\n for registry, entry in six.iteritems(entries):\n- if not (isinstance(entry, dict) and 'auth' in entry):\n+ if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n@@ -130,6 +130,16 @@\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n+ if 'auth' not in entry:\n+ # Starting with engine v1.11 (API 1.23), an empty dictionary is\n+ # a valid value in the auths config.\n+ # https://github.com/docker/compose/issues/3265\n+ log.debug(\n+ 'Auth data for {0} is absent. Client might be using a '\n+ 'credentials store instead.'\n+ )\n+ return {}\n+\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n@@ -189,6 +199,9 @@\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n+ if data.get('credsStore'):\n+ log.debug(\"Found 'credsStore' section\")\n+ res.update({'credsStore': data['credsStore']})\n if res:\n return res\n else:\n", "issue": "Empty auth dictionary should be valid\ndocker/compose#3265\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\n\nimport six\n\nfrom .. import errors\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not (isinstance(entry, dict) and 'auth' in entry):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if res:\n return res\n 
else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}]}
| 2,797 | 363 |
gh_patches_debug_687
|
rasdani/github-patches
|
git_diff
|
hylang__hy-2220
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add header notice to "stable" line documentation to point users to the alpha cycle documentation
I was reading the documentation and noticed that hy.contrib.walk is mentioned there:
https://docs.hylang.org/en/stable/contrib/walk.html
however, it appears that the hy.contrib.walk file is no longer on the master branch:
https://github.com/hylang/hy/blob/6ba90fd3f853b2ddc391aa3358f9386c41d831c4/hy/contrib/walk.hy
Is it a bug in the documentation, or am I missing something?
</issue>
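One hedged way to surface such a notice (a sketch against the `conf.py` shown below, not necessarily the project's chosen approach) is to expose a flag through Sphinx's `html_context` and let the theme template render a banner when it is set:

```python
# Sketch only; the flag name is an assumption, and hy_descriptive_version is
# defined earlier in the conf.py listed below.
html_context = dict(
    hy_descriptive_version=hy_descriptive_version,
    has_active_alpha=True,  # template: {% if has_active_alpha %} ...banner... {% endif %}
)
```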
<code>
[start of docs/conf.py]
1 # This file is execfile()d with the current directory set to its containing dir.
2
3 import re, os, sys, time, html
4
5 sys.path.insert(0, os.path.abspath('..'))
6
7 extensions = [
8 'sphinx.ext.napoleon',
9 'sphinx.ext.intersphinx',
10 'sphinx.ext.autodoc',
11 'sphinx.ext.viewcode',
12 'sphinxcontrib.hydomain',
13 ]
14
15 from get_version import __version__ as hy_version
16
17 # Read the Docs might dirty its checkout, so strip the dirty flag.
18 hy_version = re.sub(r'[+.]dirty\Z', '', hy_version)
19
20 templates_path = ['_templates']
21 source_suffix = '.rst'
22
23 master_doc = 'index'
24
25 # General information about the project.
26 project = 'hy'
27 copyright = '%s the authors' % time.strftime('%Y')
28
29 # The version info for the project you're documenting, acts as replacement for
30 # |version| and |release|, also used in various other places throughout the
31 # built documents.
32 #
33 # The short X.Y version.
34 version = ".".join(hy_version.split(".")[:-1])
35 # The full version, including alpha/beta/rc tags.
36 release = hy_version
37 hy_descriptive_version = html.escape(hy_version)
38 if "+" in hy_version:
39 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>"
40
41 exclude_patterns = ['_build', 'coreteam.rst']
42 add_module_names = True
43
44 pygments_style = 'sphinx'
45
46 import sphinx_rtd_theme
47 html_theme = 'sphinx_rtd_theme'
48 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
49
50 # Add any paths that contain custom static files (such as style sheets) here,
51 # relative to this directory. They are copied after the builtin static files,
52 # so a file named "default.css" will overwrite the builtin "default.css".
53 html_static_path = ['_static']
54
55 html_use_smartypants = False
56 html_show_sphinx = False
57
58 html_context = dict(
59 hy_descriptive_version = hy_descriptive_version)
60
61 highlight_language = 'clojure'
62
63 intersphinx_mapping = dict(
64 py = ('https://docs.python.org/3/', None),
65 py3_10 = ('https://docs.python.org/3.10/', None),
66 hyrule = ('https://hyrule.readthedocs.io/en/master/', None))
67 # ** Generate Cheatsheet
68 import json
69 from pathlib import Path
70 from itertools import zip_longest
71
72 def refize(spec):
73 role = ':hy:func:'
74 if isinstance(spec, dict):
75 _name = spec['name']
76 uri = spec['uri']
77 if spec.get('internal'):
78 role = ':ref:'
79 else:
80 uri = spec
81 _name = str.split(uri, '.')[-1]
82 return '{}`{} <{}>`'.format(role, _name, uri)
83
84
85 def format_refs(refs, indent):
86 args = [iter(map(refize, refs))]
87 ref_groups = zip_longest(*args, fillvalue="")
88 return str.join(
89 ' \\\n' + ' ' * (indent + 3),
90 [str.join(' ', ref_group) for ref_group in ref_groups],
91 )
92
93
94 def format_row(category, divider_loc):
95 return '{title: <{width}} | {methods}'.format(
96 width=divider_loc,
97 title=category['name'],
98 methods=format_refs(category['methods'], divider_loc)
99 )
100
101
102 def format_table(table_spec):
103 table_name = table_spec['name']
104 categories = table_spec['categories']
105 longest_cat_name = max(len(category['name']) for category in categories)
106 table = [
107 table_name,
108 '-' * len(table_name),
109 '',
110 '=' * longest_cat_name + ' ' + '=' * 25,
111 *(format_row(category, longest_cat_name) for category in categories),
112 '=' * longest_cat_name + ' ' + '=' * 25,
113 ''
114 ]
115 return '\n'.join(table)
116
117
118 # Modifications to the cheatsheet should be added in `cheatsheet.json`
119 cheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())
120 cheatsheet = [
121 '..',
122 ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',
123 ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',
124 '',
125 '.. _cheatsheet:',
126 '',
127 'Cheatsheet',
128 '==========',
129 '',
130 *map(format_table, cheatsheet_spec),
131 ]
132 Path('./docs/cheatsheet.rst').write_text('\n'.join(cheatsheet))
133
134
135 # ** Sphinx App Setup
136
137
138 def setup(app):
139 app.add_css_file('overrides.css')
140
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -56,7 +56,9 @@
html_show_sphinx = False
html_context = dict(
- hy_descriptive_version = hy_descriptive_version)
+ hy_descriptive_version = hy_descriptive_version,
+ has_active_alpha = True,
+)
highlight_language = 'clojure'
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -56,7 +56,9 @@\n html_show_sphinx = False\n \n html_context = dict(\n- hy_descriptive_version = hy_descriptive_version)\n+ hy_descriptive_version = hy_descriptive_version,\n+ has_active_alpha = True,\n+)\n \n highlight_language = 'clojure'\n", "issue": "Add header notice to \"stable\" line documentation to point users to the alpha cycle documentation\nI was reading documentation and noticed that hy.contrib.walk is mentioned there:\r\nhttps://docs.hylang.org/en/stable/contrib/walk.html\r\n\r\nhowever it appears that hy.contrib.walk file is no longer on the master branch. \r\nhttps://github.com/hylang/hy/blob/6ba90fd3f853b2ddc391aa3358f9386c41d831c4/hy/contrib/walk.hy\r\n\r\nis it a bug in documentation or I'm missing something? \r\n\r\n\n", "before_files": [{"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = True\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None),\n py3_10 = ('https://docs.python.org/3.10/', None),\n hyrule = ('https://hyrule.readthedocs.io/en/master/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max(len(category['name']) for category in categories)\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. _cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}]}
| 2,011 | 90 |
gh_patches_debug_27568
|
rasdani/github-patches
|
git_diff
|
spack__spack-18325
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Installation issue: py-lxml doesn't use the spack libxslt and libexslt libraries
It looks like py-lxml should have AUTO_RPATH set to true. Otherwise it picks up the OS versions of libxslt instead of the spack built versions. I added this to the package.py and the library dependencies were correct:
```
def setup_build_environment(self, env):
env.set('AUTO_RPATH', 'true')
```
</issue>
<code>
[start of var/spack/repos/builtin/packages/py-lxml/package.py]
1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5
6 from spack import *
7
8
9 class PyLxml(PythonPackage):
10 """lxml is the most feature-rich and easy-to-use library for processing
11 XML and HTML in the Python language."""
12
13 homepage = "http://lxml.de/"
14 url = "https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz"
15
16 version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')
17 version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')
18 version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')
19 version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7')
20 version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66')
21
22 variant('html5', default=False, description='Enable html5lib backend')
23 variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend')
24 variant('cssselect', default=False, description='Enable cssselect module')
25
26 depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))
27 depends_on('py-setuptools', type='build')
28 depends_on('libxml2', type=('build', 'run'))
29 depends_on('libxslt', type=('build', 'run'))
30 depends_on('py-html5lib', when='+html5', type=('build', 'run'))
31 depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))
32 depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))
33
[end of var/spack/repos/builtin/packages/py-lxml/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/py-lxml/package.py b/var/spack/repos/builtin/packages/py-lxml/package.py
--- a/var/spack/repos/builtin/packages/py-lxml/package.py
+++ b/var/spack/repos/builtin/packages/py-lxml/package.py
@@ -13,6 +13,7 @@
homepage = "http://lxml.de/"
url = "https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz"
+ version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6')
version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')
version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')
version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')
@@ -25,8 +26,8 @@
depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
- depends_on('libxml2', type=('build', 'run'))
- depends_on('libxslt', type=('build', 'run'))
+ depends_on('libxml2', type=('build', 'link', 'run'))
+ depends_on('libxslt', type=('build', 'link', 'run'))
depends_on('py-html5lib', when='+html5', type=('build', 'run'))
depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))
depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-lxml/package.py b/var/spack/repos/builtin/packages/py-lxml/package.py\n--- a/var/spack/repos/builtin/packages/py-lxml/package.py\n+++ b/var/spack/repos/builtin/packages/py-lxml/package.py\n@@ -13,6 +13,7 @@\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n \n+ version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6')\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n@@ -25,8 +26,8 @@\n \n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n- depends_on('libxml2', type=('build', 'run'))\n- depends_on('libxslt', type=('build', 'run'))\n+ depends_on('libxml2', type=('build', 'link', 'run'))\n+ depends_on('libxslt', type=('build', 'link', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "issue": "Installation issue: py-lxml doesn't use the spack libxslt and libexslt libraries\nIt looks like py-lxml should have AUTO_RPATH set to true. Otherwise it picks up the OS versions of libxslt instead of the spack built versions. I added this to the package.py and the library dependencies were correct:\r\n\r\n```\r\n def setup_build_environment(self, env):\r\n env.set('AUTO_RPATH', 'true')\r\n```\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyLxml(PythonPackage):\n \"\"\"lxml is the most feature-rich and easy-to-use library for processing\n XML and HTML in the Python language.\"\"\"\n\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7')\n version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66')\n\n variant('html5', default=False, description='Enable html5lib backend')\n variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend')\n variant('cssselect', default=False, description='Enable cssselect module')\n\n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n depends_on('libxml2', type=('build', 'run'))\n depends_on('libxslt', type=('build', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-lxml/package.py"}]}
| 1,379 | 587 |
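A minimal sketch contrasting the two remedies discussed in the py-lxml record above, assuming the Spack `PythonPackage` API already shown in that record; the class name is illustrative, and the claim that lxml's build honours `AUTO_RPATH` comes from the issue reporter rather than verified lxml behaviour.

```
from spack import *


class PyLxmlSketch(PythonPackage):
    """Condensed, hypothetical package.py contrasting the two approaches."""

    # Fix applied in the golden diff: declare the C libraries as link-type
    # dependencies so the Spack compiler wrapper injects RPATHs to the
    # Spack-built libxml2/libxslt instead of the OS copies.
    depends_on('libxml2', type=('build', 'link', 'run'))
    depends_on('libxslt', type=('build', 'link', 'run'))

    # Workaround proposed in the issue: ask lxml's setup.py to embed RPATHs
    # itself (reporter's claim; illustrative only).
    def setup_build_environment(self, env):
        env.set('AUTO_RPATH', 'true')
```

The merged change relies on Spack adding RPATHs for `link`-type dependencies, which avoids depending on an lxml-specific environment variable.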
gh_patches_debug_34919
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-380
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Revisit dvc target from config in relation to phony stage files
</issue>
<code>
[start of dvc/command/repro.py]
1 from dvc.command.common.base import CmdBase
2
3 class CmdRepro(CmdBase):
4 def run(self):
5 recursive = not self.args.single_item
6 self.project.reproduce(self.args.targets,
7 recursive=recursive,
8 force=self.args.force)
9
[end of dvc/command/repro.py]
[start of dvc/project.py]
1 import os
2 import itertools
3 import networkx as nx
4
5 from dvc.logger import Logger
6 from dvc.exceptions import DvcException
7 from dvc.stage import Stage, Output, Dependency
8 from dvc.config import Config
9 from dvc.state import State
10 from dvc.lock import Lock
11 from dvc.scm import SCM
12 from dvc.cache import Cache
13 from dvc.data_cloud import DataCloud
14
15
16 class PipelineError(DvcException):
17 pass
18
19
20 class StageNotInPipelineError(PipelineError):
21 pass
22
23
24 class StageNotFoundError(DvcException):
25 pass
26
27
28 class Pipeline(object):
29
30 def __init__(self, project, G):
31 self.project = project
32 self.G = G
33
34 def graph(self):
35 return self.G
36
37 def stages(self):
38 return nx.get_node_attributes(self.G, 'stage')
39
40 def changed(self, stage):
41 for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):
42 if self.stages[node].changed():
43 return True
44 return False
45
46 def reproduce(self, stage):
47 if stage not in self.stages():
48 raise StageNotInPipelineError()
49
50 if not self.changed(stage):
51 raise PipelineNotChangedError()
52
53 for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):
54 self.stages[node].reproduce()
55
56 stage.reproduce()
57
58
59 class Project(object):
60 DVC_DIR = '.dvc'
61
62 def __init__(self, root_dir):
63 self.root_dir = os.path.abspath(os.path.realpath(root_dir))
64 self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)
65
66 self.scm = SCM(self.root_dir)
67 self.lock = Lock(self.dvc_dir)
68 self.cache = Cache(self.dvc_dir)
69 self.state = State(self.root_dir, self.dvc_dir)
70 self.config = Config(self.dvc_dir)
71 self.logger = Logger()
72 self.cloud = DataCloud(self.config._config)
73
74 @staticmethod
75 def init(root_dir):
76 """
77 Initiate dvc project in directory.
78
79 Args:
80 root_dir: Path to project's root directory.
81
82 Returns:
83 Project instance.
84
85 Raises:
86 KeyError: Raises an exception.
87 """
88 root_dir = os.path.abspath(root_dir)
89 dvc_dir = os.path.join(root_dir, Project.DVC_DIR)
90 os.mkdir(dvc_dir)
91
92 config = Config.init(dvc_dir)
93 cache = Cache.init(dvc_dir)
94 state = State.init(root_dir, dvc_dir)
95 lock = Lock(dvc_dir)
96
97 scm = SCM(root_dir)
98 scm.ignore_list([cache.cache_dir,
99 state.state_file,
100 lock.lock_file])
101
102 ignore_file = os.path.join(dvc_dir, scm.ignore_file())
103 scm.add([config.config_file, ignore_file])
104 scm.commit('DVC init')
105
106 return Project(root_dir)
107
108 def add(self, fname):
109 path = os.path.abspath(fname) + Stage.STAGE_FILE_SUFFIX
110 cwd = os.path.dirname(path)
111 outputs = [Output.loads(self, os.path.basename(fname), use_cache=True, cwd=cwd)]
112 stage = Stage(project=self,
113 path=path,
114 cmd=None,
115 cwd=cwd,
116 outs=outputs,
117 deps=[],
118 locked=True)
119 stage.save()
120 stage.dump()
121 return stage
122
123 def remove(self, fname):
124 stages = []
125 output = Output.loads(self, fname)
126 for out in self.outs():
127 if out.path == output.path:
128 stage = out.stage()
129 stages.append(stage)
130
131 if len(stages) == 0:
132 raise StageNotFoundError(fname)
133
134 for stage in stages:
135 stage.remove()
136
137 return stages
138
139 def run(self,
140 cmd=None,
141 deps=[],
142 outs=[],
143 outs_no_cache=[],
144 locked=False,
145 fname=Stage.STAGE_FILE,
146 cwd=os.curdir):
147 cwd = os.path.abspath(cwd)
148 path = os.path.join(cwd, fname)
149 outputs = Output.loads_from(self, outs, use_cache=True, cwd=cwd)
150 outputs += Output.loads_from(self, outs_no_cache, use_cache=False, cwd=cwd)
151 deps = Dependency.loads_from(self, deps, use_cache=False, cwd=cwd)
152
153 stage = Stage(project=self,
154 path=path,
155 cmd=cmd,
156 cwd=cwd,
157 outs=outputs,
158 deps=deps,
159 locked=locked)
160 stage.run()
161 stage.dump()
162 return stage
163
164 def reproduce(self, targets, recursive=True, force=False):
165 reproduced = []
166 stages = nx.get_node_attributes(self.graph(), 'stage')
167 for target in targets:
168 node = os.path.relpath(os.path.abspath(target), self.root_dir)
169 if node not in stages:
170 raise StageNotFoundError(target)
171
172 if recursive:
173 for n in nx.dfs_postorder_nodes(self.graph(), node):
174 stages[n].reproduce(force=force)
175 stages[n].dump()
176 reproduced.append(stages[n])
177
178 stages[node].reproduce(force=force)
179 stages[node].dump()
180 reproduced.append(stages[node])
181
182 return reproduced
183
184 def checkout(self):
185 for stage in self.stages():
186 stage.checkout()
187
188 def _used_cache(self):
189 clist = []
190 for stage in self.stages():
191 for entry in itertools.chain(stage.outs, stage.deps):
192 if not entry.use_cache:
193 continue
194 if entry.cache not in clist:
195 clist.append(entry.cache)
196 return clist
197
198 def gc(self):
199 clist = self._used_cache()
200 for cache in self.cache.all():
201 if cache in clist:
202 continue
203 os.unlink(cache)
204 self.logger.info('\'{}\' was removed'.format(cache))
205
206 def push(self, jobs=1):
207 self.cloud.push(self._used_cache(), jobs)
208
209 def pull(self, jobs=1):
210 self.cloud.pull(self._used_cache(), jobs)
211 for stage in self.stages():
212 for entry in itertools.chain(stage.outs, stage.deps):
213 if entry.use_cache:
214 entry.link()
215
216 def status(self, jobs=1):
217 return self.cloud.status(self._used_cache(), jobs)
218
219 def graph(self):
220 G = nx.DiGraph()
221
222 for stage in self.stages():
223 node = os.path.relpath(stage.path, self.root_dir)
224 G.add_node(node, stage=stage)
225 for dep in stage.deps:
226 dep_stage = dep.stage()
227 if not dep_stage:
228 continue
229 dep_node = os.path.relpath(dep_stage.path, self.root_dir)
230 G.add_node(dep_node, stage=dep_stage)
231 G.add_edge(node, dep_node)
232
233 return G
234
235 def stages(self):
236 stages = []
237 for root, dirs, files in os.walk(self.root_dir):
238 for fname in files:
239 path = os.path.join(root, fname)
240 if not Stage.is_stage_file(path):
241 continue
242 stages.append(Stage.load(self, path))
243 return stages
244
245 def outs(self):
246 outs = []
247 for stage in self.stages():
248 outs += stage.outs
249 return outs
250
251 def pipelines(self):
252 pipelines = []
253 for G in nx.weakly_connected_component_subgraphs(self.graph()):
254 pipeline = Pipeline(self, G)
255 pipelines.append(pipeline)
256
257 return pipelines
258
[end of dvc/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dvc/command/repro.py b/dvc/command/repro.py
--- a/dvc/command/repro.py
+++ b/dvc/command/repro.py
@@ -1,8 +1,16 @@
+from dvc.exceptions import DvcException
from dvc.command.common.base import CmdBase
class CmdRepro(CmdBase):
def run(self):
recursive = not self.args.single_item
- self.project.reproduce(self.args.targets,
- recursive=recursive,
- force=self.args.force)
+ for target in self.args.targets:
+ try:
+ self.project.reproduce(target,
+ recursive=recursive,
+ force=self.args.force)
+ except DvcException as ex:
+ msg = 'Failed to reproduce {}: {}'.format(target, str(ex))
+ self.project.logger.error(msg)
+ return 1
+ return 0
diff --git a/dvc/project.py b/dvc/project.py
--- a/dvc/project.py
+++ b/dvc/project.py
@@ -22,7 +22,9 @@
class StageNotFoundError(DvcException):
- pass
+ def __init__(self, path):
+ msg = 'Stage file {} does not exist'.format(path)
+ super(StageNotFoundError, self).__init__(msg)
class Pipeline(object):
@@ -161,23 +163,22 @@
stage.dump()
return stage
- def reproduce(self, targets, recursive=True, force=False):
+ def reproduce(self, target, recursive=True, force=False):
reproduced = []
stages = nx.get_node_attributes(self.graph(), 'stage')
- for target in targets:
- node = os.path.relpath(os.path.abspath(target), self.root_dir)
- if node not in stages:
- raise StageNotFoundError(target)
-
- if recursive:
- for n in nx.dfs_postorder_nodes(self.graph(), node):
- stages[n].reproduce(force=force)
- stages[n].dump()
- reproduced.append(stages[n])
-
- stages[node].reproduce(force=force)
- stages[node].dump()
- reproduced.append(stages[node])
+ node = os.path.relpath(os.path.abspath(target), self.root_dir)
+ if node not in stages:
+ raise StageNotFoundError(target)
+
+ if recursive:
+ for n in nx.dfs_postorder_nodes(self.graph(), node):
+ stages[n].reproduce(force=force)
+ stages[n].dump()
+ reproduced.append(stages[n])
+
+ stages[node].reproduce(force=force)
+ stages[node].dump()
+ reproduced.append(stages[node])
return reproduced
|
{"golden_diff": "diff --git a/dvc/command/repro.py b/dvc/command/repro.py\n--- a/dvc/command/repro.py\n+++ b/dvc/command/repro.py\n@@ -1,8 +1,16 @@\n+from dvc.exceptions import DvcException\n from dvc.command.common.base import CmdBase\n \n class CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n- self.project.reproduce(self.args.targets,\n- recursive=recursive,\n- force=self.args.force)\n+ for target in self.args.targets:\n+ try:\n+ self.project.reproduce(target,\n+ recursive=recursive,\n+ force=self.args.force)\n+ except DvcException as ex:\n+ msg = 'Failed to reproduce {}: {}'.format(target, str(ex))\n+ self.project.logger.error(msg)\n+ return 1\n+ return 0\ndiff --git a/dvc/project.py b/dvc/project.py\n--- a/dvc/project.py\n+++ b/dvc/project.py\n@@ -22,7 +22,9 @@\n \n \n class StageNotFoundError(DvcException):\n- pass\n+ def __init__(self, path):\n+ msg = 'Stage file {} does not exist'.format(path)\n+ super(StageNotFoundError, self).__init__(msg)\n \n \n class Pipeline(object):\n@@ -161,23 +163,22 @@\n stage.dump()\n return stage\n \n- def reproduce(self, targets, recursive=True, force=False):\n+ def reproduce(self, target, recursive=True, force=False):\n reproduced = []\n stages = nx.get_node_attributes(self.graph(), 'stage')\n- for target in targets:\n- node = os.path.relpath(os.path.abspath(target), self.root_dir)\n- if node not in stages:\n- raise StageNotFoundError(target)\n-\n- if recursive:\n- for n in nx.dfs_postorder_nodes(self.graph(), node):\n- stages[n].reproduce(force=force)\n- stages[n].dump()\n- reproduced.append(stages[n])\n-\n- stages[node].reproduce(force=force)\n- stages[node].dump()\n- reproduced.append(stages[node])\n+ node = os.path.relpath(os.path.abspath(target), self.root_dir)\n+ if node not in stages:\n+ raise StageNotFoundError(target)\n+\n+ if recursive:\n+ for n in nx.dfs_postorder_nodes(self.graph(), node):\n+ stages[n].reproduce(force=force)\n+ stages[n].dump()\n+ reproduced.append(stages[n])\n+\n+ stages[node].reproduce(force=force)\n+ stages[node].dump()\n+ reproduced.append(stages[node])\n \n return reproduced\n", "issue": "Revisit dvc target from config in relation to phony stage files\n\n", "before_files": [{"content": "from dvc.command.common.base import CmdBase\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n self.project.reproduce(self.args.targets,\n recursive=recursive,\n force=self.args.force)\n", "path": "dvc/command/repro.py"}, {"content": "import os\nimport itertools\nimport networkx as nx\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output, Dependency\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.data_cloud import DataCloud\n\n\nclass PipelineError(DvcException):\n pass\n\n\nclass StageNotInPipelineError(PipelineError):\n pass\n\n\nclass StageNotFoundError(DvcException):\n pass\n\n\nclass Pipeline(object):\n\n def __init__(self, project, G):\n self.project = project\n self.G = G\n\n def graph(self):\n return self.G\n\n def stages(self):\n return nx.get_node_attributes(self.G, 'stage')\n\n def changed(self, stage):\n for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):\n if self.stages[node].changed():\n return True\n return False\n\n def reproduce(self, stage):\n if stage not in self.stages():\n raise StageNotInPipelineError()\n\n if not self.changed(stage):\n raise 
PipelineNotChangedError()\n\n for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):\n self.stages[node].reproduce()\n\n stage.reproduce()\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger()\n self.cloud = DataCloud(self.config._config)\n\n @staticmethod\n def init(root_dir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n scm.commit('DVC init')\n\n return Project(root_dir)\n\n def add(self, fname):\n path = os.path.abspath(fname) + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(path)\n outputs = [Output.loads(self, os.path.basename(fname), use_cache=True, cwd=cwd)]\n stage = Stage(project=self,\n path=path,\n cmd=None,\n cwd=cwd,\n outs=outputs,\n deps=[],\n locked=True)\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, fname):\n stages = []\n output = Output.loads(self, fname)\n for out in self.outs():\n if out.path == output.path:\n stage = out.stage()\n stages.append(stage)\n\n if len(stages) == 0:\n raise StageNotFoundError(fname) \n\n for stage in stages:\n stage.remove()\n\n return stages\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n locked=False,\n fname=Stage.STAGE_FILE,\n cwd=os.curdir):\n cwd = os.path.abspath(cwd)\n path = os.path.join(cwd, fname)\n outputs = Output.loads_from(self, outs, use_cache=True, cwd=cwd)\n outputs += Output.loads_from(self, outs_no_cache, use_cache=False, cwd=cwd)\n deps = Dependency.loads_from(self, deps, use_cache=False, cwd=cwd)\n\n stage = Stage(project=self,\n path=path,\n cmd=cmd,\n cwd=cwd,\n outs=outputs,\n deps=deps,\n locked=locked)\n stage.run()\n stage.dump()\n return stage\n\n def reproduce(self, targets, recursive=True, force=False):\n reproduced = []\n stages = nx.get_node_attributes(self.graph(), 'stage')\n for target in targets:\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n for n in nx.dfs_postorder_nodes(self.graph(), node):\n stages[n].reproduce(force=force)\n stages[n].dump()\n reproduced.append(stages[n])\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n reproduced.append(stages[node])\n\n return reproduced\n\n def checkout(self):\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self):\n clist = []\n for stage in self.stages():\n for entry in itertools.chain(stage.outs, stage.deps):\n if not entry.use_cache:\n continue\n if entry.cache not in clist:\n clist.append(entry.cache)\n return clist\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n 
continue\n os.unlink(cache)\n self.logger.info('\\'{}\\' was removed'.format(cache))\n\n def push(self, jobs=1):\n self.cloud.push(self._used_cache(), jobs)\n\n def pull(self, jobs=1):\n self.cloud.pull(self._used_cache(), jobs)\n for stage in self.stages():\n for entry in itertools.chain(stage.outs, stage.deps):\n if entry.use_cache:\n entry.link()\n\n def status(self, jobs=1):\n return self.cloud.status(self._used_cache(), jobs)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n\n def pipelines(self):\n pipelines = []\n for G in nx.weakly_connected_component_subgraphs(self.graph()):\n pipeline = Pipeline(self, G)\n pipelines.append(pipeline)\n\n return pipelines\n", "path": "dvc/project.py"}]}
| 2,898 | 581 |
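The golden diff above moves target iteration out of `Project.reproduce` and into the command; the following is a standalone sketch of that per-target error-handling loop, with the exception class and the project/logger objects stubbed for illustration.

```
class DvcException(Exception):
    """Stub standing in for dvc.exceptions.DvcException."""


def repro_targets(project, targets, recursive=True, force=False):
    # Reproduce each target independently; report the first failure and
    # signal it through a non-zero return code, as CmdRepro.run() now does.
    for target in targets:
        try:
            project.reproduce(target, recursive=recursive, force=force)
        except DvcException as ex:
            msg = 'Failed to reproduce {}: {}'.format(target, str(ex))
            project.logger.error(msg)
            return 1
    return 0
```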
gh_patches_debug_35275
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-center-index-11634
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] xorg/system: generated pkg-config files lack variables
While trying to add xkbcommon 1.0.1, I had to add the pkg-config generator to the xkbcommon recipe because, since 1.0.0, a new optional component depends on libxml2, and Meson uses pkg-config files.
xkbcommon depends on xorg, but currently xkbcommon's recipe relies on xorg's system-installed pkg-config files.
As soon as Conan pkg-config files are generated for the xkbcommon build, they take precedence, but one of them lacks an important variable for xkbcommon: `xkb_base` in xorg's `xkeyboard-config` component, which is used to set a definition at build time.
xkbcommon can build without this variable, but it will fail at runtime.
I'm pretty sure that this issue can't be fixed right now. It requires https://github.com/conan-io/conan/issues/7720
</issue>
<code>
[start of recipes/xorg/all/conanfile.py]
1 from conans import ConanFile, tools
2 from conans.errors import ConanException, ConanInvalidConfiguration
3 import os
4
5 required_conan_version = ">=1.32"
6
7 class ConanXOrg(ConanFile):
8 name = "xorg"
9 url = "https://github.com/conan-io/conan-center-index"
10 license = "MIT"
11 homepage = "https://www.x.org/wiki/"
12 description = "The X.Org project provides an open source implementation of the X Window System."
13 settings = "os"
14 topics = ("x11", "xorg")
15
16 def configure(self):
17 if self.settings.os not in ["Linux", "FreeBSD"]:
18 raise ConanInvalidConfiguration("This recipe supports only Linux and FreeBSD")
19
20 def package_id(self):
21 self.info.header_only()
22
23 def _fill_cppinfo_from_pkgconfig(self, name):
24 pkg_config = tools.PkgConfig(name)
25 if not pkg_config.provides:
26 raise ConanException("OpenGL development files aren't available, give up")
27 libs = [lib[2:] for lib in pkg_config.libs_only_l]
28 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]
29 ldflags = [flag for flag in pkg_config.libs_only_other]
30 include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
31 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
32 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
33
34 self.cpp_info.components[name].system_libs = libs
35 self.cpp_info.components[name].libdirs = lib_dirs
36 self.cpp_info.components[name].sharedlinkflags = ldflags
37 self.cpp_info.components[name].exelinkflags = ldflags
38 self.cpp_info.components[name].defines = defines
39 self.cpp_info.components[name].includedirs = include_dirs
40 self.cpp_info.components[name].cflags = cflags
41 self.cpp_info.components[name].cxxflags = cflags
42 self.cpp_info.components[name].version = pkg_config.version[0]
43
44 def system_requirements(self):
45 packages = []
46 if tools.os_info.is_linux and self.settings.os == "Linux":
47 if tools.os_info.with_apt:
48 packages = ["libx11-dev", "libx11-xcb-dev", "libfontenc-dev", "libice-dev", "libsm-dev", "libxau-dev", "libxaw7-dev",
49 "libxcomposite-dev", "libxcursor-dev", "libxdamage-dev", "libxdmcp-dev", "libxext-dev", "libxfixes-dev",
50 "libxi-dev", "libxinerama-dev", "libxkbfile-dev", "libxmu-dev", "libxmuu-dev",
51 "libxpm-dev", "libxrandr-dev", "libxrender-dev", "libxres-dev", "libxss-dev", "libxt-dev", "libxtst-dev",
52 "libxv-dev", "libxvmc-dev", "libxxf86vm-dev", "xtrans-dev", "libxcb-render0-dev",
53 "libxcb-render-util0-dev", "libxcb-xkb-dev", "libxcb-icccm4-dev", "libxcb-image0-dev",
54 "libxcb-keysyms1-dev", "libxcb-randr0-dev", "libxcb-shape0-dev", "libxcb-sync-dev", "libxcb-xfixes0-dev",
55 "libxcb-xinerama0-dev", "xkb-data", "libxcb-dri3-dev", "uuid-dev"]
56 if (tools.os_info.linux_distro == "ubuntu" and tools.os_info.os_version < "15") or\
57 (tools.os_info.linux_distro == "debian" and tools.os_info.os_version < "12") or\
58 (tools.os_info.linux_distro == "raspbian" and tools.os_info.os_version < "12"):
59 packages.append( "libxcb-util0-dev" )
60 else:
61 packages.append( "libxcb-util-dev" )
62 elif tools.os_info.with_yum or tools.os_info.with_dnf or tools.os_info.with_zypper:
63 packages = ["libxcb-devel", "libfontenc-devel", "libXaw-devel", "libXcomposite-devel",
64 "libXcursor-devel", "libXdmcp-devel", "libXtst-devel", "libXinerama-devel",
65 "libxkbfile-devel", "libXrandr-devel", "libXres-devel", "libXScrnSaver-devel", "libXvMC-devel",
66 "xorg-x11-xtrans-devel", "xcb-util-wm-devel", "xcb-util-image-devel", "xcb-util-keysyms-devel",
67 "xcb-util-renderutil-devel", "libXdamage-devel", "libXxf86vm-devel", "libXv-devel",
68 "xcb-util-devel", "libuuid-devel"]
69 packages.append("xkeyboard-config" if tools.os_info.with_zypper else "xkeyboard-config-devel")
70 elif tools.os_info.with_pacman:
71 packages = ["libxcb", "libfontenc", "libice", "libsm", "libxaw", "libxcomposite", "libxcursor",
72 "libxdamage", "libxdmcp", "libxtst", "libxinerama", "libxkbfile", "libxrandr", "libxres",
73 "libxss", "libxvmc", "xtrans", "xcb-util-wm", "xcb-util-image","xcb-util-keysyms", "xcb-util-renderutil",
74 "libxxf86vm", "libxv", "xkeyboard-config", "xcb-util", "util-linux-libs"]
75 else:
76 self.output.warn("Do not know how to install 'xorg' for {}.".format(tools.os_info.linux_distro))
77
78 elif tools.os_info.is_freebsd and self.settings.os == "FreeBSD":
79 packages = ["libX11", "libfontenc", "libice", "libsm", "libxaw", "libxcomposite", "libxcursor",
80 "libxdamage", "libxdmcp", "libxtst", "libxinerama", "libxkbfile", "libxrandr", "libxres",
81 "libXScrnSaver", "libxvmc", "xtrans", "xcb-util-wm", "xcb-util-image", "xcb-util-keysyms", "xcb-util-renderutil",
82 "libxxf86vm", "libxv", "xkeyboard-config", "xcb-util"]
83 if packages:
84 package_tool = tools.SystemPackageTool(conanfile=self, default_mode="verify")
85 package_tool.install_packages(update=True, packages=packages)
86
87 def package_info(self):
88 for name in ["x11", "x11-xcb", "fontenc", "ice", "sm", "xau", "xaw7",
89 "xcomposite", "xcursor", "xdamage", "xdmcp", "xext", "xfixes", "xi",
90 "xinerama", "xkbfile", "xmu", "xmuu", "xpm", "xrandr", "xrender", "xres",
91 "xscrnsaver", "xt", "xtst", "xv", "xvmc", "xxf86vm", "xtrans",
92 "xcb-xkb", "xcb-icccm", "xcb-image", "xcb-keysyms", "xcb-randr", "xcb-render",
93 "xcb-renderutil", "xcb-shape", "xcb-shm", "xcb-sync", "xcb-xfixes",
94 "xcb-xinerama", "xcb", "xkeyboard-config", "xcb-atom", "xcb-aux", "xcb-event", "xcb-util",
95 "xcb-dri3"] + ([] if self.settings.os == "FreeBSD" else ["uuid"]):
96 self._fill_cppinfo_from_pkgconfig(name)
97 if self.settings.os == "Linux":
98 self.cpp_info.components["sm"].requires.append("uuid")
99
100
[end of recipes/xorg/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/recipes/xorg/all/conanfile.py b/recipes/xorg/all/conanfile.py
--- a/recipes/xorg/all/conanfile.py
+++ b/recipes/xorg/all/conanfile.py
@@ -1,6 +1,5 @@
from conans import ConanFile, tools
from conans.errors import ConanException, ConanInvalidConfiguration
-import os
required_conan_version = ">=1.32"
@@ -30,6 +29,7 @@
include_dirs = [include[2:] for include in pkg_config.cflags_only_I]
cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")]
defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")]
+ variables = pkg_config.variables
self.cpp_info.components[name].system_libs = libs
self.cpp_info.components[name].libdirs = lib_dirs
@@ -40,6 +40,10 @@
self.cpp_info.components[name].cflags = cflags
self.cpp_info.components[name].cxxflags = cflags
self.cpp_info.components[name].version = pkg_config.version[0]
+ self.cpp_info.components[name].set_property("component_version", pkg_config.version[0])
+ self.cpp_info.components[name].set_property(
+ "pkg_config_custom_content",
+ "\n".join("%s=%s" % (key, value) for key,value in variables.items()))
def system_requirements(self):
packages = []
@@ -94,6 +98,8 @@
"xcb-xinerama", "xcb", "xkeyboard-config", "xcb-atom", "xcb-aux", "xcb-event", "xcb-util",
"xcb-dri3"] + ([] if self.settings.os == "FreeBSD" else ["uuid"]):
self._fill_cppinfo_from_pkgconfig(name)
+ self.cpp_info.components[name].set_property("pkg_config_name", name)
+
if self.settings.os == "Linux":
self.cpp_info.components["sm"].requires.append("uuid")
|
{"golden_diff": "diff --git a/recipes/xorg/all/conanfile.py b/recipes/xorg/all/conanfile.py\n--- a/recipes/xorg/all/conanfile.py\n+++ b/recipes/xorg/all/conanfile.py\n@@ -1,6 +1,5 @@\n from conans import ConanFile, tools\n from conans.errors import ConanException, ConanInvalidConfiguration\n-import os\n \n required_conan_version = \">=1.32\"\n \n@@ -30,6 +29,7 @@\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n+ variables = pkg_config.variables\n \n self.cpp_info.components[name].system_libs = libs\n self.cpp_info.components[name].libdirs = lib_dirs\n@@ -40,6 +40,10 @@\n self.cpp_info.components[name].cflags = cflags\n self.cpp_info.components[name].cxxflags = cflags\n self.cpp_info.components[name].version = pkg_config.version[0]\n+ self.cpp_info.components[name].set_property(\"component_version\", pkg_config.version[0])\n+ self.cpp_info.components[name].set_property(\n+ \"pkg_config_custom_content\",\n+ \"\\n\".join(\"%s=%s\" % (key, value) for key,value in variables.items()))\n \n def system_requirements(self):\n packages = []\n@@ -94,6 +98,8 @@\n \"xcb-xinerama\", \"xcb\", \"xkeyboard-config\", \"xcb-atom\", \"xcb-aux\", \"xcb-event\", \"xcb-util\",\n \"xcb-dri3\"] + ([] if self.settings.os == \"FreeBSD\" else [\"uuid\"]):\n self._fill_cppinfo_from_pkgconfig(name)\n+ self.cpp_info.components[name].set_property(\"pkg_config_name\", name)\n+ \n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"sm\"].requires.append(\"uuid\")\n", "issue": "[package] xorg/system: generated pkg-config files lack variables\nWhile trying to add xkbcommon 1.0.1, I had to add pkg-config generator to xkbcommon recipe because since 1.0.0 a new optional component depends on libxml2, and meson uses pkg-config files.\r\nxkbcommon depends on xorg, but currently xkbcommon's recipe relies on xorg system installed pkg-config files.\r\nAs soon as conan pkg-config files are generated for xkbcommon build, they take precedence, but one of them lacks an important variable for xkbcommon: `xkb_base` in `xkeyboard-config` xorg's component, used to set a definition at build time.\r\n\r\nxkbcommon can build without this variable, but il will fail at runtime.\r\n\r\nI'm pretty sure that this issue can't be fixed right now. 
It requires https://github.com/conan-io/conan/issues/7720\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.32\"\n\nclass ConanXOrg(ConanFile):\n name = \"xorg\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"MIT\"\n homepage = \"https://www.x.org/wiki/\"\n description = \"The X.Org project provides an open source implementation of the X Window System.\"\n settings = \"os\"\n topics = (\"x11\", \"xorg\")\n\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"This recipe supports only Linux and FreeBSD\")\n\n def package_id(self):\n self.info.header_only()\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"OpenGL development files aren't available, give up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.components[name].system_libs = libs\n self.cpp_info.components[name].libdirs = lib_dirs\n self.cpp_info.components[name].sharedlinkflags = ldflags\n self.cpp_info.components[name].exelinkflags = ldflags\n self.cpp_info.components[name].defines = defines\n self.cpp_info.components[name].includedirs = include_dirs\n self.cpp_info.components[name].cflags = cflags\n self.cpp_info.components[name].cxxflags = cflags\n self.cpp_info.components[name].version = pkg_config.version[0]\n\n def system_requirements(self):\n packages = []\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n if tools.os_info.with_apt:\n packages = [\"libx11-dev\", \"libx11-xcb-dev\", \"libfontenc-dev\", \"libice-dev\", \"libsm-dev\", \"libxau-dev\", \"libxaw7-dev\",\n \"libxcomposite-dev\", \"libxcursor-dev\", \"libxdamage-dev\", \"libxdmcp-dev\", \"libxext-dev\", \"libxfixes-dev\", \n \"libxi-dev\", \"libxinerama-dev\", \"libxkbfile-dev\", \"libxmu-dev\", \"libxmuu-dev\",\n \"libxpm-dev\", \"libxrandr-dev\", \"libxrender-dev\", \"libxres-dev\", \"libxss-dev\", \"libxt-dev\", \"libxtst-dev\", \n \"libxv-dev\", \"libxvmc-dev\", \"libxxf86vm-dev\", \"xtrans-dev\", \"libxcb-render0-dev\",\n \"libxcb-render-util0-dev\", \"libxcb-xkb-dev\", \"libxcb-icccm4-dev\", \"libxcb-image0-dev\",\n \"libxcb-keysyms1-dev\", \"libxcb-randr0-dev\", \"libxcb-shape0-dev\", \"libxcb-sync-dev\", \"libxcb-xfixes0-dev\",\n \"libxcb-xinerama0-dev\", \"xkb-data\", \"libxcb-dri3-dev\", \"uuid-dev\"]\n if (tools.os_info.linux_distro == \"ubuntu\" and tools.os_info.os_version < \"15\") or\\\n (tools.os_info.linux_distro == \"debian\" and tools.os_info.os_version < \"12\") or\\\n (tools.os_info.linux_distro == \"raspbian\" and tools.os_info.os_version < \"12\"):\n packages.append( \"libxcb-util0-dev\" )\n else:\n packages.append( \"libxcb-util-dev\" )\n elif tools.os_info.with_yum or tools.os_info.with_dnf or tools.os_info.with_zypper:\n packages = [\"libxcb-devel\", \"libfontenc-devel\", \"libXaw-devel\", \"libXcomposite-devel\",\n \"libXcursor-devel\", \"libXdmcp-devel\", \"libXtst-devel\", \"libXinerama-devel\",\n \"libxkbfile-devel\", 
\"libXrandr-devel\", \"libXres-devel\", \"libXScrnSaver-devel\", \"libXvMC-devel\",\n \"xorg-x11-xtrans-devel\", \"xcb-util-wm-devel\", \"xcb-util-image-devel\", \"xcb-util-keysyms-devel\",\n \"xcb-util-renderutil-devel\", \"libXdamage-devel\", \"libXxf86vm-devel\", \"libXv-devel\",\n \"xcb-util-devel\", \"libuuid-devel\"]\n packages.append(\"xkeyboard-config\" if tools.os_info.with_zypper else \"xkeyboard-config-devel\")\n elif tools.os_info.with_pacman:\n packages = [\"libxcb\", \"libfontenc\", \"libice\", \"libsm\", \"libxaw\", \"libxcomposite\", \"libxcursor\",\n \"libxdamage\", \"libxdmcp\", \"libxtst\", \"libxinerama\", \"libxkbfile\", \"libxrandr\", \"libxres\",\n \"libxss\", \"libxvmc\", \"xtrans\", \"xcb-util-wm\", \"xcb-util-image\",\"xcb-util-keysyms\", \"xcb-util-renderutil\",\n \"libxxf86vm\", \"libxv\", \"xkeyboard-config\", \"xcb-util\", \"util-linux-libs\"]\n else:\n self.output.warn(\"Do not know how to install 'xorg' for {}.\".format(tools.os_info.linux_distro))\n \n elif tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n packages = [\"libX11\", \"libfontenc\", \"libice\", \"libsm\", \"libxaw\", \"libxcomposite\", \"libxcursor\",\n \"libxdamage\", \"libxdmcp\", \"libxtst\", \"libxinerama\", \"libxkbfile\", \"libxrandr\", \"libxres\",\n \"libXScrnSaver\", \"libxvmc\", \"xtrans\", \"xcb-util-wm\", \"xcb-util-image\", \"xcb-util-keysyms\", \"xcb-util-renderutil\",\n \"libxxf86vm\", \"libxv\", \"xkeyboard-config\", \"xcb-util\"]\n if packages:\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode=\"verify\")\n package_tool.install_packages(update=True, packages=packages)\n\n def package_info(self):\n for name in [\"x11\", \"x11-xcb\", \"fontenc\", \"ice\", \"sm\", \"xau\", \"xaw7\",\n \"xcomposite\", \"xcursor\", \"xdamage\", \"xdmcp\", \"xext\", \"xfixes\", \"xi\",\n \"xinerama\", \"xkbfile\", \"xmu\", \"xmuu\", \"xpm\", \"xrandr\", \"xrender\", \"xres\",\n \"xscrnsaver\", \"xt\", \"xtst\", \"xv\", \"xvmc\", \"xxf86vm\", \"xtrans\",\n \"xcb-xkb\", \"xcb-icccm\", \"xcb-image\", \"xcb-keysyms\", \"xcb-randr\", \"xcb-render\",\n \"xcb-renderutil\", \"xcb-shape\", \"xcb-shm\", \"xcb-sync\", \"xcb-xfixes\",\n \"xcb-xinerama\", \"xcb\", \"xkeyboard-config\", \"xcb-atom\", \"xcb-aux\", \"xcb-event\", \"xcb-util\",\n \"xcb-dri3\"] + ([] if self.settings.os == \"FreeBSD\" else [\"uuid\"]):\n self._fill_cppinfo_from_pkgconfig(name)\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"sm\"].requires.append(\"uuid\")\n\n", "path": "recipes/xorg/all/conanfile.py"}]}
| 2,699 | 453 |
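A short sketch of how the fix in the xorg record above carries pkg-config variables such as `xkb_base` into the generated `.pc` files; it reuses the same Conan 1.x `tools.PkgConfig(...).variables` helper as the golden diff, and the component name is illustrative.

```
from conans import tools


def custom_content_for(name="xkeyboard-config"):
    # Variables read from the system .pc file, e.g. xkb_base=/usr/share/X11/xkb.
    variables = tools.PkgConfig(name).variables
    # This is the string stored via set_property("pkg_config_custom_content", ...);
    # Conan appends it to the generated .pc file so consumers such as
    # xkbcommon's Meson build can query xkb_base again.
    return "\n".join("%s=%s" % (key, value) for key, value in variables.items())
```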
gh_patches_debug_12522
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-6105
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OAuth source is added to user attributes each time user write is run
**Describe the bug**
The user attribute `goauthentik.io/user/sources` gets a new entry appended each time a user write stage is run.
```
goauthentik.io/user/sources:
- Wordpress
- Wordpress
- Wordpress
- Wordpress
- Wordpress
...
```
**To Reproduce**
Steps to reproduce the behavior:
1. Configure OAuth source
2. Add User Write Stage to default-source-authentication flow
3. Login a few times
4. Each login adds a new entry under `goauthentik.io/user/sources`
**Expected behavior**
It should only add the source if it isn't there already
**Version and Deployment (please complete the following information):**
- authentik version: 2023.5.4
- Deployment: docker
</issue>
<code>
[start of authentik/stages/user_write/stage.py]
1 """Write stage logic"""
2 from typing import Any, Optional
3
4 from django.contrib.auth import update_session_auth_hash
5 from django.db import transaction
6 from django.db.utils import IntegrityError, InternalError
7 from django.http import HttpRequest, HttpResponse
8 from django.utils.translation import gettext as _
9 from rest_framework.exceptions import ValidationError
10
11 from authentik.core.middleware import SESSION_KEY_IMPERSONATE_USER
12 from authentik.core.models import USER_ATTRIBUTE_SOURCES, User, UserSourceConnection
13 from authentik.core.sources.stage import PLAN_CONTEXT_SOURCES_CONNECTION
14 from authentik.flows.planner import PLAN_CONTEXT_PENDING_USER
15 from authentik.flows.stage import StageView
16 from authentik.flows.views.executor import FlowExecutorView
17 from authentik.stages.password import BACKEND_INBUILT
18 from authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND
19 from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT
20 from authentik.stages.user_write.models import UserCreationMode
21 from authentik.stages.user_write.signals import user_write
22
23 PLAN_CONTEXT_GROUPS = "groups"
24 PLAN_CONTEXT_USER_PATH = "user_path"
25
26
27 class UserWriteStageView(StageView):
28 """Finalise Enrollment flow by creating a user object."""
29
30 def __init__(self, executor: FlowExecutorView, **kwargs):
31 super().__init__(executor, **kwargs)
32 self.disallowed_user_attributes = [
33 "groups",
34 ]
35
36 @staticmethod
37 def write_attribute(user: User, key: str, value: Any):
38 """Allow use of attributes.foo.bar when writing to a user, with full
39 recursion"""
40 parts = key.replace("_", ".").split(".")
41 if len(parts) < 1: # pragma: no cover
42 return
43 # Function will always be called with a key like attributes.
44 # this is just a sanity check to ensure that is removed
45 if parts[0] == "attributes":
46 parts = parts[1:]
47 attrs = user.attributes
48 for comp in parts[:-1]:
49 if comp not in attrs:
50 attrs[comp] = {}
51 attrs = attrs.get(comp)
52 attrs[parts[-1]] = value
53
54 def post(self, request: HttpRequest) -> HttpResponse:
55 """Wrapper for post requests"""
56 return self.get(request)
57
58 def ensure_user(self) -> tuple[Optional[User], bool]:
59 """Ensure a user exists"""
60 user_created = False
61 path = self.executor.plan.context.get(
62 PLAN_CONTEXT_USER_PATH, self.executor.current_stage.user_path_template
63 )
64 if path == "":
65 path = User.default_path()
66 if not self.request.user.is_anonymous:
67 self.executor.plan.context.setdefault(PLAN_CONTEXT_PENDING_USER, self.request.user)
68 if (
69 PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context
70 or self.executor.current_stage.user_creation_mode == UserCreationMode.ALWAYS_CREATE
71 ):
72 if self.executor.current_stage.user_creation_mode == UserCreationMode.NEVER_CREATE:
73 return None, False
74 self.executor.plan.context[PLAN_CONTEXT_PENDING_USER] = User(
75 is_active=not self.executor.current_stage.create_users_as_inactive,
76 path=path,
77 )
78 self.executor.plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT
79 self.logger.debug(
80 "Created new user",
81 flow_slug=self.executor.flow.slug,
82 )
83 user_created = True
84 user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]
85 return user, user_created
86
87 def update_user(self, user: User):
88 """Update `user` with data from plan context
89
90 Only simple attributes are updated, nothing which requires a foreign key or m2m"""
91 data: dict = self.executor.plan.context[PLAN_CONTEXT_PROMPT]
92 # This is always sent back but not written to the user
93 data.pop("component", None)
94 for key, value in data.items():
95 setter_name = f"set_{key}"
96 # Check if user has a setter for this key, like set_password
97 if hasattr(user, setter_name):
98 setter = getattr(user, setter_name)
99 if callable(setter):
100 setter(value)
101 elif key in self.disallowed_user_attributes:
102 self.logger.info("discarding key", key=key)
103 continue
104 # For exact attributes match, update the dictionary in place
105 elif key == "attributes":
106 user.attributes.update(value)
107 # If using dot notation, use the correct helper to update the nested value
108 elif key.startswith("attributes.") or key.startswith("attributes_"):
109 UserWriteStageView.write_attribute(user, key, value)
110 # User has this key already
111 elif hasattr(user, key):
112 setattr(user, key, value)
113 # If none of the cases above matched, we have an attribute that the user doesn't have,
114 # has no setter for, is not a nested attributes value and as such is invalid
115 else:
116 self.logger.info("discarding key", key=key)
117 continue
118 # Check if we're writing from a source, and save the source to the attributes
119 if PLAN_CONTEXT_SOURCES_CONNECTION in self.executor.plan.context:
120 if USER_ATTRIBUTE_SOURCES not in user.attributes or not isinstance(
121 user.attributes.get(USER_ATTRIBUTE_SOURCES), list
122 ):
123 user.attributes[USER_ATTRIBUTE_SOURCES] = []
124 connection: UserSourceConnection = self.executor.plan.context[
125 PLAN_CONTEXT_SOURCES_CONNECTION
126 ]
127 user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)
128
129 def get(self, request: HttpRequest) -> HttpResponse:
130 """Save data in the current flow to the currently pending user. If no user is pending,
131 a new user is created."""
132 if PLAN_CONTEXT_PROMPT not in self.executor.plan.context:
133 message = _("No Pending data.")
134 self.logger.debug(message)
135 return self.executor.stage_invalid(message)
136 data = self.executor.plan.context[PLAN_CONTEXT_PROMPT]
137 user, user_created = self.ensure_user()
138 if not user:
139 message = _("No user found and can't create new user.")
140 self.logger.info(message)
141 return self.executor.stage_invalid(message)
142 # Before we change anything, check if the user is the same as in the request
143 # and we're updating a password. In that case we need to update the session hash
144 # Also check that we're not currently impersonating, so we don't update the session
145 should_update_session = False
146 if (
147 any("password" in x for x in data.keys())
148 and self.request.user.pk == user.pk
149 and SESSION_KEY_IMPERSONATE_USER not in self.request.session
150 ):
151 should_update_session = True
152 try:
153 self.update_user(user)
154 except ValidationError as exc:
155 self.logger.warning("failed to update user", exc=exc)
156 return self.executor.stage_invalid(_("Failed to update user. Please try again later."))
157 # Extra check to prevent flows from saving a user with a blank username
158 if user.username == "":
159 self.logger.warning("Aborting write to empty username", user=user)
160 return self.executor.stage_invalid()
161 try:
162 with transaction.atomic():
163 user.save()
164 if self.executor.current_stage.create_users_group:
165 user.ak_groups.add(self.executor.current_stage.create_users_group)
166 if PLAN_CONTEXT_GROUPS in self.executor.plan.context:
167 user.ak_groups.add(*self.executor.plan.context[PLAN_CONTEXT_GROUPS])
168 except (IntegrityError, ValueError, TypeError, InternalError) as exc:
169 self.logger.warning("Failed to save user", exc=exc)
170 return self.executor.stage_invalid(_("Failed to update user. Please try again later."))
171 user_write.send(sender=self, request=request, user=user, data=data, created=user_created)
172 # Check if the password has been updated, and update the session auth hash
173 if should_update_session:
174 update_session_auth_hash(self.request, user)
175 self.logger.debug("Updated session hash", user=user)
176 self.logger.debug(
177 "Updated existing user",
178 user=user,
179 flow_slug=self.executor.flow.slug,
180 )
181 return self.executor.stage_ok()
182
[end of authentik/stages/user_write/stage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/authentik/stages/user_write/stage.py b/authentik/stages/user_write/stage.py
--- a/authentik/stages/user_write/stage.py
+++ b/authentik/stages/user_write/stage.py
@@ -124,7 +124,8 @@
connection: UserSourceConnection = self.executor.plan.context[
PLAN_CONTEXT_SOURCES_CONNECTION
]
- user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)
+ if connection.source.name not in user.attributes[USER_ATTRIBUTE_SOURCES]:
+ user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)
def get(self, request: HttpRequest) -> HttpResponse:
"""Save data in the current flow to the currently pending user. If no user is pending,
|
{"golden_diff": "diff --git a/authentik/stages/user_write/stage.py b/authentik/stages/user_write/stage.py\n--- a/authentik/stages/user_write/stage.py\n+++ b/authentik/stages/user_write/stage.py\n@@ -124,7 +124,8 @@\n connection: UserSourceConnection = self.executor.plan.context[\n PLAN_CONTEXT_SOURCES_CONNECTION\n ]\n- user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n+ if connection.source.name not in user.attributes[USER_ATTRIBUTE_SOURCES]:\n+ user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n \n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Save data in the current flow to the currently pending user. If no user is pending,\n", "issue": "OAuth source is added to user attributes each time user write is run\n**Describe the bug**\r\nThe user attribute `goauthentik.io/user/sources` gets appended each time an user write stage is run.\r\n\r\n```\r\ngoauthentik.io/user/sources:\r\n - Wordpress\r\n - Wordpress\r\n - Wordpress\r\n - Wordpress\r\n - Wordpress\r\n ...\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Configure OAuth source\r\n2. Add User Write Stage to default-source-authentication flow\r\n3. Login a few times\r\n4. Each login adds a new entry under `goauthentik.io/user/sources`\r\n\r\n**Expected behavior**\r\nIt should only add the source if it isn't there already\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.5.4\r\n- Deployment: docker\n", "before_files": [{"content": "\"\"\"Write stage logic\"\"\"\nfrom typing import Any, Optional\n\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError, InternalError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\nfrom rest_framework.exceptions import ValidationError\n\nfrom authentik.core.middleware import SESSION_KEY_IMPERSONATE_USER\nfrom authentik.core.models import USER_ATTRIBUTE_SOURCES, User, UserSourceConnection\nfrom authentik.core.sources.stage import PLAN_CONTEXT_SOURCES_CONNECTION\nfrom authentik.flows.planner import PLAN_CONTEXT_PENDING_USER\nfrom authentik.flows.stage import StageView\nfrom authentik.flows.views.executor import FlowExecutorView\nfrom authentik.stages.password import BACKEND_INBUILT\nfrom authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\nfrom authentik.stages.user_write.models import UserCreationMode\nfrom authentik.stages.user_write.signals import user_write\n\nPLAN_CONTEXT_GROUPS = \"groups\"\nPLAN_CONTEXT_USER_PATH = \"user_path\"\n\n\nclass UserWriteStageView(StageView):\n \"\"\"Finalise Enrollment flow by creating a user object.\"\"\"\n\n def __init__(self, executor: FlowExecutorView, **kwargs):\n super().__init__(executor, **kwargs)\n self.disallowed_user_attributes = [\n \"groups\",\n ]\n\n @staticmethod\n def write_attribute(user: User, key: str, value: Any):\n \"\"\"Allow use of attributes.foo.bar when writing to a user, with full\n recursion\"\"\"\n parts = key.replace(\"_\", \".\").split(\".\")\n if len(parts) < 1: # pragma: no cover\n return\n # Function will always be called with a key like attributes.\n # this is just a sanity check to ensure that is removed\n if parts[0] == \"attributes\":\n parts = parts[1:]\n attrs = user.attributes\n for comp in parts[:-1]:\n if comp not in attrs:\n attrs[comp] = {}\n attrs = attrs.get(comp)\n 
attrs[parts[-1]] = value\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Wrapper for post requests\"\"\"\n return self.get(request)\n\n def ensure_user(self) -> tuple[Optional[User], bool]:\n \"\"\"Ensure a user exists\"\"\"\n user_created = False\n path = self.executor.plan.context.get(\n PLAN_CONTEXT_USER_PATH, self.executor.current_stage.user_path_template\n )\n if path == \"\":\n path = User.default_path()\n if not self.request.user.is_anonymous:\n self.executor.plan.context.setdefault(PLAN_CONTEXT_PENDING_USER, self.request.user)\n if (\n PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context\n or self.executor.current_stage.user_creation_mode == UserCreationMode.ALWAYS_CREATE\n ):\n if self.executor.current_stage.user_creation_mode == UserCreationMode.NEVER_CREATE:\n return None, False\n self.executor.plan.context[PLAN_CONTEXT_PENDING_USER] = User(\n is_active=not self.executor.current_stage.create_users_as_inactive,\n path=path,\n )\n self.executor.plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT\n self.logger.debug(\n \"Created new user\",\n flow_slug=self.executor.flow.slug,\n )\n user_created = True\n user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]\n return user, user_created\n\n def update_user(self, user: User):\n \"\"\"Update `user` with data from plan context\n\n Only simple attributes are updated, nothing which requires a foreign key or m2m\"\"\"\n data: dict = self.executor.plan.context[PLAN_CONTEXT_PROMPT]\n # This is always sent back but not written to the user\n data.pop(\"component\", None)\n for key, value in data.items():\n setter_name = f\"set_{key}\"\n # Check if user has a setter for this key, like set_password\n if hasattr(user, setter_name):\n setter = getattr(user, setter_name)\n if callable(setter):\n setter(value)\n elif key in self.disallowed_user_attributes:\n self.logger.info(\"discarding key\", key=key)\n continue\n # For exact attributes match, update the dictionary in place\n elif key == \"attributes\":\n user.attributes.update(value)\n # If using dot notation, use the correct helper to update the nested value\n elif key.startswith(\"attributes.\") or key.startswith(\"attributes_\"):\n UserWriteStageView.write_attribute(user, key, value)\n # User has this key already\n elif hasattr(user, key):\n setattr(user, key, value)\n # If none of the cases above matched, we have an attribute that the user doesn't have,\n # has no setter for, is not a nested attributes value and as such is invalid\n else:\n self.logger.info(\"discarding key\", key=key)\n continue\n # Check if we're writing from a source, and save the source to the attributes\n if PLAN_CONTEXT_SOURCES_CONNECTION in self.executor.plan.context:\n if USER_ATTRIBUTE_SOURCES not in user.attributes or not isinstance(\n user.attributes.get(USER_ATTRIBUTE_SOURCES), list\n ):\n user.attributes[USER_ATTRIBUTE_SOURCES] = []\n connection: UserSourceConnection = self.executor.plan.context[\n PLAN_CONTEXT_SOURCES_CONNECTION\n ]\n user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n\n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Save data in the current flow to the currently pending user. 
If no user is pending,\n a new user is created.\"\"\"\n if PLAN_CONTEXT_PROMPT not in self.executor.plan.context:\n message = _(\"No Pending data.\")\n self.logger.debug(message)\n return self.executor.stage_invalid(message)\n data = self.executor.plan.context[PLAN_CONTEXT_PROMPT]\n user, user_created = self.ensure_user()\n if not user:\n message = _(\"No user found and can't create new user.\")\n self.logger.info(message)\n return self.executor.stage_invalid(message)\n # Before we change anything, check if the user is the same as in the request\n # and we're updating a password. In that case we need to update the session hash\n # Also check that we're not currently impersonating, so we don't update the session\n should_update_session = False\n if (\n any(\"password\" in x for x in data.keys())\n and self.request.user.pk == user.pk\n and SESSION_KEY_IMPERSONATE_USER not in self.request.session\n ):\n should_update_session = True\n try:\n self.update_user(user)\n except ValidationError as exc:\n self.logger.warning(\"failed to update user\", exc=exc)\n return self.executor.stage_invalid(_(\"Failed to update user. Please try again later.\"))\n # Extra check to prevent flows from saving a user with a blank username\n if user.username == \"\":\n self.logger.warning(\"Aborting write to empty username\", user=user)\n return self.executor.stage_invalid()\n try:\n with transaction.atomic():\n user.save()\n if self.executor.current_stage.create_users_group:\n user.ak_groups.add(self.executor.current_stage.create_users_group)\n if PLAN_CONTEXT_GROUPS in self.executor.plan.context:\n user.ak_groups.add(*self.executor.plan.context[PLAN_CONTEXT_GROUPS])\n except (IntegrityError, ValueError, TypeError, InternalError) as exc:\n self.logger.warning(\"Failed to save user\", exc=exc)\n return self.executor.stage_invalid(_(\"Failed to update user. Please try again later.\"))\n user_write.send(sender=self, request=request, user=user, data=data, created=user_created)\n # Check if the password has been updated, and update the session auth hash\n if should_update_session:\n update_session_auth_hash(self.request, user)\n self.logger.debug(\"Updated session hash\", user=user)\n self.logger.debug(\n \"Updated existing user\",\n user=user,\n flow_slug=self.executor.flow.slug,\n )\n return self.executor.stage_ok()\n", "path": "authentik/stages/user_write/stage.py"}]}
| 2,871 | 166 |
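The guard that the golden diff above adds — append the source name only when it is absent — can be sketched as a standalone snippet. The attribute key comes from the issue text; the helper name and the login loop are illustrative assumptions, not authentik code:

```python
# Idempotent append: the sources list only grows when the name is not already present.
def record_source(attributes: dict, source_name: str) -> None:
    sources = attributes.setdefault("goauthentik.io/user/sources", [])
    if source_name not in sources:  # the missing check that caused repeated entries
        sources.append(source_name)

attrs: dict = {}
for _ in range(3):  # simulate three logins through the same OAuth source
    record_source(attrs, "Wordpress")
assert attrs["goauthentik.io/user/sources"] == ["Wordpress"]
```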
gh_patches_debug_33559
|
rasdani/github-patches
|
git_diff
|
aws-cloudformation__cfn-lint-954
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Condition: <list> on Resource triggers "Unknown exception while processing rule"
*cfn-lint version: (`cfn-lint --version`)*
cfn-lint 0.21.4
*Description of issue.*
A resource with a Condition property defined as a list triggers:
```
E0002 Unknown exception while processing rule E8002: unhashable type: 'list_node'
/tmp/cfn-lint-condition-list-error.yaml:1:1
```
I believe that the use of lists / multiple values for a Condition property of a Resource is probably not legal (although I was unable to find clear confirmation of that in the documentation during a quick scan), but it should probably trigger a lint error rather than an exception.
It would also be helpful, if possible, to include the template line number where the exception was triggered, rather than line:char 1:1, to make tracking the cause of such problems easier.

I have also seen the same exception for rule W1001; I thought it had the same cause, but my reproduction test didn't re-trigger the W1001 case.
*Reproduce example*
```
AWSTemplateFormatVersion: 2010-09-09
Description: "cfn-lint condition list error"
Conditions:
Cond1: !Equals [ !Ref 'AWS::Region', 'us-east-1' ]
Cond2: !Equals [ !Ref 'AWS::Region', 'eu-west-1' ]
Resources:
EIP1:
Type: AWS::EC2::EIP
Condition:
- Cond1
- Cond2
Properties:
Domain: 'vpc'
EIP2:
Type: AWS::EC2::EIP
Condition: Cond1
Properties:
Domain: 'vpc'
EIP3:
Type: AWS::EC2::EIP
Condition: Cond2
Properties:
Domain: 'vpc'
```
</issue>
<code>
[start of src/cfnlint/rules/conditions/Exists.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import six
18 from cfnlint import CloudFormationLintRule
19 from cfnlint import RuleMatch
20
21
22 class Exists(CloudFormationLintRule):
23 """Check if used Conditions are defined """
24 id = 'E8002'
25 shortdesc = 'Check if the referenced Conditions are defined'
26 description = 'Making sure the used conditions are actually defined in the Conditions section'
27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html'
28 tags = ['conditions']
29
30 def match(self, cfn):
31 """Check CloudFormation Conditions"""
32
33 matches = []
34 ref_conditions = {}
35
36 # Get all defined conditions
37 conditions = cfn.template.get('Conditions', {})
38
39 # Get all "If's" that reference a Condition
40 iftrees = cfn.search_deep_keys('Fn::If')
41 for iftree in iftrees:
42 if isinstance(iftree[-1], list):
43 ref_conditions[iftree[-1][0]] = iftree
44 else:
45 ref_conditions[iftree[-1]] = iftree
46
47 # Get resource's Conditions
48 for resource_name, resource_values in cfn.get_resources().items():
49 if 'Condition' in resource_values:
50 path = ['Resources', resource_name, 'Condition']
51 ref_conditions[resource_values['Condition']] = path
52
53 # Get conditions used by another condition
54 condtrees = cfn.search_deep_keys('Condition')
55
56 for condtree in condtrees:
57 if condtree[0] == 'Conditions':
58 if isinstance(condtree[-1], (str, six.text_type, six.string_types)):
59 path = ['Conditions', condtree[-1]]
60 ref_conditions[condtree[-1]] = path
61
62 # Get Output Conditions
63 for _, output_values in cfn.template.get('Outputs', {}).items():
64 if 'Condition' in output_values:
65 path = ['Outputs', output_values['Condition']]
66 ref_conditions[output_values['Condition']] = path
67
68 # Check if all the conditions are defined
69 for ref_condition, ref_path in ref_conditions.items():
70 if ref_condition not in conditions:
71 message = 'Condition {0} is not defined.'
72 matches.append(RuleMatch(
73 ref_path,
74 message.format(ref_condition)
75 ))
76
77 return matches
78
[end of src/cfnlint/rules/conditions/Exists.py]
[start of src/cfnlint/rules/resources/Configuration.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19 import cfnlint.helpers
20
21
22 class Configuration(CloudFormationLintRule):
23 """Check Base Resource Configuration"""
24 id = 'E3001'
25 shortdesc = 'Basic CloudFormation Resource Check'
26 description = 'Making sure the basic CloudFormation resources ' + \
27 'are properly configured'
28 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'
29 tags = ['resources']
30
31 def match(self, cfn):
32 """Check CloudFormation Resources"""
33
34 matches = []
35
36 valid_attributes = [
37 'CreationPolicy',
38 'DeletionPolicy',
39 'DependsOn',
40 'Metadata',
41 'UpdatePolicy',
42 'UpdateReplacePolicy',
43 'Properties',
44 'Type',
45 'Condition'
46 ]
47
48 valid_custom_attributes = [
49 'Version',
50 'Properties',
51 'DependsOn',
52 'Metadata',
53 'Condition',
54 'Type',
55 ]
56
57 resources = cfn.template.get('Resources', {})
58 if not isinstance(resources, dict):
59 message = 'Resource not properly configured'
60 matches.append(RuleMatch(['Resources'], message))
61 else:
62 for resource_name, resource_values in cfn.template.get('Resources', {}).items():
63 self.logger.debug('Validating resource %s base configuration', resource_name)
64 if not isinstance(resource_values, dict):
65 message = 'Resource not properly configured at {0}'
66 matches.append(RuleMatch(
67 ['Resources', resource_name],
68 message.format(resource_name)
69 ))
70 continue
71 resource_type = resource_values.get('Type', '')
72 check_attributes = []
73 if resource_type.startswith('Custom::') or resource_type == 'AWS::CloudFormation::CustomResource':
74 check_attributes = valid_custom_attributes
75 else:
76 check_attributes = valid_attributes
77
78 for property_key, _ in resource_values.items():
79 if property_key not in check_attributes:
80 message = 'Invalid resource attribute {0} for resource {1}'
81 matches.append(RuleMatch(
82 ['Resources', resource_name, property_key],
83 message.format(property_key, resource_name)))
84
85 resource_type = resource_values.get('Type', '')
86 if not resource_type:
87 message = 'Type not defined for resource {0}'
88 matches.append(RuleMatch(
89 ['Resources', resource_name],
90 message.format(resource_name)
91 ))
92 else:
93 self.logger.debug('Check resource types by region...')
94 for region, specs in cfnlint.helpers.RESOURCE_SPECS.items():
95 if region in cfn.regions:
96 if resource_type not in specs['ResourceTypes']:
97 if not resource_type.startswith(('Custom::', 'AWS::Serverless::')):
98 message = 'Invalid or unsupported Type {0} for resource {1} in {2}'
99 matches.append(RuleMatch(
100 ['Resources', resource_name, 'Type'],
101 message.format(resource_type, resource_name, region)
102 ))
103
104 if 'Properties' not in resource_values:
105 resource_spec = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]]
106 if resource_type in resource_spec['ResourceTypes']:
107 properties_spec = resource_spec['ResourceTypes'][resource_type]['Properties']
108 # pylint: disable=len-as-condition
109 if len(properties_spec) > 0:
110 required = 0
111 for _, property_spec in properties_spec.items():
112 if property_spec.get('Required', False):
113 required += 1
114 if required > 0:
115 if resource_type == 'AWS::CloudFormation::WaitCondition' and 'CreationPolicy' in resource_values.keys():
116 self.logger.debug('Exception to required properties section as CreationPolicy is defined.')
117 else:
118 message = 'Properties not defined for resource {0}'
119 matches.append(RuleMatch(
120 ['Resources', resource_name],
121 message.format(resource_name)
122 ))
123
124 return matches
125
[end of src/cfnlint/rules/resources/Configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/conditions/Exists.py b/src/cfnlint/rules/conditions/Exists.py
--- a/src/cfnlint/rules/conditions/Exists.py
+++ b/src/cfnlint/rules/conditions/Exists.py
@@ -46,9 +46,10 @@
# Get resource's Conditions
for resource_name, resource_values in cfn.get_resources().items():
- if 'Condition' in resource_values:
+ condition = resource_values.get('Condition')
+ if isinstance(condition, six.string_types): # make sure its a string
path = ['Resources', resource_name, 'Condition']
- ref_conditions[resource_values['Condition']] = path
+ ref_conditions[condition] = path
# Get conditions used by another condition
condtrees = cfn.search_deep_keys('Condition')
diff --git a/src/cfnlint/rules/resources/Configuration.py b/src/cfnlint/rules/resources/Configuration.py
--- a/src/cfnlint/rules/resources/Configuration.py
+++ b/src/cfnlint/rules/resources/Configuration.py
@@ -14,6 +14,7 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
+import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
import cfnlint.helpers
@@ -82,6 +83,15 @@
['Resources', resource_name, property_key],
message.format(property_key, resource_name)))
+ # validate condition is a string
+ condition = resource_values.get('Condition', '')
+ if not isinstance(condition, six.string_types):
+ message = 'Condition for resource {0} should be a string'
+ matches.append(RuleMatch(
+ ['Resources', resource_name, 'Condition'],
+ message.format(resource_name)
+ ))
+
resource_type = resource_values.get('Type', '')
if not resource_type:
message = 'Type not defined for resource {0}'
|
{"golden_diff": "diff --git a/src/cfnlint/rules/conditions/Exists.py b/src/cfnlint/rules/conditions/Exists.py\n--- a/src/cfnlint/rules/conditions/Exists.py\n+++ b/src/cfnlint/rules/conditions/Exists.py\n@@ -46,9 +46,10 @@\n \n # Get resource's Conditions\n for resource_name, resource_values in cfn.get_resources().items():\n- if 'Condition' in resource_values:\n+ condition = resource_values.get('Condition')\n+ if isinstance(condition, six.string_types): # make sure its a string\n path = ['Resources', resource_name, 'Condition']\n- ref_conditions[resource_values['Condition']] = path\n+ ref_conditions[condition] = path\n \n # Get conditions used by another condition\n condtrees = cfn.search_deep_keys('Condition')\ndiff --git a/src/cfnlint/rules/resources/Configuration.py b/src/cfnlint/rules/resources/Configuration.py\n--- a/src/cfnlint/rules/resources/Configuration.py\n+++ b/src/cfnlint/rules/resources/Configuration.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n import cfnlint.helpers\n@@ -82,6 +83,15 @@\n ['Resources', resource_name, property_key],\n message.format(property_key, resource_name)))\n \n+ # validate condition is a string\n+ condition = resource_values.get('Condition', '')\n+ if not isinstance(condition, six.string_types):\n+ message = 'Condition for resource {0} should be a string'\n+ matches.append(RuleMatch(\n+ ['Resources', resource_name, 'Condition'],\n+ message.format(resource_name)\n+ ))\n+\n resource_type = resource_values.get('Type', '')\n if not resource_type:\n message = 'Type not defined for resource {0}'\n", "issue": "Condition: <list> on Resource triggers \"Unknown exception while processing rule\"\n*cfn-lint version: (`cfn-lint --version`)*\r\ncfn-lint 0.21.4\r\n\r\n*Description of issue.*\r\nA resource with a Condition property defined as a list triggers:\r\n```\r\nE0002 Unknown exception while processing rule E8002: unhashable type: 'list_node'\r\n/tmp/cfn-lint-condition-list-error.yaml:1:1\r\n```\r\nI believe that the use of lists / multiple values for a Condition property of a Resource is probably not legal (although I was unable to find clear confirmation of that in the documentation during a quick scan), but it should probably trigger a lint error rather than an exception.\r\n\r\nIt would also be helpful, if possible, to include the template line-number where the exception was triggered, rather than line:char 1:1 to make tracking the cause of such problems easier.\r\n\r\nI have also seen the same exception, but for rule W1001, I though it was the same cause, but my reproduce test didn't re-trigger the W1001 case.\r\n\r\n*Reproduce example*\r\n```\r\nAWSTemplateFormatVersion: 2010-09-09\r\nDescription: \"cfn-lint condition list error\"\r\n\r\nConditions:\r\n Cond1: !Equals [ !Ref 'AWS::Region', 'us-east-1' ]\r\n Cond2: !Equals [ !Ref 'AWS::Region', 'eu-west-1' ]\r\n\r\nResources:\r\n\r\n EIP1:\r\n Type: AWS::EC2::EIP\r\n Condition:\r\n - Cond1\r\n - Cond2\r\n Properties:\r\n Domain: 'vpc'\r\n\r\n EIP2:\r\n Type: AWS::EC2::EIP\r\n Condition: Cond1\r\n Properties:\r\n Domain: 'vpc'\r\n\r\n EIP3:\r\n Type: AWS::EC2::EIP\r\n Condition: Cond2\r\n Properties:\r\n Domain: 'vpc'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Exists(CloudFormationLintRule):\n \"\"\"Check if used Conditions are defined \"\"\"\n id = 'E8002'\n shortdesc = 'Check if the referenced Conditions are defined'\n description = 'Making sure the used conditions are actually defined in the Conditions section'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html'\n tags = ['conditions']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Conditions\"\"\"\n\n matches = []\n ref_conditions = {}\n\n # Get all defined conditions\n conditions = cfn.template.get('Conditions', {})\n\n # Get all \"If's\" that reference a Condition\n iftrees = cfn.search_deep_keys('Fn::If')\n for iftree in iftrees:\n if isinstance(iftree[-1], list):\n ref_conditions[iftree[-1][0]] = iftree\n else:\n ref_conditions[iftree[-1]] = iftree\n\n # Get resource's Conditions\n for resource_name, resource_values in cfn.get_resources().items():\n if 'Condition' in resource_values:\n path = ['Resources', resource_name, 'Condition']\n ref_conditions[resource_values['Condition']] = path\n\n # Get conditions used by another condition\n condtrees = cfn.search_deep_keys('Condition')\n\n for condtree in condtrees:\n if condtree[0] == 'Conditions':\n if isinstance(condtree[-1], (str, six.text_type, six.string_types)):\n path = ['Conditions', condtree[-1]]\n ref_conditions[condtree[-1]] = path\n\n # Get Output Conditions\n for _, output_values in cfn.template.get('Outputs', {}).items():\n if 'Condition' in output_values:\n path = ['Outputs', output_values['Condition']]\n ref_conditions[output_values['Condition']] = path\n\n # Check if all the conditions are defined\n for ref_condition, ref_path in ref_conditions.items():\n if ref_condition not in conditions:\n message = 'Condition {0} is not defined.'\n matches.append(RuleMatch(\n ref_path,\n message.format(ref_condition)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/conditions/Exists.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check Base Resource Configuration\"\"\"\n id = 'E3001'\n shortdesc = 'Basic CloudFormation Resource Check'\n description = 'Making sure the basic CloudFormation resources ' + \\\n 'are properly configured'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['resources']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Resources\"\"\"\n\n matches = []\n\n valid_attributes = [\n 'CreationPolicy',\n 'DeletionPolicy',\n 'DependsOn',\n 'Metadata',\n 'UpdatePolicy',\n 'UpdateReplacePolicy',\n 'Properties',\n 'Type',\n 'Condition'\n ]\n\n valid_custom_attributes = [\n 'Version',\n 'Properties',\n 'DependsOn',\n 'Metadata',\n 'Condition',\n 'Type',\n ]\n\n resources = cfn.template.get('Resources', {})\n if not isinstance(resources, dict):\n message = 'Resource not properly configured'\n matches.append(RuleMatch(['Resources'], message))\n else:\n for resource_name, resource_values in cfn.template.get('Resources', {}).items():\n self.logger.debug('Validating resource %s base configuration', resource_name)\n if not isinstance(resource_values, dict):\n message = 'Resource not properly configured at {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n continue\n resource_type = resource_values.get('Type', '')\n check_attributes = []\n if resource_type.startswith('Custom::') or resource_type == 'AWS::CloudFormation::CustomResource':\n check_attributes = valid_custom_attributes\n else:\n check_attributes = valid_attributes\n\n for property_key, _ in resource_values.items():\n if property_key not in check_attributes:\n message = 'Invalid resource attribute {0} for resource {1}'\n matches.append(RuleMatch(\n ['Resources', resource_name, property_key],\n message.format(property_key, resource_name)))\n\n resource_type = resource_values.get('Type', '')\n if not resource_type:\n message = 'Type not defined for resource {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n else:\n self.logger.debug('Check resource types by region...')\n for region, specs in cfnlint.helpers.RESOURCE_SPECS.items():\n if region in cfn.regions:\n if resource_type not in specs['ResourceTypes']:\n if not resource_type.startswith(('Custom::', 'AWS::Serverless::')):\n message = 'Invalid or unsupported Type {0} for resource {1} in {2}'\n matches.append(RuleMatch(\n ['Resources', resource_name, 'Type'],\n message.format(resource_type, resource_name, 
region)\n ))\n\n if 'Properties' not in resource_values:\n resource_spec = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]]\n if resource_type in resource_spec['ResourceTypes']:\n properties_spec = resource_spec['ResourceTypes'][resource_type]['Properties']\n # pylint: disable=len-as-condition\n if len(properties_spec) > 0:\n required = 0\n for _, property_spec in properties_spec.items():\n if property_spec.get('Required', False):\n required += 1\n if required > 0:\n if resource_type == 'AWS::CloudFormation::WaitCondition' and 'CreationPolicy' in resource_values.keys():\n self.logger.debug('Exception to required properties section as CreationPolicy is defined.')\n else:\n message = 'Properties not defined for resource {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/Configuration.py"}]}
| 3,146 | 442 |
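The essence of the fix recorded above — treat a non-string `Condition` as a lint finding instead of using it as a dictionary key — can be shown without cfn-lint itself. `collect_conditions` and its message text are illustrative stand-ins, not the project's API:

```python
# Standalone sketch: only string Condition values become lookup keys; anything
# else is reported rather than raising "unhashable type" later on.
def collect_conditions(resources: dict):
    refs, findings = {}, []
    for name, body in resources.items():
        condition = body.get("Condition")
        if condition is None:
            continue
        if isinstance(condition, str):
            refs[condition] = ["Resources", name, "Condition"]
        else:
            findings.append(f"Condition for resource {name} should be a string")
    return refs, findings

refs, findings = collect_conditions({
    "EIP1": {"Type": "AWS::EC2::EIP", "Condition": ["Cond1", "Cond2"]},
    "EIP2": {"Type": "AWS::EC2::EIP", "Condition": "Cond1"},
})
assert "Cond1" in refs and len(findings) == 1
```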
gh_patches_debug_1252
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-4762
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When too many requests come simultaneously, mitmdump raised an error and quit [ValueError: too many file descriptors in select()]
#### Problem Description
A clear and concise description of what the bug is.
When too many requests come simultaneously, mitmdump raised an error and quit.
Traceback (most recent call last):
File "mitmdump", line 3, in <module>
File "mitmproxy\tools\main.py", line 147, in mitmdump
File "mitmproxy\tools\main.py", line 114, in run
File "mitmproxy\master.py", line 76, in run
File "mitmproxy\master.py", line 59, in run_loop
File "mitmproxy\master.py", line 95, in shutdown
File "asyncio\base_events.py", line 629, in run_until_complete
File "asyncio\base_events.py", line 596, in run_forever
File "asyncio\base_events.py", line 1854, in _run_once
File "selectors.py", line 324, in select
File "selectors.py", line 315, in _select
ValueError: too many file descriptors in select()
[77436] Failed to execute script 'mitmdump' due to unhandled exception!
I googled the error message and found the following answer; I don't know if it's related.
https://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select
#### Steps to reproduce the behavior:
1. I use the following command
`mitmdump.exe -p 8080 --anticomp -q -s "d:\redirect-router.py"`
In the script, I re-write the host for a specific URL
2.
3.
#### System Information
Paste the output of "mitmproxy --version" here.
mitmproxy --version
Mitmproxy: 7.0.2 binary
Python: 3.9.6
OpenSSL: OpenSSL 1.1.1k 25 Mar 2021
Platform: Windows-10-10.0.18363-SP0
</issue>
<code>
[start of mitmproxy/__init__.py]
1 import asyncio
2 import sys
3
4 if sys.platform == 'win32':
5 # workaround for
6 # https://github.com/tornadoweb/tornado/issues/2751
7 # https://www.tornadoweb.org/en/stable/index.html#installation
8 # (copied multiple times in the codebase, please remove all occurrences)
9 asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
10
[end of mitmproxy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py
--- a/mitmproxy/__init__.py
+++ b/mitmproxy/__init__.py
@@ -1,9 +0,0 @@
-import asyncio
-import sys
-
-if sys.platform == 'win32':
- # workaround for
- # https://github.com/tornadoweb/tornado/issues/2751
- # https://www.tornadoweb.org/en/stable/index.html#installation
- # (copied multiple times in the codebase, please remove all occurrences)
- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
|
{"golden_diff": "diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py\n--- a/mitmproxy/__init__.py\n+++ b/mitmproxy/__init__.py\n@@ -1,9 +0,0 @@\n-import asyncio\n-import sys\n-\n-if sys.platform == 'win32':\n- # workaround for\n- # https://github.com/tornadoweb/tornado/issues/2751\n- # https://www.tornadoweb.org/en/stable/index.html#installation\n- # (copied multiple times in the codebase, please remove all occurrences)\n- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n", "issue": "When too many requests come simultaneously, mitmdump called an error and quited [ValueError: too many file descriptors in select()]\n#### Problem Description\r\nA clear and concise description of what the bug is.\r\nWhen too many requests come simultaneously, mitmdump called an error and quited.\r\nTraceback (most recent call last):\r\n File \"mitmdump\", line 3, in <module>\r\n File \"mitmproxy\\tools\\main.py\", line 147, in mitmdump\r\n File \"mitmproxy\\tools\\main.py\", line 114, in run\r\n File \"mitmproxy\\master.py\", line 76, in run\r\n File \"mitmproxy\\master.py\", line 59, in run_loop\r\n File \"mitmproxy\\master.py\", line 95, in shutdown\r\n File \"asyncio\\base_events.py\", line 629, in run_until_complete\r\n File \"asyncio\\base_events.py\", line 596, in run_forever\r\n File \"asyncio\\base_events.py\", line 1854, in _run_once\r\n File \"selectors.py\", line 324, in select\r\n File \"selectors.py\", line 315, in _select\r\nValueError: too many file descriptors in select()\r\n[77436] Failed to execute script 'mitmdump' due to unhandled exception!\r\n\r\nI googled the error message, and found the following answer. Don't know if it's related.\r\nhttps://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select\r\n\r\n#### Steps to reproduce the behavior:\r\n1. I use the following command\r\n`mitmdump.exe -p 8080 --anticomp -q -s \"d:\\redirect-router.py\"`\r\nIn the script, I re-write the host for a specific URL\r\n2. \r\n3. \r\n\r\n#### System Information\r\nPaste the output of \"mitmproxy --version\" here.\r\nmitmproxy --version\r\nMitmproxy: 7.0.2 binary\r\nPython: 3.9.6\r\nOpenSSL: OpenSSL 1.1.1k 25 Mar 2021\r\nPlatform: Windows-10-10.0.18363-SP0\n", "before_files": [{"content": "import asyncio\nimport sys\n\nif sys.platform == 'win32':\n # workaround for\n # https://github.com/tornadoweb/tornado/issues/2751\n # https://www.tornadoweb.org/en/stable/index.html#installation\n # (copied multiple times in the codebase, please remove all occurrences)\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n", "path": "mitmproxy/__init__.py"}]}
| 1,149 | 144 |
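For context on why simply deleting the policy override resolves the crash: `select()` caps the number of sockets it can watch, while Windows' default proactor event loop (IOCP-based) has no such ceiling. The snippet below is only an illustration of inspecting the active policy, not mitmproxy code:

```python
import asyncio
import sys

if sys.platform == "win32":
    # On Python 3.8+ the default is WindowsProactorEventLoopPolicy; forcing the
    # selector policy is what reintroduced select()'s file-descriptor limit.
    print(type(asyncio.get_event_loop_policy()).__name__)
else:
    import selectors
    print(type(selectors.DefaultSelector()).__name__)  # e.g. EpollSelector on Linux
```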
gh_patches_debug_30874
|
rasdani/github-patches
|
git_diff
|
fal-ai__dbt-fal-544
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add the dbt target.name as a property on the context object
**Context**
I am sending Slack messages and, to keep things simple, I have opted for one channel (for a given set of models, that's handled through meta anyway, i.e. slack_channel_id).

However, doing this makes it hard to tell what the environment was if you use one channel for all dbt invocations.
**Describe the solution you'd like**
I would like to be able to use context to access dbt properties such as the target.name e.g.
```
context.target.name # dev, test, prod, etc.
```
**Describe alternatives you've considered**
This can be worked around by injecting {{ target.name }} as a meta field in a model .yml, e.g.:
```
version: 2
models:
- name: my_funky_model
meta:
SLACK_CHANNEL_ID: XXXXXXXXXX
TARGET: "{{ target.name }}"
fal:
scripts:
- fal_scripts/notify_slack.py
```
However, this does seem redundant and laborious to add to all .yml definitions.
</issue>
<code>
[start of src/fal/fal_script.py]
1 import os
2 import json
3 from typing import Dict, Any, List, Optional, Union, Callable
4 from pathlib import Path
5 from functools import partial
6 from dataclasses import dataclass, field
7 from deprecation import deprecated
8
9 from faldbt.parse import normalize_path
10 from faldbt.project import DbtModel, FalDbt
11
12 from dbt.contracts.results import RunStatus
13 from dbt.config.runtime import RuntimeConfig
14 from fal.logger import LOGGER
15
16 from dbt.contracts.graph.parsed import ColumnInfo
17
18
19 class Hook:
20 path: str
21 arguments: Dict[str, Any]
22
23
24 @dataclass
25 class LocalHook(Hook):
26 path: str
27 arguments: Dict[str, Any] = field(default_factory=dict)
28
29
30 @dataclass
31 class IsolatedHook(Hook):
32 path: str
33 environment_name: str
34 arguments: Dict[str, Any] = field(default_factory=dict)
35
36
37 def _is_local_environment(environment_name: str) -> None:
38 return environment_name == "local"
39
40
41 def create_hook(raw_hook: Any, default_environment_name: Optional[str] = None) -> Hook:
42 if isinstance(raw_hook, str):
43 raw_hook = {"path": raw_hook}
44
45 if not isinstance(raw_hook, dict):
46 raise ValueError(f"Unrecognized hook value: {raw_hook}")
47
48 if "path" not in raw_hook:
49 raise ValueError(f"A hook must specify path.")
50
51 environment_name = raw_hook.get("environment", default_environment_name)
52 if environment_name and not _is_local_environment(environment_name):
53 return IsolatedHook(
54 raw_hook["path"],
55 environment_name,
56 raw_hook.get("with", {}),
57 )
58 else:
59 return LocalHook(raw_hook["path"], raw_hook.get("with", {}))
60
61
62 @dataclass
63 class CurrentAdapterResponse:
64 message: str
65 code: Optional[str]
66 rows_affected: Optional[int]
67
68
69 @dataclass
70 class CurrentModel:
71 name: str
72 alias: str
73 status: RunStatus
74 columns: Dict[str, ColumnInfo]
75 tests: List[Any]
76 meta: Dict[Any, Any]
77 adapter_response: Optional[CurrentAdapterResponse]
78
79
80 @dataclass
81 class CurrentTest:
82 name: str
83 model_name: str
84 column: str
85 status: str
86
87 @property
88 @deprecated(details="Use 'model_name' instead")
89 def modelname(self):
90 return self.model_name
91
92
93 @dataclass
94 class ContextConfig:
95 target_path: Path
96
97 def __init__(self, config: RuntimeConfig):
98 self.target_path = Path(
99 os.path.realpath(os.path.join(config.project_root, config.target_path))
100 )
101
102
103 @dataclass
104 class Context:
105 current_model: Union[CurrentModel, None]
106 config: ContextConfig
107 _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)
108
109 @property
110 def arguments(self) -> Dict[str, Any]:
111 if self._arguments is None:
112 raise ValueError(
113 "'context.arguments' is only accessible from hooks, "
114 "not from scripts/models"
115 )
116 return self._arguments
117
118
119 @dataclass(frozen=True, init=False)
120 class FalScript:
121 model: Optional[DbtModel]
122 path: Path
123 faldbt: FalDbt
124 hook_arguments: Optional[Dict[str, Any]]
125 is_hook: bool
126
127 def __init__(
128 self,
129 faldbt: FalDbt,
130 model: Optional[DbtModel],
131 path: str,
132 hook_arguments: Optional[Dict[str, Any]] = None,
133 is_hook: bool = False,
134 ):
135 # Necessary because of frozen=True
136 object.__setattr__(self, "model", model)
137 object.__setattr__(self, "path", normalize_path(faldbt.scripts_dir, path))
138 object.__setattr__(self, "faldbt", faldbt)
139 object.__setattr__(self, "hook_arguments", hook_arguments)
140 object.__setattr__(self, "is_hook", is_hook)
141
142 @classmethod
143 def from_hook(cls, faldbt: FalDbt, model: DbtModel, hook: Hook):
144 """
145 Creates a FalScript from a hook
146 """
147 assert isinstance(hook, LocalHook)
148 return cls(
149 faldbt=faldbt,
150 model=model,
151 path=hook.path,
152 hook_arguments=hook.arguments,
153 is_hook=True,
154 )
155
156 @classmethod
157 def model_script(cls, faldbt: FalDbt, model: DbtModel):
158 script = FalScript(faldbt, model, "")
159 # HACK: Set the script path specially for this case
160 object.__setattr__(script, "path", model.python_model)
161 return script
162
163 def exec(self):
164 """
165 Executes the script
166 """
167 # Enable local imports
168 try:
169 source_code = python_from_file(self.path)
170 program = compile(source_code, self.path, "exec")
171
172 exec_globals = {
173 "__name__": "__main__",
174 "context": self._build_script_context(),
175 "ref": self.faldbt.ref,
176 "source": self.faldbt.source,
177 "write_to_firestore": self.faldbt.write_to_firestore,
178 "list_models": self.faldbt.list_models,
179 "list_models_ids": self.faldbt.list_models_ids,
180 "list_sources": self.faldbt.list_sources,
181 "list_features": self.faldbt.list_features,
182 "el": self.faldbt.el,
183 "execute_sql": self.faldbt.execute_sql,
184 }
185
186 if not self.is_hook:
187 exec_globals["write_to_source"] = self.faldbt.write_to_source
188
189 if self.model is not None:
190 # Hard-wire the model
191 exec_globals["write_to_model"] = partial(
192 self.faldbt.write_to_model,
193 target_1=self.model.name,
194 target_2=None,
195 )
196
197 else:
198 exec_globals["write_to_source"] = _not_allowed_function_maker(
199 "write_to_source"
200 )
201 exec_globals["write_to_model"] = _not_allowed_function_maker(
202 "write_to_model"
203 )
204 exec(program, exec_globals)
205 finally:
206 pass
207
208 @property
209 def relative_path(self):
210 if self.is_model:
211 return self.path.relative_to(self.faldbt.project_dir)
212 else:
213 return self.path.relative_to(self.faldbt.scripts_dir)
214
215 @property
216 def id(self):
217 if self.is_model:
218 return f"(model: {self.relative_path})"
219 else:
220 return f"({self.model_name}, {self.relative_path})"
221
222 @property
223 def is_global(self):
224 return self.model is None
225
226 @property
227 def is_model(self):
228 if self.model is not None and self.model.python_model is not None:
229 return self.model.python_model == self.path
230
231 @property
232 def model_name(self):
233 return "<GLOBAL>" if self.is_global else self.model.name # type: ignore
234
235 def _build_script_context(self) -> Context:
236 context_config = ContextConfig(self.faldbt._config)
237 if self.is_global:
238 return Context(current_model=None, config=context_config)
239
240 model: DbtModel = self.model # type: ignore
241
242 meta = model.meta or {}
243 _del_key(meta, self.faldbt.keyword)
244
245 tests = _process_tests(model.tests)
246
247 current_adapter_response = None
248 if model.adapter_response:
249 current_adapter_response = CurrentAdapterResponse(
250 message=str(model.adapter_response),
251 code=model.adapter_response.code,
252 rows_affected=model.adapter_response.rows_affected,
253 )
254
255 current_model = CurrentModel(
256 name=model.name,
257 alias=model.alias,
258 status=model.status,
259 columns=model.columns,
260 tests=tests,
261 meta=meta,
262 adapter_response=current_adapter_response,
263 )
264
265 return Context(
266 current_model=current_model,
267 config=context_config,
268 _arguments=self.hook_arguments,
269 )
270
271
272 def _del_key(dict: Dict[str, Any], key: str):
273 try:
274 del dict[key]
275 except KeyError:
276 pass
277
278
279 def _process_tests(tests: List[Any]):
280 return list(
281 map(
282 lambda test: CurrentTest(
283 name=test.name,
284 column=test.column,
285 status=test.status,
286 model_name=test.model,
287 ),
288 tests,
289 )
290 )
291
292
293 def python_from_file(path: Path) -> str:
294 with open(path) as file:
295 raw_source_code = file.read()
296 if path.suffix == ".ipynb":
297 raw_source_code = _process_ipynb(raw_source_code)
298 return raw_source_code
299
300
301 def _process_ipynb(raw_source_code: str) -> str:
302 def strip_magic(source: List[str]) -> List[str]:
303 NOTEBOOK_LIB = "faldbt.magics"
304 return [item for item in source if item[0] != "%" and NOTEBOOK_LIB not in item]
305
306 ipynb_struct = json.loads(raw_source_code)
307
308 script_list = []
309 for cell in ipynb_struct["cells"]:
310 if cell["cell_type"] == "code":
311 source = strip_magic(cell["source"])
312 script_list.append("".join(source))
313
314 joined_script = "\n #cell \n".join(script_list)
315
316 LOGGER.debug(f"Joined .ipynb cells to:\n{joined_script}")
317
318 return joined_script
319
320
321 def _not_allowed_function_maker(function_name: str) -> Callable[[Any], None]:
322 def not_allowed_function(*args, **kwargs):
323 raise Exception(
324 (
325 f"{function_name} is not allowed in hooks."
326 " Consider using a Python model."
327 )
328 )
329
330 return not_allowed_function
331
[end of src/fal/fal_script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py
--- a/src/fal/fal_script.py
+++ b/src/fal/fal_script.py
@@ -99,11 +99,21 @@
os.path.realpath(os.path.join(config.project_root, config.target_path))
)
+@dataclass
+class ContextTarget:
+ def __init__(self, config: RuntimeConfig):
+ self.profile_name = config.profile_name
+ self.name = config.target_name
+ self.threads = config.threads
+ self.type = config.credentials.type
+ self.database = config.credentials.database
+ self.schema = config.credentials.schema
@dataclass
class Context:
current_model: Union[CurrentModel, None]
config: ContextConfig
+ target: ContextTarget
_arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)
@property
@@ -233,9 +243,12 @@
return "<GLOBAL>" if self.is_global else self.model.name # type: ignore
def _build_script_context(self) -> Context:
- context_config = ContextConfig(self.faldbt._config)
+ config: RuntimeConfig = self.faldbt._config
+ context_config = ContextConfig(config)
+ target = ContextTarget(config)
+
if self.is_global:
- return Context(current_model=None, config=context_config)
+ return Context(current_model=None, target=target, config=context_config)
model: DbtModel = self.model # type: ignore
@@ -264,6 +277,7 @@
return Context(
current_model=current_model,
+ target=target,
config=context_config,
_arguments=self.hook_arguments,
)
|
{"golden_diff": "diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py\n--- a/src/fal/fal_script.py\n+++ b/src/fal/fal_script.py\n@@ -99,11 +99,21 @@\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n \n+@dataclass\n+class ContextTarget:\n+ def __init__(self, config: RuntimeConfig):\n+ self.profile_name = config.profile_name\n+ self.name = config.target_name\n+ self.threads = config.threads\n+ self.type = config.credentials.type\n+ self.database = config.credentials.database\n+ self.schema = config.credentials.schema\n \n @dataclass\n class Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n+ target: ContextTarget\n _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)\n \n @property\n@@ -233,9 +243,12 @@\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n \n def _build_script_context(self) -> Context:\n- context_config = ContextConfig(self.faldbt._config)\n+ config: RuntimeConfig = self.faldbt._config\n+ context_config = ContextConfig(config)\n+ target = ContextTarget(config)\n+\n if self.is_global:\n- return Context(current_model=None, config=context_config)\n+ return Context(current_model=None, target=target, config=context_config)\n \n model: DbtModel = self.model # type: ignore\n \n@@ -264,6 +277,7 @@\n \n return Context(\n current_model=current_model,\n+ target=target,\n config=context_config,\n _arguments=self.hook_arguments,\n )\n", "issue": "Add the dbt target.name as a property on the context object\n**Context**\r\nI am sending slack messages and to keep things simple I have opted for one channel. (for some given set of models, that's handled through meta anyway i.e. slack_channel_id). \r\n\r\nHowever, doing this - it's hard to tell what the given environment was if you use one channel for all dbt invocations.\r\n\r\n**Describe the solution you'd like**\r\nI would like to be able to use context to access dbt properties such as the target.name e.g.\r\n\r\n```\r\n context.target.name # dev, test, prod, etc.\r\n```\r\n**Describe alternatives you've considered**\r\nThis can be worked around by injecting the {{ target.name }} as a meta field to a model.yml e.g:\r\n\r\n```\r\nversion: 2\r\n\r\nmodels:\r\n - name: my_funky_model\r\n meta:\r\n SLACK_CHANNEL_ID: XXXXXXXXXX\r\n TARGET: \"{{ target.name }}\"\r\n fal:\r\n scripts:\r\n - fal_scripts/notify_slack.py\r\n```\r\n\r\nHowever this does seem redundant/laborious to add to all .yml definitions \n", "before_files": [{"content": "import os\nimport json\nfrom typing import Dict, Any, List, Optional, Union, Callable\nfrom pathlib import Path\nfrom functools import partial\nfrom dataclasses import dataclass, field\nfrom deprecation import deprecated\n\nfrom faldbt.parse import normalize_path\nfrom faldbt.project import DbtModel, FalDbt\n\nfrom dbt.contracts.results import RunStatus\nfrom dbt.config.runtime import RuntimeConfig\nfrom fal.logger import LOGGER\n\nfrom dbt.contracts.graph.parsed import ColumnInfo\n\n\nclass Hook:\n path: str\n arguments: Dict[str, Any]\n\n\n@dataclass\nclass LocalHook(Hook):\n path: str\n arguments: Dict[str, Any] = field(default_factory=dict)\n\n\n@dataclass\nclass IsolatedHook(Hook):\n path: str\n environment_name: str\n arguments: Dict[str, Any] = field(default_factory=dict)\n\n\ndef _is_local_environment(environment_name: str) -> None:\n return environment_name == \"local\"\n\n\ndef create_hook(raw_hook: Any, default_environment_name: Optional[str] = None) -> Hook:\n if isinstance(raw_hook, str):\n 
raw_hook = {\"path\": raw_hook}\n\n if not isinstance(raw_hook, dict):\n raise ValueError(f\"Unrecognized hook value: {raw_hook}\")\n\n if \"path\" not in raw_hook:\n raise ValueError(f\"A hook must specify path.\")\n\n environment_name = raw_hook.get(\"environment\", default_environment_name)\n if environment_name and not _is_local_environment(environment_name):\n return IsolatedHook(\n raw_hook[\"path\"],\n environment_name,\n raw_hook.get(\"with\", {}),\n )\n else:\n return LocalHook(raw_hook[\"path\"], raw_hook.get(\"with\", {}))\n\n\n@dataclass\nclass CurrentAdapterResponse:\n message: str\n code: Optional[str]\n rows_affected: Optional[int]\n\n\n@dataclass\nclass CurrentModel:\n name: str\n alias: str\n status: RunStatus\n columns: Dict[str, ColumnInfo]\n tests: List[Any]\n meta: Dict[Any, Any]\n adapter_response: Optional[CurrentAdapterResponse]\n\n\n@dataclass\nclass CurrentTest:\n name: str\n model_name: str\n column: str\n status: str\n\n @property\n @deprecated(details=\"Use 'model_name' instead\")\n def modelname(self):\n return self.model_name\n\n\n@dataclass\nclass ContextConfig:\n target_path: Path\n\n def __init__(self, config: RuntimeConfig):\n self.target_path = Path(\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n\n\n@dataclass\nclass Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)\n\n @property\n def arguments(self) -> Dict[str, Any]:\n if self._arguments is None:\n raise ValueError(\n \"'context.arguments' is only accessible from hooks, \"\n \"not from scripts/models\"\n )\n return self._arguments\n\n\n@dataclass(frozen=True, init=False)\nclass FalScript:\n model: Optional[DbtModel]\n path: Path\n faldbt: FalDbt\n hook_arguments: Optional[Dict[str, Any]]\n is_hook: bool\n\n def __init__(\n self,\n faldbt: FalDbt,\n model: Optional[DbtModel],\n path: str,\n hook_arguments: Optional[Dict[str, Any]] = None,\n is_hook: bool = False,\n ):\n # Necessary because of frozen=True\n object.__setattr__(self, \"model\", model)\n object.__setattr__(self, \"path\", normalize_path(faldbt.scripts_dir, path))\n object.__setattr__(self, \"faldbt\", faldbt)\n object.__setattr__(self, \"hook_arguments\", hook_arguments)\n object.__setattr__(self, \"is_hook\", is_hook)\n\n @classmethod\n def from_hook(cls, faldbt: FalDbt, model: DbtModel, hook: Hook):\n \"\"\"\n Creates a FalScript from a hook\n \"\"\"\n assert isinstance(hook, LocalHook)\n return cls(\n faldbt=faldbt,\n model=model,\n path=hook.path,\n hook_arguments=hook.arguments,\n is_hook=True,\n )\n\n @classmethod\n def model_script(cls, faldbt: FalDbt, model: DbtModel):\n script = FalScript(faldbt, model, \"\")\n # HACK: Set the script path specially for this case\n object.__setattr__(script, \"path\", model.python_model)\n return script\n\n def exec(self):\n \"\"\"\n Executes the script\n \"\"\"\n # Enable local imports\n try:\n source_code = python_from_file(self.path)\n program = compile(source_code, self.path, \"exec\")\n\n exec_globals = {\n \"__name__\": \"__main__\",\n \"context\": self._build_script_context(),\n \"ref\": self.faldbt.ref,\n \"source\": self.faldbt.source,\n \"write_to_firestore\": self.faldbt.write_to_firestore,\n \"list_models\": self.faldbt.list_models,\n \"list_models_ids\": self.faldbt.list_models_ids,\n \"list_sources\": self.faldbt.list_sources,\n \"list_features\": self.faldbt.list_features,\n \"el\": self.faldbt.el,\n \"execute_sql\": self.faldbt.execute_sql,\n }\n\n if 
not self.is_hook:\n exec_globals[\"write_to_source\"] = self.faldbt.write_to_source\n\n if self.model is not None:\n # Hard-wire the model\n exec_globals[\"write_to_model\"] = partial(\n self.faldbt.write_to_model,\n target_1=self.model.name,\n target_2=None,\n )\n\n else:\n exec_globals[\"write_to_source\"] = _not_allowed_function_maker(\n \"write_to_source\"\n )\n exec_globals[\"write_to_model\"] = _not_allowed_function_maker(\n \"write_to_model\"\n )\n exec(program, exec_globals)\n finally:\n pass\n\n @property\n def relative_path(self):\n if self.is_model:\n return self.path.relative_to(self.faldbt.project_dir)\n else:\n return self.path.relative_to(self.faldbt.scripts_dir)\n\n @property\n def id(self):\n if self.is_model:\n return f\"(model: {self.relative_path})\"\n else:\n return f\"({self.model_name}, {self.relative_path})\"\n\n @property\n def is_global(self):\n return self.model is None\n\n @property\n def is_model(self):\n if self.model is not None and self.model.python_model is not None:\n return self.model.python_model == self.path\n\n @property\n def model_name(self):\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n\n def _build_script_context(self) -> Context:\n context_config = ContextConfig(self.faldbt._config)\n if self.is_global:\n return Context(current_model=None, config=context_config)\n\n model: DbtModel = self.model # type: ignore\n\n meta = model.meta or {}\n _del_key(meta, self.faldbt.keyword)\n\n tests = _process_tests(model.tests)\n\n current_adapter_response = None\n if model.adapter_response:\n current_adapter_response = CurrentAdapterResponse(\n message=str(model.adapter_response),\n code=model.adapter_response.code,\n rows_affected=model.adapter_response.rows_affected,\n )\n\n current_model = CurrentModel(\n name=model.name,\n alias=model.alias,\n status=model.status,\n columns=model.columns,\n tests=tests,\n meta=meta,\n adapter_response=current_adapter_response,\n )\n\n return Context(\n current_model=current_model,\n config=context_config,\n _arguments=self.hook_arguments,\n )\n\n\ndef _del_key(dict: Dict[str, Any], key: str):\n try:\n del dict[key]\n except KeyError:\n pass\n\n\ndef _process_tests(tests: List[Any]):\n return list(\n map(\n lambda test: CurrentTest(\n name=test.name,\n column=test.column,\n status=test.status,\n model_name=test.model,\n ),\n tests,\n )\n )\n\n\ndef python_from_file(path: Path) -> str:\n with open(path) as file:\n raw_source_code = file.read()\n if path.suffix == \".ipynb\":\n raw_source_code = _process_ipynb(raw_source_code)\n return raw_source_code\n\n\ndef _process_ipynb(raw_source_code: str) -> str:\n def strip_magic(source: List[str]) -> List[str]:\n NOTEBOOK_LIB = \"faldbt.magics\"\n return [item for item in source if item[0] != \"%\" and NOTEBOOK_LIB not in item]\n\n ipynb_struct = json.loads(raw_source_code)\n\n script_list = []\n for cell in ipynb_struct[\"cells\"]:\n if cell[\"cell_type\"] == \"code\":\n source = strip_magic(cell[\"source\"])\n script_list.append(\"\".join(source))\n\n joined_script = \"\\n #cell \\n\".join(script_list)\n\n LOGGER.debug(f\"Joined .ipynb cells to:\\n{joined_script}\")\n\n return joined_script\n\n\ndef _not_allowed_function_maker(function_name: str) -> Callable[[Any], None]:\n def not_allowed_function(*args, **kwargs):\n raise Exception(\n (\n f\"{function_name} is not allowed in hooks.\"\n \" Consider using a Python model.\"\n )\n )\n\n return not_allowed_function\n", "path": "src/fal/fal_script.py"}]}
| 3,800 | 394 |
gh_patches_debug_23336
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-286
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tqm_logger: metric_names is currently not optional
Hi,
https://github.com/pytorch/ignite/blob/master/ignite/contrib/handlers/tqdm_logger.py#L75
This line should be modified to make `metric_names` optional. Here is a suggestion:
```
if metric_names is not None and not isinstance(metric_names, list):
raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names)))
```
Thanks
</issue>
<code>
[start of ignite/contrib/handlers/tqdm_logger.py]
1 try:
2 from tqdm import tqdm
3 except ImportError:
4 raise RuntimeError("This contrib module requires tqdm to be installed")
5
6 from ignite.engine import Events
7
8
9 class ProgressBar:
10 """
11 TQDM progress bar handler to log training progress and computed metrics.
12
13 Examples:
14
15 Create a progress bar that shows you some metrics as they are computed,
16 by simply attaching the progress bar object to your engine.
17
18 .. code-block:: python
19
20 pbar = ProgressBar()
21 pbar.attach(trainer, ['loss'])
22
23 Note:
24 When adding attaching the progress bar to an engine, it is recommend that you replace
25 every print operation in the engine's handlers triggered every iteration with
26 ``pbar.log_message`` to guarantee the correct format of the stdout.
27 """
28
29 def __init__(self):
30 self.pbar = None
31
32 def _reset(self, engine):
33 self.pbar = tqdm(
34 total=len(engine.state.dataloader),
35 leave=False,
36 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]')
37
38 def _close(self, engine):
39 self.pbar.close()
40 self.pbar = None
41
42 def _update(self, engine, metric_names=None):
43 if self.pbar is None:
44 self._reset(engine)
45
46 self.pbar.set_description('Epoch {}'.format(engine.state.epoch))
47
48 if metric_names is not None:
49 if not all(metric in engine.state.metrics for metric in metric_names):
50 raise KeyError("metrics not found in engine.state.metrics")
51
52 metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}
53 self.pbar.set_postfix(**metrics)
54
55 self.pbar.update()
56
57 @staticmethod
58 def log_message(message):
59 """
60 Logs a message, preserving the progress bar correct output format
61
62 Args:
63 message (str): string you wish to log
64 """
65 tqdm.write(message)
66
67 def attach(self, engine, metric_names=None):
68 """
69 Attaches the progress bar to an engine object
70
71 Args:
72 engine (Engine): engine object
73 metric_names (list): (Optional) list of the metrics names to log as the bar progresses
74 """
75 if not isinstance(metric_names, list):
76 raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names)))
77
78 engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)
79 engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names)
80
[end of ignite/contrib/handlers/tqdm_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -47,6 +47,7 @@
if metric_names is not None:
if not all(metric in engine.state.metrics for metric in metric_names):
+ self._close(engine)
raise KeyError("metrics not found in engine.state.metrics")
metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}
@@ -72,7 +73,7 @@
engine (Engine): engine object
metric_names (list): (Optional) list of the metrics names to log as the bar progresses
"""
- if not isinstance(metric_names, list):
+ if metric_names is not None and not isinstance(metric_names, list):
raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names)))
engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)
|
{"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -47,6 +47,7 @@\n \n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n+ self._close(engine)\n raise KeyError(\"metrics not found in engine.state.metrics\")\n \n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n@@ -72,7 +73,7 @@\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n- if not isinstance(metric_names, list):\n+ if metric_names is not None and not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n \n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n", "issue": "tqm_logger: metric_names is currently not optional \nHi,\r\n\r\nhttps://github.com/pytorch/ignite/blob/master/ignite/contrib/handlers/tqdm_logger.py#L75\r\nThis line should be modified to make `metric_names` optional. Here is a suggestion:\r\n```\r\nif metric_names is not None and not isinstance(metric_names, list):\r\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\r\n```\r\n\r\nThanks\n", "before_files": [{"content": "try:\n from tqdm import tqdm\nexcept ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed\")\n\nfrom ignite.engine import Events\n\n\nclass ProgressBar:\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Examples:\n\n Create a progress bar that shows you some metrics as they are computed,\n by simply attaching the progress bar object to your engine.\n\n .. 
code-block:: python\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n \"\"\"\n\n def __init__(self):\n self.pbar = None\n\n def _reset(self, engine):\n self.pbar = tqdm(\n total=len(engine.state.dataloader),\n leave=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]')\n\n def _close(self, engine):\n self.pbar.close()\n self.pbar = None\n\n def _update(self, engine, metric_names=None):\n if self.pbar is None:\n self._reset(engine)\n\n self.pbar.set_description('Epoch {}'.format(engine.state.epoch))\n\n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n raise KeyError(\"metrics not found in engine.state.metrics\")\n\n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n self.pbar.set_postfix(**metrics)\n\n self.pbar.update()\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format\n\n Args:\n message (str): string you wish to log\n \"\"\"\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None):\n \"\"\"\n Attaches the progress bar to an engine object\n\n Args:\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n if not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n\n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]}
| 1,351 | 244 |
gh_patches_debug_60627
|
rasdani/github-patches
|
git_diff
|
CTPUG__wafer-111
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wafer page editing fails on Django 1.8 with ImproperlyConfigured: error
As seen on Travis, and confirmed locally, attempting to edit a page bombs out, ending with
"Specifying both 'fields' and 'form_class' is not permitted."
ImproperlyConfigured: Specifying both 'fields' and 'form_class' is not permitted.
</issue>
<code>
[start of wafer/pages/views.py]
1 from django.http import Http404
2 from django.core.exceptions import PermissionDenied
3 from django.views.generic import DetailView, TemplateView, UpdateView
4
5 from wafer.pages.models import Page
6 from wafer.pages.forms import PageForm
7
8
9 class ShowPage(DetailView):
10 template_name = 'wafer.pages/page.html'
11 model = Page
12
13
14 class EditPage(UpdateView):
15 template_name = 'wafer.pages/page_form.html'
16 model = Page
17 form_class = PageForm
18 fields = ['name', 'content']
19
20
21 def slug(request, url):
22 """Look up a page by url (which is a tree of slugs)"""
23 page = None
24 for slug in url.split('/'):
25 if not slug:
26 continue
27 try:
28 page = Page.objects.get(slug=slug, parent=page)
29 except Page.DoesNotExist:
30 raise Http404
31
32 if page is None:
33 try:
34 page = Page.objects.get(slug='index')
35 except Page.DoesNotExist:
36 return TemplateView.as_view(
37 template_name='wafer/index.html')(request)
38
39 if 'edit' in request.GET.keys():
40 if not request.user.has_perm('pages.change_page'):
41 raise PermissionDenied
42 return EditPage.as_view()(request, pk=page.id)
43
44 return ShowPage.as_view()(request, pk=page.id)
45
[end of wafer/pages/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wafer/pages/views.py b/wafer/pages/views.py
--- a/wafer/pages/views.py
+++ b/wafer/pages/views.py
@@ -15,7 +15,6 @@
template_name = 'wafer.pages/page_form.html'
model = Page
form_class = PageForm
- fields = ['name', 'content']
def slug(request, url):
|
{"golden_diff": "diff --git a/wafer/pages/views.py b/wafer/pages/views.py\n--- a/wafer/pages/views.py\n+++ b/wafer/pages/views.py\n@@ -15,7 +15,6 @@\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n- fields = ['name', 'content']\n \n \n def slug(request, url):\n", "issue": "Wafer page editing fails on Django 1.8 with ImproperlyConfigured: error\nAs seen on Travis, and confirmed locally, attempting to edit a page bombs out, ending with\n\n\"Specifying both 'fields' and 'form_class' is not permitted.\"\nImproperlyConfigured: Specifying both 'fields' and 'form_class' is not permitted.\n\n", "before_files": [{"content": "from django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import DetailView, TemplateView, UpdateView\n\nfrom wafer.pages.models import Page\nfrom wafer.pages.forms import PageForm\n\n\nclass ShowPage(DetailView):\n template_name = 'wafer.pages/page.html'\n model = Page\n\n\nclass EditPage(UpdateView):\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n fields = ['name', 'content']\n\n\ndef slug(request, url):\n \"\"\"Look up a page by url (which is a tree of slugs)\"\"\"\n page = None\n for slug in url.split('/'):\n if not slug:\n continue\n try:\n page = Page.objects.get(slug=slug, parent=page)\n except Page.DoesNotExist:\n raise Http404\n\n if page is None:\n try:\n page = Page.objects.get(slug='index')\n except Page.DoesNotExist:\n return TemplateView.as_view(\n template_name='wafer/index.html')(request)\n\n if 'edit' in request.GET.keys():\n if not request.user.has_perm('pages.change_page'):\n raise PermissionDenied\n return EditPage.as_view()(request, pk=page.id)\n\n return ShowPage.as_view()(request, pk=page.id)\n", "path": "wafer/pages/views.py"}]}
| 985 | 89 |
gh_patches_debug_38612
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-903
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PORT] Updated MicrosoftGovernmentAppCredentials to support Skills in Azure Gov
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/3353
Fixes https://github.com/microsoft/botbuilder-dotnet/issues/3233
Added constructor to MicrosoftGovernmentAppCredentials that takes OAuthScope to support skills in gov.
Updated BotFrameworkHttpClient and BotFrameworkAdapter to pass the OAuthScope to MicrosoftGovernmentAppCredentials
Added SimpleBotToBot functional test for testing.
Added Microsoft.Bot.Framework.Skills.sln to load skills test projects.
# Changed projects
* Microsoft.Bot.Builder
* Microsoft.Bot.Connector
* integration
* Microsoft.Bot.Connector.Tests
[Skills]
</issue>
<code>
[start of libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from botframework.connector.auth import MicrosoftAppCredentials, GovernmentConstants
5
6
7 class MicrosoftGovernmentAppCredentials(MicrosoftAppCredentials):
8 """
9 MicrosoftGovernmentAppCredentials auth implementation.
10 """
11
12 def __init__(
13 self,
14 app_id: str,
15 app_password: str,
16 channel_auth_tenant: str = None,
17 scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,
18 ):
19 super().__init__(app_id, app_password, channel_auth_tenant, scope)
20 self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL
21
22 @staticmethod
23 def empty():
24 return MicrosoftGovernmentAppCredentials("", "")
25
[end of libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py]
[start of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 # pylint: disable=no-member
4
5 import json
6 from typing import Dict
7 from logging import Logger
8
9 import aiohttp
10 from botbuilder.core import InvokeResponse
11 from botbuilder.core.skills import BotFrameworkClient
12 from botbuilder.schema import (
13 Activity,
14 ExpectedReplies,
15 ConversationReference,
16 ConversationAccount,
17 )
18 from botframework.connector.auth import (
19 ChannelProvider,
20 CredentialProvider,
21 GovernmentConstants,
22 MicrosoftAppCredentials,
23 )
24
25
26 class BotFrameworkHttpClient(BotFrameworkClient):
27
28 """
29 A skill host adapter implements API to forward activity to a skill and
30 implements routing ChannelAPI calls from the Skill up through the bot/adapter.
31 """
32
33 INVOKE_ACTIVITY_NAME = "SkillEvents.ChannelApiInvoke"
34 _BOT_IDENTITY_KEY = "BotIdentity"
35 _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}
36
37 def __init__(
38 self,
39 credential_provider: CredentialProvider,
40 channel_provider: ChannelProvider = None,
41 logger: Logger = None,
42 ):
43 if not credential_provider:
44 raise TypeError("credential_provider can't be None")
45
46 self._credential_provider = credential_provider
47 self._channel_provider = channel_provider
48 self._logger = logger
49 self._session = aiohttp.ClientSession()
50
51 async def post_activity(
52 self,
53 from_bot_id: str,
54 to_bot_id: str,
55 to_url: str,
56 service_url: str,
57 conversation_id: str,
58 activity: Activity,
59 ) -> InvokeResponse:
60 app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)
61
62 if not app_credentials:
63 raise KeyError("Unable to get appCredentials to connect to the skill")
64
65 # Get token for the skill call
66 token = (
67 app_credentials.get_access_token()
68 if app_credentials.microsoft_app_id
69 else None
70 )
71
72 # Capture current activity settings before changing them.
73 # TODO: DO we need to set the activity ID? (events that are created manually don't have it).
74 original_conversation_id = activity.conversation.id
75 original_service_url = activity.service_url
76 original_caller_id = activity.caller_id
77 original_relates_to = activity.relates_to
78
79 try:
80 # TODO: The relato has to be ported to the adapter in the new integration library when
81 # resolving conflicts in merge
82 activity.relates_to = ConversationReference(
83 service_url=activity.service_url,
84 activity_id=activity.id,
85 channel_id=activity.channel_id,
86 conversation=ConversationAccount(
87 id=activity.conversation.id,
88 name=activity.conversation.name,
89 conversation_type=activity.conversation.conversation_type,
90 aad_object_id=activity.conversation.aad_object_id,
91 is_group=activity.conversation.is_group,
92 role=activity.conversation.role,
93 tenant_id=activity.conversation.tenant_id,
94 properties=activity.conversation.properties,
95 ),
96 bot=None,
97 )
98 activity.conversation.id = conversation_id
99 activity.service_url = service_url
100 activity.caller_id = f"urn:botframework:aadappid:{from_bot_id}"
101
102 headers_dict = {
103 "Content-type": "application/json; charset=utf-8",
104 }
105 if token:
106 headers_dict.update(
107 {"Authorization": f"Bearer {token}",}
108 )
109
110 json_content = json.dumps(activity.serialize())
111 resp = await self._session.post(
112 to_url, data=json_content.encode("utf-8"), headers=headers_dict,
113 )
114 resp.raise_for_status()
115 data = (await resp.read()).decode()
116 content = json.loads(data) if data else None
117
118 return InvokeResponse(status=resp.status, body=content)
119
120 finally:
121 # Restore activity properties.
122 activity.conversation.id = original_conversation_id
123 activity.service_url = original_service_url
124 activity.caller_id = original_caller_id
125 activity.relates_to = original_relates_to
126
127 async def post_buffered_activity(
128 self,
129 from_bot_id: str,
130 to_bot_id: str,
131 to_url: str,
132 service_url: str,
133 conversation_id: str,
134 activity: Activity,
135 ) -> [Activity]:
136 """
137 Helper method to return a list of activities when an Activity is being
138 sent with DeliveryMode == expectReplies.
139 """
140 response = await self.post_activity(
141 from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity
142 )
143 if not response or (response.status / 100) != 2:
144 return []
145 return ExpectedReplies().deserialize(response.body).activities
146
147 async def _get_app_credentials(
148 self, app_id: str, oauth_scope: str
149 ) -> MicrosoftAppCredentials:
150 if not app_id:
151 return MicrosoftAppCredentials(None, None)
152
153 cache_key = f"{app_id}{oauth_scope}"
154 app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)
155
156 if app_credentials:
157 return app_credentials
158
159 app_password = await self._credential_provider.get_app_password(app_id)
160 app_credentials = MicrosoftAppCredentials(
161 app_id, app_password, oauth_scope=oauth_scope
162 )
163 if self._channel_provider and self._channel_provider.is_government():
164 app_credentials.oauth_endpoint = (
165 GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL
166 )
167 app_credentials.oauth_scope = (
168 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
169 )
170
171 BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials
172 return app_credentials
173
[end of libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py
@@ -18,8 +18,9 @@
from botframework.connector.auth import (
ChannelProvider,
CredentialProvider,
- GovernmentConstants,
MicrosoftAppCredentials,
+ AppCredentials,
+ MicrosoftGovernmentAppCredentials,
)
@@ -146,27 +147,26 @@
async def _get_app_credentials(
self, app_id: str, oauth_scope: str
- ) -> MicrosoftAppCredentials:
+ ) -> AppCredentials:
if not app_id:
- return MicrosoftAppCredentials(None, None)
+ return MicrosoftAppCredentials.empty()
+ # in the cache?
cache_key = f"{app_id}{oauth_scope}"
app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)
-
if app_credentials:
return app_credentials
+ # create a new AppCredentials
app_password = await self._credential_provider.get_app_password(app_id)
- app_credentials = MicrosoftAppCredentials(
- app_id, app_password, oauth_scope=oauth_scope
+
+ app_credentials = (
+ MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)
+ if self._credential_provider and self._channel_provider.is_government()
+ else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)
)
- if self._channel_provider and self._channel_provider.is_government():
- app_credentials.oauth_endpoint = (
- GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL
- )
- app_credentials.oauth_scope = (
- GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
- )
+ # put it in the cache
BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials
+
return app_credentials
diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py
--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py
+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py
@@ -14,10 +14,13 @@
app_id: str,
app_password: str,
channel_auth_tenant: str = None,
- scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,
+ scope: str = None,
):
super().__init__(app_id, app_password, channel_auth_tenant, scope)
self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL
+ self.oauth_scope = (
+ scope if scope else GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
+ )
@staticmethod
def empty():
|
{"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n@@ -18,8 +18,9 @@\n from botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n- GovernmentConstants,\n MicrosoftAppCredentials,\n+ AppCredentials,\n+ MicrosoftGovernmentAppCredentials,\n )\n \n \n@@ -146,27 +147,26 @@\n \n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n- ) -> MicrosoftAppCredentials:\n+ ) -> AppCredentials:\n if not app_id:\n- return MicrosoftAppCredentials(None, None)\n+ return MicrosoftAppCredentials.empty()\n \n+ # in the cache?\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n-\n if app_credentials:\n return app_credentials\n \n+ # create a new AppCredentials\n app_password = await self._credential_provider.get_app_password(app_id)\n- app_credentials = MicrosoftAppCredentials(\n- app_id, app_password, oauth_scope=oauth_scope\n+\n+ app_credentials = (\n+ MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)\n+ if self._credential_provider and self._channel_provider.is_government()\n+ else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)\n )\n- if self._channel_provider and self._channel_provider.is_government():\n- app_credentials.oauth_endpoint = (\n- GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n- )\n- app_credentials.oauth_scope = (\n- GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n- )\n \n+ # put it in the cache\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n+\n return app_credentials\ndiff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py\n--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py\n@@ -14,10 +14,13 @@\n app_id: str,\r\n app_password: str,\r\n channel_auth_tenant: str = None,\r\n- scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,\r\n+ scope: str = None,\r\n ):\r\n super().__init__(app_id, app_password, channel_auth_tenant, scope)\r\n self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\r\n+ self.oauth_scope = (\r\n+ scope if scope else GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\r\n+ )\r\n \r\n @staticmethod\r\n def empty():\n", "issue": "[PORT] Updated MicrosoftGovernmentAppCredentials to support Skills in Azure Gov\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3353\n\nFixes https://github.com/microsoft/botbuilder-dotnet/issues/3233\r\n\r\nAdded constructor to MicrosoftGovernmentAppCredentials that takes OAuthScope to support skills in gov.\r\nUpdated BotFrameworkHttpClient and BotFrameworkAdapter to pass the OAuthScope to MicrosoftGovernmentAppCredentials\r\nAdded SimpleBotToBot functional test for testing.\r\nAdded Microsoft.Bot.Framework.Skills.sln to load skills test 
projects.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder\r\n* Microsoft.Bot.Connector\r\n* integration\r\n* Microsoft.Bot.Connector.Tests\r\n\r\n[Skills]\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom botframework.connector.auth import MicrosoftAppCredentials, GovernmentConstants\r\n\r\n\r\nclass MicrosoftGovernmentAppCredentials(MicrosoftAppCredentials):\r\n \"\"\"\r\n MicrosoftGovernmentAppCredentials auth implementation.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n app_id: str,\r\n app_password: str,\r\n channel_auth_tenant: str = None,\r\n scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,\r\n ):\r\n super().__init__(app_id, app_password, channel_auth_tenant, scope)\r\n self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\r\n\r\n @staticmethod\r\n def empty():\r\n return MicrosoftGovernmentAppCredentials(\"\", \"\")\r\n", "path": "libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n GovernmentConstants,\n MicrosoftAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_caller_id = activity.caller_id\n original_relates_to = activity.relates_to\n\n try:\n # TODO: The relato has to be ported to the adapter in the new integration library when\n # resolving conflicts in merge\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.caller_id = original_caller_id\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> MicrosoftAppCredentials:\n if not app_id:\n return MicrosoftAppCredentials(None, None)\n\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n\n if app_credentials:\n return app_credentials\n\n app_password = await self._credential_provider.get_app_password(app_id)\n app_credentials = MicrosoftAppCredentials(\n app_id, app_password, oauth_scope=oauth_scope\n )\n if self._channel_provider and self._channel_provider.is_government():\n app_credentials.oauth_endpoint = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n )\n app_credentials.oauth_scope = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}]}
| 2,597 | 713 |
gh_patches_debug_19417
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-1796
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
JupyterViz: Changing simulation parameters shouldn't automatically start the simulation
**Describe the bug**
Currently, changing the parameters auto-play the simulation.
**Expected behavior**
It shouldn't.
**To Reproduce**
Run any `JupyterViz` simulation.
</issue>
<code>
[start of mesa/experimental/jupyter_viz.py]
1 import threading
2
3 import matplotlib.pyplot as plt
4 import networkx as nx
5 import reacton.ipywidgets as widgets
6 import solara
7 from matplotlib.figure import Figure
8 from matplotlib.ticker import MaxNLocator
9
10 import mesa
11
12 # Avoid interactive backend
13 plt.switch_backend("agg")
14
15
16 @solara.component
17 def JupyterViz(
18 model_class,
19 model_params,
20 measures=None,
21 name="Mesa Model",
22 agent_portrayal=None,
23 space_drawer="default",
24 play_interval=400,
25 ):
26 """Initialize a component to visualize a model.
27 Args:
28 model_class: class of the model to instantiate
29 model_params: parameters for initializing the model
30 measures: list of callables or data attributes to plot
31 name: name for display
32 agent_portrayal: options for rendering agents (dictionary)
33 space_drawer: method to render the agent space for
34 the model; default implementation is :meth:`make_space`;
35 simulations with no space to visualize should
36 specify `space_drawer=False`
37 play_interval: play interval (default: 400)
38 """
39
40 current_step, set_current_step = solara.use_state(0)
41
42 # 1. Set up model parameters
43 user_params, fixed_params = split_model_params(model_params)
44 model_parameters, set_model_parameters = solara.use_state(
45 {**fixed_params, **{k: v["value"] for k, v in user_params.items()}}
46 )
47
48 # 2. Set up Model
49 def make_model():
50 model = model_class(**model_parameters)
51 set_current_step(0)
52 return model
53
54 reset_counter = solara.use_reactive(0)
55 model = solara.use_memo(
56 make_model, dependencies=[*list(model_parameters.values()), reset_counter.value]
57 )
58
59 def handle_change_model_params(name: str, value: any):
60 set_model_parameters({**model_parameters, name: value})
61
62 # 3. Set up UI
63 solara.Markdown(name)
64 UserInputs(user_params, on_change=handle_change_model_params)
65 ModelController(model, play_interval, current_step, set_current_step, reset_counter)
66
67 with solara.GridFixed(columns=2):
68 # 4. Space
69 if space_drawer == "default":
70 # draw with the default implementation
71 make_space(model, agent_portrayal)
72 elif space_drawer:
73 # if specified, draw agent space with an alternate renderer
74 space_drawer(model, agent_portrayal)
75 # otherwise, do nothing (do not draw space)
76
77 # 5. Plots
78 for measure in measures:
79 if callable(measure):
80 # Is a custom object
81 measure(model)
82 else:
83 make_plot(model, measure)
84
85
86 @solara.component
87 def ModelController(
88 model, play_interval, current_step, set_current_step, reset_counter
89 ):
90 playing = solara.use_reactive(False)
91 thread = solara.use_reactive(None)
92
93 def on_value_play(change):
94 if model.running:
95 do_step()
96 else:
97 playing.value = False
98
99 def do_step():
100 model.step()
101 set_current_step(model.schedule.steps)
102
103 def do_play():
104 model.running = True
105 while model.running:
106 do_step()
107
108 def threaded_do_play():
109 if thread is not None and thread.is_alive():
110 return
111 thread.value = threading.Thread(target=do_play)
112 thread.start()
113
114 def do_pause():
115 if (thread is None) or (not thread.is_alive()):
116 return
117 model.running = False
118 thread.join()
119
120 def do_reset():
121 reset_counter.value += 1
122
123 with solara.Row():
124 solara.Button(label="Step", color="primary", on_click=do_step)
125 # This style is necessary so that the play widget has almost the same
126 # height as typical Solara buttons.
127 solara.Style(
128 """
129 .widget-play {
130 height: 30px;
131 }
132 """
133 )
134 widgets.Play(
135 value=0,
136 interval=play_interval,
137 repeat=True,
138 show_repeat=False,
139 on_value=on_value_play,
140 playing=playing.value,
141 on_playing=playing.set,
142 )
143 solara.Button(label="Reset", color="primary", on_click=do_reset)
144 solara.Markdown(md_text=f"**Step:** {current_step}")
145 # threaded_do_play is not used for now because it
146 # doesn't work in Google colab. We use
147 # ipywidgets.Play until it is fixed. The threading
148 # version is definite a much better implementation,
149 # if it works.
150 # solara.Button(label="▶", color="primary", on_click=viz.threaded_do_play)
151 # solara.Button(label="⏸︎", color="primary", on_click=viz.do_pause)
152 # solara.Button(label="Reset", color="primary", on_click=do_reset)
153
154
155 def split_model_params(model_params):
156 model_params_input = {}
157 model_params_fixed = {}
158 for k, v in model_params.items():
159 if check_param_is_fixed(v):
160 model_params_fixed[k] = v
161 else:
162 model_params_input[k] = v
163 return model_params_input, model_params_fixed
164
165
166 def check_param_is_fixed(param):
167 if not isinstance(param, dict):
168 return True
169 if "type" not in param:
170 return True
171
172
173 @solara.component
174 def UserInputs(user_params, on_change=None):
175 """Initialize user inputs for configurable model parameters.
176 Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`,
177 and :class:`solara.Select`.
178
179 Props:
180 user_params: dictionary with options for the input, including label,
181 min and max values, and other fields specific to the input type.
182 on_change: function to be called with (name, value) when the value of an input changes.
183 """
184
185 for name, options in user_params.items():
186 # label for the input is "label" from options or name
187 label = options.get("label", name)
188 input_type = options.get("type")
189
190 def change_handler(value, name=name):
191 on_change(name, value)
192
193 if input_type == "SliderInt":
194 solara.SliderInt(
195 label,
196 value=options.get("value"),
197 on_value=change_handler,
198 min=options.get("min"),
199 max=options.get("max"),
200 step=options.get("step"),
201 )
202 elif input_type == "SliderFloat":
203 solara.SliderFloat(
204 label,
205 value=options.get("value"),
206 on_value=change_handler,
207 min=options.get("min"),
208 max=options.get("max"),
209 step=options.get("step"),
210 )
211 elif input_type == "Select":
212 solara.Select(
213 label,
214 value=options.get("value"),
215 on_value=change_handler,
216 values=options.get("values"),
217 )
218 else:
219 raise ValueError(f"{input_type} is not a supported input type")
220
221
222 def make_space(model, agent_portrayal):
223 def portray(g):
224 x = []
225 y = []
226 s = [] # size
227 c = [] # color
228 for i in range(g.width):
229 for j in range(g.height):
230 content = g._grid[i][j]
231 if not content:
232 continue
233 if not hasattr(content, "__iter__"):
234 # Is a single grid
235 content = [content]
236 for agent in content:
237 data = agent_portrayal(agent)
238 x.append(i)
239 y.append(j)
240 if "size" in data:
241 s.append(data["size"])
242 if "color" in data:
243 c.append(data["color"])
244 out = {"x": x, "y": y}
245 if len(s) > 0:
246 out["s"] = s
247 if len(c) > 0:
248 out["c"] = c
249 return out
250
251 space_fig = Figure()
252 space_ax = space_fig.subplots()
253 if isinstance(model.grid, mesa.space.NetworkGrid):
254 _draw_network_grid(model, space_ax, agent_portrayal)
255 else:
256 space_ax.scatter(**portray(model.grid))
257 space_ax.set_axis_off()
258 solara.FigureMatplotlib(space_fig)
259
260
261 def _draw_network_grid(model, space_ax, agent_portrayal):
262 graph = model.grid.G
263 pos = nx.spring_layout(graph, seed=0)
264 nx.draw(
265 graph,
266 ax=space_ax,
267 pos=pos,
268 **agent_portrayal(graph),
269 )
270
271
272 def make_plot(model, measure):
273 fig = Figure()
274 ax = fig.subplots()
275 df = model.datacollector.get_model_vars_dataframe()
276 ax.plot(df.loc[:, measure])
277 ax.set_ylabel(measure)
278 # Set integer x axis
279 ax.xaxis.set_major_locator(MaxNLocator(integer=True))
280 solara.FigureMatplotlib(fig)
281
282
283 def make_text(renderer):
284 def function(model):
285 solara.Markdown(renderer(model))
286
287 return function
288
[end of mesa/experimental/jupyter_viz.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mesa/experimental/jupyter_viz.py b/mesa/experimental/jupyter_viz.py
--- a/mesa/experimental/jupyter_viz.py
+++ b/mesa/experimental/jupyter_viz.py
@@ -89,15 +89,25 @@
):
playing = solara.use_reactive(False)
thread = solara.use_reactive(None)
+ # We track the previous step to detect if user resets the model via
+ # clicking the reset button or changing the parameters. If previous_step >
+ # current_step, it means a model reset happens while the simulation is
+ # still playing.
+ previous_step = solara.use_reactive(0)
def on_value_play(change):
- if model.running:
+ if previous_step.value > current_step and current_step == 0:
+ # We add extra checks for current_step == 0, just to be sure.
+ # We automatically stop the playing if a model is reset.
+ playing.value = False
+ elif model.running:
do_step()
else:
playing.value = False
def do_step():
model.step()
+ previous_step.value = current_step
set_current_step(model.schedule.steps)
def do_play():
|
{"golden_diff": "diff --git a/mesa/experimental/jupyter_viz.py b/mesa/experimental/jupyter_viz.py\n--- a/mesa/experimental/jupyter_viz.py\n+++ b/mesa/experimental/jupyter_viz.py\n@@ -89,15 +89,25 @@\n ):\n playing = solara.use_reactive(False)\n thread = solara.use_reactive(None)\n+ # We track the previous step to detect if user resets the model via\n+ # clicking the reset button or changing the parameters. If previous_step >\n+ # current_step, it means a model reset happens while the simulation is\n+ # still playing.\n+ previous_step = solara.use_reactive(0)\n \n def on_value_play(change):\n- if model.running:\n+ if previous_step.value > current_step and current_step == 0:\n+ # We add extra checks for current_step == 0, just to be sure.\n+ # We automatically stop the playing if a model is reset.\n+ playing.value = False\n+ elif model.running:\n do_step()\n else:\n playing.value = False\n \n def do_step():\n model.step()\n+ previous_step.value = current_step\n set_current_step(model.schedule.steps)\n \n def do_play():\n", "issue": "JupyterViz: Changing simulation parameters shouldn't automatically start the simulation\n**Describe the bug**\r\nCurrently, changing the parameters auto-play the simulation.\r\n\r\n**Expected behavior**\r\nIt shouldn't.\r\n\r\n**To Reproduce**\r\nRun any `JupyterViz` simulation.\n", "before_files": [{"content": "import threading\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport reacton.ipywidgets as widgets\nimport solara\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nimport mesa\n\n# Avoid interactive backend\nplt.switch_backend(\"agg\")\n\n\[email protected]\ndef JupyterViz(\n model_class,\n model_params,\n measures=None,\n name=\"Mesa Model\",\n agent_portrayal=None,\n space_drawer=\"default\",\n play_interval=400,\n):\n \"\"\"Initialize a component to visualize a model.\n Args:\n model_class: class of the model to instantiate\n model_params: parameters for initializing the model\n measures: list of callables or data attributes to plot\n name: name for display\n agent_portrayal: options for rendering agents (dictionary)\n space_drawer: method to render the agent space for\n the model; default implementation is :meth:`make_space`;\n simulations with no space to visualize should\n specify `space_drawer=False`\n play_interval: play interval (default: 400)\n \"\"\"\n\n current_step, set_current_step = solara.use_state(0)\n\n # 1. Set up model parameters\n user_params, fixed_params = split_model_params(model_params)\n model_parameters, set_model_parameters = solara.use_state(\n {**fixed_params, **{k: v[\"value\"] for k, v in user_params.items()}}\n )\n\n # 2. Set up Model\n def make_model():\n model = model_class(**model_parameters)\n set_current_step(0)\n return model\n\n reset_counter = solara.use_reactive(0)\n model = solara.use_memo(\n make_model, dependencies=[*list(model_parameters.values()), reset_counter.value]\n )\n\n def handle_change_model_params(name: str, value: any):\n set_model_parameters({**model_parameters, name: value})\n\n # 3. Set up UI\n solara.Markdown(name)\n UserInputs(user_params, on_change=handle_change_model_params)\n ModelController(model, play_interval, current_step, set_current_step, reset_counter)\n\n with solara.GridFixed(columns=2):\n # 4. 
Space\n if space_drawer == \"default\":\n # draw with the default implementation\n make_space(model, agent_portrayal)\n elif space_drawer:\n # if specified, draw agent space with an alternate renderer\n space_drawer(model, agent_portrayal)\n # otherwise, do nothing (do not draw space)\n\n # 5. Plots\n for measure in measures:\n if callable(measure):\n # Is a custom object\n measure(model)\n else:\n make_plot(model, measure)\n\n\[email protected]\ndef ModelController(\n model, play_interval, current_step, set_current_step, reset_counter\n):\n playing = solara.use_reactive(False)\n thread = solara.use_reactive(None)\n\n def on_value_play(change):\n if model.running:\n do_step()\n else:\n playing.value = False\n\n def do_step():\n model.step()\n set_current_step(model.schedule.steps)\n\n def do_play():\n model.running = True\n while model.running:\n do_step()\n\n def threaded_do_play():\n if thread is not None and thread.is_alive():\n return\n thread.value = threading.Thread(target=do_play)\n thread.start()\n\n def do_pause():\n if (thread is None) or (not thread.is_alive()):\n return\n model.running = False\n thread.join()\n\n def do_reset():\n reset_counter.value += 1\n\n with solara.Row():\n solara.Button(label=\"Step\", color=\"primary\", on_click=do_step)\n # This style is necessary so that the play widget has almost the same\n # height as typical Solara buttons.\n solara.Style(\n \"\"\"\n .widget-play {\n height: 30px;\n }\n \"\"\"\n )\n widgets.Play(\n value=0,\n interval=play_interval,\n repeat=True,\n show_repeat=False,\n on_value=on_value_play,\n playing=playing.value,\n on_playing=playing.set,\n )\n solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n solara.Markdown(md_text=f\"**Step:** {current_step}\")\n # threaded_do_play is not used for now because it\n # doesn't work in Google colab. We use\n # ipywidgets.Play until it is fixed. 
The threading\n # version is definite a much better implementation,\n # if it works.\n # solara.Button(label=\"\u25b6\", color=\"primary\", on_click=viz.threaded_do_play)\n # solara.Button(label=\"\u23f8\ufe0e\", color=\"primary\", on_click=viz.do_pause)\n # solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n\n\ndef split_model_params(model_params):\n model_params_input = {}\n model_params_fixed = {}\n for k, v in model_params.items():\n if check_param_is_fixed(v):\n model_params_fixed[k] = v\n else:\n model_params_input[k] = v\n return model_params_input, model_params_fixed\n\n\ndef check_param_is_fixed(param):\n if not isinstance(param, dict):\n return True\n if \"type\" not in param:\n return True\n\n\[email protected]\ndef UserInputs(user_params, on_change=None):\n \"\"\"Initialize user inputs for configurable model parameters.\n Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`,\n and :class:`solara.Select`.\n\n Props:\n user_params: dictionary with options for the input, including label,\n min and max values, and other fields specific to the input type.\n on_change: function to be called with (name, value) when the value of an input changes.\n \"\"\"\n\n for name, options in user_params.items():\n # label for the input is \"label\" from options or name\n label = options.get(\"label\", name)\n input_type = options.get(\"type\")\n\n def change_handler(value, name=name):\n on_change(name, value)\n\n if input_type == \"SliderInt\":\n solara.SliderInt(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"SliderFloat\":\n solara.SliderFloat(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"Select\":\n solara.Select(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n values=options.get(\"values\"),\n )\n else:\n raise ValueError(f\"{input_type} is not a supported input type\")\n\n\ndef make_space(model, agent_portrayal):\n def portray(g):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for i in range(g.width):\n for j in range(g.height):\n content = g._grid[i][j]\n if not content:\n continue\n if not hasattr(content, \"__iter__\"):\n # Is a single grid\n content = [content]\n for agent in content:\n data = agent_portrayal(agent)\n x.append(i)\n y.append(j)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_fig = Figure()\n space_ax = space_fig.subplots()\n if isinstance(model.grid, mesa.space.NetworkGrid):\n _draw_network_grid(model, space_ax, agent_portrayal)\n else:\n space_ax.scatter(**portray(model.grid))\n space_ax.set_axis_off()\n solara.FigureMatplotlib(space_fig)\n\n\ndef _draw_network_grid(model, space_ax, agent_portrayal):\n graph = model.grid.G\n pos = nx.spring_layout(graph, seed=0)\n nx.draw(\n graph,\n ax=space_ax,\n pos=pos,\n **agent_portrayal(graph),\n )\n\n\ndef make_plot(model, measure):\n fig = Figure()\n ax = fig.subplots()\n df = model.datacollector.get_model_vars_dataframe()\n ax.plot(df.loc[:, measure])\n ax.set_ylabel(measure)\n # Set integer x axis\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n solara.FigureMatplotlib(fig)\n\n\ndef make_text(renderer):\n def 
function(model):\n solara.Markdown(renderer(model))\n\n return function\n", "path": "mesa/experimental/jupyter_viz.py"}]}
| 3,303 | 273 |
gh_patches_debug_20149
|
rasdani/github-patches
|
git_diff
|
hydroshare__hydroshare-5219
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User account deletion doesn't remove resources from SOLR index
**Description of the bug**
Deleting a user in mezzanine also cascades to delete the user's resources. However it looks like the resources are not removed from the SOLR index. They still show up in discover.
Steps to reproduce the bug:
1. make a new user account
2. add a resource and make it discoverable
3. login as admin user and delete the account that you created in step 1 (via the mezzanine admin panel)
4. See that the resource listing persists on the Discover search page
**Expected behavior**
User account deletion should remove the user's resources from the SOLR index
**Additional information**
HS v 2.9.2
</issue>
<code>
[start of hs_core/hydro_realtime_signal_processor.py]
1 import logging
2
3 from django.conf import settings
4 from django.db import models
5 from hs_core.models import Date, BaseResource
6 from hs_access_control.models import ResourceAccess
7 from haystack.exceptions import NotHandled
8 from haystack.signals import BaseSignalProcessor
9
10 logger = logging.getLogger(__name__)
11
12
13 class HydroRealtimeSignalProcessor(BaseSignalProcessor):
14 """
15 Notes:
16 1. We assume everytime metadata is updated the modified datetime is updated
17 2. ResourceAccess does not update the modified datetime (it is not scientific metadata)
18 """
19
20 def setup(self):
21 if not getattr(settings, "DISABLE_HAYSTACK", False):
22 models.signals.post_save.connect(self.handle_update, sender=Date)
23 models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)
24
25 def teardown(self):
26 if not getattr(settings, "DISABLE_HAYSTACK", False):
27 models.signals.post_save.disconnect(self.handle_update, sender=Date)
28 models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)
29
30 def handle_update(self, sender, instance, **kwargs):
31 try:
32 # resolve the BaseResource corresponding to the metadata element.
33 newbase = instance.metadata.resource
34 index_resource(self, newbase)
35 except Exception as e:
36 logger.exception("{} exception: {}".format(type(instance), str(e)))
37
38 def handle_access(self, sender, instance, **kwargs):
39 try:
40 newbase = instance.resource
41 index_resource(self, newbase)
42 except Exception as e:
43 logger.exception("{} exception: {}".format(type(instance), str(e)))
44
45
46 def index_resource(signal_processor, instance: BaseResource):
47 if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):
48 # work around for failure of super(BaseResource, instance) to work properly.
49 # this always succeeds because this is a post-save object action.
50 newbase = BaseResource.objects.get(pk=instance.pk)
51 newsender = BaseResource
52 using_backends = signal_processor.connection_router.for_write(instance=newbase)
53 for using in using_backends:
54 # if object is public/discoverable or becoming public/discoverable, index it
55 # test whether the object should be exposed.
56 if instance.show_in_discover:
57 try:
58 index = signal_processor.connections[using].get_unified_index().get_index(newsender)
59 index.update_object(newbase, using=using)
60 except NotHandled:
61 logger.exception("Failure: changes to %s with short_id %s not added to Solr Index.",
62 str(type(instance)), newbase.short_id)
63
64 # if object is private or becoming private, delete from index
65 else: # not to be shown in discover
66 try:
67 index = signal_processor.connections[using].get_unified_index().get_index(newsender)
68 index.remove_object(newbase, using=using)
69 except NotHandled:
70 logger.exception("Failure: delete of %s with short_id %s failed.",
71 str(type(instance)), newbase.short_id)
72
[end of hs_core/hydro_realtime_signal_processor.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hs_core/hydro_realtime_signal_processor.py b/hs_core/hydro_realtime_signal_processor.py
--- a/hs_core/hydro_realtime_signal_processor.py
+++ b/hs_core/hydro_realtime_signal_processor.py
@@ -21,11 +21,13 @@
if not getattr(settings, "DISABLE_HAYSTACK", False):
models.signals.post_save.connect(self.handle_update, sender=Date)
models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)
+ models.signals.post_delete.connect(self.handle_delete, sender=BaseResource)
def teardown(self):
if not getattr(settings, "DISABLE_HAYSTACK", False):
models.signals.post_save.disconnect(self.handle_update, sender=Date)
models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)
+ models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource)
def handle_update(self, sender, instance, **kwargs):
try:
|
{"golden_diff": "diff --git a/hs_core/hydro_realtime_signal_processor.py b/hs_core/hydro_realtime_signal_processor.py\n--- a/hs_core/hydro_realtime_signal_processor.py\n+++ b/hs_core/hydro_realtime_signal_processor.py\n@@ -21,11 +21,13 @@\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n+ models.signals.post_delete.connect(self.handle_delete, sender=BaseResource)\n \n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n+ models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource)\n \n def handle_update(self, sender, instance, **kwargs):\n try:\n", "issue": "User account deletion doesn't remove resources from SOLR index\n**Description of the bug**\r\nDeleting a user in mezzanine also cascades to delete the user's resources. However it looks like the resources are not removed from the SOLR index. They still show up in discover.\r\n\r\nSteps to reproduce the bug:\r\n1. make a new user account\r\n2. add a resource and make it discoverable\r\n3. login as admin user and delete the account that you created in step 1 (via the mezzanine admin panel)\r\n4. See that the resource listing persists on the Discover search page\r\n\r\n**Expected behavior**\r\nUser account deletion should remove the user's resources from the SOLR index\r\n\r\n**Additional information**\r\nHS v 2.9.2\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.db import models\nfrom hs_core.models import Date, BaseResource\nfrom hs_access_control.models import ResourceAccess\nfrom haystack.exceptions import NotHandled\nfrom haystack.signals import BaseSignalProcessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass HydroRealtimeSignalProcessor(BaseSignalProcessor):\n \"\"\"\n Notes:\n 1. We assume everytime metadata is updated the modified datetime is updated\n 2. 
ResourceAccess does not update the modified datetime (it is not scientific metadata)\n \"\"\"\n\n def setup(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n\n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n\n def handle_update(self, sender, instance, **kwargs):\n try:\n # resolve the BaseResource corresponding to the metadata element.\n newbase = instance.metadata.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n def handle_access(self, sender, instance, **kwargs):\n try:\n newbase = instance.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n\ndef index_resource(signal_processor, instance: BaseResource):\n if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):\n # work around for failure of super(BaseResource, instance) to work properly.\n # this always succeeds because this is a post-save object action.\n newbase = BaseResource.objects.get(pk=instance.pk)\n newsender = BaseResource\n using_backends = signal_processor.connection_router.for_write(instance=newbase)\n for using in using_backends:\n # if object is public/discoverable or becoming public/discoverable, index it\n # test whether the object should be exposed.\n if instance.show_in_discover:\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.update_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: changes to %s with short_id %s not added to Solr Index.\",\n str(type(instance)), newbase.short_id)\n\n # if object is private or becoming private, delete from index\n else: # not to be shown in discover\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.remove_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: delete of %s with short_id %s failed.\",\n str(type(instance)), newbase.short_id)\n", "path": "hs_core/hydro_realtime_signal_processor.py"}]}
| 1,462 | 207 |
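The golden diff above only connects `post_delete` to a `handle_delete` method; the handler body itself is not part of the shown hunk. The sketch below is one way such a method could look, mirroring the index-removal branch of `index_resource` — this body is an assumption, not the actual HydroShare implementation.

```python
import logging

from haystack.exceptions import NotHandled

logger = logging.getLogger(__name__)


# Hypothetical method body for HydroRealtimeSignalProcessor.handle_delete:
# remove the deleted BaseResource from every write backend so it no longer
# appears in Discover.
def handle_delete(self, sender, instance, **kwargs):
    try:
        using_backends = self.connection_router.for_write(instance=instance)
        for using in using_backends:
            try:
                index = self.connections[using].get_unified_index().get_index(sender)
                index.remove_object(instance, using=using)
            except NotHandled:
                logger.exception("Failure: delete of %s with short_id %s failed.",
                                 str(type(instance)), instance.short_id)
    except Exception as e:
        logger.exception("{} exception: {}".format(type(instance), str(e)))
```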
gh_patches_debug_17183
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-957
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't get GUI if `storage_dir` is a relative path in config
If you specify `storage_dir` as a relative path in config.json, then `GET /index.html` returns 404. With absolute paths everything works fine.
</issue>
<code>
[start of mindsdb/utilities/config.py]
1 import os
2 import json
3 import hashlib
4 import datetime
5
6
7 default_config = {
8 "log": {
9 "level": {
10 "console": "ERROR",
11 "file": "WARNING"
12 }
13 },
14 "debug": False,
15 "integrations": {},
16 "api": {
17 "http": {
18 "host": "127.0.0.1",
19 "port": "47334"
20 },
21 "mysql": {
22 "host": "127.0.0.1",
23 "password": "",
24 "port": "47335",
25 "user": "mindsdb",
26 "database": "mindsdb",
27 "ssl": True
28 },
29 "mongodb": {
30 "host": "127.0.0.1",
31 "port": "47336",
32 "database": "mindsdb"
33 }
34 }
35 }
36
37
38 class Config(object):
39 current_version = '1.3'
40 _config = {}
41 paths = {
42 'root': '',
43 'datasources': '',
44 'predictors': '',
45 'static': '',
46 'tmp': '',
47 'log': '',
48 'obsolete': {
49 'predictors': '',
50 'datasources': ''
51 }
52 }
53 versions = {}
54
55 def __init__(self, config_path):
56 self._config_path = None
57 self._config_hash = None
58 self._config = None
59 if isinstance(config_path, str):
60 self.config_path = config_path
61 self._read()
62 self._config_hash = self._gen_hash()
63
64 storage_dir = self._config['storage_dir']
65 if os.path.isabs(storage_dir) is False:
66 storage_dir = os.path.normpath(
67 os.path.join(
68 os.path.dirname(config_path),
69 storage_dir
70 )
71 )
72 self.paths['root'] = storage_dir
73 self.paths['datasources'] = os.path.join(storage_dir, 'datasources')
74 self.paths['predictors'] = os.path.join(storage_dir, 'predictors')
75 self.paths['static'] = os.path.join(storage_dir, 'static')
76 self.paths['tmp'] = os.path.join(storage_dir, 'tmp')
77 self.paths['log'] = os.path.join(storage_dir, 'log')
78 self.paths['obsolete']['predictors'] = os.path.join(storage_dir, 'obsolete', 'predictors')
79 self.paths['obsolete']['datasources'] = os.path.join(storage_dir, 'obsolete', 'datasources')
80
81 self._read_versions_file(os.path.join(self.paths['root'], 'versions.json'))
82 else:
83 raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>')
84
85 def _read_versions_file(self, path):
86 if os.path.isfile(path):
87 with open(path, 'rt') as f:
88 self.versions = json.loads(f.read())
89
90 def _migrate(self):
91 def m1_0(config):
92 if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']:
93 config['integrations']['default_clickhouse']['type'] = 'clickhouse'
94 if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']:
95 config['integrations']['default_mariadb']['type'] = 'mariadb'
96 if 'datasources' in config['api']['mysql']:
97 del config['api']['mysql']['datasources']
98 config['config_version'] = '1.1'
99 return config
100
101 def m1_1(config):
102 import tempfile
103 import shutil
104 from pathlib import Path
105
106 ds_storage_path = Path(config['interface']['datastore']['storage_dir'])
107 mdb_storage_path = Path(config['interface']['mindsdb_native']['storage_dir'])
108
109 temp_dir_path = tempfile.mkdtemp()
110
111 if ds_storage_path.is_dir():
112 shutil.move(
113 str(ds_storage_path),
114 temp_dir_path
115 )
116
117 ds_storage_path.mkdir(mode=0o777, exist_ok=True, parents=True)
118
119 if Path(temp_dir_path).joinpath('datastore').is_dir():
120 shutil.move(
121 str(Path(temp_dir_path).joinpath('datastore')),
122 str(ds_storage_path.joinpath('datasources'))
123 )
124 else:
125 ds_storage_path.joinpath('datasources').mkdir(mode=0o777, exist_ok=True)
126
127 if ds_storage_path == mdb_storage_path:
128 shutil.move(
129 str(Path(temp_dir_path)),
130 str(ds_storage_path.joinpath('predictors'))
131 )
132 elif mdb_storage_path.is_dir():
133 shutil.move(
134 str(mdb_storage_path),
135 str(ds_storage_path.joinpath('predictors'))
136 )
137 else:
138 mdb_storage_path.joinpath('predictors').mkdir(mode=0o777, exist_ok=True)
139
140 ds_storage_path.joinpath('tmp').mkdir(mode=0o777, exist_ok=True)
141 ds_storage_path.joinpath('static').mkdir(mode=0o777, exist_ok=True)
142
143 if Path(temp_dir_path).is_dir():
144 shutil.rmtree(temp_dir_path)
145
146 config['storage_dir'] = str(ds_storage_path)
147 del config['interface']['datastore']['storage_dir']
148 del config['interface']['mindsdb_native']['storage_dir']
149 config['config_version'] = '1.2'
150 return config
151
152 def m1_2(config):
153 ''' remove no longer needed fields
154 '''
155 try:
156 del config['api']['mysql']['log']
157 except Exception:
158 pass
159
160 try:
161 del config['interface']
162 except Exception:
163 pass
164
165 if 'pip_path' in config and config['pip_path'] is None:
166 del config['pip_path']
167
168 if 'python_interpreter' in config and config['python_interpreter'] is None:
169 del config['python_interpreter']
170
171 config['config_version'] = '1.3'
172 return config
173
174 migrations = {
175 '1.0': m1_0,
176 '1.1': m1_1,
177 '1.2': m1_2
178 }
179
180 current_version = self._parse_version(self._config['config_version'])
181 target_version = self._parse_version(self.current_version)
182 while current_version < target_version:
183 str_version = '.'.join([str(x) for x in current_version])
184 self._config = migrations[str_version](self._config)
185 current_version = self._parse_version(self._config['config_version'])
186
187 def _validate(self):
188 integrations = self._config.get('integrations', {})
189 for key, value in integrations.items():
190 if not isinstance(value, dict):
191 raise TypeError(f"Config error: integration '{key}' must be a json")
192 if 'type' not in integrations[key]:
193 raise KeyError(f"Config error: for integration '{key}' key 'type' must be specified")
194
195 storage_dir = self._config.get('storage_dir')
196 if storage_dir is None:
197 raise KeyError("'storage_dir' mandatory key in config")
198
199 def _parse_version(self, version):
200 if isinstance(version, str):
201 version = [int(x) for x in version.split('.')]
202 elif isinstance(version, int):
203 version = [version]
204 if len(version) == 1:
205 version.append(0)
206 return version
207
208 def _format(self):
209 ''' changing user input to formalised view
210 '''
211 for integration in self._config.get('integrations', {}).values():
212 password = integration.get('password')
213 password = '' if password is None else str(password)
214 integration['password'] = str(password)
215
216 password = self._config['api']['mysql'].get('password')
217 password = '' if password is None else str(password)
218 self._config['api']['mysql']['password'] = str(password)
219
220 def _merge_default_config(self):
221 def merge_key_recursive(target_dict, source_dict, key):
222 if key not in target_dict:
223 target_dict[key] = source_dict[key]
224 elif isinstance(target_dict[key], dict) and isinstance(source_dict[key], dict):
225 for k in source_dict[key]:
226 merge_key_recursive(target_dict[key], source_dict[key], k)
227
228 for key in default_config:
229 merge_key_recursive(self._config, default_config, key)
230
231 def _read(self):
232 if isinstance(self.config_path, str) and os.path.isfile(self.config_path):
233 with open(self.config_path, 'r') as fp:
234 self._config = json.load(fp)
235 if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version):
236 self._migrate()
237 self._save()
238 self._validate()
239 self._format()
240 self._merge_default_config()
241 else:
242 raise TypeError('`self.config_path` must be a string representing a local file path to a json config')
243
244 def _save(self):
245 with open(self.config_path, 'w') as fp:
246 json.dump(self._config, fp, indent=4, sort_keys=True)
247
248 def _gen_hash(self):
249 with open(self.config_path, 'rb') as fp:
250 return hashlib.md5(fp.read()).hexdigest()
251
252 def _set_updated(self, key):
253 # Only check this for dynamically generated keys, won't be needed once we switch to using a database here
254 if key in ['integrations']:
255 file_hash = self._gen_hash()
256 if file_hash != self._config_hash:
257 self._read()
258 self._config_hash = self._gen_hash()
259
260 def __getitem__(self, key):
261 self._set_updated(key)
262 return self._config[key]
263
264 def get(self, key, default=None):
265 self._set_updated(key)
266 return self._config.get(key, default)
267
268 def get_all(self):
269 return self._config
270
271 def set(self, key_chain, value, delete=False):
272 with open(self.config_path, 'r') as fp:
273 self._config = json.load(fp)
274
275 c = self._config
276 for i, k in enumerate(key_chain):
277 if k in c and i + 1 < len(key_chain):
278 c = c[k]
279 elif k not in c and i + 1 < len(key_chain):
280 c[k] = {}
281 c = c[k]
282 else:
283 if delete:
284 del c[k]
285 else:
286 c[k] = value
287 self._save()
288
289 # Higher level interface
290 def add_db_integration(self, name, dict):
291 dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]
292 if 'database_name' not in dict:
293 dict['database_name'] = name
294 if 'enabled' not in dict:
295 dict['enabled'] = True
296
297 self.set(['integrations', name], dict)
298
299 def modify_db_integration(self, name, dict):
300 old_dict = self._config['integrations'][name]
301 for k in old_dict:
302 if k not in dict:
303 dict[k] = old_dict[k]
304
305 self.add_db_integration(name, dict)
306
307 def remove_db_integration(self, name):
308 self.set(['integrations', name], None, True)
309
[end of mindsdb/utilities/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -57,7 +57,7 @@
self._config_hash = None
self._config = None
if isinstance(config_path, str):
- self.config_path = config_path
+ self.config_path = os.path.abspath(config_path)
self._read()
self._config_hash = self._gen_hash()
@@ -65,7 +65,7 @@
if os.path.isabs(storage_dir) is False:
storage_dir = os.path.normpath(
os.path.join(
- os.path.dirname(config_path),
+ os.path.dirname(self.config_path),
storage_dir
)
)
|
{"golden_diff": "diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py\n--- a/mindsdb/utilities/config.py\n+++ b/mindsdb/utilities/config.py\n@@ -57,7 +57,7 @@\n self._config_hash = None\n self._config = None\n if isinstance(config_path, str):\n- self.config_path = config_path\n+ self.config_path = os.path.abspath(config_path)\n self._read()\n self._config_hash = self._gen_hash()\n \n@@ -65,7 +65,7 @@\n if os.path.isabs(storage_dir) is False:\n storage_dir = os.path.normpath(\n os.path.join(\n- os.path.dirname(config_path),\n+ os.path.dirname(self.config_path),\n storage_dir\n )\n )\n", "issue": "Cant get GUI if `storage_dir` is relative path in config\nIf specify in config.json storage_dir as relative path, then `GET /index.html` return 404. With absolute paths all work fine.\n", "before_files": [{"content": "import os\nimport json\nimport hashlib\nimport datetime\n\n\ndefault_config = {\n \"log\": {\n \"level\": {\n \"console\": \"ERROR\",\n \"file\": \"WARNING\"\n }\n },\n \"debug\": False,\n \"integrations\": {},\n \"api\": {\n \"http\": {\n \"host\": \"127.0.0.1\",\n \"port\": \"47334\"\n },\n \"mysql\": {\n \"host\": \"127.0.0.1\",\n \"password\": \"\",\n \"port\": \"47335\",\n \"user\": \"mindsdb\",\n \"database\": \"mindsdb\",\n \"ssl\": True\n },\n \"mongodb\": {\n \"host\": \"127.0.0.1\",\n \"port\": \"47336\",\n \"database\": \"mindsdb\"\n }\n }\n}\n\n\nclass Config(object):\n current_version = '1.3'\n _config = {}\n paths = {\n 'root': '',\n 'datasources': '',\n 'predictors': '',\n 'static': '',\n 'tmp': '',\n 'log': '',\n 'obsolete': {\n 'predictors': '',\n 'datasources': ''\n }\n }\n versions = {}\n\n def __init__(self, config_path):\n self._config_path = None\n self._config_hash = None\n self._config = None\n if isinstance(config_path, str):\n self.config_path = config_path\n self._read()\n self._config_hash = self._gen_hash()\n\n storage_dir = self._config['storage_dir']\n if os.path.isabs(storage_dir) is False:\n storage_dir = os.path.normpath(\n os.path.join(\n os.path.dirname(config_path),\n storage_dir\n )\n )\n self.paths['root'] = storage_dir\n self.paths['datasources'] = os.path.join(storage_dir, 'datasources')\n self.paths['predictors'] = os.path.join(storage_dir, 'predictors')\n self.paths['static'] = os.path.join(storage_dir, 'static')\n self.paths['tmp'] = os.path.join(storage_dir, 'tmp')\n self.paths['log'] = os.path.join(storage_dir, 'log')\n self.paths['obsolete']['predictors'] = os.path.join(storage_dir, 'obsolete', 'predictors')\n self.paths['obsolete']['datasources'] = os.path.join(storage_dir, 'obsolete', 'datasources')\n\n self._read_versions_file(os.path.join(self.paths['root'], 'versions.json'))\n else:\n raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>')\n\n def _read_versions_file(self, path):\n if os.path.isfile(path):\n with open(path, 'rt') as f:\n self.versions = json.loads(f.read())\n\n def _migrate(self):\n def m1_0(config):\n if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']:\n config['integrations']['default_clickhouse']['type'] = 'clickhouse'\n if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']:\n config['integrations']['default_mariadb']['type'] = 'mariadb'\n if 'datasources' in config['api']['mysql']:\n del config['api']['mysql']['datasources']\n config['config_version'] = '1.1'\n return config\n\n def m1_1(config):\n 
import tempfile\n import shutil\n from pathlib import Path\n\n ds_storage_path = Path(config['interface']['datastore']['storage_dir'])\n mdb_storage_path = Path(config['interface']['mindsdb_native']['storage_dir'])\n\n temp_dir_path = tempfile.mkdtemp()\n\n if ds_storage_path.is_dir():\n shutil.move(\n str(ds_storage_path),\n temp_dir_path\n )\n\n ds_storage_path.mkdir(mode=0o777, exist_ok=True, parents=True)\n\n if Path(temp_dir_path).joinpath('datastore').is_dir():\n shutil.move(\n str(Path(temp_dir_path).joinpath('datastore')),\n str(ds_storage_path.joinpath('datasources'))\n )\n else:\n ds_storage_path.joinpath('datasources').mkdir(mode=0o777, exist_ok=True)\n\n if ds_storage_path == mdb_storage_path:\n shutil.move(\n str(Path(temp_dir_path)),\n str(ds_storage_path.joinpath('predictors'))\n )\n elif mdb_storage_path.is_dir():\n shutil.move(\n str(mdb_storage_path),\n str(ds_storage_path.joinpath('predictors'))\n )\n else:\n mdb_storage_path.joinpath('predictors').mkdir(mode=0o777, exist_ok=True)\n\n ds_storage_path.joinpath('tmp').mkdir(mode=0o777, exist_ok=True)\n ds_storage_path.joinpath('static').mkdir(mode=0o777, exist_ok=True)\n\n if Path(temp_dir_path).is_dir():\n shutil.rmtree(temp_dir_path)\n\n config['storage_dir'] = str(ds_storage_path)\n del config['interface']['datastore']['storage_dir']\n del config['interface']['mindsdb_native']['storage_dir']\n config['config_version'] = '1.2'\n return config\n\n def m1_2(config):\n ''' remove no longer needed fields\n '''\n try:\n del config['api']['mysql']['log']\n except Exception:\n pass\n\n try:\n del config['interface']\n except Exception:\n pass\n\n if 'pip_path' in config and config['pip_path'] is None:\n del config['pip_path']\n\n if 'python_interpreter' in config and config['python_interpreter'] is None:\n del config['python_interpreter']\n\n config['config_version'] = '1.3'\n return config\n\n migrations = {\n '1.0': m1_0,\n '1.1': m1_1,\n '1.2': m1_2\n }\n\n current_version = self._parse_version(self._config['config_version'])\n target_version = self._parse_version(self.current_version)\n while current_version < target_version:\n str_version = '.'.join([str(x) for x in current_version])\n self._config = migrations[str_version](self._config)\n current_version = self._parse_version(self._config['config_version'])\n\n def _validate(self):\n integrations = self._config.get('integrations', {})\n for key, value in integrations.items():\n if not isinstance(value, dict):\n raise TypeError(f\"Config error: integration '{key}' must be a json\")\n if 'type' not in integrations[key]:\n raise KeyError(f\"Config error: for integration '{key}' key 'type' must be specified\")\n\n storage_dir = self._config.get('storage_dir')\n if storage_dir is None:\n raise KeyError(\"'storage_dir' mandatory key in config\")\n\n def _parse_version(self, version):\n if isinstance(version, str):\n version = [int(x) for x in version.split('.')]\n elif isinstance(version, int):\n version = [version]\n if len(version) == 1:\n version.append(0)\n return version\n\n def _format(self):\n ''' changing user input to formalised view\n '''\n for integration in self._config.get('integrations', {}).values():\n password = integration.get('password')\n password = '' if password is None else str(password)\n integration['password'] = str(password)\n\n password = self._config['api']['mysql'].get('password')\n password = '' if password is None else str(password)\n self._config['api']['mysql']['password'] = str(password)\n\n def _merge_default_config(self):\n def 
merge_key_recursive(target_dict, source_dict, key):\n if key not in target_dict:\n target_dict[key] = source_dict[key]\n elif isinstance(target_dict[key], dict) and isinstance(source_dict[key], dict):\n for k in source_dict[key]:\n merge_key_recursive(target_dict[key], source_dict[key], k)\n\n for key in default_config:\n merge_key_recursive(self._config, default_config, key)\n\n def _read(self):\n if isinstance(self.config_path, str) and os.path.isfile(self.config_path):\n with open(self.config_path, 'r') as fp:\n self._config = json.load(fp)\n if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version):\n self._migrate()\n self._save()\n self._validate()\n self._format()\n self._merge_default_config()\n else:\n raise TypeError('`self.config_path` must be a string representing a local file path to a json config')\n\n def _save(self):\n with open(self.config_path, 'w') as fp:\n json.dump(self._config, fp, indent=4, sort_keys=True)\n\n def _gen_hash(self):\n with open(self.config_path, 'rb') as fp:\n return hashlib.md5(fp.read()).hexdigest()\n\n def _set_updated(self, key):\n # Only check this for dynamically generated keys, won't be needed once we switch to using a database here\n if key in ['integrations']:\n file_hash = self._gen_hash()\n if file_hash != self._config_hash:\n self._read()\n self._config_hash = self._gen_hash()\n\n def __getitem__(self, key):\n self._set_updated(key)\n return self._config[key]\n\n def get(self, key, default=None):\n self._set_updated(key)\n return self._config.get(key, default)\n\n def get_all(self):\n return self._config\n\n def set(self, key_chain, value, delete=False):\n with open(self.config_path, 'r') as fp:\n self._config = json.load(fp)\n\n c = self._config\n for i, k in enumerate(key_chain):\n if k in c and i + 1 < len(key_chain):\n c = c[k]\n elif k not in c and i + 1 < len(key_chain):\n c[k] = {}\n c = c[k]\n else:\n if delete:\n del c[k]\n else:\n c[k] = value\n self._save()\n\n # Higher level interface\n def add_db_integration(self, name, dict):\n dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]\n if 'database_name' not in dict:\n dict['database_name'] = name\n if 'enabled' not in dict:\n dict['enabled'] = True\n\n self.set(['integrations', name], dict)\n\n def modify_db_integration(self, name, dict):\n old_dict = self._config['integrations'][name]\n for k in old_dict:\n if k not in dict:\n dict[k] = old_dict[k]\n\n self.add_db_integration(name, dict)\n\n def remove_db_integration(self, name):\n self.set(['integrations', name], None, True)\n", "path": "mindsdb/utilities/config.py"}]}
| 3,843 | 176 |
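The failure mode here is most likely that `os.path.dirname()` of a relative config path keeps every derived storage path relative, so the static files the GUI serves are resolved against the wrong directory whenever the working directory differs from the config's location. A small standalone illustration of why the patch switches to `os.path.abspath(config_path)` — the file names are made up for the example:

```python
import os

# Hypothetical relative config path, as a user might pass it on the command line.
config_path = "conf/config.json"
storage_dir = "storage"

# Before the fix: the derived root stays relative ("conf/storage"), so it only
# resolves correctly while the process keeps the original working directory.
relative_root = os.path.normpath(os.path.join(os.path.dirname(config_path), storage_dir))
print(relative_root, os.path.isabs(relative_root))   # conf/storage False

# After the fix: taking abspath() first anchors storage next to the config file.
absolute_root = os.path.normpath(
    os.path.join(os.path.dirname(os.path.abspath(config_path)), storage_dir)
)
print(os.path.isabs(absolute_root))                  # True
```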
gh_patches_debug_8403
|
rasdani/github-patches
|
git_diff
|
pypa__pip-10507
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
21.3 regression with legacy resolver
Assuming the following project with an empty `pyproject.toml` and the following `setup.cfg`:
```ini
[metadata]
name = pkgb
version = 1.0
[options]
install_requires =
wrapt
```
We get the following stack trace, using pip main branch, today:
```console
$ pip install --use-deprecated=legacy-resolver -e ./pkgb
Obtaining file:///home/me/tmp/brol/pkgb
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing wheel metadata ... done
Requirement already satisfied: wrapt in /home/me/.virtualenvs/tempenv-49ea1126817e6/lib/python3.8/site-packages (from pkgb==1.0) (1.12.1)
ERROR: Exception:
Traceback (most recent call last):
File "/home/me/pip/src/pip/_internal/cli/base_command.py", line 179, in exc_logging_wrapper
status = run_func(*args)
File "/home/me/pip/src/pip/_internal/cli/req_command.py", line 203, in wrapper
return func(self, options, args)
File "/home/me/pip/src/pip/_internal/commands/install.py", line 334, in run
requirement_set = resolver.resolve(
File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 181, in resolve
discovered_reqs.extend(self._resolve_one(requirement_set, req))
File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 382, in _resolve_one
_check_dist_requires_python(
File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 75, in _check_dist_requires_python
requires_python = str(dist.requires_python)
File "/home/me/pip/src/pip/_vendor/pkg_resources/__init__.py", line 2816, in __getattr__
return getattr(self._provider, attr)
AttributeError: 'PathMetadata' object has no attribute 'requires_python'
```
</issue>
<code>
[start of src/pip/_internal/distributions/installed.py]
1 from pip._internal.distributions.base import AbstractDistribution
2 from pip._internal.index.package_finder import PackageFinder
3 from pip._internal.metadata import BaseDistribution
4
5
6 class InstalledDistribution(AbstractDistribution):
7 """Represents an installed package.
8
9 This does not need any preparation as the required information has already
10 been computed.
11 """
12
13 def get_metadata_distribution(self) -> BaseDistribution:
14 assert self.req.satisfied_by is not None, "not actually installed"
15 return self.req.satisfied_by
16
17 def prepare_distribution_metadata(
18 self, finder: PackageFinder, build_isolation: bool
19 ) -> None:
20 pass
21
[end of src/pip/_internal/distributions/installed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pip/_internal/distributions/installed.py b/src/pip/_internal/distributions/installed.py
--- a/src/pip/_internal/distributions/installed.py
+++ b/src/pip/_internal/distributions/installed.py
@@ -11,8 +11,10 @@
"""
def get_metadata_distribution(self) -> BaseDistribution:
+ from pip._internal.metadata.pkg_resources import Distribution as _Dist
+
assert self.req.satisfied_by is not None, "not actually installed"
- return self.req.satisfied_by
+ return _Dist(self.req.satisfied_by)
def prepare_distribution_metadata(
self, finder: PackageFinder, build_isolation: bool
|
{"golden_diff": "diff --git a/src/pip/_internal/distributions/installed.py b/src/pip/_internal/distributions/installed.py\n--- a/src/pip/_internal/distributions/installed.py\n+++ b/src/pip/_internal/distributions/installed.py\n@@ -11,8 +11,10 @@\n \"\"\"\n \n def get_metadata_distribution(self) -> BaseDistribution:\n+ from pip._internal.metadata.pkg_resources import Distribution as _Dist\n+\n assert self.req.satisfied_by is not None, \"not actually installed\"\n- return self.req.satisfied_by\n+ return _Dist(self.req.satisfied_by)\n \n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n", "issue": "21.3 regression with legacy resolver\nAssuming the following project with an empty `pyproject.toml` and the following `setup.cfg`:\r\n\r\n```ini\r\n[metadata]\r\nname = pkgb\r\nversion = 1.0\r\n\r\n[options]\r\ninstall_requires =\r\n wrapt\r\n```\r\n\r\nWe get the following stack trace, using pip main branch, today:\r\n\r\n```console\r\n$ pip install --use-deprecated=legacy-resolver -e ./pkgb\r\nObtaining file:///home/me/tmp/brol/pkgb\r\n Installing build dependencies ... done\r\n Getting requirements to build wheel ... done\r\n Preparing wheel metadata ... done\r\nRequirement already satisfied: wrapt in /home/me/.virtualenvs/tempenv-49ea1126817e6/lib/python3.8/site-packages (from pkgb==1.0) (1.12.1)\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/home/me/pip/src/pip/_internal/cli/base_command.py\", line 179, in exc_logging_wrapper\r\n status = run_func(*args)\r\n File \"/home/me/pip/src/pip/_internal/cli/req_command.py\", line 203, in wrapper\r\n return func(self, options, args)\r\n File \"/home/me/pip/src/pip/_internal/commands/install.py\", line 334, in run\r\n requirement_set = resolver.resolve(\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 181, in resolve\r\n discovered_reqs.extend(self._resolve_one(requirement_set, req))\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 382, in _resolve_one\r\n _check_dist_requires_python(\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 75, in _check_dist_requires_python\r\n requires_python = str(dist.requires_python)\r\n File \"/home/me/pip/src/pip/_vendor/pkg_resources/__init__.py\", line 2816, in __getattr__\r\n return getattr(self._provider, attr)\r\nAttributeError: 'PathMetadata' object has no attribute 'requires_python'\r\n```\r\n\n", "before_files": [{"content": "from pip._internal.distributions.base import AbstractDistribution\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution\n\n\nclass InstalledDistribution(AbstractDistribution):\n \"\"\"Represents an installed package.\n\n This does not need any preparation as the required information has already\n been computed.\n \"\"\"\n\n def get_metadata_distribution(self) -> BaseDistribution:\n assert self.req.satisfied_by is not None, \"not actually installed\"\n return self.req.satisfied_by\n\n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n ) -> None:\n pass\n", "path": "src/pip/_internal/distributions/installed.py"}]}
| 1,195 | 152 |
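The traceback boils down to handing the legacy resolver a raw `pkg_resources` distribution, which has no `requires_python` attribute; the patch wraps it in pip's internal metadata adapter instead. Below is a rough, self-contained illustration of that adapter idea — the class names are invented stand-ins, not pip internals.

```python
class RawDist:
    """Stand-in for a pkg_resources distribution without requires_python."""

    project_name = "pkgb"
    version = "1.0"


class MetadataAdapter:
    """Stand-in for pip's internal Distribution wrapper (illustrative only)."""

    def __init__(self, dist):
        self._dist = dist

    @property
    def requires_python(self):
        # Fall back to an empty specifier string when the underlying
        # distribution declares no Requires-Python metadata.
        return getattr(self._dist, "requires_python", "")


raw = RawDist()
wrapped = MetadataAdapter(raw)
print(repr(str(wrapped.requires_python)))  # '' instead of an AttributeError
```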
gh_patches_debug_21164
|
rasdani/github-patches
|
git_diff
|
elastic__ecs-1528
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support `match_only_text` type fallback for ES 6 artifacts
The `match_only_text` field data type will be introduced in Elasticsearch 7.14 and an upcoming ECS version via an [RFC proposal](https://github.com/elastic/ecs/blob/master/rfcs/text/0023-match_only_text-data-type.md).
Since `match_only_text` is a feature available in 7.14+, artifacts generated targeting ES 6.x will need to fall back to using the `text` data type.
</issue>
<code>
[start of scripts/generators/es_template.py]
1 import copy
2 import json
3 import sys
4
5 from os.path import join
6
7 from generators import ecs_helpers
8 from schema.cleaner import field_or_multi_field_datatype_defaults
9
10
11 TYPE_FALLBACKS = {
12 'constant_keyword': 'keyword',
13 'wildcard': 'keyword',
14 'version': 'keyword'
15 }
16
17 # Composable Template
18
19
20 def generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):
21 """This generates all artifacts for the composable template approach"""
22 all_component_templates(ecs_nested, ecs_version, out_dir)
23 component_names = component_name_convention(ecs_version, ecs_nested)
24 save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)
25
26
27 def save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):
28 """Generate the master sample composable template"""
29 template = {
30 "index_patterns": ["try-ecs-*"],
31 "composed_of": component_names,
32 "priority": 1, # Very low, as this is a sample template
33 "_meta": {
34 "ecs_version": ecs_version,
35 "description": "Sample composable template that includes all ECS fields"
36 },
37 "template": {
38 "settings": {
39 "index": {
40 "mapping": {
41 "total_fields": {
42 "limit": 2000
43 }
44 }
45 }
46 },
47 "mappings": mapping_settings(mapping_settings_file)
48 }
49 }
50 filename = join(out_dir, "elasticsearch/template.json")
51 save_json(filename, template)
52
53
54 def all_component_templates(ecs_nested, ecs_version, out_dir):
55 """Generate one component template per field set"""
56 component_dir = join(out_dir, 'elasticsearch/component')
57 ecs_helpers.make_dirs(component_dir)
58
59 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():
60 field_mappings = {}
61 for (flat_name, field) in fieldset['fields'].items():
62 name_parts = flat_name.split('.')
63 dict_add_nested(field_mappings, name_parts, entry_for(field))
64
65 save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)
66
67
68 def save_component_template(template_name, ecs_version, out_dir, field_mappings):
69 filename = join(out_dir, template_name) + ".json"
70 reference_url = "https://www.elastic.co/guide/en/ecs/current/ecs-{}.html".format(template_name)
71
72 template = {
73 'template': {'mappings': {'properties': field_mappings}},
74 '_meta': {
75 'ecs_version': ecs_version,
76 'documentation': reference_url
77 }
78 }
79 save_json(filename, template)
80
81
82 def component_name_convention(ecs_version, ecs_nested):
83 version = ecs_version.replace('+', '-')
84 names = []
85 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():
86 names.append("ecs_{}_{}".format(version, fieldset_name.lower()))
87 return names
88
89
90 def candidate_components(ecs_nested):
91 """Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False"""
92 components = {}
93 for (fieldset_name, fieldset) in ecs_nested.items():
94 if fieldset.get('reusable', None):
95 if not fieldset['reusable']['top_level']:
96 continue
97 components[fieldset_name] = fieldset
98 return components
99
100
101 # Legacy template
102
103
104 def generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):
105 """Generate the legacy index template"""
106 field_mappings = {}
107 for flat_name in sorted(ecs_flat):
108 field = ecs_flat[flat_name]
109 name_parts = flat_name.split('.')
110 dict_add_nested(field_mappings, name_parts, entry_for(field))
111
112 mappings_section = mapping_settings(mapping_settings_file)
113 mappings_section['properties'] = field_mappings
114
115 generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)
116 generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)
117
118
119 def generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file):
120 ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))
121 template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)
122
123 filename = join(out_dir, "elasticsearch/{}/template.json".format(es_version))
124 save_json(filename, template)
125
126
127 # Common helpers
128
129
130 def dict_add_nested(dct, name_parts, value):
131 current_nesting = name_parts[0]
132 rest_name_parts = name_parts[1:]
133 if len(rest_name_parts) > 0:
134 dct.setdefault(current_nesting, {})
135 dct[current_nesting].setdefault('properties', {})
136
137 dict_add_nested(
138 dct[current_nesting]['properties'],
139 rest_name_parts,
140 value)
141
142 else:
143 if current_nesting in dct and 'type' in value and 'object' == value['type']:
144 return
145 dct[current_nesting] = value
146
147
148 def entry_for(field):
149 field_entry = {'type': field['type']}
150 try:
151 if field['type'] == 'object' or field['type'] == 'nested':
152 if 'enabled' in field and not field['enabled']:
153 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])
154 # the index field is only valid for field types that are not object and nested
155 elif 'index' in field and not field['index']:
156 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])
157
158 if field['type'] == 'keyword':
159 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])
160 elif field['type'] == 'constant_keyword':
161 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])
162 elif field['type'] == 'text':
163 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])
164 elif field['type'] == 'alias':
165 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])
166 elif field['type'] == 'scaled_float':
167 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])
168
169 if 'multi_fields' in field:
170 field_entry['fields'] = {}
171 for mf in field['multi_fields']:
172 mf_type = mf['type']
173 mf_entry = {'type': mf_type}
174 if mf_type == 'keyword':
175 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])
176 elif mf_type == 'text':
177 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])
178 field_entry['fields'][mf['name']] = mf_entry
179
180 except KeyError as ex:
181 print("Exception {} occurred for field {}".format(ex, field))
182 raise ex
183 return field_entry
184
185
186 def mapping_settings(mapping_settings_file):
187 if mapping_settings_file:
188 with open(mapping_settings_file) as f:
189 mappings = json.load(f)
190 else:
191 mappings = default_mapping_settings()
192 return mappings
193
194
195 def template_settings(es_version, ecs_version, mappings_section, template_settings_file):
196 if template_settings_file:
197 with open(template_settings_file) as f:
198 template = json.load(f)
199 else:
200 template = default_template_settings(ecs_version)
201
202 if es_version == 6:
203 mappings_section = copy.deepcopy(mappings_section)
204 es6_type_fallback(mappings_section['properties'])
205
206 # error.stack_trace needs special handling to set
207 # index: false and doc_values: false if the field
208 # is present in the mappings
209 try:
210 error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']
211 error_stack_trace_mappings.setdefault('index', False)
212 error_stack_trace_mappings.setdefault('doc_values', False)
213 except KeyError:
214 pass
215
216 template['mappings'] = {'_doc': mappings_section}
217 else:
218 template['mappings'] = mappings_section
219
220 # _meta can't be at template root in legacy templates, so moving back to mappings section
221 # if present
222 if '_meta' in template:
223 mappings_section['_meta'] = template.pop('_meta')
224
225 return template
226
227
228 def save_json(file, data):
229 open_mode = "wb"
230 if sys.version_info >= (3, 0):
231 open_mode = "w"
232 with open(file, open_mode) as jsonfile:
233 jsonfile.write(json.dumps(data, indent=2, sort_keys=True))
234
235
236 def default_template_settings(ecs_version):
237 return {
238 "index_patterns": ["try-ecs-*"],
239 "_meta": {"version": ecs_version},
240 "order": 1,
241 "settings": {
242 "index": {
243 "mapping": {
244 "total_fields": {
245 "limit": 10000
246 }
247 },
248 "refresh_interval": "5s"
249 }
250 }
251 }
252
253
254 def default_mapping_settings():
255 return {
256 "date_detection": False,
257 "dynamic_templates": [
258 {
259 "strings_as_keyword": {
260 "mapping": {
261 "ignore_above": 1024,
262 "type": "keyword"
263 },
264 "match_mapping_type": "string"
265 }
266 }
267 ]
268 }
269
270
271 def es6_type_fallback(mappings):
272 """
273 Visits each leaf in mappings object and fallback to an
274 Elasticsearch 6.x supported type.
275
276 Since a field like `wildcard` won't have the same defaults as
277 a `keyword` field, we must add any missing defaults.
278 """
279
280 for (name, details) in mappings.items():
281 if 'type' in details:
282 fallback_type = TYPE_FALLBACKS.get(details['type'])
283 if fallback_type:
284 mappings[name]['type'] = fallback_type
285 field_or_multi_field_datatype_defaults(mappings[name])
286 if 'properties' in details:
287 es6_type_fallback(details['properties'])
288
[end of scripts/generators/es_template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py
--- a/scripts/generators/es_template.py
+++ b/scripts/generators/es_template.py
@@ -11,7 +11,8 @@
TYPE_FALLBACKS = {
'constant_keyword': 'keyword',
'wildcard': 'keyword',
- 'version': 'keyword'
+ 'version': 'keyword',
+ 'match_only_text': 'text'
}
# Composable Template
@@ -283,5 +284,13 @@
if fallback_type:
mappings[name]['type'] = fallback_type
field_or_multi_field_datatype_defaults(mappings[name])
+ # support multi-fields
+ if 'fields' in details:
+ # potentially multiple multi-fields
+ for field_name, field_value in details['fields'].items():
+ fallback_type = TYPE_FALLBACKS.get(field_value['type'])
+ if fallback_type:
+ mappings[name]['fields'][field_name]['type'] = fallback_type
+ field_or_multi_field_datatype_defaults(mappings[name]['fields'][field_name])
if 'properties' in details:
es6_type_fallback(details['properties'])
|
{"golden_diff": "diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py\n--- a/scripts/generators/es_template.py\n+++ b/scripts/generators/es_template.py\n@@ -11,7 +11,8 @@\n TYPE_FALLBACKS = {\n 'constant_keyword': 'keyword',\n 'wildcard': 'keyword',\n- 'version': 'keyword'\n+ 'version': 'keyword',\n+ 'match_only_text': 'text'\n }\n \n # Composable Template\n@@ -283,5 +284,13 @@\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n+ # support multi-fields\n+ if 'fields' in details:\n+ # potentially multiple multi-fields\n+ for field_name, field_value in details['fields'].items():\n+ fallback_type = TYPE_FALLBACKS.get(field_value['type'])\n+ if fallback_type:\n+ mappings[name]['fields'][field_name]['type'] = fallback_type\n+ field_or_multi_field_datatype_defaults(mappings[name]['fields'][field_name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "issue": "Support `match_only_text` type fallback for ES 6 artifacts\nThe `match_only_text` field data type will be introduced in Elasticsearch 7.14 and an upcoming ECS version via an [RFC proposal](https://github.com/elastic/ecs/blob/master/rfcs/text/0023-match_only_text-data-type.md).\r\n\r\nSince `match_only_text` is a feature available in 7.14+, artifacts generated targeting ES 6.x will need to fall back to using the `text` data type.\n", "before_files": [{"content": "import copy\nimport json\nimport sys\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom schema.cleaner import field_or_multi_field_datatype_defaults\n\n\nTYPE_FALLBACKS = {\n 'constant_keyword': 'keyword',\n 'wildcard': 'keyword',\n 'version': 'keyword'\n}\n\n# Composable Template\n\n\ndef generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):\n \"\"\"Generate the master sample composable template\"\"\"\n template = {\n \"index_patterns\": [\"try-ecs-*\"],\n \"composed_of\": component_names,\n \"priority\": 1, # Very low, as this is a sample template\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"template\": {\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n \"mappings\": mapping_settings(mapping_settings_file)\n }\n }\n filename = join(out_dir, \"elasticsearch/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(ecs_nested, ecs_version, out_dir):\n \"\"\"Generate one component template per field set\"\"\"\n component_dir = join(out_dir, 'elasticsearch/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(template_name, ecs_version, out_dir, field_mappings):\n filename = join(out_dir, 
template_name) + \".json\"\n reference_url = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n 'documentation': reference_url\n }\n }\n save_json(filename, template)\n\n\ndef component_name_convention(ecs_version, ecs_nested):\n version = ecs_version.replace('+', '-')\n names = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name.lower()))\n return names\n\n\ndef candidate_components(ecs_nested):\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):\n \"\"\"Generate the legacy index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)\n generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file):\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))\n template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)\n\n filename = join(out_dir, \"elasticsearch/{}/template.json\".format(es_version))\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(dct, name_parts, value):\n current_nesting = name_parts[0]\n rest_name_parts = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field):\n field_entry = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, 
['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])\n field_entry['fields'][mf['name']] = mf_entry\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file):\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(es_version, ecs_version, mappings_section, template_settings_file):\n if template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n template = default_template_settings(ecs_version)\n\n if es_version == 6:\n mappings_section = copy.deepcopy(mappings_section)\n es6_type_fallback(mappings_section['properties'])\n\n # error.stack_trace needs special handling to set\n # index: false and doc_values: false if the field\n # is present in the mappings\n try:\n error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n error_stack_trace_mappings.setdefault('index', False)\n error_stack_trace_mappings.setdefault('doc_values', False)\n except KeyError:\n pass\n\n template['mappings'] = {'_doc': mappings_section}\n else:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n # if present\n if '_meta' in template:\n mappings_section['_meta'] = template.pop('_meta')\n\n return template\n\n\ndef save_json(file, data):\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n jsonfile.write(json.dumps(data, indent=2, sort_keys=True))\n\n\ndef default_template_settings(ecs_version):\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings():\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n\n\ndef es6_type_fallback(mappings):\n \"\"\"\n Visits each leaf in mappings object and fallback to an\n Elasticsearch 6.x supported type.\n\n Since a field like `wildcard` won't have the same defaults as\n a `keyword` field, we must add any missing defaults.\n \"\"\"\n\n for (name, details) in mappings.items():\n if 'type' in details:\n fallback_type = TYPE_FALLBACKS.get(details['type'])\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "path": "scripts/generators/es_template.py"}]}
| 3,553 | 261 |
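For reference, the nesting helper quoted in the verification snippet above can be exercised on its own. The sketch below condenses `dict_add_nested` and shows how a dotted ECS field name such as `error.stack_trace` (which appears in that snippet) expands into nested mapping properties; the `wildcard` type is only an illustrative input, not taken from a real schema file.

```python
# Condensed sketch of the dict_add_nested helper shown above; the input field
# type is illustrative only.
def dict_add_nested(dct, name_parts, value):
    current, rest = name_parts[0], name_parts[1:]
    if rest:
        dct.setdefault(current, {}).setdefault('properties', {})
        dict_add_nested(dct[current]['properties'], rest, value)
    elif not (current in dct and value.get('type') == 'object'):
        dct[current] = value

mappings = {}
dict_add_nested(mappings, 'error.stack_trace'.split('.'), {'type': 'wildcard'})
print(mappings)
# {'error': {'properties': {'stack_trace': {'type': 'wildcard'}}}}
```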
gh_patches_debug_37678
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-5176
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ColorMapping - color values out of high/low
``` python
data = [-1, 0, np.NaN, 1, 2]
cmap = LinearColorMapper(palette=["red", "green"], low=0, high=1, nan_color='gray', high_color='orange', low_color='pink')
# expect: ['pink', 'red', 'gray', 'green', 'orange']
```
LogColorMapper should also receive this treatment.
</issue>
<code>
[start of bokeh/models/mappers.py]
1 """ Models for mapping values from one range or space to another.
2
3 """
4 from __future__ import absolute_import
5 import warnings
6
7 from ..model import Model
8 from ..core.properties import abstract
9 from ..core.properties import Color, Enum, Seq, Either, String, Int, Float, Date, Datetime
10 from ..core.enums import Palette
11 from .. import palettes
12
13
14 @abstract
15 class ColorMapper(Model):
16 """ Base class for color mapper types. ``ColorMapper`` is not
17 generally useful to instantiate on its own.
18
19 """
20
21 palette = Seq(Color, help="""
22 A sequence of colors to use as the target palette for mapping.
23
24 This property can also be set as a ``String``, to the name of
25 any of the palettes shown in :ref:`bokeh.palettes`.
26 """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal))
27
28 nan_color = Color(default="gray", help="""
29 Color to be used if data is NaN. Default: 'gray'
30 """)
31
32 def __init__(self, palette=None, **kwargs):
33 if palette is not None:
34 kwargs['palette'] = palette
35 super(ColorMapper, self).__init__(**kwargs)
36
37
38 class CategoricalColorMapper(ColorMapper):
39 """ Map categories to colors. Values that are passed to
40 this mapper that aren't in factors will be assigned the nan_color.
41
42 """
43
44 factors = Either(Seq(String), Seq(Int), Seq(Float), Seq(Datetime), Seq(Date), help="""
45 A sequence of factors / categories that map to the color palette.
46 """)
47
48
49 def __init__(self, **kwargs):
50 super(ColorMapper, self).__init__(**kwargs)
51 palette = self.palette
52 factors = self.factors
53 if palette and factors:
54 if len(palette) < len(factors):
55 extra_factors = factors[len(palette):]
56 warnings.warn("""Palette length does not match number of
57 factors. %s will be assigned to `nan_color` %s""" % (extra_factors, self.nan_color))
58
59
60 @abstract
61 class ContinuousColorMapper(ColorMapper):
62 """ Base class for cotinuous color mapper types. ``ContinuousColorMapper`` is not
63 generally useful to instantiate on its own.
64
65 """
66
67 low = Float(help="""
68 The minimum value of the range to map into the palette. Values below
69 this are clamped to ``low``.
70 """)
71
72 high = Float(help="""
73 The maximum value of the range to map into the palette. Values above
74 this are clamped to ``high``.
75 """)
76
77
78 class LinearColorMapper(ContinuousColorMapper):
79 """ Map numbers in a range [*low*, *high*] linearly into a
80 sequence of colors (a palette).
81
82 For example, if the range is [0, 99] and the palette is
83 ``['red', 'green', 'blue']``, the values would be mapped as
84 follows::
85
86 x < 0 : 'red' # values < low are clamped
87 0 >= x < 33 : 'red'
88 33 >= x < 66 : 'green'
89 66 >= x < 99 : 'blue'
90 99 >= x : 'blue' # values > high are clamped
91
92 """
93
94
95 class LogColorMapper(ContinuousColorMapper):
96 """ Map numbers in a range [*low*, *high*] into a
97 sequence of colors (a palette) on a natural logarithm scale.
98
99 For example, if the range is [0, 25] and the palette is
100 ``['red', 'green', 'blue']``, the values would be mapped as
101 follows::
102
103 x < 0 : 'red' # values < low are clamped
104 0 >= x < 2.72 : 'red' # math.e ** 1
105 2.72 >= x < 7.39 : 'green' # math.e ** 2
106 7.39 >= x < 20.09 : 'blue' # math.e ** 3
107 20.09 >= x : 'blue' # values > high are clamped
108
109 .. warning::
110 The LogColorMapper only works for images with scalar values that are
111 non-negative.
112
113 """
114
[end of bokeh/models/mappers.py]
[start of examples/plotting/file/color_data_map.py]
1 import numpy as np
2
3 from bokeh.io import show
4 from bokeh.layouts import gridplot
5 from bokeh.models import (
6 ColumnDataSource,
7 ColorBar,
8 LinearColorMapper,
9 LogColorMapper,
10 )
11 from bokeh.palettes import Viridis3, Viridis256
12 from bokeh.plotting import figure
13
14 x = np.random.random(2500) * 100
15 y = np.random.normal(size=2500) * 2 + 5
16 source = ColumnDataSource(dict(x=x, y=y))
17 opts = dict(x='x', line_color=None, source=source)
18
19
20 def make_plot(mapper, title):
21 p = figure(toolbar_location=None, tools='', title=title)
22 color_bar = ColorBar(color_mapper=mapper, location=(0, 0))
23 p.circle(
24 x='x', y='y',
25 fill_color={'field': 'x', 'transform': mapper}, line_color=None,
26 source=source
27 )
28 p.add_layout(color_bar, 'right')
29 return p
30
31 p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear')
32 p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log')
33 p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear')
34 p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log')
35
36 show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))
37
[end of examples/plotting/file/color_data_map.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bokeh/models/mappers.py b/bokeh/models/mappers.py
--- a/bokeh/models/mappers.py
+++ b/bokeh/models/mappers.py
@@ -74,6 +74,15 @@
this are clamped to ``high``.
""")
+ low_color = Color(default=None, help="""
+ Color to be used if data is lower than ``low`` value. If None,
+ values lower than ``low`` are mapped to the first color in the palette.
+ """)
+
+ high_color = Color(default=None, help="""
+ Color to be used if data is lower than ``high`` value. If None,
+ values lower than ``high`` are mapped to the last color in the palette.
+ """)
class LinearColorMapper(ContinuousColorMapper):
""" Map numbers in a range [*low*, *high*] linearly into a
@@ -92,6 +101,7 @@
"""
+
class LogColorMapper(ContinuousColorMapper):
""" Map numbers in a range [*low*, *high*] into a
sequence of colors (a palette) on a natural logarithm scale.
diff --git a/examples/plotting/file/color_data_map.py b/examples/plotting/file/color_data_map.py
--- a/examples/plotting/file/color_data_map.py
+++ b/examples/plotting/file/color_data_map.py
@@ -11,13 +11,15 @@
from bokeh.palettes import Viridis3, Viridis256
from bokeh.plotting import figure
-x = np.random.random(2500) * 100
+x = np.random.random(2500) * 140 - 20
y = np.random.normal(size=2500) * 2 + 5
source = ColumnDataSource(dict(x=x, y=y))
opts = dict(x='x', line_color=None, source=source)
def make_plot(mapper, title):
+ mapper.low_color = 'blue'
+ mapper.high_color = 'red'
p = figure(toolbar_location=None, tools='', title=title)
color_bar = ColorBar(color_mapper=mapper, location=(0, 0))
p.circle(
@@ -28,9 +30,9 @@
p.add_layout(color_bar, 'right')
return p
-p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear')
-p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log')
-p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear')
-p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log')
+p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear, low/high = blue/red')
+p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log, low/high = blue/red')
+p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear, low/high = blue/red')
+p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log, low/high =, blue/red')
show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))
|
{"golden_diff": "diff --git a/bokeh/models/mappers.py b/bokeh/models/mappers.py\n--- a/bokeh/models/mappers.py\n+++ b/bokeh/models/mappers.py\n@@ -74,6 +74,15 @@\n this are clamped to ``high``.\n \"\"\")\n \n+ low_color = Color(default=None, help=\"\"\"\n+ Color to be used if data is lower than ``low`` value. If None,\n+ values lower than ``low`` are mapped to the first color in the palette.\n+ \"\"\")\n+\n+ high_color = Color(default=None, help=\"\"\"\n+ Color to be used if data is lower than ``high`` value. If None,\n+ values lower than ``high`` are mapped to the last color in the palette.\n+ \"\"\")\n \n class LinearColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n@@ -92,6 +101,7 @@\n \"\"\"\n \n \n+\n class LogColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\ndiff --git a/examples/plotting/file/color_data_map.py b/examples/plotting/file/color_data_map.py\n--- a/examples/plotting/file/color_data_map.py\n+++ b/examples/plotting/file/color_data_map.py\n@@ -11,13 +11,15 @@\n from bokeh.palettes import Viridis3, Viridis256\n from bokeh.plotting import figure\n \n-x = np.random.random(2500) * 100\n+x = np.random.random(2500) * 140 - 20\n y = np.random.normal(size=2500) * 2 + 5\n source = ColumnDataSource(dict(x=x, y=y))\n opts = dict(x='x', line_color=None, source=source)\n \n \n def make_plot(mapper, title):\n+ mapper.low_color = 'blue'\n+ mapper.high_color = 'red'\n p = figure(toolbar_location=None, tools='', title=title)\n color_bar = ColorBar(color_mapper=mapper, location=(0, 0))\n p.circle(\n@@ -28,9 +30,9 @@\n p.add_layout(color_bar, 'right')\n return p\n \n-p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear')\n-p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log')\n-p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear')\n-p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log')\n+p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear, low/high = blue/red')\n+p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log, low/high = blue/red')\n+p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear, low/high = blue/red')\n+p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log, low/high =, blue/red')\n \n show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))\n", "issue": "ColorMapping - color values out of high/low\n``` python\ndata = [-1, 0, np.NaN, 1, 2]\ncmap = LinearColorMapper(palette=[\"red\", \"green\"], low=0, high=1, nan_color=\u2018gray\u2019, high_color=\u2018orange\u2019, low_color=\u2018pink\u2019)\n# expect: [\u2018pink\u2019, \u2018red\u2019, \u2018gray\u2019, \u2018green\u2019, \u2018orange\u2019]\n```\n\nLogColorMapper should also receive this treatment.\n\n", "before_files": [{"content": "\"\"\" Models for mapping values from one range or space to another.\n\n\"\"\"\nfrom __future__ import absolute_import\nimport warnings\n\nfrom ..model import Model\nfrom ..core.properties import abstract\nfrom ..core.properties import Color, Enum, Seq, Either, String, Int, Float, Date, Datetime\nfrom ..core.enums import Palette\nfrom .. 
import palettes\n\n\n@abstract\nclass ColorMapper(Model):\n \"\"\" Base class for color mapper types. ``ColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n palette = Seq(Color, help=\"\"\"\n A sequence of colors to use as the target palette for mapping.\n\n This property can also be set as a ``String``, to the name of\n any of the palettes shown in :ref:`bokeh.palettes`.\n \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n\n nan_color = Color(default=\"gray\", help=\"\"\"\n Color to be used if data is NaN. Default: 'gray'\n \"\"\")\n\n def __init__(self, palette=None, **kwargs):\n if palette is not None:\n kwargs['palette'] = palette\n super(ColorMapper, self).__init__(**kwargs)\n\n\nclass CategoricalColorMapper(ColorMapper):\n \"\"\" Map categories to colors. Values that are passed to\n this mapper that aren't in factors will be assigned the nan_color.\n\n \"\"\"\n\n factors = Either(Seq(String), Seq(Int), Seq(Float), Seq(Datetime), Seq(Date), help=\"\"\"\n A sequence of factors / categories that map to the color palette.\n \"\"\")\n\n\n def __init__(self, **kwargs):\n super(ColorMapper, self).__init__(**kwargs)\n palette = self.palette\n factors = self.factors\n if palette and factors:\n if len(palette) < len(factors):\n extra_factors = factors[len(palette):]\n warnings.warn(\"\"\"Palette length does not match number of\nfactors. %s will be assigned to `nan_color` %s\"\"\" % (extra_factors, self.nan_color))\n\n\n@abstract\nclass ContinuousColorMapper(ColorMapper):\n \"\"\" Base class for cotinuous color mapper types. ``ContinuousColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n \"\"\")\n\n high = Float(help=\"\"\"\n The maximum value of the range to map into the palette. Values above\n this are clamped to ``high``.\n \"\"\")\n\n\nclass LinearColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n sequence of colors (a palette).\n\n For example, if the range is [0, 99] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 33 : 'red'\n 33 >= x < 66 : 'green'\n 66 >= x < 99 : 'blue'\n 99 >= x : 'blue' # values > high are clamped\n\n \"\"\"\n\n\nclass LogColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\n\n For example, if the range is [0, 25] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 2.72 : 'red' # math.e ** 1\n 2.72 >= x < 7.39 : 'green' # math.e ** 2\n 7.39 >= x < 20.09 : 'blue' # math.e ** 3\n 20.09 >= x : 'blue' # values > high are clamped\n\n .. 
warning::\n The LogColorMapper only works for images with scalar values that are\n non-negative.\n\n \"\"\"\n", "path": "bokeh/models/mappers.py"}, {"content": "import numpy as np\n\nfrom bokeh.io import show\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import (\n ColumnDataSource,\n ColorBar,\n LinearColorMapper,\n LogColorMapper,\n)\nfrom bokeh.palettes import Viridis3, Viridis256\nfrom bokeh.plotting import figure\n\nx = np.random.random(2500) * 100\ny = np.random.normal(size=2500) * 2 + 5\nsource = ColumnDataSource(dict(x=x, y=y))\nopts = dict(x='x', line_color=None, source=source)\n\n\ndef make_plot(mapper, title):\n p = figure(toolbar_location=None, tools='', title=title)\n color_bar = ColorBar(color_mapper=mapper, location=(0, 0))\n p.circle(\n x='x', y='y',\n fill_color={'field': 'x', 'transform': mapper}, line_color=None,\n source=source\n )\n p.add_layout(color_bar, 'right')\n return p\n\np1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear')\np2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log')\np3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear')\np4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log')\n\nshow(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))\n", "path": "examples/plotting/file/color_data_map.py"}]}
| 2,337 | 866 |
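With the patch above applied, the behaviour requested in the issue can be expressed directly. The sketch below reuses the issue's example values and is illustrative only; `low_color` and `high_color` are the properties added by the diff.

```python
# Assumes the low_color/high_color properties added by the diff above.
from bokeh.models import LinearColorMapper

mapper = LinearColorMapper(
    palette=["red", "green"],
    low=0, high=1,
    low_color="pink",     # data below `low`
    high_color="orange",  # data above `high`
    nan_color="gray",     # NaN data
)
# With data = [-1, 0, float("nan"), 1, 2] the issue expects the colors
# ['pink', 'red', 'gray', 'green', 'orange'].
```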
gh_patches_debug_28320
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3645
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type hints to compatibility.py
Adds type hints to src/toil/lib/compatibility.py so it can be checked under mypy during linting.
Refers to #3568.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-921)
┆Issue Number: TOIL-921
</issue>
<code>
[start of src/toil/lib/compatibility.py]
1 def compat_bytes(s):
2 return s.decode('utf-8') if isinstance(s, bytes) else s
3
[end of src/toil/lib/compatibility.py]
[start of contrib/admin/mypy-with-ignore.py]
1 #!/usr/bin/env python3
2 """
3 Runs mypy and ignores files that do not yet have passing type hints.
4
5 Does not type check test files (any path including "src/toil/test").
6 """
7 import os
8 import subprocess
9 import sys
10
11 os.environ['MYPYPATH'] = 'contrib/typeshed'
12 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa
13 sys.path.insert(0, pkg_root) # noqa
14
15 from src.toil.lib.resources import glob # type: ignore
16
17
18 def main():
19 all_files_to_check = []
20 for d in ['dashboard', 'docker', 'docs', 'src']:
21 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))
22
23 # TODO: Remove these paths as typing is added and mypy conflicts are addressed
24 ignore_paths = [os.path.abspath(f) for f in [
25 'docker/Dockerfile.py',
26 'docs/conf.py',
27 'docs/vendor/sphinxcontrib/fulltoc.py',
28 'docs/vendor/sphinxcontrib/__init__.py',
29 'src/toil/job.py',
30 'src/toil/leader.py',
31 'src/toil/statsAndLogging.py',
32 'src/toil/common.py',
33 'src/toil/realtimeLogger.py',
34 'src/toil/worker.py',
35 'src/toil/serviceManager.py',
36 'src/toil/toilState.py',
37 'src/toil/__init__.py',
38 'src/toil/resource.py',
39 'src/toil/deferred.py',
40 'src/toil/version.py',
41 'src/toil/wdl/utils.py',
42 'src/toil/wdl/wdl_types.py',
43 'src/toil/wdl/wdl_synthesis.py',
44 'src/toil/wdl/wdl_analysis.py',
45 'src/toil/wdl/wdl_functions.py',
46 'src/toil/wdl/toilwdl.py',
47 'src/toil/wdl/versions/draft2.py',
48 'src/toil/wdl/versions/v1.py',
49 'src/toil/wdl/versions/dev.py',
50 'src/toil/provisioners/clusterScaler.py',
51 'src/toil/provisioners/abstractProvisioner.py',
52 'src/toil/provisioners/gceProvisioner.py',
53 'src/toil/provisioners/__init__.py',
54 'src/toil/provisioners/node.py',
55 'src/toil/provisioners/aws/boto2Context.py',
56 'src/toil/provisioners/aws/awsProvisioner.py',
57 'src/toil/provisioners/aws/__init__.py',
58 'src/toil/batchSystems/slurm.py',
59 'src/toil/batchSystems/gridengine.py',
60 'src/toil/batchSystems/singleMachine.py',
61 'src/toil/batchSystems/abstractBatchSystem.py',
62 'src/toil/batchSystems/parasol.py',
63 'src/toil/batchSystems/kubernetes.py',
64 'src/toil/batchSystems/torque.py',
65 'src/toil/batchSystems/options.py',
66 'src/toil/batchSystems/registry.py',
67 'src/toil/batchSystems/lsf.py',
68 'src/toil/batchSystems/__init__.py',
69 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',
70 'src/toil/batchSystems/lsfHelper.py',
71 'src/toil/batchSystems/htcondor.py',
72 'src/toil/batchSystems/mesos/batchSystem.py',
73 'src/toil/batchSystems/mesos/executor.py',
74 'src/toil/batchSystems/mesos/conftest.py',
75 'src/toil/batchSystems/mesos/__init__.py',
76 'src/toil/batchSystems/mesos/test/__init__.py',
77 'src/toil/cwl/conftest.py',
78 'src/toil/cwl/__init__.py',
79 'src/toil/cwl/cwltoil.py',
80 'src/toil/fileStores/cachingFileStore.py',
81 'src/toil/fileStores/abstractFileStore.py',
82 'src/toil/fileStores/nonCachingFileStore.py',
83 'src/toil/fileStores/__init__.py',
84 'src/toil/jobStores/utils.py',
85 'src/toil/jobStores/abstractJobStore.py',
86 'src/toil/jobStores/conftest.py',
87 'src/toil/jobStores/fileJobStore.py',
88 'src/toil/jobStores/__init__.py',
89 'src/toil/jobStores/googleJobStore.py',
90 'src/toil/jobStores/aws/utils.py',
91 'src/toil/jobStores/aws/jobStore.py',
92 'src/toil/jobStores/aws/__init__.py',
93 'src/toil/utils/toilDebugFile.py',
94 # 'src/toil/utils/toilStatus.py',
95 'src/toil/utils/toilStats.py',
96 'src/toil/utils/__init__.py',
97 'src/toil/utils/toilLaunchCluster.py',
98 'src/toil/lib/memoize.py',
99 'src/toil/lib/throttle.py',
100 'src/toil/lib/humanize.py',
101 'src/toil/lib/compatibility.py',
102 'src/toil/lib/iterables.py',
103 'src/toil/lib/bioio.py',
104 'src/toil/lib/ec2.py',
105 'src/toil/lib/ec2nodes.py',
106 'src/toil/lib/expando.py',
107 'src/toil/lib/threading.py',
108 'src/toil/lib/exceptions.py',
109 'src/toil/lib/__init__.py',
110 'src/toil/lib/generatedEC2Lists.py',
111 'src/toil/lib/retry.py',
112 'src/toil/lib/objects.py',
113 'src/toil/lib/io.py',
114 'src/toil/lib/docker.py',
115 # 'src/toil/lib/encryption/_nacl.py',
116 'src/toil/lib/encryption/_dummy.py',
117 'src/toil/lib/encryption/conftest.py',
118 'src/toil/lib/encryption/__init__.py',
119 'src/toil/lib/aws/utils.py',
120 'src/toil/lib/aws/__init__.py'
121 ]]
122
123 filtered_files_to_check = []
124 for file_path in all_files_to_check:
125 if file_path not in ignore_paths and 'src/toil/test' not in file_path:
126 filtered_files_to_check.append(file_path)
127 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?
128 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check
129 p = subprocess.run(args=args, stdout=subprocess.PIPE)
130 result = p.stdout.decode()
131 print(result)
132 if 'Success: no issues found' not in result:
133 exit(1)
134
135
136 if __name__ == '__main__':
137 main()
138
[end of contrib/admin/mypy-with-ignore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py
--- a/contrib/admin/mypy-with-ignore.py
+++ b/contrib/admin/mypy-with-ignore.py
@@ -91,14 +91,12 @@
'src/toil/jobStores/aws/jobStore.py',
'src/toil/jobStores/aws/__init__.py',
'src/toil/utils/toilDebugFile.py',
- # 'src/toil/utils/toilStatus.py',
'src/toil/utils/toilStats.py',
'src/toil/utils/__init__.py',
'src/toil/utils/toilLaunchCluster.py',
'src/toil/lib/memoize.py',
'src/toil/lib/throttle.py',
'src/toil/lib/humanize.py',
- 'src/toil/lib/compatibility.py',
'src/toil/lib/iterables.py',
'src/toil/lib/bioio.py',
'src/toil/lib/ec2.py',
@@ -112,7 +110,6 @@
'src/toil/lib/objects.py',
'src/toil/lib/io.py',
'src/toil/lib/docker.py',
- # 'src/toil/lib/encryption/_nacl.py',
'src/toil/lib/encryption/_dummy.py',
'src/toil/lib/encryption/conftest.py',
'src/toil/lib/encryption/__init__.py',
diff --git a/src/toil/lib/compatibility.py b/src/toil/lib/compatibility.py
--- a/src/toil/lib/compatibility.py
+++ b/src/toil/lib/compatibility.py
@@ -1,2 +1,4 @@
-def compat_bytes(s):
+from typing import Union
+
+def compat_bytes(s: Union[bytes, str]) -> str:
return s.decode('utf-8') if isinstance(s, bytes) else s
|
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -91,14 +91,12 @@\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n- # 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n- 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n@@ -112,7 +110,6 @@\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n- # 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\ndiff --git a/src/toil/lib/compatibility.py b/src/toil/lib/compatibility.py\n--- a/src/toil/lib/compatibility.py\n+++ b/src/toil/lib/compatibility.py\n@@ -1,2 +1,4 @@\n-def compat_bytes(s):\n+from typing import Union\n+\n+def compat_bytes(s: Union[bytes, str]) -> str:\n return s.decode('utf-8') if isinstance(s, bytes) else s\n", "issue": "Add type hints to compatibility.py\nAdds type hints to src/toil/lib/compatibility.py so it can be checked under mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-921)\n\u2506Issue Number: TOIL-921\n\n", "before_files": [{"content": "def compat_bytes(s):\n return s.decode('utf-8') if isinstance(s, bytes) else s\n", "path": "src/toil/lib/compatibility.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\nos.environ['MYPYPATH'] = 'contrib/typeshed'\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 
'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n # 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n # 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
| 2,460 | 412 |
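The typed helper from the diff above is small enough to check in isolation; here is a quick sketch with two sanity assertions (the inputs are illustrative only).

```python
from typing import Union

def compat_bytes(s: Union[bytes, str]) -> str:
    return s.decode('utf-8') if isinstance(s, bytes) else s

assert compat_bytes(b"toil") == "toil"
assert compat_bytes("toil") == "toil"
```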
gh_patches_debug_19762
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4676
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
startproject only allows creating the project on an existing folder if the path is specified with an invalid module name
```
(venv) [adrian@afonsox temporal]$ mkdir existing_folder
(venv) [adrian@afonsox temporal]$ scrapy startproject existing_folder
Error: Module 'existing_folder' already exists
(venv) [adrian@afonsox temporal]$ cd existing_folder && scrapy startproject existing_folder .
New Scrapy project 'existing_folder', using template directory '/home/adrian/temporal/venv/lib/python3.8/site-packages/scrapy/templates/project', created in:
/home/adrian/temporal/existing_folder
You can start your first spider with:
cd .
scrapy genspider example example.com
```
</issue>
<code>
[start of scrapy/commands/startproject.py]
1 import re
2 import os
3 import string
4 from importlib import import_module
5 from os.path import join, exists, abspath
6 from shutil import ignore_patterns, move, copy2, copystat
7 from stat import S_IWUSR as OWNER_WRITE_PERMISSION
8
9 import scrapy
10 from scrapy.commands import ScrapyCommand
11 from scrapy.utils.template import render_templatefile, string_camelcase
12 from scrapy.exceptions import UsageError
13
14
15 TEMPLATES_TO_RENDER = (
16 ('scrapy.cfg',),
17 ('${project_name}', 'settings.py.tmpl'),
18 ('${project_name}', 'items.py.tmpl'),
19 ('${project_name}', 'pipelines.py.tmpl'),
20 ('${project_name}', 'middlewares.py.tmpl'),
21 )
22
23 IGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')
24
25
26 def _make_writable(path):
27 current_permissions = os.stat(path).st_mode
28 os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)
29
30
31 class Command(ScrapyCommand):
32
33 requires_project = False
34 default_settings = {'LOG_ENABLED': False,
35 'SPIDER_LOADER_WARN_ONLY': True}
36
37 def syntax(self):
38 return "<project_name> [project_dir]"
39
40 def short_desc(self):
41 return "Create new project"
42
43 def _is_valid_name(self, project_name):
44 def _module_exists(module_name):
45 try:
46 import_module(module_name)
47 return True
48 except ImportError:
49 return False
50
51 if not re.search(r'^[_a-zA-Z]\w*$', project_name):
52 print('Error: Project names must begin with a letter and contain'
53 ' only\nletters, numbers and underscores')
54 elif _module_exists(project_name):
55 print(f'Error: Module {project_name!r} already exists')
56 else:
57 return True
58 return False
59
60 def _copytree(self, src, dst):
61 """
62 Since the original function always creates the directory, to resolve
63 the issue a new function had to be created. It's a simple copy and
64 was reduced for this case.
65
66 More info at:
67 https://github.com/scrapy/scrapy/pull/2005
68 """
69 ignore = IGNORE
70 names = os.listdir(src)
71 ignored_names = ignore(src, names)
72
73 if not os.path.exists(dst):
74 os.makedirs(dst)
75
76 for name in names:
77 if name in ignored_names:
78 continue
79
80 srcname = os.path.join(src, name)
81 dstname = os.path.join(dst, name)
82 if os.path.isdir(srcname):
83 self._copytree(srcname, dstname)
84 else:
85 copy2(srcname, dstname)
86 _make_writable(dstname)
87
88 copystat(src, dst)
89 _make_writable(dst)
90
91 def run(self, args, opts):
92 if len(args) not in (1, 2):
93 raise UsageError()
94
95 project_name = args[0]
96 project_dir = args[0]
97
98 if len(args) == 2:
99 project_dir = args[1]
100
101 if exists(join(project_dir, 'scrapy.cfg')):
102 self.exitcode = 1
103 print(f'Error: scrapy.cfg already exists in {abspath(project_dir)}')
104 return
105
106 if not self._is_valid_name(project_name):
107 self.exitcode = 1
108 return
109
110 self._copytree(self.templates_dir, abspath(project_dir))
111 move(join(project_dir, 'module'), join(project_dir, project_name))
112 for paths in TEMPLATES_TO_RENDER:
113 path = join(*paths)
114 tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name))
115 render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))
116 print(f"New Scrapy project '{project_name}', using template directory "
117 f"'{self.templates_dir}', created in:")
118 print(f" {abspath(project_dir)}\n")
119 print("You can start your first spider with:")
120 print(f" cd {project_dir}")
121 print(" scrapy genspider example example.com")
122
123 @property
124 def templates_dir(self):
125 return join(
126 self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),
127 'project'
128 )
129
[end of scrapy/commands/startproject.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py
--- a/scrapy/commands/startproject.py
+++ b/scrapy/commands/startproject.py
@@ -1,7 +1,7 @@
import re
import os
import string
-from importlib import import_module
+from importlib.util import find_spec
from os.path import join, exists, abspath
from shutil import ignore_patterns, move, copy2, copystat
from stat import S_IWUSR as OWNER_WRITE_PERMISSION
@@ -42,11 +42,8 @@
def _is_valid_name(self, project_name):
def _module_exists(module_name):
- try:
- import_module(module_name)
- return True
- except ImportError:
- return False
+ spec = find_spec(module_name)
+ return spec is not None and spec.loader is not None
if not re.search(r'^[_a-zA-Z]\w*$', project_name):
print('Error: Project names must begin with a letter and contain'
|
{"golden_diff": "diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py\n--- a/scrapy/commands/startproject.py\n+++ b/scrapy/commands/startproject.py\n@@ -1,7 +1,7 @@\n import re\n import os\n import string\n-from importlib import import_module\n+from importlib.util import find_spec\n from os.path import join, exists, abspath\n from shutil import ignore_patterns, move, copy2, copystat\n from stat import S_IWUSR as OWNER_WRITE_PERMISSION\n@@ -42,11 +42,8 @@\n \n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n- try:\n- import_module(module_name)\n- return True\n- except ImportError:\n- return False\n+ spec = find_spec(module_name)\n+ return spec is not None and spec.loader is not None\n \n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n", "issue": "startproject only allows creating the project on an existing folder if the path is specified with an invalid module name\n```\r\n(venv) [adrian@afonsox temporal]$ mkdir existing_folder\r\n(venv) [adrian@afonsox temporal]$ scrapy startproject existing_folder\r\nError: Module 'existing_folder' already exists\r\n(venv) [adrian@afonsox temporal]$ cd existing_folder && scrapy startproject existing_folder .\r\nNew Scrapy project 'existing_folder', using template directory '/home/adrian/temporal/venv/lib/python3.8/site-packages/scrapy/templates/project', created in:\r\n /home/adrian/temporal/existing_folder\r\n\r\nYou can start your first spider with:\r\n cd .\r\n scrapy genspider example example.com\r\n```\n", "before_files": [{"content": "import re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\nfrom stat import S_IWUSR as OWNER_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n ('${project_name}', 'middlewares.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')\n\n\ndef _make_writable(path):\n current_permissions = os.stat(path).st_mode\n os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False,\n 'SPIDER_LOADER_WARN_ONLY': True}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print(f'Error: Module {project_name!r} already exists')\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. 
It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n _make_writable(dstname)\n\n copystat(src, dst)\n _make_writable(dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print(f'Error: scrapy.cfg already exists in {abspath(project_dir)}')\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))\n print(f\"New Scrapy project '{project_name}', using template directory \"\n f\"'{self.templates_dir}', created in:\")\n print(f\" {abspath(project_dir)}\\n\")\n print(\"You can start your first spider with:\")\n print(f\" cd {project_dir}\")\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n return join(\n self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),\n 'project'\n )\n", "path": "scrapy/commands/startproject.py"}]}
| 1,908 | 227 |
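The key change in the diff above is swapping `import_module` for `importlib.util.find_spec`. A minimal sketch of that check follows, under the assumption that a bare directory on `sys.path` resolves to a namespace package whose spec has no loader.

```python
from importlib.util import find_spec

def _module_exists(module_name: str) -> bool:
    spec = find_spec(module_name)
    # A plain directory that merely shares the name is found as a namespace
    # package with spec.loader set to None, so it no longer counts as a clash.
    return spec is not None and spec.loader is not None
```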
gh_patches_debug_25061
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-813
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Flower hangs from version 5.4.0.
**Describe the bug**:
Flower hangs (no answer from http connection to browser) when a version >= 5.4.0 is installed
**To Reproduce**
1. pip install elastic-apm==5.4.0
2. restart flower and try to access
**Environment (please complete the following information)**
- OS: Ubuntu 18.04
- Python version: 3.6
- Framework and version: Django 2.2
- APM Server version: NA
- Agent version: 5.4.0+
</issue>
<code>
[start of elasticapm/instrumentation/packages/tornado.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 """
31 Instrumentation for Tornado
32 """
33 import elasticapm
34 from elasticapm.conf import constants
35 from elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule
36 from elasticapm.traces import capture_span
37 from elasticapm.utils.disttracing import TraceParent
38
39
40 class TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):
41 name = "tornado_request_execute"
42 creates_transactions = True
43 instrument_list = [("tornado.web", "RequestHandler._execute")]
44
45 async def call(self, module, method, wrapped, instance, args, kwargs):
46 # Late import to avoid ImportErrors
47 from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
48
49 request = instance.request
50 trace_parent = TraceParent.from_headers(request.headers)
51 client = instance.application.elasticapm_client
52 client.begin_transaction("request", trace_parent=trace_parent)
53 elasticapm.set_context(
54 lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), "request"
55 )
56 # TODO: Can we somehow incorporate the routing rule itself here?
57 elasticapm.set_transaction_name("{} {}".format(request.method, type(instance).__name__), override=False)
58
59 ret = await wrapped(*args, **kwargs)
60
61 elasticapm.set_context(
62 lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), "response"
63 )
64 result = "HTTP {}xx".format(instance.get_status() // 100)
65 elasticapm.set_transaction_result(result, override=False)
66 client.end_transaction()
67
68 return ret
69
70
71 class TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):
72 name = "tornado_handle_request_exception"
73
74 instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
75
76 def call(self, module, method, wrapped, instance, args, kwargs):
77
78 # Late import to avoid ImportErrors
79 from tornado.web import Finish, HTTPError
80 from elasticapm.contrib.tornado.utils import get_data_from_request
81
82 e = args[0]
83 if isinstance(e, Finish):
84 # Not an error; Finish is an exception that ends a request without an error response
85 return wrapped(*args, **kwargs)
86
87 client = instance.application.elasticapm_client
88 request = instance.request
89 client.capture_exception(
90 context={"request": get_data_from_request(instance, request, client.config, constants.ERROR)}
91 )
92 if isinstance(e, HTTPError):
93 elasticapm.set_transaction_result("HTTP {}xx".format(int(e.status_code / 100)), override=False)
94 elasticapm.set_context({"status_code": e.status_code}, "response")
95 else:
96 elasticapm.set_transaction_result("HTTP 5xx", override=False)
97 elasticapm.set_context({"status_code": 500}, "response")
98
99 return wrapped(*args, **kwargs)
100
101
102 class TornadoRenderInstrumentation(AbstractInstrumentedModule):
103 name = "tornado_render"
104
105 instrument_list = [("tornado.web", "RequestHandler.render")]
106
107 def call(self, module, method, wrapped, instance, args, kwargs):
108 if "template_name" in kwargs:
109 name = kwargs["template_name"]
110 else:
111 name = args[0]
112
113 with capture_span(name, span_type="template", span_subtype="tornado", span_action="render"):
114 return wrapped(*args, **kwargs)
115
[end of elasticapm/instrumentation/packages/tornado.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py
--- a/elasticapm/instrumentation/packages/tornado.py
+++ b/elasticapm/instrumentation/packages/tornado.py
@@ -43,6 +43,11 @@
instrument_list = [("tornado.web", "RequestHandler._execute")]
async def call(self, module, method, wrapped, instance, args, kwargs):
+ if not hasattr(instance.application, "elasticapm_client"):
+ # If tornado was instrumented but not as the main framework
+ # (i.e. in Flower), we should skip it.
+ return await wrapped(*args, **kwargs)
+
# Late import to avoid ImportErrors
from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response
@@ -74,6 +79,10 @@
instrument_list = [("tornado.web", "RequestHandler._handle_request_exception")]
def call(self, module, method, wrapped, instance, args, kwargs):
+ if not hasattr(instance.application, "elasticapm_client"):
+ # If tornado was instrumented but not as the main framework
+ # (i.e. in Flower), we should skip it.
+ return wrapped(*args, **kwargs)
# Late import to avoid ImportErrors
from tornado.web import Finish, HTTPError
|
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/tornado.py b/elasticapm/instrumentation/packages/tornado.py\n--- a/elasticapm/instrumentation/packages/tornado.py\n+++ b/elasticapm/instrumentation/packages/tornado.py\n@@ -43,6 +43,11 @@\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n \n async def call(self, module, method, wrapped, instance, args, kwargs):\n+ if not hasattr(instance.application, \"elasticapm_client\"):\n+ # If tornado was instrumented but not as the main framework\n+ # (i.e. in Flower), we should skip it.\n+ return await wrapped(*args, **kwargs)\n+\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n \n@@ -74,6 +79,10 @@\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n \n def call(self, module, method, wrapped, instance, args, kwargs):\n+ if not hasattr(instance.application, \"elasticapm_client\"):\n+ # If tornado was instrumented but not as the main framework\n+ # (i.e. in Flower), we should skip it.\n+ return wrapped(*args, **kwargs)\n \n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n", "issue": "Flower hangs from version 5.4.0.\n**Describe the bug**: \r\nFlower hangs (no answer from http connection to browser) when a version >= 5.4.0 is installed\r\n\r\n**To Reproduce**\r\n\r\n1. pip install elastic-apm==5.4.0\r\n2. restart flower and try to access\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Ubuntu 18.04\r\n- Python version: 3.6\r\n- Framework and version: Django 2.2\r\n- APM Server version: NA\r\n- Agent version: 5.4.0+ \r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nInstrumentation for Tornado\n\"\"\"\nimport elasticapm\nfrom elasticapm.conf import constants\nfrom elasticapm.instrumentation.packages.asyncio.base import AbstractInstrumentedModule, AsyncAbstractInstrumentedModule\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils.disttracing import TraceParent\n\n\nclass TornadoRequestExecuteInstrumentation(AsyncAbstractInstrumentedModule):\n name = \"tornado_request_execute\"\n creates_transactions = True\n instrument_list = [(\"tornado.web\", \"RequestHandler._execute\")]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n # Late import to avoid ImportErrors\n from elasticapm.contrib.tornado.utils import get_data_from_request, get_data_from_response\n\n request = instance.request\n trace_parent = TraceParent.from_headers(request.headers)\n client = instance.application.elasticapm_client\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n elasticapm.set_context(\n lambda: get_data_from_request(instance, request, client.config, constants.TRANSACTION), \"request\"\n )\n # TODO: Can we somehow incorporate the routing rule itself here?\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, type(instance).__name__), override=False)\n\n ret = await wrapped(*args, **kwargs)\n\n elasticapm.set_context(\n lambda: get_data_from_response(instance, client.config, constants.TRANSACTION), \"response\"\n )\n result = \"HTTP {}xx\".format(instance.get_status() // 100)\n elasticapm.set_transaction_result(result, override=False)\n client.end_transaction()\n\n return ret\n\n\nclass TornadoHandleRequestExceptionInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_handle_request_exception\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler._handle_request_exception\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n\n # Late import to avoid ImportErrors\n from tornado.web import Finish, HTTPError\n from elasticapm.contrib.tornado.utils import get_data_from_request\n\n e = args[0]\n if isinstance(e, Finish):\n # Not an error; Finish is an exception that ends a request without an error response\n return wrapped(*args, **kwargs)\n\n client = instance.application.elasticapm_client\n request = instance.request\n client.capture_exception(\n context={\"request\": get_data_from_request(instance, request, client.config, constants.ERROR)}\n )\n if isinstance(e, HTTPError):\n elasticapm.set_transaction_result(\"HTTP {}xx\".format(int(e.status_code / 100)), override=False)\n elasticapm.set_context({\"status_code\": e.status_code}, \"response\")\n else:\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n return wrapped(*args, **kwargs)\n\n\nclass TornadoRenderInstrumentation(AbstractInstrumentedModule):\n name = \"tornado_render\"\n\n instrument_list = [(\"tornado.web\", \"RequestHandler.render\")]\n\n def call(self, module, method, wrapped, instance, args, 
kwargs):\n if \"template_name\" in kwargs:\n name = kwargs[\"template_name\"]\n else:\n name = args[0]\n\n with capture_span(name, span_type=\"template\", span_subtype=\"tornado\", span_action=\"render\"):\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/tornado.py"}]}
| 1,998 | 310 |
gh_patches_debug_37753
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmpose-948
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
False 'segmentation' labeled in 'macaque_train/test.json'
In general, the macaque dataset is missing parts of segmentation.
For the macaque dataset: whenever the body appears as separate regions, the 'segmentation' annotation is wrong. I checked the original CSV annotation data and it is correct; the deviation was introduced in the "macaque_train/test.json" files you produced.

</issue>
<code>
[start of tools/dataset/parse_macaquepose_dataset.py]
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import csv
3 import json
4 import os
5 import time
6
7 import cv2
8 import numpy as np
9
10 np.random.seed(0)
11
12
13 def PolyArea(x, y):
14 """Calculate area of polygon given (x,y) coordinates (Shoelace formula)
15
16 :param x: np.ndarray(N, )
17 :param y: np.ndarray(N, )
18 :return: area
19 """
20 return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
21
22
23 def save_coco_anno(data_annotation,
24 img_root,
25 save_path,
26 start_img_id=0,
27 start_ann_id=0,
28 kpt_num=17):
29 """Save annotations in coco-format.
30
31 :param data_annotation: list of data annotation.
32 :param img_root: the root dir to load images.
33 :param save_path: the path to save transformed annotation file.
34 :param start_img_id: the starting point to count the image id.
35 :param start_ann_id: the starting point to count the annotation id.
36 :param kpt_num: the number of keypoint.
37 """
38 images = []
39 annotations = []
40
41 img_id = start_img_id
42 ann_id = start_ann_id
43
44 for i in range(0, len(data_annotation)):
45 data_anno = data_annotation[i]
46 image_name = data_anno[0]
47
48 img = cv2.imread(os.path.join(img_root, image_name))
49
50 kp_string = data_anno[1]
51 kps = json.loads(kp_string)
52
53 seg_string = data_anno[2]
54 segs = json.loads(seg_string)
55
56 for kp, seg in zip(kps, segs):
57 keypoints = np.zeros([kpt_num, 3])
58 for ind, p in enumerate(kp):
59 if p['position'] is None:
60 continue
61 else:
62 keypoints[ind, 0] = p['position'][0]
63 keypoints[ind, 1] = p['position'][1]
64 keypoints[ind, 2] = 2
65
66 segmentation = np.array(seg[0]['segment'])
67 max_x, max_y = segmentation.max(0)
68 min_x, min_y = segmentation.min(0)
69
70 anno = {}
71 anno['keypoints'] = keypoints.reshape(-1).tolist()
72 anno['image_id'] = img_id
73 anno['id'] = ann_id
74 anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0))
75 anno['bbox'] = [
76 float(min_x),
77 float(min_y),
78 float(max_x - min_x + 1),
79 float(max_y - min_y + 1)
80 ]
81 anno['iscrowd'] = 0
82 anno['area'] = float(
83 PolyArea(segmentation[:, 0], segmentation[:, 1]))
84 anno['category_id'] = 1
85 anno['segmentation'] = segmentation.reshape([1, -1]).tolist()
86
87 annotations.append(anno)
88 ann_id += 1
89
90 image = {}
91 image['id'] = img_id
92 image['file_name'] = image_name
93 image['height'] = img.shape[0]
94 image['width'] = img.shape[1]
95
96 images.append(image)
97 img_id += 1
98
99 cocotype = {}
100
101 cocotype['info'] = {}
102 cocotype['info']['description'] = 'MacaquePose Generated by MMPose Team'
103 cocotype['info']['version'] = '1.0'
104 cocotype['info']['year'] = time.strftime('%Y', time.localtime())
105 cocotype['info']['date_created'] = time.strftime('%Y/%m/%d',
106 time.localtime())
107
108 cocotype['images'] = images
109 cocotype['annotations'] = annotations
110 cocotype['categories'] = [{
111 'supercategory':
112 'animal',
113 'id':
114 1,
115 'name':
116 'macaque',
117 'keypoints': [
118 'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',
119 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',
120 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',
121 'right_knee', 'left_ankle', 'right_ankle'
122 ],
123 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],
124 [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],
125 [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]
126 }]
127
128 os.makedirs(os.path.dirname(save_path), exist_ok=True)
129 json.dump(cocotype, open(save_path, 'w'), indent=4)
130 print('number of images:', img_id)
131 print('number of annotations:', ann_id)
132 print(f'done {save_path}')
133
134
135 dataset_dir = '/data/macaque/'
136
137 with open(os.path.join(dataset_dir, 'annotations.csv'), 'r') as fp:
138 data_annotation_all = list(csv.reader(fp, delimiter=','))[1:]
139
140 np.random.shuffle(data_annotation_all)
141
142 data_annotation_train = data_annotation_all[0:12500]
143 data_annotation_val = data_annotation_all[12500:]
144
145 img_root = os.path.join(dataset_dir, 'images')
146 save_coco_anno(
147 data_annotation_train,
148 img_root,
149 os.path.join(dataset_dir, 'annotations', 'macaque_train.json'),
150 kpt_num=17)
151 save_coco_anno(
152 data_annotation_val,
153 img_root,
154 os.path.join(dataset_dir, 'annotations', 'macaque_test.json'),
155 start_img_id=12500,
156 start_ann_id=15672,
157 kpt_num=17)
158
[end of tools/dataset/parse_macaquepose_dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tools/dataset/parse_macaquepose_dataset.py b/tools/dataset/parse_macaquepose_dataset.py
--- a/tools/dataset/parse_macaquepose_dataset.py
+++ b/tools/dataset/parse_macaquepose_dataset.py
@@ -10,14 +10,22 @@
np.random.seed(0)
-def PolyArea(x, y):
+def get_poly_area(x, y):
"""Calculate area of polygon given (x,y) coordinates (Shoelace formula)
:param x: np.ndarray(N, )
:param y: np.ndarray(N, )
:return: area
"""
- return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
+ return float(0.5 *
+ np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))))
+
+
+def get_seg_area(segmentations):
+ area = 0
+ for segmentation in segmentations:
+ area += get_poly_area(segmentation[:, 0], segmentation[:, 1])
+ return area
def save_coco_anno(data_annotation,
@@ -63,9 +71,26 @@
keypoints[ind, 1] = p['position'][1]
keypoints[ind, 2] = 2
- segmentation = np.array(seg[0]['segment'])
- max_x, max_y = segmentation.max(0)
- min_x, min_y = segmentation.min(0)
+ segmentations = []
+
+ max_x = -1
+ max_y = -1
+ min_x = 999999
+ min_y = 999999
+ for segm in seg:
+ if len(segm['segment']) == 0:
+ continue
+
+ segmentation = np.array(segm['segment'])
+ segmentations.append(segmentation)
+
+ _max_x, _max_y = segmentation.max(0)
+ _min_x, _min_y = segmentation.min(0)
+
+ max_x = max(max_x, _max_x)
+ max_y = max(max_y, _max_y)
+ min_x = min(min_x, _min_x)
+ min_y = min(min_y, _min_y)
anno = {}
anno['keypoints'] = keypoints.reshape(-1).tolist()
@@ -79,10 +104,11 @@
float(max_y - min_y + 1)
]
anno['iscrowd'] = 0
- anno['area'] = float(
- PolyArea(segmentation[:, 0], segmentation[:, 1]))
+ anno['area'] = get_seg_area(segmentations)
anno['category_id'] = 1
- anno['segmentation'] = segmentation.reshape([1, -1]).tolist()
+ anno['segmentation'] = [
+ seg.reshape(-1).tolist() for seg in segmentations
+ ]
annotations.append(anno)
ann_id += 1
@@ -133,7 +159,6 @@
dataset_dir = '/data/macaque/'
-
with open(os.path.join(dataset_dir, 'annotations.csv'), 'r') as fp:
data_annotation_all = list(csv.reader(fp, delimiter=','))[1:]
|
{"golden_diff": "diff --git a/tools/dataset/parse_macaquepose_dataset.py b/tools/dataset/parse_macaquepose_dataset.py\n--- a/tools/dataset/parse_macaquepose_dataset.py\n+++ b/tools/dataset/parse_macaquepose_dataset.py\n@@ -10,14 +10,22 @@\n np.random.seed(0)\n \n \n-def PolyArea(x, y):\n+def get_poly_area(x, y):\n \"\"\"Calculate area of polygon given (x,y) coordinates (Shoelace formula)\n \n :param x: np.ndarray(N, )\n :param y: np.ndarray(N, )\n :return: area\n \"\"\"\n- return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n+ return float(0.5 *\n+ np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))))\n+\n+\n+def get_seg_area(segmentations):\n+ area = 0\n+ for segmentation in segmentations:\n+ area += get_poly_area(segmentation[:, 0], segmentation[:, 1])\n+ return area\n \n \n def save_coco_anno(data_annotation,\n@@ -63,9 +71,26 @@\n keypoints[ind, 1] = p['position'][1]\n keypoints[ind, 2] = 2\n \n- segmentation = np.array(seg[0]['segment'])\n- max_x, max_y = segmentation.max(0)\n- min_x, min_y = segmentation.min(0)\n+ segmentations = []\n+\n+ max_x = -1\n+ max_y = -1\n+ min_x = 999999\n+ min_y = 999999\n+ for segm in seg:\n+ if len(segm['segment']) == 0:\n+ continue\n+\n+ segmentation = np.array(segm['segment'])\n+ segmentations.append(segmentation)\n+\n+ _max_x, _max_y = segmentation.max(0)\n+ _min_x, _min_y = segmentation.min(0)\n+\n+ max_x = max(max_x, _max_x)\n+ max_y = max(max_y, _max_y)\n+ min_x = min(min_x, _min_x)\n+ min_y = min(min_y, _min_y)\n \n anno = {}\n anno['keypoints'] = keypoints.reshape(-1).tolist()\n@@ -79,10 +104,11 @@\n float(max_y - min_y + 1)\n ]\n anno['iscrowd'] = 0\n- anno['area'] = float(\n- PolyArea(segmentation[:, 0], segmentation[:, 1]))\n+ anno['area'] = get_seg_area(segmentations)\n anno['category_id'] = 1\n- anno['segmentation'] = segmentation.reshape([1, -1]).tolist()\n+ anno['segmentation'] = [\n+ seg.reshape(-1).tolist() for seg in segmentations\n+ ]\n \n annotations.append(anno)\n ann_id += 1\n@@ -133,7 +159,6 @@\n \n \n dataset_dir = '/data/macaque/'\n-\n with open(os.path.join(dataset_dir, 'annotations.csv'), 'r') as fp:\n data_annotation_all = list(csv.reader(fp, delimiter=','))[1:]\n", "issue": "False 'segmentation' labeled in 'macaque_train/test.json'\nIn general, the macaque dataset is missing parts of segmentation.\r\n\r\n\u7334\u5b50\u7684\u6570\u636e\u96c6\uff1a\u6bcf\u5f53\u8eab\u4f53\u4f4d\u7f6e\u662f\u5206\u5f00\u7684\u65f6\u5019\uff0c\u2018'segmentation\u2019 \u7684\u6807\u6ce8\u90fd\u6709\u95ee\u9898\u3002\u6211check\u4e86\u539f\u59cbcsv\u6807\u6ce8\u6570\u636e\uff0c\u662f\u6b63\u786e\u7684\uff1b\u662f\u4f60\u4eec\u5236\u4f5c\u7684\u201cmacaque_train/test.json\u201d\u51fa\u73b0\u4e86\u504f\u5dee\u3002\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport csv\nimport json\nimport os\nimport time\n\nimport cv2\nimport numpy as np\n\nnp.random.seed(0)\n\n\ndef PolyArea(x, y):\n \"\"\"Calculate area of polygon given (x,y) coordinates (Shoelace formula)\n\n :param x: np.ndarray(N, )\n :param y: np.ndarray(N, )\n :return: area\n \"\"\"\n return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n\ndef save_coco_anno(data_annotation,\n img_root,\n save_path,\n start_img_id=0,\n start_ann_id=0,\n kpt_num=17):\n \"\"\"Save annotations in coco-format.\n\n :param data_annotation: list of data annotation.\n :param img_root: the root dir to load images.\n :param save_path: the path to save transformed annotation file.\n :param start_img_id: the starting point to count the image id.\n :param start_ann_id: the starting point to count the annotation id.\n :param kpt_num: the number of keypoint.\n \"\"\"\n images = []\n annotations = []\n\n img_id = start_img_id\n ann_id = start_ann_id\n\n for i in range(0, len(data_annotation)):\n data_anno = data_annotation[i]\n image_name = data_anno[0]\n\n img = cv2.imread(os.path.join(img_root, image_name))\n\n kp_string = data_anno[1]\n kps = json.loads(kp_string)\n\n seg_string = data_anno[2]\n segs = json.loads(seg_string)\n\n for kp, seg in zip(kps, segs):\n keypoints = np.zeros([kpt_num, 3])\n for ind, p in enumerate(kp):\n if p['position'] is None:\n continue\n else:\n keypoints[ind, 0] = p['position'][0]\n keypoints[ind, 1] = p['position'][1]\n keypoints[ind, 2] = 2\n\n segmentation = np.array(seg[0]['segment'])\n max_x, max_y = segmentation.max(0)\n min_x, min_y = segmentation.min(0)\n\n anno = {}\n anno['keypoints'] = keypoints.reshape(-1).tolist()\n anno['image_id'] = img_id\n anno['id'] = ann_id\n anno['num_keypoints'] = int(sum(keypoints[:, 2] > 0))\n anno['bbox'] = [\n float(min_x),\n float(min_y),\n float(max_x - min_x + 1),\n float(max_y - min_y + 1)\n ]\n anno['iscrowd'] = 0\n anno['area'] = float(\n PolyArea(segmentation[:, 0], segmentation[:, 1]))\n anno['category_id'] = 1\n anno['segmentation'] = segmentation.reshape([1, -1]).tolist()\n\n annotations.append(anno)\n ann_id += 1\n\n image = {}\n image['id'] = img_id\n image['file_name'] = image_name\n image['height'] = img.shape[0]\n image['width'] = img.shape[1]\n\n images.append(image)\n img_id += 1\n\n cocotype = {}\n\n cocotype['info'] = {}\n cocotype['info']['description'] = 'MacaquePose Generated by MMPose Team'\n cocotype['info']['version'] = '1.0'\n cocotype['info']['year'] = time.strftime('%Y', time.localtime())\n cocotype['info']['date_created'] = time.strftime('%Y/%m/%d',\n time.localtime())\n\n cocotype['images'] = images\n cocotype['annotations'] = annotations\n cocotype['categories'] = [{\n 'supercategory':\n 'animal',\n 'id':\n 1,\n 'name':\n 'macaque',\n 'keypoints': [\n 'nose', 'left_eye', 'right_eye', 'left_ear', 'right_ear',\n 'left_shoulder', 'right_shoulder', 'left_elbow', 'right_elbow',\n 'left_wrist', 'right_wrist', 'left_hip', 'right_hip', 'left_knee',\n 'right_knee', 'left_ankle', 'right_ankle'\n ],\n 'skeleton': [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12],\n [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3],\n [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]]\n }]\n\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n json.dump(cocotype, open(save_path, 'w'), indent=4)\n print('number of images:', img_id)\n print('number of annotations:', ann_id)\n print(f'done {save_path}')\n\n\ndataset_dir = '/data/macaque/'\n\nwith open(os.path.join(dataset_dir, 
'annotations.csv'), 'r') as fp:\n data_annotation_all = list(csv.reader(fp, delimiter=','))[1:]\n\nnp.random.shuffle(data_annotation_all)\n\ndata_annotation_train = data_annotation_all[0:12500]\ndata_annotation_val = data_annotation_all[12500:]\n\nimg_root = os.path.join(dataset_dir, 'images')\nsave_coco_anno(\n data_annotation_train,\n img_root,\n os.path.join(dataset_dir, 'annotations', 'macaque_train.json'),\n kpt_num=17)\nsave_coco_anno(\n data_annotation_val,\n img_root,\n os.path.join(dataset_dir, 'annotations', 'macaque_test.json'),\n start_img_id=12500,\n start_ann_id=15672,\n kpt_num=17)\n", "path": "tools/dataset/parse_macaquepose_dataset.py"}]}
| 2,425 | 745 |
gh_patches_debug_11264
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-5758
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception when deleting resource if datastore table should exist but does not
**2.8.4**
**Describe the bug**
If, for whatever reason, you end up with a resource for which datastore_active is set in the resource extras, but the datastore table does not actually exist, an exception is thrown when trying to delete this resource.
**Steps to reproduce**
1. Create a resource and make sure data is uploaded to the datastore
2. Manually delete the database table of this resource from the database
3. Try to delete this resource via the ckan UI
4. An exception is thrown
**Expected behavior**
Before deleting, check whether the datastore table actually exists. If it doesn't exist, just skip the delete step. Better than throwing an exception.
**Additional details**
Not sure how I managed to get into this inconsistent state. Might not even be CKAN's fault since we had some issues with our persistence infrastructure/volumes.
Stack trace here:
```
File '/srv/app/src/ckan/ckan/controllers/package.py', line 1175 in resource_delete
get_action('resource_delete')(context, {'id': resource_id})
File '/srv/app/src/ckan/ckan/logic/__init__.py', line 466 in wrapped
result = _action(context, data_dict, **kw)
File '/srv/app/src/ckan/ckan/logic/action/delete.py', line 204 in resource_delete
plugin.after_delete(context, pkg_dict.get('resources', []))
File '/srv/app/src/ckan/ckanext/datastore/plugin.py', line 161 in after_delete
'resource_id': res.id,
File '/srv/app/src/ckan/ckanext/datastore/backend/postgres.py', line 1720 in delete
data_dict['resource_id'])
File '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 939 in execute
return self._execute_text(object, multiparams, params)
File '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1097 in _execute_text
statement, parameters
File '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1189 in _execute_context
context)
File '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1402 in _handle_dbapi_exception
exc_info
File '/usr/lib/python2.7/site-packages/sqlalchemy/util/compat.py', line 203 in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1182 in _execute_context
context)
File '/usr/lib/python2.7/site-packages/sqlalchemy/engine/default.py', line 470 in do_execute
cursor.execute(statement, parameters)
ProgrammingError: (psycopg2.ProgrammingError) table "f03c4532-bc47-4ca0-bf73-f96e42082f49" does not exist
[SQL: 'DROP TABLE "f03c4532-bc47-4ca0-bf73-f96e42082f49" CASCADE']
```
I will provide a pull request.
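
For reference, the guard described under "Expected behavior" only needs to wrap the delete call in the `after_delete` loop of `ckanext/datastore/plugin.py` (shown below). A minimal sketch, assuming the active backend exposes a `resource_exists(resource_id)` helper as the PostgreSQL backend does, mirroring the accompanying patch:

```python
# Sketch of the guarded loop in after_delete(): skip the DROP TABLE when the
# datastore table is already gone, but still clear datastore_active so the
# resource extras become consistent again.
for res in deleted:
    if self.backend.resource_exists(res.id):
        self.backend.delete(context, {
            'resource_id': res.id,
        })
    res.extras['datastore_active'] = False
    res_query.filter_by(id=res.id).update(
        {'extras': res.extras}, synchronize_session=False)
```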
</issue>
<code>
[start of ckanext/datastore/plugin.py]
1 # encoding: utf-8
2
3 import logging
4
5 from six import string_types
6
7 import ckan.plugins as p
8 import ckan.logic as logic
9 import ckan.model as model
10 from ckan.model.core import State
11
12 import ckanext.datastore.helpers as datastore_helpers
13 import ckanext.datastore.logic.action as action
14 import ckanext.datastore.logic.auth as auth
15 import ckanext.datastore.interfaces as interfaces
16 from ckanext.datastore.backend import (
17 DatastoreException,
18 _parse_sort_clause,
19 DatastoreBackend
20 )
21 from ckanext.datastore.backend.postgres import DatastorePostgresqlBackend
22 import ckanext.datastore.blueprint as view
23
24 log = logging.getLogger(__name__)
25 _get_or_bust = logic.get_or_bust
26
27 DEFAULT_FORMATS = []
28
29 ValidationError = p.toolkit.ValidationError
30
31
32 class DatastorePlugin(p.SingletonPlugin):
33 p.implements(p.IConfigurable, inherit=True)
34 p.implements(p.IConfigurer)
35 p.implements(p.IActions)
36 p.implements(p.IAuthFunctions)
37 p.implements(p.IRoutes, inherit=True)
38 p.implements(p.IResourceController, inherit=True)
39 p.implements(p.ITemplateHelpers)
40 p.implements(p.IForkObserver, inherit=True)
41 p.implements(interfaces.IDatastore, inherit=True)
42 p.implements(interfaces.IDatastoreBackend, inherit=True)
43 p.implements(p.IBlueprint)
44
45 resource_show_action = None
46
47 def __new__(cls, *args, **kwargs):
48 idatastore_extensions = p.PluginImplementations(interfaces.IDatastore)
49 idatastore_extensions = idatastore_extensions.extensions()
50
51 if idatastore_extensions and idatastore_extensions[0].__class__ != cls:
52 msg = ('The "datastore" plugin must be the first IDatastore '
53 'plugin loaded. Change the order it is loaded in '
54 '"ckan.plugins" in your CKAN .ini file and try again.')
55 raise DatastoreException(msg)
56
57 return super(cls, cls).__new__(cls, *args, **kwargs)
58
59 # IDatastoreBackend
60
61 def register_backends(self):
62 return {
63 'postgresql': DatastorePostgresqlBackend,
64 'postgres': DatastorePostgresqlBackend,
65 }
66
67 # IConfigurer
68
69 def update_config(self, config):
70 DatastoreBackend.register_backends()
71 DatastoreBackend.set_active_backend(config)
72
73 templates_base = config.get('ckan.base_templates_folder')
74
75 p.toolkit.add_template_directory(config, templates_base)
76 self.backend = DatastoreBackend.get_active_backend()
77
78 # IConfigurable
79
80 def configure(self, config):
81 self.config = config
82 self.backend.configure(config)
83
84 # IActions
85
86 def get_actions(self):
87 actions = {
88 'datastore_create': action.datastore_create,
89 'datastore_upsert': action.datastore_upsert,
90 'datastore_delete': action.datastore_delete,
91 'datastore_search': action.datastore_search,
92 'datastore_info': action.datastore_info,
93 'datastore_function_create': action.datastore_function_create,
94 'datastore_function_delete': action.datastore_function_delete,
95 'datastore_run_triggers': action.datastore_run_triggers,
96 }
97 if getattr(self.backend, 'enable_sql_search', False):
98 # Only enable search_sql if the config/backend does not disable it
99 actions.update({
100 'datastore_search_sql': action.datastore_search_sql,
101 })
102 return actions
103
104 # IAuthFunctions
105
106 def get_auth_functions(self):
107 return {
108 'datastore_create': auth.datastore_create,
109 'datastore_upsert': auth.datastore_upsert,
110 'datastore_delete': auth.datastore_delete,
111 'datastore_info': auth.datastore_info,
112 'datastore_search': auth.datastore_search,
113 'datastore_search_sql': auth.datastore_search_sql,
114 'datastore_change_permissions': auth.datastore_change_permissions,
115 'datastore_function_create': auth.datastore_function_create,
116 'datastore_function_delete': auth.datastore_function_delete,
117 'datastore_run_triggers': auth.datastore_run_triggers,
118 }
119
120 # IResourceController
121
122 def before_show(self, resource_dict):
123 # Modify the resource url of datastore resources so that
124 # they link to the datastore dumps.
125 if resource_dict.get('url_type') == 'datastore':
126 resource_dict['url'] = p.toolkit.url_for(
127 'datastore.dump', resource_id=resource_dict['id'],
128 qualified=True)
129
130 if 'datastore_active' not in resource_dict:
131 resource_dict[u'datastore_active'] = False
132
133 return resource_dict
134
135 def after_delete(self, context, resources):
136 model = context['model']
137 pkg = context['package']
138 res_query = model.Session.query(model.Resource)
139 query = res_query.filter(
140 model.Resource.package_id == pkg.id,
141 model.Resource.state == State.DELETED
142 )
143 deleted = [
144 res for res in query.all()
145 if res.extras.get('datastore_active') is True]
146
147 for res in deleted:
148 self.backend.delete(context, {
149 'resource_id': res.id,
150 })
151 res.extras['datastore_active'] = False
152 res_query.filter_by(id=res.id).update(
153 {'extras': res.extras}, synchronize_session=False)
154
155 # IDatastore
156
157 def datastore_validate(self, context, data_dict, fields_types):
158 column_names = list(fields_types.keys())
159
160 filters = data_dict.get('filters', {})
161 for key in list(filters.keys()):
162 if key in fields_types:
163 del filters[key]
164
165 q = data_dict.get('q')
166 if q:
167 if isinstance(q, string_types):
168 del data_dict['q']
169 column_names.append(u'rank')
170 elif isinstance(q, dict):
171 for key in list(q.keys()):
172 if key in fields_types and isinstance(q[key],
173 string_types):
174 column_names.append(u'rank ' + key)
175 del q[key]
176
177 fields = data_dict.get('fields')
178 if fields:
179 data_dict['fields'] = list(set(fields) - set(column_names))
180
181 language = data_dict.get('language')
182 if language:
183 if isinstance(language, string_types):
184 del data_dict['language']
185
186 plain = data_dict.get('plain')
187 if plain:
188 if isinstance(plain, bool):
189 del data_dict['plain']
190
191 distinct = data_dict.get('distinct')
192 if distinct:
193 if isinstance(distinct, bool):
194 del data_dict['distinct']
195
196 sort_clauses = data_dict.get('sort')
197 if sort_clauses:
198 invalid_clauses = [
199 c for c in sort_clauses
200 if not _parse_sort_clause(
201 c, fields_types
202 )
203 ]
204 data_dict['sort'] = invalid_clauses
205
206 limit = data_dict.get('limit')
207 if limit:
208 is_positive_int = datastore_helpers.validate_int(limit,
209 non_negative=True)
210 is_all = isinstance(limit, string_types) and limit.lower() == 'all'
211 if is_positive_int or is_all:
212 del data_dict['limit']
213
214 offset = data_dict.get('offset')
215 if offset:
216 is_positive_int = datastore_helpers.validate_int(offset,
217 non_negative=True)
218 if is_positive_int:
219 del data_dict['offset']
220
221 return data_dict
222
223 def datastore_delete(self, context, data_dict, fields_types, query_dict):
224 hook = getattr(self.backend, 'datastore_delete', None)
225 if hook:
226 query_dict = hook(context, data_dict, fields_types, query_dict)
227 return query_dict
228
229 def datastore_search(self, context, data_dict, fields_types, query_dict):
230 hook = getattr(self.backend, 'datastore_search', None)
231 if hook:
232 query_dict = hook(context, data_dict, fields_types, query_dict)
233 return query_dict
234
235 def get_helpers(self):
236 return {
237 'datastore_dictionary': datastore_helpers.datastore_dictionary}
238
239 # IForkObserver
240
241 def before_fork(self):
242 try:
243 before_fork = self.backend.before_fork
244 except AttributeError:
245 pass
246 else:
247 before_fork()
248
249 # IBlueprint
250
251 def get_blueprint(self):
252 u'''Return a Flask Blueprint object to be registered by the app.'''
253
254 return view.datastore
255
[end of ckanext/datastore/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -145,9 +145,10 @@
if res.extras.get('datastore_active') is True]
for res in deleted:
- self.backend.delete(context, {
- 'resource_id': res.id,
- })
+ if self.backend.resource_exists(res.id):
+ self.backend.delete(context, {
+ 'resource_id': res.id,
+ })
res.extras['datastore_active'] = False
res_query.filter_by(id=res.id).update(
{'extras': res.extras}, synchronize_session=False)
|
{"golden_diff": "diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py\n--- a/ckanext/datastore/plugin.py\n+++ b/ckanext/datastore/plugin.py\n@@ -145,9 +145,10 @@\n if res.extras.get('datastore_active') is True]\n \n for res in deleted:\n- self.backend.delete(context, {\n- 'resource_id': res.id,\n- })\n+ if self.backend.resource_exists(res.id):\n+ self.backend.delete(context, {\n+ 'resource_id': res.id,\n+ })\n res.extras['datastore_active'] = False\n res_query.filter_by(id=res.id).update(\n {'extras': res.extras}, synchronize_session=False)\n", "issue": "Exception when deleting resource if datastore table should exist but does not\n**2.8.4**\r\n\r\n**Describe the bug**\r\nIf for whatever reason, you end up with a resource for which datastore_active is set in the resource extras, but the datastore table does not actually exist, an exception is thown when trying to delete this resource.\r\n\r\n**Steps to reproduce**\r\n1. Create a resource and make sure data is uploaded to the datastore\r\n2. Manually delete the database table of this resource from the database\r\n3. Try to delete this resource via the ckan UI\r\n4. An exception is thrown\r\n\r\n**Expected behavior**\r\nBefore deleting, check whether the datastore table actually exists. If it doesn't exist, just skip the delete step. Better than throwing an exception.\r\n\r\n**Additional details**\r\nNot sure how I managed to get into this inconsistent state. Might not even be CKAN's fault since we had some issues with our persistence infrastructure/volumes.\r\n\r\nStack trace here:\r\n```\r\nFile '/srv/app/src/ckan/ckan/controllers/package.py', line 1175 in resource_delete\r\n get_action('resource_delete')(context, {'id': resource_id})\r\nFile '/srv/app/src/ckan/ckan/logic/__init__.py', line 466 in wrapped\r\n result = _action(context, data_dict, **kw)\r\nFile '/srv/app/src/ckan/ckan/logic/action/delete.py', line 204 in resource_delete\r\n plugin.after_delete(context, pkg_dict.get('resources', []))\r\nFile '/srv/app/src/ckan/ckanext/datastore/plugin.py', line 161 in after_delete\r\n 'resource_id': res.id,\r\nFile '/srv/app/src/ckan/ckanext/datastore/backend/postgres.py', line 1720 in delete\r\n data_dict['resource_id'])\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 939 in execute\r\n return self._execute_text(object, multiparams, params)\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1097 in _execute_text\r\n statement, parameters\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1189 in _execute_context\r\n context)\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1402 in _handle_dbapi_exception\r\n exc_info\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/util/compat.py', line 203 in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb, cause=cause)\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/engine/base.py', line 1182 in _execute_context\r\n context)\r\nFile '/usr/lib/python2.7/site-packages/sqlalchemy/engine/default.py', line 470 in do_execute\r\n cursor.execute(statement, parameters)\r\nProgrammingError: (psycopg2.ProgrammingError) table \"f03c4532-bc47-4ca0-bf73-f96e42082f49\" does not exist\r\n [SQL: 'DROP TABLE \"f03c4532-bc47-4ca0-bf73-f96e42082f49\" CASCADE']\r\n```\r\n\r\nI will provide a pull request.\n", "before_files": [{"content": "# encoding: utf-8\n\nimport logging\n\nfrom six import string_types\n\nimport ckan.plugins as p\nimport ckan.logic as logic\nimport ckan.model as 
model\nfrom ckan.model.core import State\n\nimport ckanext.datastore.helpers as datastore_helpers\nimport ckanext.datastore.logic.action as action\nimport ckanext.datastore.logic.auth as auth\nimport ckanext.datastore.interfaces as interfaces\nfrom ckanext.datastore.backend import (\n DatastoreException,\n _parse_sort_clause,\n DatastoreBackend\n)\nfrom ckanext.datastore.backend.postgres import DatastorePostgresqlBackend\nimport ckanext.datastore.blueprint as view\n\nlog = logging.getLogger(__name__)\n_get_or_bust = logic.get_or_bust\n\nDEFAULT_FORMATS = []\n\nValidationError = p.toolkit.ValidationError\n\n\nclass DatastorePlugin(p.SingletonPlugin):\n p.implements(p.IConfigurable, inherit=True)\n p.implements(p.IConfigurer)\n p.implements(p.IActions)\n p.implements(p.IAuthFunctions)\n p.implements(p.IRoutes, inherit=True)\n p.implements(p.IResourceController, inherit=True)\n p.implements(p.ITemplateHelpers)\n p.implements(p.IForkObserver, inherit=True)\n p.implements(interfaces.IDatastore, inherit=True)\n p.implements(interfaces.IDatastoreBackend, inherit=True)\n p.implements(p.IBlueprint)\n\n resource_show_action = None\n\n def __new__(cls, *args, **kwargs):\n idatastore_extensions = p.PluginImplementations(interfaces.IDatastore)\n idatastore_extensions = idatastore_extensions.extensions()\n\n if idatastore_extensions and idatastore_extensions[0].__class__ != cls:\n msg = ('The \"datastore\" plugin must be the first IDatastore '\n 'plugin loaded. Change the order it is loaded in '\n '\"ckan.plugins\" in your CKAN .ini file and try again.')\n raise DatastoreException(msg)\n\n return super(cls, cls).__new__(cls, *args, **kwargs)\n\n # IDatastoreBackend\n\n def register_backends(self):\n return {\n 'postgresql': DatastorePostgresqlBackend,\n 'postgres': DatastorePostgresqlBackend,\n }\n\n # IConfigurer\n\n def update_config(self, config):\n DatastoreBackend.register_backends()\n DatastoreBackend.set_active_backend(config)\n\n templates_base = config.get('ckan.base_templates_folder')\n\n p.toolkit.add_template_directory(config, templates_base)\n self.backend = DatastoreBackend.get_active_backend()\n\n # IConfigurable\n\n def configure(self, config):\n self.config = config\n self.backend.configure(config)\n\n # IActions\n\n def get_actions(self):\n actions = {\n 'datastore_create': action.datastore_create,\n 'datastore_upsert': action.datastore_upsert,\n 'datastore_delete': action.datastore_delete,\n 'datastore_search': action.datastore_search,\n 'datastore_info': action.datastore_info,\n 'datastore_function_create': action.datastore_function_create,\n 'datastore_function_delete': action.datastore_function_delete,\n 'datastore_run_triggers': action.datastore_run_triggers,\n }\n if getattr(self.backend, 'enable_sql_search', False):\n # Only enable search_sql if the config/backend does not disable it\n actions.update({\n 'datastore_search_sql': action.datastore_search_sql,\n })\n return actions\n\n # IAuthFunctions\n\n def get_auth_functions(self):\n return {\n 'datastore_create': auth.datastore_create,\n 'datastore_upsert': auth.datastore_upsert,\n 'datastore_delete': auth.datastore_delete,\n 'datastore_info': auth.datastore_info,\n 'datastore_search': auth.datastore_search,\n 'datastore_search_sql': auth.datastore_search_sql,\n 'datastore_change_permissions': auth.datastore_change_permissions,\n 'datastore_function_create': auth.datastore_function_create,\n 'datastore_function_delete': auth.datastore_function_delete,\n 'datastore_run_triggers': auth.datastore_run_triggers,\n }\n\n # 
IResourceController\n\n def before_show(self, resource_dict):\n # Modify the resource url of datastore resources so that\n # they link to the datastore dumps.\n if resource_dict.get('url_type') == 'datastore':\n resource_dict['url'] = p.toolkit.url_for(\n 'datastore.dump', resource_id=resource_dict['id'],\n qualified=True)\n\n if 'datastore_active' not in resource_dict:\n resource_dict[u'datastore_active'] = False\n\n return resource_dict\n\n def after_delete(self, context, resources):\n model = context['model']\n pkg = context['package']\n res_query = model.Session.query(model.Resource)\n query = res_query.filter(\n model.Resource.package_id == pkg.id,\n model.Resource.state == State.DELETED\n )\n deleted = [\n res for res in query.all()\n if res.extras.get('datastore_active') is True]\n\n for res in deleted:\n self.backend.delete(context, {\n 'resource_id': res.id,\n })\n res.extras['datastore_active'] = False\n res_query.filter_by(id=res.id).update(\n {'extras': res.extras}, synchronize_session=False)\n\n # IDatastore\n\n def datastore_validate(self, context, data_dict, fields_types):\n column_names = list(fields_types.keys())\n\n filters = data_dict.get('filters', {})\n for key in list(filters.keys()):\n if key in fields_types:\n del filters[key]\n\n q = data_dict.get('q')\n if q:\n if isinstance(q, string_types):\n del data_dict['q']\n column_names.append(u'rank')\n elif isinstance(q, dict):\n for key in list(q.keys()):\n if key in fields_types and isinstance(q[key],\n string_types):\n column_names.append(u'rank ' + key)\n del q[key]\n\n fields = data_dict.get('fields')\n if fields:\n data_dict['fields'] = list(set(fields) - set(column_names))\n\n language = data_dict.get('language')\n if language:\n if isinstance(language, string_types):\n del data_dict['language']\n\n plain = data_dict.get('plain')\n if plain:\n if isinstance(plain, bool):\n del data_dict['plain']\n\n distinct = data_dict.get('distinct')\n if distinct:\n if isinstance(distinct, bool):\n del data_dict['distinct']\n\n sort_clauses = data_dict.get('sort')\n if sort_clauses:\n invalid_clauses = [\n c for c in sort_clauses\n if not _parse_sort_clause(\n c, fields_types\n )\n ]\n data_dict['sort'] = invalid_clauses\n\n limit = data_dict.get('limit')\n if limit:\n is_positive_int = datastore_helpers.validate_int(limit,\n non_negative=True)\n is_all = isinstance(limit, string_types) and limit.lower() == 'all'\n if is_positive_int or is_all:\n del data_dict['limit']\n\n offset = data_dict.get('offset')\n if offset:\n is_positive_int = datastore_helpers.validate_int(offset,\n non_negative=True)\n if is_positive_int:\n del data_dict['offset']\n\n return data_dict\n\n def datastore_delete(self, context, data_dict, fields_types, query_dict):\n hook = getattr(self.backend, 'datastore_delete', None)\n if hook:\n query_dict = hook(context, data_dict, fields_types, query_dict)\n return query_dict\n\n def datastore_search(self, context, data_dict, fields_types, query_dict):\n hook = getattr(self.backend, 'datastore_search', None)\n if hook:\n query_dict = hook(context, data_dict, fields_types, query_dict)\n return query_dict\n\n def get_helpers(self):\n return {\n 'datastore_dictionary': datastore_helpers.datastore_dictionary}\n\n # IForkObserver\n\n def before_fork(self):\n try:\n before_fork = self.backend.before_fork\n except AttributeError:\n pass\n else:\n before_fork()\n\n # IBlueprint\n\n def get_blueprint(self):\n u'''Return a Flask Blueprint object to be registered by the app.'''\n\n return view.datastore\n", "path": 
"ckanext/datastore/plugin.py"}]}
| 3,735 | 165 |
gh_patches_debug_36976
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-18977
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
chore: override item removal methods in tracking
Based on the TODO comments in keras/keras/utils/tracking.py
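
Concretely, the TODO markers sit on `TrackedList`, `TrackedDict` and `TrackedSet`: items removed through `pop()`, `clear()`, `del` or `popitem()` are never passed to `tracker.untrack()`. A minimal sketch of the `TrackedList` part, mirroring the accompanying patch (the other two classes need the analogous `pop`/`popitem`/`clear` overrides):

```python
class TrackedList(list):
    # ...existing __init__/append/insert/extend/remove stay unchanged...

    def pop(self, index=-1):
        value = self[index]
        if self.tracker:
            self.tracker.untrack(value)
        return super().pop(index)

    def clear(self):
        if self.tracker:
            for value in self:
                self.tracker.untrack(value)
        super().clear()

    def __delitem__(self, index):
        value = self[index]  # capture before removal so it can be untracked
        super().__delitem__(index)
        if self.tracker:
            self.tracker.untrack(value)
```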
</issue>
<code>
[start of keras/utils/tracking.py]
1 from functools import wraps
2
3 from keras.backend.common.global_state import get_global_attribute
4 from keras.backend.common.global_state import set_global_attribute
5 from keras.utils import python_utils
6
7
8 class DotNotTrackScope:
9 def __enter__(self):
10 self.original_value = is_tracking_enabled()
11 set_global_attribute("tracking_on", False)
12
13 def __exit__(self, *args, **kwargs):
14 set_global_attribute("tracking_on", self.original_value)
15
16
17 def is_tracking_enabled():
18 return get_global_attribute("tracking_on", True)
19
20
21 def no_automatic_dependency_tracking(fn):
22 @wraps(fn)
23 def wrapper(*args, **kwargs):
24 with DotNotTrackScope():
25 return fn(*args, **kwargs)
26
27 return wrapper
28
29
30 class Tracker:
31 """Attribute tracker, used for e.g. Variable tracking.
32
33 Monitors certain attribute types
34 and put them in appropriate lists in case of a match.
35
36 Also passively tracks certain mutable collections
37 (dict, list) so that items added to them later
38 still get tracked. This is done by wrapping these
39 collections into an equivalent, tracking-aware object.
40
41 Usage:
42
43 ```python
44 def __init__(self):
45 self.tracker = Tracker(
46 # Format: `name: (test_fn, store)`
47 {
48 "variables":
49 (lambda x: isinstance(x, Variable), self._variables),
50 "metrics": (lambda x: isinstance(x, Metric), self._metrics),
51 "layers": (lambda x: isinstance(x, Layer), self._layers),
52 }
53 )
54
55 def __setattr__(self, name, value):
56 if hasattr(self, "_tracker"):
57 value = self._tracker.track(value)
58 return super().__setattr__(name, value)
59 ```
60 """
61
62 def __init__(self, config):
63 self.config = config
64 self.stored_ids = {name: set() for name in self.config.keys()}
65 self.locked = False
66 self._lock_violation_msg = None
67
68 def track(self, attr):
69 if not is_tracking_enabled():
70 return attr
71
72 for store_name, (is_attr_type, _) in self.config.items():
73 if is_attr_type(attr):
74 if id(attr) not in self.stored_ids[store_name]:
75 self.add_to_store(store_name, attr)
76 return attr
77 if isinstance(attr, tuple):
78 wrapped_attr = []
79 for e in attr:
80 wrapped_attr.append(self.track(e))
81 # This should cover tuples and nametuples
82 return attr.__class__(wrapped_attr)
83 elif isinstance(attr, list):
84 return TrackedList(attr, self)
85 elif isinstance(attr, dict):
86 # TODO: OrderedDict?
87 return TrackedDict(attr, self)
88 elif isinstance(attr, set):
89 return TrackedSet(attr, self)
90 return attr
91
92 def untrack(self, value):
93 for store_name in self.stored_ids.keys():
94 if id(value) in self.stored_ids[store_name]:
95 self.stored_ids[store_name].remove(id(value))
96 python_utils.remove_by_id(self.config[store_name][1], value)
97
98 def lock(self, msg):
99 self.locked = True
100 self._lock_violation_msg = msg
101
102 def add_to_store(self, store_name, value):
103 if self.locked:
104 raise ValueError(self._lock_violation_msg)
105 self.config[store_name][1].append(value)
106 self.stored_ids[store_name].add(id(value))
107
108
109 class TrackedList(list):
110 # TODO: override item removal methods?
111 def __init__(self, values=None, tracker=None):
112 self.tracker = tracker
113 if tracker and values:
114 values = [tracker.track(v) for v in values]
115 super().__init__(values or [])
116
117 def append(self, value):
118 if self.tracker:
119 self.tracker.track(value)
120 super().append(value)
121
122 def insert(self, value):
123 if self.tracker:
124 self.tracker.track(value)
125 super().insert(value)
126
127 def extend(self, values):
128 if self.tracker:
129 values = [self.tracker.track(v) for v in values]
130 super().extend(values)
131
132 def remove(self, value):
133 if self.tracker:
134 self.tracker.untrack(value)
135 try:
136 super().remove(value)
137 except ValueError:
138 python_utils.remove_by_id(self, value)
139
140
141 class TrackedDict(dict):
142 # TODO: override item removal methods?
143 def __init__(self, values=None, tracker=None):
144 self.tracker = tracker
145 if tracker and values:
146 values = {k: tracker.track(v) for k, v in values.items()}
147 super().__init__(values or [])
148
149 def __setitem__(self, key, value):
150 if self.tracker:
151 self.tracker.track(value)
152 super().__setitem__(key, value)
153
154 def update(self, mapping):
155 if self.tracker:
156 mapping = {k: self.tracker.track(v) for k, v in mapping.items()}
157 super().update(mapping)
158
159
160 class TrackedSet(set):
161 # TODO: override item removal methods?
162 def __init__(self, values=None, tracker=None):
163 self.tracker = tracker
164 if tracker and values:
165 values = {tracker.track(v) for v in values}
166 super().__init__(values or [])
167
168 def add(self, value):
169 if self.tracker:
170 self.tracker.track(value)
171 super().add(value)
172
173 def update(self, values):
174 if self.tracker:
175 values = [self.tracker.track(v) for v in values]
176 super().update(values)
177
178 def remove(self, value):
179 if self.tracker:
180 self.tracker.untrack(value)
181 super().remove(value)
182
[end of keras/utils/tracking.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/keras/utils/tracking.py b/keras/utils/tracking.py
--- a/keras/utils/tracking.py
+++ b/keras/utils/tracking.py
@@ -107,7 +107,6 @@
class TrackedList(list):
- # TODO: override item removal methods?
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
@@ -137,9 +136,28 @@
except ValueError:
python_utils.remove_by_id(self, value)
+ def pop(self, index=-1):
+ if self.tracker:
+ value = self[index]
+ self.tracker.untrack(value)
+ return super().pop(index)
+ else:
+ return super().pop(index)
+
+ def clear(self):
+ if self.tracker:
+ for value in self:
+ self.tracker.untrack(value)
+ super().clear()
+
+ def __delitem__(self, index):
+ value = self[index] # Get value before removing
+ super().__delitem__(index)
+ if self.tracker:
+ self.tracker.untrack(value)
+
class TrackedDict(dict):
- # TODO: override item removal methods?
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
@@ -156,9 +174,29 @@
mapping = {k: self.tracker.track(v) for k, v in mapping.items()}
super().update(mapping)
+ def pop(self, key, default=None):
+ if self.tracker:
+ value = super().pop(key, default)
+ if value is not default:
+ self.tracker.untrack(value)
+ return value
+ else:
+ return super().pop(key, default)
+
+ def popitem(self):
+ key, value = super().popitem()
+ if self.tracker:
+ self.tracker.untrack(value)
+ return key, value
+
+ def clear(self):
+ if self.tracker:
+ for value in self.values():
+ self.tracker.untrack(value)
+ super().clear()
+
class TrackedSet(set):
- # TODO: override item removal methods?
def __init__(self, values=None, tracker=None):
self.tracker = tracker
if tracker and values:
@@ -179,3 +217,15 @@
if self.tracker:
self.tracker.untrack(value)
super().remove(value)
+
+ def pop(self):
+ value = super().pop()
+ if self.tracker:
+ self.tracker.untrack(value)
+ return value
+
+ def clear(self):
+ if self.tracker:
+ for value in self:
+ self.tracker.untrack(value)
+ super().clear()
|
{"golden_diff": "diff --git a/keras/utils/tracking.py b/keras/utils/tracking.py\n--- a/keras/utils/tracking.py\n+++ b/keras/utils/tracking.py\n@@ -107,7 +107,6 @@\n \n \n class TrackedList(list):\n- # TODO: override item removal methods?\n def __init__(self, values=None, tracker=None):\n self.tracker = tracker\n if tracker and values:\n@@ -137,9 +136,28 @@\n except ValueError:\n python_utils.remove_by_id(self, value)\n \n+ def pop(self, index=-1):\n+ if self.tracker:\n+ value = self[index]\n+ self.tracker.untrack(value)\n+ return super().pop(index)\n+ else:\n+ return super().pop(index)\n+\n+ def clear(self):\n+ if self.tracker:\n+ for value in self:\n+ self.tracker.untrack(value)\n+ super().clear()\n+\n+ def __delitem__(self, index):\n+ value = self[index] # Get value before removing\n+ super().__delitem__(index)\n+ if self.tracker:\n+ self.tracker.untrack(value)\n+\n \n class TrackedDict(dict):\n- # TODO: override item removal methods?\n def __init__(self, values=None, tracker=None):\n self.tracker = tracker\n if tracker and values:\n@@ -156,9 +174,29 @@\n mapping = {k: self.tracker.track(v) for k, v in mapping.items()}\n super().update(mapping)\n \n+ def pop(self, key, default=None):\n+ if self.tracker:\n+ value = super().pop(key, default)\n+ if value is not default:\n+ self.tracker.untrack(value)\n+ return value\n+ else:\n+ return super().pop(key, default)\n+\n+ def popitem(self):\n+ key, value = super().popitem()\n+ if self.tracker:\n+ self.tracker.untrack(value)\n+ return key, value\n+\n+ def clear(self):\n+ if self.tracker:\n+ for value in self.values():\n+ self.tracker.untrack(value)\n+ super().clear()\n+\n \n class TrackedSet(set):\n- # TODO: override item removal methods?\n def __init__(self, values=None, tracker=None):\n self.tracker = tracker\n if tracker and values:\n@@ -179,3 +217,15 @@\n if self.tracker:\n self.tracker.untrack(value)\n super().remove(value)\n+\n+ def pop(self):\n+ value = super().pop()\n+ if self.tracker:\n+ self.tracker.untrack(value)\n+ return value\n+\n+ def clear(self):\n+ if self.tracker:\n+ for value in self:\n+ self.tracker.untrack(value)\n+ super().clear()\n", "issue": "chore: override item removal methods in tracking\nBased on the TODO comments in keras/keras/utils/tracking.py\n", "before_files": [{"content": "from functools import wraps\n\nfrom keras.backend.common.global_state import get_global_attribute\nfrom keras.backend.common.global_state import set_global_attribute\nfrom keras.utils import python_utils\n\n\nclass DotNotTrackScope:\n def __enter__(self):\n self.original_value = is_tracking_enabled()\n set_global_attribute(\"tracking_on\", False)\n\n def __exit__(self, *args, **kwargs):\n set_global_attribute(\"tracking_on\", self.original_value)\n\n\ndef is_tracking_enabled():\n return get_global_attribute(\"tracking_on\", True)\n\n\ndef no_automatic_dependency_tracking(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n with DotNotTrackScope():\n return fn(*args, **kwargs)\n\n return wrapper\n\n\nclass Tracker:\n \"\"\"Attribute tracker, used for e.g. Variable tracking.\n\n Monitors certain attribute types\n and put them in appropriate lists in case of a match.\n\n Also passively tracks certain mutable collections\n (dict, list) so that items added to them later\n still get tracked. 
This is done by wrapping these\n collections into an equivalent, tracking-aware object.\n\n Usage:\n\n ```python\n def __init__(self):\n self.tracker = Tracker(\n # Format: `name: (test_fn, store)`\n {\n \"variables\":\n (lambda x: isinstance(x, Variable), self._variables),\n \"metrics\": (lambda x: isinstance(x, Metric), self._metrics),\n \"layers\": (lambda x: isinstance(x, Layer), self._layers),\n }\n )\n\n def __setattr__(self, name, value):\n if hasattr(self, \"_tracker\"):\n value = self._tracker.track(value)\n return super().__setattr__(name, value)\n ```\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.stored_ids = {name: set() for name in self.config.keys()}\n self.locked = False\n self._lock_violation_msg = None\n\n def track(self, attr):\n if not is_tracking_enabled():\n return attr\n\n for store_name, (is_attr_type, _) in self.config.items():\n if is_attr_type(attr):\n if id(attr) not in self.stored_ids[store_name]:\n self.add_to_store(store_name, attr)\n return attr\n if isinstance(attr, tuple):\n wrapped_attr = []\n for e in attr:\n wrapped_attr.append(self.track(e))\n # This should cover tuples and nametuples\n return attr.__class__(wrapped_attr)\n elif isinstance(attr, list):\n return TrackedList(attr, self)\n elif isinstance(attr, dict):\n # TODO: OrderedDict?\n return TrackedDict(attr, self)\n elif isinstance(attr, set):\n return TrackedSet(attr, self)\n return attr\n\n def untrack(self, value):\n for store_name in self.stored_ids.keys():\n if id(value) in self.stored_ids[store_name]:\n self.stored_ids[store_name].remove(id(value))\n python_utils.remove_by_id(self.config[store_name][1], value)\n\n def lock(self, msg):\n self.locked = True\n self._lock_violation_msg = msg\n\n def add_to_store(self, store_name, value):\n if self.locked:\n raise ValueError(self._lock_violation_msg)\n self.config[store_name][1].append(value)\n self.stored_ids[store_name].add(id(value))\n\n\nclass TrackedList(list):\n # TODO: override item removal methods?\n def __init__(self, values=None, tracker=None):\n self.tracker = tracker\n if tracker and values:\n values = [tracker.track(v) for v in values]\n super().__init__(values or [])\n\n def append(self, value):\n if self.tracker:\n self.tracker.track(value)\n super().append(value)\n\n def insert(self, value):\n if self.tracker:\n self.tracker.track(value)\n super().insert(value)\n\n def extend(self, values):\n if self.tracker:\n values = [self.tracker.track(v) for v in values]\n super().extend(values)\n\n def remove(self, value):\n if self.tracker:\n self.tracker.untrack(value)\n try:\n super().remove(value)\n except ValueError:\n python_utils.remove_by_id(self, value)\n\n\nclass TrackedDict(dict):\n # TODO: override item removal methods?\n def __init__(self, values=None, tracker=None):\n self.tracker = tracker\n if tracker and values:\n values = {k: tracker.track(v) for k, v in values.items()}\n super().__init__(values or [])\n\n def __setitem__(self, key, value):\n if self.tracker:\n self.tracker.track(value)\n super().__setitem__(key, value)\n\n def update(self, mapping):\n if self.tracker:\n mapping = {k: self.tracker.track(v) for k, v in mapping.items()}\n super().update(mapping)\n\n\nclass TrackedSet(set):\n # TODO: override item removal methods?\n def __init__(self, values=None, tracker=None):\n self.tracker = tracker\n if tracker and values:\n values = {tracker.track(v) for v in values}\n super().__init__(values or [])\n\n def add(self, value):\n if self.tracker:\n self.tracker.track(value)\n super().add(value)\n\n 
def update(self, values):\n if self.tracker:\n values = [self.tracker.track(v) for v in values]\n super().update(values)\n\n def remove(self, value):\n if self.tracker:\n self.tracker.untrack(value)\n super().remove(value)\n", "path": "keras/utils/tracking.py"}]}
| 2,248 | 648 |
gh_patches_debug_34076
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-575
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Openlibrary connector not loading isbn sometimes, when it appears to be available
An example: https://openlibrary.org/books/OL27222321M.json
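
A likely cause (an assumption, not verified against the actual fix): in `bookwyrm/connectors/openlibrary.py` below, `Mapping('isbn13', formatter=get_first)` falls back to `isbn13` as the remote field name, while the OpenLibrary edition JSON in the linked example uses the underscored keys `isbn_13` and `isbn_10`, so the lookup never matches. A sketch of the corrected mappings under that assumption:

```python
# Hypothetical correction, assuming OpenLibrary's edition JSON exposes the
# ISBNs under "isbn_13" / "isbn_10" rather than "isbn13" / "isbn10".
Mapping('isbn13', remote_field='isbn_13', formatter=get_first),
Mapping('isbn10', remote_field='isbn_10', formatter=get_first),
```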
</issue>
<code>
[start of bookwyrm/connectors/openlibrary.py]
1 ''' openlibrary data connector '''
2 import re
3
4 from bookwyrm import models
5 from .abstract_connector import AbstractConnector, SearchResult, Mapping
6 from .abstract_connector import get_data
7 from .connector_manager import ConnectorException
8 from .openlibrary_languages import languages
9
10
11 class Connector(AbstractConnector):
12 ''' instantiate a connector for OL '''
13 def __init__(self, identifier):
14 super().__init__(identifier)
15
16 get_first = lambda a: a[0]
17 get_remote_id = lambda a: self.base_url + a
18 self.book_mappings = [
19 Mapping('title'),
20 Mapping('id', remote_field='key', formatter=get_remote_id),
21 Mapping(
22 'cover', remote_field='covers', formatter=self.get_cover_url),
23 Mapping('sortTitle', remote_field='sort_title'),
24 Mapping('subtitle'),
25 Mapping('description', formatter=get_description),
26 Mapping('languages', formatter=get_languages),
27 Mapping('series', formatter=get_first),
28 Mapping('seriesNumber', remote_field='series_number'),
29 Mapping('subjects'),
30 Mapping('subjectPlaces'),
31 Mapping('isbn13', formatter=get_first),
32 Mapping('isbn10', formatter=get_first),
33 Mapping('lccn', formatter=get_first),
34 Mapping(
35 'oclcNumber', remote_field='oclc_numbers',
36 formatter=get_first
37 ),
38 Mapping(
39 'openlibraryKey', remote_field='key',
40 formatter=get_openlibrary_key
41 ),
42 Mapping('goodreadsKey', remote_field='goodreads_key'),
43 Mapping('asin'),
44 Mapping(
45 'firstPublishedDate', remote_field='first_publish_date',
46 ),
47 Mapping('publishedDate', remote_field='publish_date'),
48 Mapping('pages', remote_field='number_of_pages'),
49 Mapping('physicalFormat', remote_field='physical_format'),
50 Mapping('publishers'),
51 ]
52
53 self.author_mappings = [
54 Mapping('id', remote_field='key', formatter=get_remote_id),
55 Mapping('name'),
56 Mapping(
57 'openlibraryKey', remote_field='key',
58 formatter=get_openlibrary_key
59 ),
60 Mapping('born', remote_field='birth_date'),
61 Mapping('died', remote_field='death_date'),
62 Mapping('bio', formatter=get_description),
63 ]
64
65
66 def get_remote_id_from_data(self, data):
67 ''' format a url from an openlibrary id field '''
68 try:
69 key = data['key']
70 except KeyError:
71 raise ConnectorException('Invalid book data')
72 return '%s%s' % (self.books_url, key)
73
74
75 def is_work_data(self, data):
76 return bool(re.match(r'^[\/\w]+OL\d+W$', data['key']))
77
78
79 def get_edition_from_work_data(self, data):
80 try:
81 key = data['key']
82 except KeyError:
83 raise ConnectorException('Invalid book data')
84 url = '%s%s/editions' % (self.books_url, key)
85 data = get_data(url)
86 return pick_default_edition(data['entries'])
87
88
89 def get_work_from_edition_data(self, data):
90 try:
91 key = data['works'][0]['key']
92 except (IndexError, KeyError):
93 raise ConnectorException('No work found for edition')
94 url = '%s%s' % (self.books_url, key)
95 return get_data(url)
96
97
98 def get_authors_from_data(self, data):
99 ''' parse author json and load or create authors '''
100 for author_blob in data.get('authors', []):
101 author_blob = author_blob.get('author', author_blob)
102 # this id is "/authors/OL1234567A"
103 author_id = author_blob['key']
104 url = '%s%s' % (self.base_url, author_id)
105 yield self.get_or_create_author(url)
106
107
108 def get_cover_url(self, cover_blob):
109 ''' ask openlibrary for the cover '''
110 cover_id = cover_blob[0]
111 image_name = '%s-L.jpg' % cover_id
112 return '%s/b/id/%s' % (self.covers_url, image_name)
113
114
115 def parse_search_data(self, data):
116 return data.get('docs')
117
118
119 def format_search_result(self, search_result):
120 # build the remote id from the openlibrary key
121 key = self.books_url + search_result['key']
122 author = search_result.get('author_name') or ['Unknown']
123 return SearchResult(
124 title=search_result.get('title'),
125 key=key,
126 author=', '.join(author),
127 connector=self,
128 year=search_result.get('first_publish_year'),
129 )
130
131
132 def load_edition_data(self, olkey):
133 ''' query openlibrary for editions of a work '''
134 url = '%s/works/%s/editions' % (self.books_url, olkey)
135 return get_data(url)
136
137
138 def expand_book_data(self, book):
139 work = book
140 # go from the edition to the work, if necessary
141 if isinstance(book, models.Edition):
142 work = book.parent_work
143
144 # we can mass download edition data from OL to avoid repeatedly querying
145 edition_options = self.load_edition_data(work.openlibrary_key)
146 for edition_data in edition_options.get('entries'):
147 self.create_edition_from_data(work, edition_data)
148
149
150 def get_description(description_blob):
151 ''' descriptions can be a string or a dict '''
152 if isinstance(description_blob, dict):
153 return description_blob.get('value')
154 return description_blob
155
156
157 def get_openlibrary_key(key):
158 ''' convert /books/OL27320736M into OL27320736M '''
159 return key.split('/')[-1]
160
161
162 def get_languages(language_blob):
163 ''' /language/eng -> English '''
164 langs = []
165 for lang in language_blob:
166 langs.append(
167 languages.get(lang.get('key', ''), None)
168 )
169 return langs
170
171
172 def pick_default_edition(options):
173 ''' favor physical copies with covers in english '''
174 if not options:
175 return None
176 if len(options) == 1:
177 return options[0]
178
179 options = [e for e in options if e.get('covers')] or options
180 options = [e for e in options if \
181 '/languages/eng' in str(e.get('languages'))] or options
182 formats = ['paperback', 'hardcover', 'mass market paperback']
183 options = [e for e in options if \
184 str(e.get('physical_format')).lower() in formats] or options
185 options = [e for e in options if e.get('isbn_13')] or options
186 options = [e for e in options if e.get('ocaid')] or options
187 return options[0]
188
[end of bookwyrm/connectors/openlibrary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/connectors/openlibrary.py b/bookwyrm/connectors/openlibrary.py
--- a/bookwyrm/connectors/openlibrary.py
+++ b/bookwyrm/connectors/openlibrary.py
@@ -27,9 +27,9 @@
Mapping('series', formatter=get_first),
Mapping('seriesNumber', remote_field='series_number'),
Mapping('subjects'),
- Mapping('subjectPlaces'),
- Mapping('isbn13', formatter=get_first),
- Mapping('isbn10', formatter=get_first),
+ Mapping('subjectPlaces', remote_field='subject_places'),
+ Mapping('isbn13', remote_field='isbn_13', formatter=get_first),
+ Mapping('isbn10', remote_field='isbn_10', formatter=get_first),
Mapping('lccn', formatter=get_first),
Mapping(
'oclcNumber', remote_field='oclc_numbers',
@@ -144,9 +144,34 @@
# we can mass download edition data from OL to avoid repeatedly querying
edition_options = self.load_edition_data(work.openlibrary_key)
for edition_data in edition_options.get('entries'):
+ # does this edition have ANY interesting data?
+ if ignore_edition(edition_data):
+ continue
self.create_edition_from_data(work, edition_data)
+def ignore_edition(edition_data):
+ ''' don't load a million editions that have no metadata '''
+ # an isbn, we love to see it
+ if edition_data.get('isbn_13') or edition_data.get('isbn_10'):
+ print(edition_data.get('isbn_10'))
+ return False
+ # grudgingly, oclc can stay
+ if edition_data.get('oclc_numbers'):
+ print(edition_data.get('oclc_numbers'))
+ return False
+ # if it has a cover it can stay
+ if edition_data.get('covers'):
+ print(edition_data.get('covers'))
+ return False
+ # keep non-english editions
+ if edition_data.get('languages') and \
+ 'languages/eng' not in str(edition_data.get('languages')):
+ print(edition_data.get('languages'))
+ return False
+ return True
+
+
def get_description(description_blob):
''' descriptions can be a string or a dict '''
if isinstance(description_blob, dict):
|
{"golden_diff": "diff --git a/bookwyrm/connectors/openlibrary.py b/bookwyrm/connectors/openlibrary.py\n--- a/bookwyrm/connectors/openlibrary.py\n+++ b/bookwyrm/connectors/openlibrary.py\n@@ -27,9 +27,9 @@\n Mapping('series', formatter=get_first),\n Mapping('seriesNumber', remote_field='series_number'),\n Mapping('subjects'),\n- Mapping('subjectPlaces'),\n- Mapping('isbn13', formatter=get_first),\n- Mapping('isbn10', formatter=get_first),\n+ Mapping('subjectPlaces', remote_field='subject_places'),\n+ Mapping('isbn13', remote_field='isbn_13', formatter=get_first),\n+ Mapping('isbn10', remote_field='isbn_10', formatter=get_first),\n Mapping('lccn', formatter=get_first),\n Mapping(\n 'oclcNumber', remote_field='oclc_numbers',\n@@ -144,9 +144,34 @@\n # we can mass download edition data from OL to avoid repeatedly querying\n edition_options = self.load_edition_data(work.openlibrary_key)\n for edition_data in edition_options.get('entries'):\n+ # does this edition have ANY interesting data?\n+ if ignore_edition(edition_data):\n+ continue\n self.create_edition_from_data(work, edition_data)\n \n \n+def ignore_edition(edition_data):\n+ ''' don't load a million editions that have no metadata '''\n+ # an isbn, we love to see it\n+ if edition_data.get('isbn_13') or edition_data.get('isbn_10'):\n+ print(edition_data.get('isbn_10'))\n+ return False\n+ # grudgingly, oclc can stay\n+ if edition_data.get('oclc_numbers'):\n+ print(edition_data.get('oclc_numbers'))\n+ return False\n+ # if it has a cover it can stay\n+ if edition_data.get('covers'):\n+ print(edition_data.get('covers'))\n+ return False\n+ # keep non-english editions\n+ if edition_data.get('languages') and \\\n+ 'languages/eng' not in str(edition_data.get('languages')):\n+ print(edition_data.get('languages'))\n+ return False\n+ return True\n+\n+\n def get_description(description_blob):\n ''' descriptions can be a string or a dict '''\n if isinstance(description_blob, dict):\n", "issue": "Openlibrary connector not loading isbn sometimes, when it appears to be available\nan example: https://openlibrary.org/books/OL27222321M.json\n", "before_files": [{"content": "''' openlibrary data connector '''\nimport re\n\nfrom bookwyrm import models\nfrom .abstract_connector import AbstractConnector, SearchResult, Mapping\nfrom .abstract_connector import get_data\nfrom .connector_manager import ConnectorException\nfrom .openlibrary_languages import languages\n\n\nclass Connector(AbstractConnector):\n ''' instantiate a connector for OL '''\n def __init__(self, identifier):\n super().__init__(identifier)\n\n get_first = lambda a: a[0]\n get_remote_id = lambda a: self.base_url + a\n self.book_mappings = [\n Mapping('title'),\n Mapping('id', remote_field='key', formatter=get_remote_id),\n Mapping(\n 'cover', remote_field='covers', formatter=self.get_cover_url),\n Mapping('sortTitle', remote_field='sort_title'),\n Mapping('subtitle'),\n Mapping('description', formatter=get_description),\n Mapping('languages', formatter=get_languages),\n Mapping('series', formatter=get_first),\n Mapping('seriesNumber', remote_field='series_number'),\n Mapping('subjects'),\n Mapping('subjectPlaces'),\n Mapping('isbn13', formatter=get_first),\n Mapping('isbn10', formatter=get_first),\n Mapping('lccn', formatter=get_first),\n Mapping(\n 'oclcNumber', remote_field='oclc_numbers',\n formatter=get_first\n ),\n Mapping(\n 'openlibraryKey', remote_field='key',\n formatter=get_openlibrary_key\n ),\n Mapping('goodreadsKey', remote_field='goodreads_key'),\n Mapping('asin'),\n Mapping(\n 
'firstPublishedDate', remote_field='first_publish_date',\n ),\n Mapping('publishedDate', remote_field='publish_date'),\n Mapping('pages', remote_field='number_of_pages'),\n Mapping('physicalFormat', remote_field='physical_format'),\n Mapping('publishers'),\n ]\n\n self.author_mappings = [\n Mapping('id', remote_field='key', formatter=get_remote_id),\n Mapping('name'),\n Mapping(\n 'openlibraryKey', remote_field='key',\n formatter=get_openlibrary_key\n ),\n Mapping('born', remote_field='birth_date'),\n Mapping('died', remote_field='death_date'),\n Mapping('bio', formatter=get_description),\n ]\n\n\n def get_remote_id_from_data(self, data):\n ''' format a url from an openlibrary id field '''\n try:\n key = data['key']\n except KeyError:\n raise ConnectorException('Invalid book data')\n return '%s%s' % (self.books_url, key)\n\n\n def is_work_data(self, data):\n return bool(re.match(r'^[\\/\\w]+OL\\d+W$', data['key']))\n\n\n def get_edition_from_work_data(self, data):\n try:\n key = data['key']\n except KeyError:\n raise ConnectorException('Invalid book data')\n url = '%s%s/editions' % (self.books_url, key)\n data = get_data(url)\n return pick_default_edition(data['entries'])\n\n\n def get_work_from_edition_data(self, data):\n try:\n key = data['works'][0]['key']\n except (IndexError, KeyError):\n raise ConnectorException('No work found for edition')\n url = '%s%s' % (self.books_url, key)\n return get_data(url)\n\n\n def get_authors_from_data(self, data):\n ''' parse author json and load or create authors '''\n for author_blob in data.get('authors', []):\n author_blob = author_blob.get('author', author_blob)\n # this id is \"/authors/OL1234567A\"\n author_id = author_blob['key']\n url = '%s%s' % (self.base_url, author_id)\n yield self.get_or_create_author(url)\n\n\n def get_cover_url(self, cover_blob):\n ''' ask openlibrary for the cover '''\n cover_id = cover_blob[0]\n image_name = '%s-L.jpg' % cover_id\n return '%s/b/id/%s' % (self.covers_url, image_name)\n\n\n def parse_search_data(self, data):\n return data.get('docs')\n\n\n def format_search_result(self, search_result):\n # build the remote id from the openlibrary key\n key = self.books_url + search_result['key']\n author = search_result.get('author_name') or ['Unknown']\n return SearchResult(\n title=search_result.get('title'),\n key=key,\n author=', '.join(author),\n connector=self,\n year=search_result.get('first_publish_year'),\n )\n\n\n def load_edition_data(self, olkey):\n ''' query openlibrary for editions of a work '''\n url = '%s/works/%s/editions' % (self.books_url, olkey)\n return get_data(url)\n\n\n def expand_book_data(self, book):\n work = book\n # go from the edition to the work, if necessary\n if isinstance(book, models.Edition):\n work = book.parent_work\n\n # we can mass download edition data from OL to avoid repeatedly querying\n edition_options = self.load_edition_data(work.openlibrary_key)\n for edition_data in edition_options.get('entries'):\n self.create_edition_from_data(work, edition_data)\n\n\ndef get_description(description_blob):\n ''' descriptions can be a string or a dict '''\n if isinstance(description_blob, dict):\n return description_blob.get('value')\n return description_blob\n\n\ndef get_openlibrary_key(key):\n ''' convert /books/OL27320736M into OL27320736M '''\n return key.split('/')[-1]\n\n\ndef get_languages(language_blob):\n ''' /language/eng -> English '''\n langs = []\n for lang in language_blob:\n langs.append(\n languages.get(lang.get('key', ''), None)\n )\n return langs\n\n\ndef 
pick_default_edition(options):\n ''' favor physical copies with covers in english '''\n if not options:\n return None\n if len(options) == 1:\n return options[0]\n\n options = [e for e in options if e.get('covers')] or options\n options = [e for e in options if \\\n '/languages/eng' in str(e.get('languages'))] or options\n formats = ['paperback', 'hardcover', 'mass market paperback']\n options = [e for e in options if \\\n str(e.get('physical_format')).lower() in formats] or options\n options = [e for e in options if e.get('isbn_13')] or options\n options = [e for e in options if e.get('ocaid')] or options\n return options[0]\n", "path": "bookwyrm/connectors/openlibrary.py"}]}
| 2,464 | 521 |
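A brief note on the root cause in the record above: OpenLibrary returns ISBN fields under snake_case keys (`isbn_13`, `isbn_10`), so a mapping that defaults the remote field to the local name (`isbn13`) silently finds nothing, which is why the patch adds explicit `remote_field` values. A tiny stand-alone illustration (the sample dict and ISBN value are made up; this is not the BookWyrm `Mapping` class):

```python
edition = {"isbn_13": ["9780000000000"], "title": "Example"}

# Looking up the local field name directly misses the data entirely.
assert edition.get("isbn13") is None

# Mapping the local name to the remote key recovers it.
field_map = {"isbn13": "isbn_13", "isbn10": "isbn_10"}
values = edition.get(field_map["isbn13"], [])
isbn = values[0] if values else None
assert isbn == "9780000000000"
```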
gh_patches_debug_37011
|
rasdani/github-patches
|
git_diff
|
huggingface__dataset-viewer-1084
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No specific error when dataset tries to import a non-installed module
When a dataset script tries to import a module/library that is not installed, there is no informative error message.
See:
- #1067
- #1068
Related to:
- #976
</issue>
<code>
[start of services/worker/src/worker/job_runners/config_names.py]
1 # SPDX-License-Identifier: Apache-2.0
2 # Copyright 2022 The HuggingFace Authors.
3
4 import logging
5 from http import HTTPStatus
6 from typing import Any, List, Literal, Mapping, Optional, TypedDict, Union
7
8 from datasets import get_dataset_config_names
9 from datasets.data_files import EmptyDatasetError as _EmptyDatasetError
10 from libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION
11 from libcommon.simple_cache import SplitFullName
12
13 from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError
14 from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
15
16 ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "ConfigNamesError"]
17
18
19 class ConfigNamesJobRunnerError(JobRunnerError):
20 """Base class for job runner exceptions."""
21
22 def __init__(
23 self,
24 message: str,
25 status_code: HTTPStatus,
26 code: ConfigNamesJobRunnerErrorCode,
27 cause: Optional[BaseException] = None,
28 disclose_cause: bool = False,
29 ):
30 super().__init__(
31 message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause
32 )
33
34
35 class EmptyDatasetError(ConfigNamesJobRunnerError):
36 """Raised when the dataset has no data."""
37
38 def __init__(self, message: str, cause: Optional[BaseException] = None):
39 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
40
41
42 class ConfigNamesError(ConfigNamesJobRunnerError):
43 """Raised when the config names could not be fetched."""
44
45 def __init__(self, message: str, cause: Optional[BaseException] = None):
46 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ConfigNamesError", cause, True)
47
48
49 class ConfigNameItem(TypedDict):
50 dataset: str
51 config: str
52
53
54 class ConfigNamesResponse(TypedDict):
55 config_names: List[ConfigNameItem]
56
57
58 def compute_config_names_response(
59 dataset: str,
60 hf_token: Optional[str] = None,
61 ) -> ConfigNamesResponse:
62 """
63 Get the response of /config-names for one specific dataset on huggingface.co.
64 Dataset can be private or gated if you pass an acceptable token.
65
66 It is assumed that the dataset exists and can be accessed using the token.
67
68 Args:
69 dataset (`str`):
70 A namespace (user or an organization) and a repo name separated
71 by a `/`.
72 hf_token (`str`, *optional*):
73 An authentication token (See https://huggingface.co/settings/token)
74 Returns:
75 `ConfigNamesResponse`: An object with the list of config names.
76 <Tip>
77 Raises the following errors:
78 - [`~job_runners.config_names.EmptyDatasetError`]
79 The dataset is empty.
80 - [`~job_runners.config_names.ConfigNamesError`]
81 If the list of configs could not be obtained using the datasets library.
82 </Tip>
83 """
84 logging.info(f"get config names for dataset={dataset}")
85 use_auth_token: Union[bool, str, None] = hf_token if hf_token is not None else False
86 # get the list of splits in streaming mode
87 try:
88 config_name_items: List[ConfigNameItem] = [
89 {"dataset": dataset, "config": str(config)}
90 for config in sorted(get_dataset_config_names(path=dataset, use_auth_token=use_auth_token))
91 ]
92 except _EmptyDatasetError as err:
93 raise EmptyDatasetError("The dataset is empty.", cause=err) from err
94 except Exception as err:
95 raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
96 return ConfigNamesResponse(config_names=config_name_items)
97
98
99 class ConfigNamesJobRunner(DatasetsBasedJobRunner):
100 @staticmethod
101 def get_job_type() -> str:
102 return "/config-names"
103
104 @staticmethod
105 def get_job_runner_version() -> int:
106 return PROCESSING_STEP_CONFIG_NAMES_VERSION
107
108 def compute(self) -> CompleteJobResult:
109 if self.dataset is None:
110 raise ParameterMissingError("'dataset' parameter is required")
111 return CompleteJobResult(
112 compute_config_names_response(dataset=self.dataset, hf_token=self.common_config.hf_token)
113 )
114
115 def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:
116 """Get the set of new splits, from the content created by the compute."""
117 return {SplitFullName(dataset=s["dataset"], config=s["config"], split=None) for s in content["config_names"]}
118
[end of services/worker/src/worker/job_runners/config_names.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/services/worker/src/worker/job_runners/config_names.py b/services/worker/src/worker/job_runners/config_names.py
--- a/services/worker/src/worker/job_runners/config_names.py
+++ b/services/worker/src/worker/job_runners/config_names.py
@@ -13,7 +13,7 @@
from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError
from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner
-ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "ConfigNamesError"]
+ConfigNamesJobRunnerErrorCode = Literal["EmptyDatasetError", "DatasetModuleNotInstalledError", "ConfigNamesError"]
class ConfigNamesJobRunnerError(JobRunnerError):
@@ -39,6 +39,13 @@
super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "EmptyDatasetError", cause, True)
+class DatasetModuleNotInstalledError(ConfigNamesJobRunnerError):
+ """Raised when the dataset tries to import a module that is not installed."""
+
+ def __init__(self, message: str, cause: Optional[BaseException] = None):
+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "DatasetModuleNotInstalledError", cause, True)
+
+
class ConfigNamesError(ConfigNamesJobRunnerError):
"""Raised when the config names could not be fetched."""
@@ -77,6 +84,8 @@
Raises the following errors:
- [`~job_runners.config_names.EmptyDatasetError`]
The dataset is empty.
+ - [`~job_runners.config_names.DatasetModuleNotInstalledError`]
+ The dataset tries to import a module that is not installed.
- [`~job_runners.config_names.ConfigNamesError`]
If the list of configs could not be obtained using the datasets library.
</Tip>
@@ -91,6 +100,10 @@
]
except _EmptyDatasetError as err:
raise EmptyDatasetError("The dataset is empty.", cause=err) from err
+ except ImportError as err:
+ raise DatasetModuleNotInstalledError(
+ "The dataset tries to import a module that is not installed.", cause=err
+ ) from err
except Exception as err:
raise ConfigNamesError("Cannot get the config names for the dataset.", cause=err) from err
return ConfigNamesResponse(config_names=config_name_items)
|
{"golden_diff": "diff --git a/services/worker/src/worker/job_runners/config_names.py b/services/worker/src/worker/job_runners/config_names.py\n--- a/services/worker/src/worker/job_runners/config_names.py\n+++ b/services/worker/src/worker/job_runners/config_names.py\n@@ -13,7 +13,7 @@\n from worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError\n from worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner\n \n-ConfigNamesJobRunnerErrorCode = Literal[\"EmptyDatasetError\", \"ConfigNamesError\"]\n+ConfigNamesJobRunnerErrorCode = Literal[\"EmptyDatasetError\", \"DatasetModuleNotInstalledError\", \"ConfigNamesError\"]\n \n \n class ConfigNamesJobRunnerError(JobRunnerError):\n@@ -39,6 +39,13 @@\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"EmptyDatasetError\", cause, True)\n \n \n+class DatasetModuleNotInstalledError(ConfigNamesJobRunnerError):\n+ \"\"\"Raised when the dataset tries to import a module that is not installed.\"\"\"\n+\n+ def __init__(self, message: str, cause: Optional[BaseException] = None):\n+ super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"DatasetModuleNotInstalledError\", cause, True)\n+\n+\n class ConfigNamesError(ConfigNamesJobRunnerError):\n \"\"\"Raised when the config names could not be fetched.\"\"\"\n \n@@ -77,6 +84,8 @@\n Raises the following errors:\n - [`~job_runners.config_names.EmptyDatasetError`]\n The dataset is empty.\n+ - [`~job_runners.config_names.DatasetModuleNotInstalledError`]\n+ The dataset tries to import a module that is not installed.\n - [`~job_runners.config_names.ConfigNamesError`]\n If the list of configs could not be obtained using the datasets library.\n </Tip>\n@@ -91,6 +100,10 @@\n ]\n except _EmptyDatasetError as err:\n raise EmptyDatasetError(\"The dataset is empty.\", cause=err) from err\n+ except ImportError as err:\n+ raise DatasetModuleNotInstalledError(\n+ \"The dataset tries to import a module that is not installed.\", cause=err\n+ ) from err\n except Exception as err:\n raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n return ConfigNamesResponse(config_names=config_name_items)\n", "issue": "No specific error when dataset tries to import a non-installed module\nWhen a dataset script tries to import a module/library that is not installed, there is no informative error message.\r\n\r\nSee:\r\n- #1067 \r\n- #1068\r\n\r\nRelated to:\r\n- #976\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom http import HTTPStatus\nfrom typing import Any, List, Literal, Mapping, Optional, TypedDict, Union\n\nfrom datasets import get_dataset_config_names\nfrom datasets.data_files import EmptyDatasetError as _EmptyDatasetError\nfrom libcommon.constants import PROCESSING_STEP_CONFIG_NAMES_VERSION\nfrom libcommon.simple_cache import SplitFullName\n\nfrom worker.job_runner import CompleteJobResult, JobRunnerError, ParameterMissingError\nfrom worker.job_runners._datasets_based_job_runner import DatasetsBasedJobRunner\n\nConfigNamesJobRunnerErrorCode = Literal[\"EmptyDatasetError\", \"ConfigNamesError\"]\n\n\nclass ConfigNamesJobRunnerError(JobRunnerError):\n \"\"\"Base class for job runner exceptions.\"\"\"\n\n def __init__(\n self,\n message: str,\n status_code: HTTPStatus,\n code: ConfigNamesJobRunnerErrorCode,\n cause: Optional[BaseException] = None,\n disclose_cause: bool = False,\n ):\n super().__init__(\n message=message, status_code=status_code, 
code=code, cause=cause, disclose_cause=disclose_cause\n )\n\n\nclass EmptyDatasetError(ConfigNamesJobRunnerError):\n \"\"\"Raised when the dataset has no data.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"EmptyDatasetError\", cause, True)\n\n\nclass ConfigNamesError(ConfigNamesJobRunnerError):\n \"\"\"Raised when the config names could not be fetched.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"ConfigNamesError\", cause, True)\n\n\nclass ConfigNameItem(TypedDict):\n dataset: str\n config: str\n\n\nclass ConfigNamesResponse(TypedDict):\n config_names: List[ConfigNameItem]\n\n\ndef compute_config_names_response(\n dataset: str,\n hf_token: Optional[str] = None,\n) -> ConfigNamesResponse:\n \"\"\"\n Get the response of /config-names for one specific dataset on huggingface.co.\n Dataset can be private or gated if you pass an acceptable token.\n\n It is assumed that the dataset exists and can be accessed using the token.\n\n Args:\n dataset (`str`):\n A namespace (user or an organization) and a repo name separated\n by a `/`.\n hf_token (`str`, *optional*):\n An authentication token (See https://huggingface.co/settings/token)\n Returns:\n `ConfigNamesResponse`: An object with the list of config names.\n <Tip>\n Raises the following errors:\n - [`~job_runners.config_names.EmptyDatasetError`]\n The dataset is empty.\n - [`~job_runners.config_names.ConfigNamesError`]\n If the list of configs could not be obtained using the datasets library.\n </Tip>\n \"\"\"\n logging.info(f\"get config names for dataset={dataset}\")\n use_auth_token: Union[bool, str, None] = hf_token if hf_token is not None else False\n # get the list of splits in streaming mode\n try:\n config_name_items: List[ConfigNameItem] = [\n {\"dataset\": dataset, \"config\": str(config)}\n for config in sorted(get_dataset_config_names(path=dataset, use_auth_token=use_auth_token))\n ]\n except _EmptyDatasetError as err:\n raise EmptyDatasetError(\"The dataset is empty.\", cause=err) from err\n except Exception as err:\n raise ConfigNamesError(\"Cannot get the config names for the dataset.\", cause=err) from err\n return ConfigNamesResponse(config_names=config_name_items)\n\n\nclass ConfigNamesJobRunner(DatasetsBasedJobRunner):\n @staticmethod\n def get_job_type() -> str:\n return \"/config-names\"\n\n @staticmethod\n def get_job_runner_version() -> int:\n return PROCESSING_STEP_CONFIG_NAMES_VERSION\n\n def compute(self) -> CompleteJobResult:\n if self.dataset is None:\n raise ParameterMissingError(\"'dataset' parameter is required\")\n return CompleteJobResult(\n compute_config_names_response(dataset=self.dataset, hf_token=self.common_config.hf_token)\n )\n\n def get_new_splits(self, content: Mapping[str, Any]) -> set[SplitFullName]:\n \"\"\"Get the set of new splits, from the content created by the compute.\"\"\"\n return {SplitFullName(dataset=s[\"dataset\"], config=s[\"config\"], split=None) for s in content[\"config_names\"]}\n", "path": "services/worker/src/worker/job_runners/config_names.py"}]}
| 1,845 | 524 |
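The patch in the record above uses a common exception-translation pattern: catch the narrow `ImportError` before the broad `except Exception` fallback and re-raise it as a domain-specific error chained via `from err`. A minimal sketch of that ordering with generic names (not the worker's actual class hierarchy):

```python
class DatasetModuleNotInstalledError(Exception):
    """Raised when a dataset script imports a module that is not installed."""


class ConfigNamesError(Exception):
    """Generic fallback when config names cannot be fetched."""


def fetch_config_names(loader):
    try:
        return loader()
    except ImportError as err:
        # Must come before the broad handler, or it would be swallowed.
        raise DatasetModuleNotInstalledError(
            "The dataset tries to import a module that is not installed."
        ) from err
    except Exception as err:
        raise ConfigNamesError("Cannot get the config names.") from err


def broken_loader():
    import module_that_does_not_exist  # hypothetical missing dependency


try:
    fetch_config_names(broken_loader)
except DatasetModuleNotInstalledError as err:
    assert isinstance(err.__cause__, ImportError)
```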
gh_patches_debug_19065
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-2850
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is it possible to query Log Analytics via the az cli with a saved query?
I can’t tell from the documentation, is it possible to run a saved Log Analytics Query from this CLI command?
If not, a useful enhancement would be to enable the use a saved query in addition to the ability to execute queries in-line. The queries get long and cumbersome to maintain outside of Log Analytics.
If it is, however, possible to run a saved query, would you mind updating the documentation here? Thanks.
---
#### Document Details
⚠ *Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.*
* ID: f0fd6a58-ac1a-fa45-8d96-579b4af36499
* Version Independent ID: 4098ca97-1b85-eb29-18e9-e6f0495fd030
* Content: [az monitor log-analytics](https://docs.microsoft.com/en-us/cli/azure/ext/log-analytics/monitor/log-analytics?view=azure-cli-latest)
* Content Source: [latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml)
* GitHub Login: @rloutlaw
* Microsoft Alias: **routlaw**
</issue>
<code>
[start of src/log-analytics/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8 from codecs import open
9 from setuptools import setup, find_packages
10
11 VERSION = "0.2.1"
12
13 CLASSIFIERS = [
14 'Development Status :: 4 - Beta',
15 'Intended Audience :: Developers',
16 'Intended Audience :: System Administrators',
17 'Programming Language :: Python',
18 'Programming Language :: Python :: 2',
19 'Programming Language :: Python :: 2.7',
20 'Programming Language :: Python :: 3',
21 'Programming Language :: Python :: 3.4',
22 'Programming Language :: Python :: 3.5',
23 'Programming Language :: Python :: 3.6',
24 'License :: OSI Approved :: MIT License',
25 ]
26
27 DEPENDENCIES = []
28
29 with open('README.rst', 'r', encoding='utf-8') as f:
30 README = f.read()
31 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
32 HISTORY = f.read()
33
34 setup(
35 name='log-analytics',
36 version=VERSION,
37 description='Support for Azure Log Analytics query capabilities.',
38 long_description=README + '\n\n' + HISTORY,
39 license='MIT',
40 author='Ace Eldeib',
41 author_email='[email protected]',
42 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/log-analytics',
43 classifiers=CLASSIFIERS,
44 packages=find_packages(exclude=["tests"]),
45 package_data={'azext_loganalytics': ['azext_metadata.json']},
46 install_requires=DEPENDENCIES
47 )
48
[end of src/log-analytics/setup.py]
[start of src/log-analytics/azext_loganalytics/_help.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 from knack.help_files import helps
7
8 # pylint: disable=line-too-long
9
10 helps['monitor log-analytics'] = """
11 type: group
12 short-summary: Commands for querying data in Log Analytics workspaces.
13 """
14
15 helps['monitor log-analytics query'] = """
16 type: command
17 short-summary: Query a Log Analytics workspace.
18 examples:
19 - name: Execute a simple query over past 3.5 days.
20 text: |
21 az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query "AzureActivity | summarize count() by bin(timestamp, 1h)" -t P3DT12H
22 """
23
[end of src/log-analytics/azext_loganalytics/_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/log-analytics/azext_loganalytics/_help.py b/src/log-analytics/azext_loganalytics/_help.py
--- a/src/log-analytics/azext_loganalytics/_help.py
+++ b/src/log-analytics/azext_loganalytics/_help.py
@@ -18,5 +18,9 @@
examples:
- name: Execute a simple query over past 3.5 days.
text: |
- az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query "AzureActivity | summarize count() by bin(timestamp, 1h)" -t P3DT12H
+ az monitor log-analytics query -w workspace-customId --analytics-query "AzureActivity | summarize count() by bin(timestamp, 1h)" -t P3DT12H
+ - name: Execute a saved query in workspace
+ text: |
+ QUERY=$(az monitor log-analytics workspace saved-search show -g resource-group --workspace-name workspace-name -n query-name --query query --output tsv)
+ az monitor log-analytics query -w workspace-customId --analytics-query "$QUERY"
"""
diff --git a/src/log-analytics/setup.py b/src/log-analytics/setup.py
--- a/src/log-analytics/setup.py
+++ b/src/log-analytics/setup.py
@@ -8,7 +8,7 @@
from codecs import open
from setuptools import setup, find_packages
-VERSION = "0.2.1"
+VERSION = "0.2.2"
CLASSIFIERS = [
'Development Status :: 4 - Beta',
|
{"golden_diff": "diff --git a/src/log-analytics/azext_loganalytics/_help.py b/src/log-analytics/azext_loganalytics/_help.py\n--- a/src/log-analytics/azext_loganalytics/_help.py\n+++ b/src/log-analytics/azext_loganalytics/_help.py\n@@ -18,5 +18,9 @@\n examples:\n - name: Execute a simple query over past 3.5 days.\n text: |\n- az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query \"AzureActivity | summarize count() by bin(timestamp, 1h)\" -t P3DT12H\n+ az monitor log-analytics query -w workspace-customId --analytics-query \"AzureActivity | summarize count() by bin(timestamp, 1h)\" -t P3DT12H\n+ - name: Execute a saved query in workspace\n+ text: |\n+ QUERY=$(az monitor log-analytics workspace saved-search show -g resource-group --workspace-name workspace-name -n query-name --query query --output tsv)\n+ az monitor log-analytics query -w workspace-customId --analytics-query \"$QUERY\"\n \"\"\"\ndiff --git a/src/log-analytics/setup.py b/src/log-analytics/setup.py\n--- a/src/log-analytics/setup.py\n+++ b/src/log-analytics/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.2.1\"\n+VERSION = \"0.2.2\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "Is it possible to query Log Analytics via the az cli with a saved query?\n\r\nI can\u2019t tell from the documentation, is it possible to run a saved Log Analytics Query from this CLI command? \r\n\r\nIf not, a useful enhancement would be to enable the use a saved query in addition to the ability to execute queries in-line. The queries get long and cumbersome to maintain outside of Log Analytics.\r\n\r\nIf it is, however, possible to run a saved query, would you mind updating the documentation here? Thanks.\r\n\r\n\r\n---\r\n#### Document Details\r\n\r\n\u26a0 *Do not edit this section. It is required for docs.microsoft.com \u279f GitHub issue linking.*\r\n\r\n* ID: f0fd6a58-ac1a-fa45-8d96-579b4af36499\r\n* Version Independent ID: 4098ca97-1b85-eb29-18e9-e6f0495fd030\r\n* Content: [az monitor log-analytics](https://docs.microsoft.com/en-us/cli/azure/ext/log-analytics/monitor/log-analytics?view=azure-cli-latest)\r\n* Content Source: [latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/log-analytics/monitor/log-analytics.yml)\r\n* GitHub Login: @rloutlaw\r\n* Microsoft Alias: **routlaw**\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.2.1\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='log-analytics',\n version=VERSION,\n description='Support for Azure Log Analytics query capabilities.',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n author='Ace Eldeib',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/log-analytics',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n package_data={'azext_loganalytics': ['azext_metadata.json']},\n install_requires=DEPENDENCIES\n)\n", "path": "src/log-analytics/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom knack.help_files import helps\n\n# pylint: disable=line-too-long\n\nhelps['monitor log-analytics'] = \"\"\"\n type: group\n short-summary: Commands for querying data in Log Analytics workspaces.\n\"\"\"\n\nhelps['monitor log-analytics query'] = \"\"\"\n type: command\n short-summary: Query a Log Analytics workspace.\n examples:\n - name: Execute a simple query over past 3.5 days.\n text: |\n az monitor log-analytics query -w b8317023-66e4-4edc-8a5b-7c002b22f92f --analytics-query \"AzureActivity | summarize count() by bin(timestamp, 1h)\" -t P3DT12H\n\"\"\"\n", "path": "src/log-analytics/azext_loganalytics/_help.py"}]}
| 1,567 | 361 |
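The fix in the record above is documentation: it shows a two-step shell workflow (read the saved search's query text, then pass it to the query command). If the same flow were scripted from Python, a thin wrapper around those exact two `az` invocations might look like the sketch below (resource names are placeholders; error handling beyond `check=True` is omitted):

```python
import subprocess


def run_saved_query(resource_group, workspace_name, workspace_id, saved_search):
    """Fetch a saved Log Analytics search and execute it, mirroring the
    two commands from the help example above."""
    query = subprocess.run(
        [
            "az", "monitor", "log-analytics", "workspace", "saved-search", "show",
            "-g", resource_group, "--workspace-name", workspace_name,
            "-n", saved_search, "--query", "query", "--output", "tsv",
        ],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    result = subprocess.run(
        [
            "az", "monitor", "log-analytics", "query",
            "-w", workspace_id, "--analytics-query", query,
        ],
        capture_output=True, text=True, check=True,
    )
    return result.stdout
```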
gh_patches_debug_36638
|
rasdani/github-patches
|
git_diff
|
zigpy__zha-device-handlers-1479
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Aqara plug (lumi.plug.maeu01) generates errors post-2022.2
**Describe the bug**
I use this plug with HA 2022.2.3, where it's been updated to use the quirk for lumi.plug.mmeu01 after [this pull](https://github.com/zigpy/zha-device-handlers/pull/1252/commits).
There are errors popping up in the log after this update.
```
Logger: homeassistant.util.logging
Source: util/logging.py:105
First occurred: 4:34:56 PM (16 occurrences)
Last logged: 4:55:26 PM
Exception in async_state_changed when dispatching 'LUMI lumi.plug.maeu01_54:ef:44:10:00:0e:52:9d_available_entity': () Traceback (most recent call last):
File "/usr/src/homeassistant/homeassistant/components/zha/entity.py", line 107, in async_state_changed self.async_write_ha_state()
File "/usr/src/homeassistant/homeassistant/helpers/entity.py", line 530, in async_write_ha_state self._async_write_ha_state()
File "/usr/src/homeassistant/homeassistant/helpers/entity.py", line 563, in _async_write_ha_state state = self._stringify_state()
File "/usr/src/homeassistant/homeassistant/helpers/entity.py", line 536, in _stringify_state if (state := self.state) is None:
File "/usr/src/homeassistant/homeassistant/components/sensor/__init__.py", line 371, in state value = self.native_value
File "/usr/src/homeassistant/homeassistant/components/zha/sensor.py", line 175, in native_value return self.formatter(raw_state)
File "/usr/src/homeassistant/homeassistant/components/zha/sensor.py", line 472, in formatter return self._channel.summa_formatter(value)
File "/usr/src/homeassistant/homeassistant/components/zha/core/channels/smartenergy.py", line 196, in _formatter_function return self._summa_format.format(value).lstrip() AttributeError: 'NoneType' object has no attribute 'format'
```
**To Reproduce**
Steps to reproduce the behavior: unknown
**Additional context**
```
{
"node_descriptor": "NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4447, maximum_buffer_size=127, maximum_incoming_transfer_size=100, server_mask=11264, maximum_outgoing_transfer_size=100, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)",
"endpoints": {
"1": {
"profile_id": 260,
"device_type": "0x0051",
"in_clusters": [
"0x0000",
"0x0002",
"0x0003",
"0x0004",
"0x0005",
"0x0006",
"0x0009",
"0x0702",
"0x0b04"
],
"out_clusters": [
"0x000a",
"0x0019"
]
},
"21": {
"profile_id": 260,
"device_type": "0x0009",
"in_clusters": [
"0x000c"
],
"out_clusters": [
"0x0004",
"0x000c"
]
},
"242": {
"profile_id": 41440,
"device_type": "0x0061",
"in_clusters": [],
"out_clusters": [
"0x0021"
]
}
},
"manufacturer": "LUMI",
"model": "lumi.plug.maeu01",
"class": "zhaquirks.xiaomi.aqara.plug_mmeu01.Plug"
}
```
</issue>
<code>
[start of zhaquirks/xiaomi/aqara/plug_mmeu01.py]
1 """Xiaomi lumi.plug.mmeu01 plug."""
2 import logging
3
4 from zigpy.profiles import zha
5 from zigpy.zcl.clusters.general import (
6 Alarms,
7 AnalogInput,
8 Basic,
9 DeviceTemperature,
10 GreenPowerProxy,
11 Groups,
12 Identify,
13 OnOff,
14 Ota,
15 Scenes,
16 Time,
17 )
18 from zigpy.zcl.clusters.homeautomation import ElectricalMeasurement
19 from zigpy.zcl.clusters.smartenergy import Metering
20
21 from zhaquirks import Bus
22 from zhaquirks.const import (
23 DEVICE_TYPE,
24 ENDPOINTS,
25 INPUT_CLUSTERS,
26 MODELS_INFO,
27 OUTPUT_CLUSTERS,
28 PROFILE_ID,
29 SKIP_CONFIGURATION,
30 )
31 from zhaquirks.xiaomi import (
32 LUMI,
33 AnalogInputCluster,
34 BasicCluster,
35 ElectricalMeasurementCluster,
36 XiaomiCustomDevice,
37 )
38
39 _LOGGER = logging.getLogger(__name__)
40
41 XIAOMI_PROFILE_ID = 0xA1E0
42 XIAOMI_DEVICE_TYPE = 0x61
43
44
45 class Plug(XiaomiCustomDevice):
46 """lumi.plug.mmeu01 plug."""
47
48 def __init__(self, *args, **kwargs):
49 """Init."""
50 self.voltage_bus = Bus()
51 self.consumption_bus = Bus()
52 self.power_bus = Bus()
53 super().__init__(*args, **kwargs)
54
55 signature = {
56 MODELS_INFO: [
57 (LUMI, "lumi.plug.mmeu01"),
58 (LUMI, "lumi.plug.maeu01"),
59 ],
60 ENDPOINTS: {
61 # <SimpleDescriptor endpoint=1 profile=260 device_type=81
62 # device_version=1
63 # input_clusters=[0, 2, 3, 4, 5, 6, 9, 1794, 2820]
64 # output_clusters=[10, 25]>
65 1: {
66 PROFILE_ID: zha.PROFILE_ID,
67 DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
68 INPUT_CLUSTERS: [
69 Basic.cluster_id,
70 DeviceTemperature.cluster_id,
71 Identify.cluster_id,
72 Groups.cluster_id,
73 Scenes.cluster_id,
74 OnOff.cluster_id,
75 Alarms.cluster_id,
76 Metering.cluster_id,
77 ElectricalMeasurement.cluster_id,
78 ],
79 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
80 },
81 # <SimpleDescriptor endpoint=242 profile=41440 device_type=97
82 # device_version=0
83 # input_clusters=[]
84 # output_clusters=[33]>
85 242: {
86 PROFILE_ID: XIAOMI_PROFILE_ID,
87 DEVICE_TYPE: XIAOMI_DEVICE_TYPE,
88 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
89 },
90 },
91 }
92 replacement = {
93 SKIP_CONFIGURATION: True,
94 ENDPOINTS: {
95 1: {
96 PROFILE_ID: zha.PROFILE_ID,
97 DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
98 INPUT_CLUSTERS: [
99 BasicCluster,
100 DeviceTemperature.cluster_id,
101 Identify.cluster_id,
102 Groups.cluster_id,
103 Scenes.cluster_id,
104 OnOff.cluster_id,
105 Alarms.cluster_id,
106 Metering.cluster_id,
107 ElectricalMeasurementCluster,
108 ],
109 OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
110 },
111 21: {
112 PROFILE_ID: zha.PROFILE_ID,
113 DEVICE_TYPE: zha.DeviceType.MAIN_POWER_OUTLET,
114 INPUT_CLUSTERS: [AnalogInputCluster],
115 OUTPUT_CLUSTERS: [AnalogInput.cluster_id, Groups.cluster_id],
116 },
117 242: {
118 PROFILE_ID: XIAOMI_PROFILE_ID,
119 DEVICE_TYPE: XIAOMI_DEVICE_TYPE,
120 OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
121 },
122 },
123 }
124
[end of zhaquirks/xiaomi/aqara/plug_mmeu01.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/zhaquirks/xiaomi/aqara/plug_mmeu01.py b/zhaquirks/xiaomi/aqara/plug_mmeu01.py
--- a/zhaquirks/xiaomi/aqara/plug_mmeu01.py
+++ b/zhaquirks/xiaomi/aqara/plug_mmeu01.py
@@ -2,6 +2,7 @@
import logging
from zigpy.profiles import zha
+import zigpy.types as types
from zigpy.zcl.clusters.general import (
Alarms,
AnalogInput,
@@ -33,6 +34,7 @@
AnalogInputCluster,
BasicCluster,
ElectricalMeasurementCluster,
+ XiaomiAqaraE1Cluster,
XiaomiCustomDevice,
)
@@ -40,6 +42,7 @@
XIAOMI_PROFILE_ID = 0xA1E0
XIAOMI_DEVICE_TYPE = 0x61
+OPPLE_MFG_CODE = 0x115F
class Plug(XiaomiCustomDevice):
@@ -55,7 +58,6 @@
signature = {
MODELS_INFO: [
(LUMI, "lumi.plug.mmeu01"),
- (LUMI, "lumi.plug.maeu01"),
],
ENDPOINTS: {
# <SimpleDescriptor endpoint=1 profile=260 device_type=81
@@ -121,3 +123,58 @@
},
},
}
+
+
+class OppleCluster(XiaomiAqaraE1Cluster):
+ """Opple cluster."""
+
+ ep_attribute = "opple_cluster"
+ attributes = {
+ 0x0009: ("mode", types.uint8_t, True),
+ }
+ attr_config = {0x0009: 0x00}
+
+ async def bind(self):
+ """Bind cluster."""
+ result = await super().bind()
+ await self.write_attributes(self.attr_config, manufacturer=OPPLE_MFG_CODE)
+ return result
+
+
+class PlugMAEU01(Plug):
+ """lumi.plug.maeu01 plug."""
+
+ signature = {
+ MODELS_INFO: [
+ (LUMI, "lumi.plug.maeu01"),
+ ],
+ ENDPOINTS: Plug.signature[ENDPOINTS],
+ }
+
+ replacement = {
+ SKIP_CONFIGURATION: False,
+ ENDPOINTS: {
+ 1: {
+ PROFILE_ID: zha.PROFILE_ID,
+ DEVICE_TYPE: zha.DeviceType.SMART_PLUG,
+ INPUT_CLUSTERS: [
+ Basic.cluster_id,
+ DeviceTemperature.cluster_id,
+ Identify.cluster_id,
+ Groups.cluster_id,
+ Scenes.cluster_id,
+ OnOff.cluster_id,
+ Alarms.cluster_id,
+ Metering.cluster_id,
+ ElectricalMeasurement.cluster_id,
+ OppleCluster,
+ ],
+ OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],
+ },
+ 242: {
+ PROFILE_ID: XIAOMI_PROFILE_ID,
+ DEVICE_TYPE: XIAOMI_DEVICE_TYPE,
+ OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],
+ },
+ },
+ }
|
{"golden_diff": "diff --git a/zhaquirks/xiaomi/aqara/plug_mmeu01.py b/zhaquirks/xiaomi/aqara/plug_mmeu01.py\n--- a/zhaquirks/xiaomi/aqara/plug_mmeu01.py\n+++ b/zhaquirks/xiaomi/aqara/plug_mmeu01.py\n@@ -2,6 +2,7 @@\n import logging\n \n from zigpy.profiles import zha\n+import zigpy.types as types\n from zigpy.zcl.clusters.general import (\n Alarms,\n AnalogInput,\n@@ -33,6 +34,7 @@\n AnalogInputCluster,\n BasicCluster,\n ElectricalMeasurementCluster,\n+ XiaomiAqaraE1Cluster,\n XiaomiCustomDevice,\n )\n \n@@ -40,6 +42,7 @@\n \n XIAOMI_PROFILE_ID = 0xA1E0\n XIAOMI_DEVICE_TYPE = 0x61\n+OPPLE_MFG_CODE = 0x115F\n \n \n class Plug(XiaomiCustomDevice):\n@@ -55,7 +58,6 @@\n signature = {\n MODELS_INFO: [\n (LUMI, \"lumi.plug.mmeu01\"),\n- (LUMI, \"lumi.plug.maeu01\"),\n ],\n ENDPOINTS: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=81\n@@ -121,3 +123,58 @@\n },\n },\n }\n+\n+\n+class OppleCluster(XiaomiAqaraE1Cluster):\n+ \"\"\"Opple cluster.\"\"\"\n+\n+ ep_attribute = \"opple_cluster\"\n+ attributes = {\n+ 0x0009: (\"mode\", types.uint8_t, True),\n+ }\n+ attr_config = {0x0009: 0x00}\n+\n+ async def bind(self):\n+ \"\"\"Bind cluster.\"\"\"\n+ result = await super().bind()\n+ await self.write_attributes(self.attr_config, manufacturer=OPPLE_MFG_CODE)\n+ return result\n+\n+\n+class PlugMAEU01(Plug):\n+ \"\"\"lumi.plug.maeu01 plug.\"\"\"\n+\n+ signature = {\n+ MODELS_INFO: [\n+ (LUMI, \"lumi.plug.maeu01\"),\n+ ],\n+ ENDPOINTS: Plug.signature[ENDPOINTS],\n+ }\n+\n+ replacement = {\n+ SKIP_CONFIGURATION: False,\n+ ENDPOINTS: {\n+ 1: {\n+ PROFILE_ID: zha.PROFILE_ID,\n+ DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n+ INPUT_CLUSTERS: [\n+ Basic.cluster_id,\n+ DeviceTemperature.cluster_id,\n+ Identify.cluster_id,\n+ Groups.cluster_id,\n+ Scenes.cluster_id,\n+ OnOff.cluster_id,\n+ Alarms.cluster_id,\n+ Metering.cluster_id,\n+ ElectricalMeasurement.cluster_id,\n+ OppleCluster,\n+ ],\n+ OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n+ },\n+ 242: {\n+ PROFILE_ID: XIAOMI_PROFILE_ID,\n+ DEVICE_TYPE: XIAOMI_DEVICE_TYPE,\n+ OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n+ },\n+ },\n+ }\n", "issue": "[BUG] Aqara plug (lumi.plug.maeu01) generates errors post-2022.2\n**Describe the bug**\r\nI use this plug with HA 2022.2.3, where it's been updated to use the quirk for lumi.plug.mmeu01 after [this pull](https://github.com/zigpy/zha-device-handlers/pull/1252/commits).\r\n\r\nThere are errors popping up in the log after this update.\r\n\r\n```\r\nLogger: homeassistant.util.logging\r\nSource: util/logging.py:105\r\nFirst occurred: 4:34:56 PM (16 occurrences)\r\nLast logged: 4:55:26 PM\r\n\r\nException in async_state_changed when dispatching 'LUMI lumi.plug.maeu01_54:ef:44:10:00:0e:52:9d_available_entity': () Traceback (most recent call last): \r\n File \"/usr/src/homeassistant/homeassistant/components/zha/entity.py\", line 107, in async_state_changed self.async_write_ha_state() \r\n File \"/usr/src/homeassistant/homeassistant/helpers/entity.py\", line 530, in async_write_ha_state self._async_write_ha_state() \r\n File \"/usr/src/homeassistant/homeassistant/helpers/entity.py\", line 563, in _async_write_ha_state state = self._stringify_state() \r\n File \"/usr/src/homeassistant/homeassistant/helpers/entity.py\", line 536, in _stringify_state if (state := self.state) is None: \r\n File \"/usr/src/homeassistant/homeassistant/components/sensor/__init__.py\", line 371, in state value = self.native_value \r\n File \"/usr/src/homeassistant/homeassistant/components/zha/sensor.py\", line 175, in native_value return 
self.formatter(raw_state) \r\n File \"/usr/src/homeassistant/homeassistant/components/zha/sensor.py\", line 472, in formatter return self._channel.summa_formatter(value) \r\n File \"/usr/src/homeassistant/homeassistant/components/zha/core/channels/smartenergy.py\", line 196, in _formatter_function return self._summa_format.format(value).lstrip() AttributeError: 'NoneType' object has no attribute 'format'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior: unknown\r\n\r\n**Additional context**\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(logical_type=<LogicalType.Router: 1>, complex_descriptor_available=0, user_descriptor_available=0, reserved=0, aps_flags=0, frequency_band=<FrequencyBand.Freq2400MHz: 8>, mac_capability_flags=<MACCapabilityFlags.AllocateAddress|RxOnWhenIdle|MainsPowered|FullFunctionDevice: 142>, manufacturer_code=4447, maximum_buffer_size=127, maximum_incoming_transfer_size=100, server_mask=11264, maximum_outgoing_transfer_size=100, descriptor_capability_field=<DescriptorCapability.NONE: 0>, *allocate_address=True, *is_alternate_pan_coordinator=False, *is_coordinator=False, *is_end_device=False, *is_full_function_device=True, *is_mains_powered=True, *is_receiver_on_when_idle=True, *is_router=True, *is_security_capable=False)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0051\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0002\",\r\n \"0x0003\",\r\n \"0x0004\",\r\n \"0x0005\",\r\n \"0x0006\",\r\n \"0x0009\",\r\n \"0x0702\",\r\n \"0x0b04\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x000a\",\r\n \"0x0019\"\r\n ]\r\n },\r\n \"21\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0009\",\r\n \"in_clusters\": [\r\n \"0x000c\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0004\",\r\n \"0x000c\"\r\n ]\r\n },\r\n \"242\": {\r\n \"profile_id\": 41440,\r\n \"device_type\": \"0x0061\",\r\n \"in_clusters\": [],\r\n \"out_clusters\": [\r\n \"0x0021\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"LUMI\",\r\n \"model\": \"lumi.plug.maeu01\",\r\n \"class\": \"zhaquirks.xiaomi.aqara.plug_mmeu01.Plug\"\r\n}\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Xiaomi lumi.plug.mmeu01 plug.\"\"\"\nimport logging\n\nfrom zigpy.profiles import zha\nfrom zigpy.zcl.clusters.general import (\n Alarms,\n AnalogInput,\n Basic,\n DeviceTemperature,\n GreenPowerProxy,\n Groups,\n Identify,\n OnOff,\n Ota,\n Scenes,\n Time,\n)\nfrom zigpy.zcl.clusters.homeautomation import ElectricalMeasurement\nfrom zigpy.zcl.clusters.smartenergy import Metering\n\nfrom zhaquirks import Bus\nfrom zhaquirks.const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n SKIP_CONFIGURATION,\n)\nfrom zhaquirks.xiaomi import (\n LUMI,\n AnalogInputCluster,\n BasicCluster,\n ElectricalMeasurementCluster,\n XiaomiCustomDevice,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nXIAOMI_PROFILE_ID = 0xA1E0\nXIAOMI_DEVICE_TYPE = 0x61\n\n\nclass Plug(XiaomiCustomDevice):\n \"\"\"lumi.plug.mmeu01 plug.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Init.\"\"\"\n self.voltage_bus = Bus()\n self.consumption_bus = Bus()\n self.power_bus = Bus()\n super().__init__(*args, **kwargs)\n\n signature = {\n MODELS_INFO: [\n (LUMI, \"lumi.plug.mmeu01\"),\n (LUMI, \"lumi.plug.maeu01\"),\n ],\n ENDPOINTS: {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=81\n # device_version=1\n # input_clusters=[0, 2, 3, 4, 5, 6, 9, 1794, 2820]\n # output_clusters=[10, 25]>\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: 
zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n DeviceTemperature.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n Alarms.cluster_id,\n Metering.cluster_id,\n ElectricalMeasurement.cluster_id,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n # <SimpleDescriptor endpoint=242 profile=41440 device_type=97\n # device_version=0\n # input_clusters=[]\n # output_clusters=[33]>\n 242: {\n PROFILE_ID: XIAOMI_PROFILE_ID,\n DEVICE_TYPE: XIAOMI_DEVICE_TYPE,\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n replacement = {\n SKIP_CONFIGURATION: True,\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SMART_PLUG,\n INPUT_CLUSTERS: [\n BasicCluster,\n DeviceTemperature.cluster_id,\n Identify.cluster_id,\n Groups.cluster_id,\n Scenes.cluster_id,\n OnOff.cluster_id,\n Alarms.cluster_id,\n Metering.cluster_id,\n ElectricalMeasurementCluster,\n ],\n OUTPUT_CLUSTERS: [Time.cluster_id, Ota.cluster_id],\n },\n 21: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.MAIN_POWER_OUTLET,\n INPUT_CLUSTERS: [AnalogInputCluster],\n OUTPUT_CLUSTERS: [AnalogInput.cluster_id, Groups.cluster_id],\n },\n 242: {\n PROFILE_ID: XIAOMI_PROFILE_ID,\n DEVICE_TYPE: XIAOMI_DEVICE_TYPE,\n OUTPUT_CLUSTERS: [GreenPowerProxy.cluster_id],\n },\n },\n }\n", "path": "zhaquirks/xiaomi/aqara/plug_mmeu01.py"}]}
| 2,745 | 746 |
gh_patches_debug_18209 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-4334 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] HTTP 500 in Tika for some OpenDocument Documents
### Description
When recreating my local paperless instance, I had to rerun the document archival and thumbnail jobs for all documents.
Those failed for all OpenDocument documents among my originals.
I noticed this is related to the latest changes in paperless_tika/parser.py; after testing with the parser.py from 1.15, everything worked.
I am already working on further investigation on my fork and will start the corresponding pull request.
### Steps to reproduce
upload ODT document -> parsing fails
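A rough standalone reproduction of the failing call, assuming a local Tika server on port 9998 and the same `tika-client` calls that `paperless_tika/parsers.py` uses (the file name and mime type below are placeholders):

```python
from pathlib import Path

from tika_client import TikaClient

# Mirrors TikaDocumentParser.parse(): the multi-part from_file upload is
# the code path that returns the 500 for some OpenDocument files.
with TikaClient(tika_url="http://localhost:9998") as client:
    parsed = client.tika.as_text.from_file(
        Path("sample.odt"),  # placeholder ODT file
        "application/vnd.oasis.opendocument.text",
    )
    print(parsed.content)
```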
### Webserver logs
```bash
---TIKA---
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1440)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:190)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1355)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:191)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)
at org.eclipse.jetty.server.Server.handle(Server.java:516)
at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:487)
at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:732)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:479)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:277)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)
at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
at java.base/java.lang.Thread.run(Thread.java:833)
Caused by: java.io.IOException: Stream Closed
at java.base/java.io.FileInputStream.available0(Native Method)
at java.base/java.io.FileInputStream.available(FileInputStream.java:415)
at org.apache.cxf.attachment.DelegatingInputStream.available(DelegatingInputStream.java:75)
at org.apache.cxf.helpers.IOUtils.consume(IOUtils.java:382)
at org.apache.cxf.attachment.DelegatingInputStream.close(DelegatingInputStream.java:46)
at org.apache.tika.server.core.resource.TikaResource.parse(TikaResource.java:374)
at org.apache.tika.server.core.resource.TikaResource.parseToMetadata(TikaResource.java:611)
at org.apache.tika.server.core.resource.TikaResource.getJsonFromMultipart(TikaResource.java:564)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:568)
at org.apache.cxf.service.invoker.AbstractInvoker.performInvocation(AbstractInvoker.java:179)
at org.apache.cxf.service.invoker.AbstractInvoker.invoke(AbstractInvoker.java:96)
... 28 more
---PAPERLESS---
[2023-07-06 17:57:46,034] [INFO] [paperless.consumer] Consuming 2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt
[2023-07-06 17:57:46,112] [INFO] [paperless.parsing.tika] Sending /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt to Tika server
[2023-07-06 17:57:47,289] [ERROR] [paperless.consumer] Error while consuming document 2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt: Could not parse /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt with tika server at http://localhost:9998: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'
For more information check: https://httpstatuses.com/500
Traceback (most recent call last):
File "/usr/src/paperless/src/paperless_tika/parsers.py", line 54, in parse
parsed = client.tika.as_text.from_file(document_path, mime_type)
File "/usr/local/lib/python3.9/site-packages/tika_client/_resource_tika.py", line 36, in from_file
return self._decoded_response(self._put_multipart(self.MULTI_PART_PLAIN_TEXT_CONTENT, filepath, mime_type))
File "/usr/local/lib/python3.9/site-packages/tika_client/_utils.py", line 46, in _put_multipart
resp.raise_for_status()
File "/usr/local/lib/python3.9/site-packages/httpx/_models.py", line 749, in raise_for_status
raise HTTPStatusError(message, request=request, response=self)
httpx.HTTPStatusError: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'
For more information check: https://httpstatuses.com/500
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/src/paperless/src/documents/consumer.py", line 382, in try_consume_file
document_parser.parse(self.path, mime_type, self.filename)
File "/usr/src/paperless/src/paperless_tika/parsers.py", line 56, in parse
raise ParseError(
documents.parsers.ParseError: Could not parse /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt with tika server at http://localhost:9998: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'
For more information check: https://httpstatuses.com/500
[2023-07-06 17:57:47,309] [ERROR] [celery.app.trace] Task documents.tasks.consume_file[1a0c8479-65a4-4de7-a431-29ecb537a030] raised unexpected: ConsumerError("2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt: Error while consuming document 2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt: Could not parse /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universität Mainz Datenbank Praktikum Gruppe 3.odt with tika server at http://localhost:9998: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'\nFor more information check: https://httpstatuses.com/500")
Traceback (most recent call last):
File "/usr/src/paperless/src/paperless_tika/parsers.py", line 54, in parse
parsed = client.tika.as_text.from_file(document_path, mime_type)
File "/usr/local/lib/python3.9/site-packages/tika_client/_resource_tika.py", line 36, in from_file
return self._decoded_response(self._put_multipart(self.MULTI_PART_PLAIN_TEXT_CONTENT, filepath, mime_type))
File "/usr/local/lib/python3.9/site-packages/tika_client/_utils.py", line 46, in _put_multipart
resp.raise_for_status()
File "/usr/local/lib/python3.9/site-packages/httpx/_models.py", line 749, in raise_for_status
raise HTTPStatusError(message, request=request, response=self)
httpx.HTTPStatusError: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'
For more information check: https://httpstatuses.com/500
```
### Browser logs
_No response_
### Paperless-ngx version
1.16.5
### Host OS
Truenas
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
</issue>
<code>
[start of src/paperless_tika/parsers.py]
1 import os
2 from pathlib import Path
3
4 import httpx
5 from django.conf import settings
6 from django.utils import timezone
7 from tika_client import TikaClient
8
9 from documents.parsers import DocumentParser
10 from documents.parsers import ParseError
11 from documents.parsers import make_thumbnail_from_pdf
12
13
14 class TikaDocumentParser(DocumentParser):
15 """
16 This parser sends documents to a local tika server
17 """
18
19 logging_name = "paperless.parsing.tika"
20
21 def get_thumbnail(self, document_path, mime_type, file_name=None):
22 if not self.archive_path:
23 self.archive_path = self.convert_to_pdf(document_path, file_name)
24
25 return make_thumbnail_from_pdf(
26 self.archive_path,
27 self.tempdir,
28 self.logging_group,
29 )
30
31 def extract_metadata(self, document_path, mime_type):
32 try:
33 with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
34 parsed = client.metadata.from_file(document_path, mime_type)
35 return [
36 {
37 "namespace": "",
38 "prefix": "",
39 "key": key,
40 "value": parsed.data[key],
41 }
42 for key in parsed.data
43 ]
44 except Exception as e:
45 self.log.warning(
46 f"Error while fetching document metadata for {document_path}: {e}",
47 )
48 return []
49
50 def parse(self, document_path: Path, mime_type: str, file_name=None):
51 self.log.info(f"Sending {document_path} to Tika server")
52
53 try:
54 with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
55 parsed = client.tika.as_text.from_file(document_path, mime_type)
56 except Exception as err:
57 raise ParseError(
58 f"Could not parse {document_path} with tika server at "
59 f"{settings.TIKA_ENDPOINT}: {err}",
60 ) from err
61
62 self.text = parsed.content
63 if self.text is not None:
64 self.text = self.text.strip()
65
66 self.date = parsed.created
67 if self.date is not None and timezone.is_naive(self.date):
68 self.date = timezone.make_aware(self.date)
69
70 self.archive_path = self.convert_to_pdf(document_path, file_name)
71
72 def convert_to_pdf(self, document_path, file_name):
73 pdf_path = os.path.join(self.tempdir, "convert.pdf")
74 gotenberg_server = settings.TIKA_GOTENBERG_ENDPOINT
75 url = gotenberg_server + "/forms/libreoffice/convert"
76
77 self.log.info(f"Converting {document_path} to PDF as {pdf_path}")
78 with open(document_path, "rb") as document_handle:
79 files = {
80 "files": (
81 "convert" + os.path.splitext(document_path)[-1],
82 document_handle,
83 ),
84 }
85 headers = {}
86 data = {}
87
88 # Set the output format of the resulting PDF
89 # Valid inputs: https://gotenberg.dev/docs/modules/pdf-engines#uno
90 if settings.OCR_OUTPUT_TYPE in {"pdfa", "pdfa-2"}:
91 data["pdfFormat"] = "PDF/A-2b"
92 elif settings.OCR_OUTPUT_TYPE == "pdfa-1":
93 data["pdfFormat"] = "PDF/A-1a"
94 elif settings.OCR_OUTPUT_TYPE == "pdfa-3":
95 data["pdfFormat"] = "PDF/A-3b"
96
97 try:
98 response = httpx.post(
99 url,
100 files=files,
101 headers=headers,
102 data=data,
103 timeout=settings.CELERY_TASK_TIME_LIMIT,
104 )
105 response.raise_for_status() # ensure we notice bad responses
106 except Exception as err:
107 raise ParseError(
108 f"Error while converting document to PDF: {err}",
109 ) from err
110
111 with open(pdf_path, "wb") as file:
112 file.write(response.content)
113 file.close()
114
115 return pdf_path
116
[end of src/paperless_tika/parsers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/paperless_tika/parsers.py b/src/paperless_tika/parsers.py
--- a/src/paperless_tika/parsers.py
+++ b/src/paperless_tika/parsers.py
@@ -52,7 +52,18 @@
try:
with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:
- parsed = client.tika.as_text.from_file(document_path, mime_type)
+ try:
+ parsed = client.tika.as_text.from_file(document_path, mime_type)
+ except httpx.HTTPStatusError as err:
+ # Workaround https://issues.apache.org/jira/browse/TIKA-4110
+ # Tika fails with some files as multi-part form data
+ if err.response.status_code == httpx.codes.INTERNAL_SERVER_ERROR:
+ parsed = client.tika.as_text.from_buffer(
+ document_path.read_bytes(),
+ mime_type,
+ )
+ else: # pragma: nocover
+ raise
except Exception as err:
raise ParseError(
f"Could not parse {document_path} with tika server at "
|
{"golden_diff": "diff --git a/src/paperless_tika/parsers.py b/src/paperless_tika/parsers.py\n--- a/src/paperless_tika/parsers.py\n+++ b/src/paperless_tika/parsers.py\n@@ -52,7 +52,18 @@\n \n try:\n with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:\n- parsed = client.tika.as_text.from_file(document_path, mime_type)\n+ try:\n+ parsed = client.tika.as_text.from_file(document_path, mime_type)\n+ except httpx.HTTPStatusError as err:\n+ # Workaround https://issues.apache.org/jira/browse/TIKA-4110\n+ # Tika fails with some files as multi-part form data\n+ if err.response.status_code == httpx.codes.INTERNAL_SERVER_ERROR:\n+ parsed = client.tika.as_text.from_buffer(\n+ document_path.read_bytes(),\n+ mime_type,\n+ )\n+ else: # pragma: nocover\n+ raise\n except Exception as err:\n raise ParseError(\n f\"Could not parse {document_path} with tika server at \"\n", "issue": "[BUG] HTTP 500 in Tika for some OpenDocument Documents\n### Description\n\nWhen recreating my local paperless instance, I had to rerun the document archival and thumbnail jobs for all documents.\r\n\r\nThose failed for all OpenDocument Documents from my originals. \r\n\r\nI noticed this being related to the latest changes in paperless_tika/parser.py. After testing the parser.py from 1.15 all worked.\r\n\r\nI am already working on further investigation on my fork and will start the corresponding pull request.\r\n\r\n\n\n### Steps to reproduce\n\nupload ODT document -> parsing fails\n\n### Webserver logs\n\n```bash\n---TIKA---\r\n\tat org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1440)\r\n\tat org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:190)\r\n\tat org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1355)\r\n\tat org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)\r\n\tat org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:191)\r\n\tat org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127)\r\n\tat org.eclipse.jetty.server.Server.handle(Server.java:516)\r\n\tat org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:487)\r\n\tat org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:732)\r\n\tat org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:479)\r\n\tat org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:277)\r\n\tat org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311)\r\n\tat org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105)\r\n\tat org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104)\r\n\tat org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)\r\n\tat org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)\r\n\tat java.base/java.lang.Thread.run(Thread.java:833)\r\nCaused by: java.io.IOException: Stream Closed\r\n\tat java.base/java.io.FileInputStream.available0(Native Method)\r\n\tat java.base/java.io.FileInputStream.available(FileInputStream.java:415)\r\n\tat org.apache.cxf.attachment.DelegatingInputStream.available(DelegatingInputStream.java:75)\r\n\tat org.apache.cxf.helpers.IOUtils.consume(IOUtils.java:382)\r\n\tat org.apache.cxf.attachment.DelegatingInputStream.close(DelegatingInputStream.java:46)\r\n\tat org.apache.tika.server.core.resource.TikaResource.parse(TikaResource.java:374)\r\n\tat 
org.apache.tika.server.core.resource.TikaResource.parseToMetadata(TikaResource.java:611)\r\n\tat org.apache.tika.server.core.resource.TikaResource.getJsonFromMultipart(TikaResource.java:564)\r\n\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)\r\n\tat java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)\r\n\tat java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)\r\n\tat java.base/java.lang.reflect.Method.invoke(Method.java:568)\r\n\tat org.apache.cxf.service.invoker.AbstractInvoker.performInvocation(AbstractInvoker.java:179)\r\n\tat org.apache.cxf.service.invoker.AbstractInvoker.invoke(AbstractInvoker.java:96)\r\n\t... 28 more\r\n\r\n---PAPERLESS---\r\n[2023-07-06 17:57:46,034] [INFO] [paperless.consumer] Consuming 2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt\r\n[2023-07-06 17:57:46,112] [INFO] [paperless.parsing.tika] Sending /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt to Tika server\r\n[2023-07-06 17:57:47,289] [ERROR] [paperless.consumer] Error while consuming document 2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt: Could not parse /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt with tika server at http://localhost:9998: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'\r\nFor more information check: https://httpstatuses.com/500\r\nTraceback (most recent call last):\r\n File \"/usr/src/paperless/src/paperless_tika/parsers.py\", line 54, in parse\r\n parsed = client.tika.as_text.from_file(document_path, mime_type)\r\n File \"/usr/local/lib/python3.9/site-packages/tika_client/_resource_tika.py\", line 36, in from_file\r\n return self._decoded_response(self._put_multipart(self.MULTI_PART_PLAIN_TEXT_CONTENT, filepath, mime_type))\r\n File \"/usr/local/lib/python3.9/site-packages/tika_client/_utils.py\", line 46, in _put_multipart\r\n resp.raise_for_status()\r\n File \"/usr/local/lib/python3.9/site-packages/httpx/_models.py\", line 749, in raise_for_status\r\n raise HTTPStatusError(message, request=request, response=self)\r\nhttpx.HTTPStatusError: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'\r\nFor more information check: https://httpstatuses.com/500\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/src/paperless/src/documents/consumer.py\", line 382, in try_consume_file\r\n document_parser.parse(self.path, mime_type, self.filename)\r\n File \"/usr/src/paperless/src/paperless_tika/parsers.py\", line 56, in parse\r\n raise ParseError(\r\ndocuments.parsers.ParseError: Could not parse /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt with tika server at http://localhost:9998: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'\r\nFor more information check: https://httpstatuses.com/500\r\n[2023-07-06 17:57:47,309] [ERROR] [celery.app.trace] Task documents.tasks.consume_file[1a0c8479-65a4-4de7-a431-29ecb537a030] raised unexpected: ConsumerError(\"2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt: Error while consuming document 2023-03-28 Johannes 
Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt: Could not parse /tmp/paperless/paperless-ngxzed2cbo7/2023-03-28 Johannes Gutenberg-Universit\u00e4t Mainz Datenbank Praktikum Gruppe 3.odt with tika server at http://localhost:9998: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'\\nFor more information check: https://httpstatuses.com/500\")\r\nTraceback (most recent call last):\r\n File \"/usr/src/paperless/src/paperless_tika/parsers.py\", line 54, in parse\r\n parsed = client.tika.as_text.from_file(document_path, mime_type)\r\n File \"/usr/local/lib/python3.9/site-packages/tika_client/_resource_tika.py\", line 36, in from_file\r\n return self._decoded_response(self._put_multipart(self.MULTI_PART_PLAIN_TEXT_CONTENT, filepath, mime_type))\r\n File \"/usr/local/lib/python3.9/site-packages/tika_client/_utils.py\", line 46, in _put_multipart\r\n resp.raise_for_status()\r\n File \"/usr/local/lib/python3.9/site-packages/httpx/_models.py\", line 749, in raise_for_status\r\n raise HTTPStatusError(message, request=request, response=self)\r\nhttpx.HTTPStatusError: Server error '500 Server Error' for url 'http://localhost:9998/tika/form/text'\r\nFor more information check: https://httpstatuses.com/500\n```\n\n\n### Browser logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.16.5\n\n### Host OS\n\nTruenas\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\n_No response_\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "import os\nfrom pathlib import Path\n\nimport httpx\nfrom django.conf import settings\nfrom django.utils import timezone\nfrom tika_client import TikaClient\n\nfrom documents.parsers import DocumentParser\nfrom documents.parsers import ParseError\nfrom documents.parsers import make_thumbnail_from_pdf\n\n\nclass TikaDocumentParser(DocumentParser):\n \"\"\"\n This parser sends documents to a local tika server\n \"\"\"\n\n logging_name = \"paperless.parsing.tika\"\n\n def get_thumbnail(self, document_path, mime_type, file_name=None):\n if not self.archive_path:\n self.archive_path = self.convert_to_pdf(document_path, file_name)\n\n return make_thumbnail_from_pdf(\n self.archive_path,\n self.tempdir,\n self.logging_group,\n )\n\n def extract_metadata(self, document_path, mime_type):\n try:\n with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:\n parsed = client.metadata.from_file(document_path, mime_type)\n return [\n {\n \"namespace\": \"\",\n \"prefix\": \"\",\n \"key\": key,\n \"value\": parsed.data[key],\n }\n for key in parsed.data\n ]\n except Exception as e:\n self.log.warning(\n f\"Error while fetching document metadata for {document_path}: {e}\",\n )\n return []\n\n def parse(self, document_path: Path, mime_type: str, file_name=None):\n self.log.info(f\"Sending {document_path} to Tika server\")\n\n try:\n with TikaClient(tika_url=settings.TIKA_ENDPOINT) as client:\n parsed = client.tika.as_text.from_file(document_path, mime_type)\n except Exception as err:\n raise ParseError(\n f\"Could not parse {document_path} with tika server at \"\n f\"{settings.TIKA_ENDPOINT}: {err}\",\n ) from err\n\n self.text = parsed.content\n if self.text is not None:\n self.text = self.text.strip()\n\n self.date = parsed.created\n if self.date is not None and timezone.is_naive(self.date):\n self.date = timezone.make_aware(self.date)\n\n self.archive_path = self.convert_to_pdf(document_path, file_name)\n\n def convert_to_pdf(self, document_path, file_name):\n pdf_path = 
os.path.join(self.tempdir, \"convert.pdf\")\n gotenberg_server = settings.TIKA_GOTENBERG_ENDPOINT\n url = gotenberg_server + \"/forms/libreoffice/convert\"\n\n self.log.info(f\"Converting {document_path} to PDF as {pdf_path}\")\n with open(document_path, \"rb\") as document_handle:\n files = {\n \"files\": (\n \"convert\" + os.path.splitext(document_path)[-1],\n document_handle,\n ),\n }\n headers = {}\n data = {}\n\n # Set the output format of the resulting PDF\n # Valid inputs: https://gotenberg.dev/docs/modules/pdf-engines#uno\n if settings.OCR_OUTPUT_TYPE in {\"pdfa\", \"pdfa-2\"}:\n data[\"pdfFormat\"] = \"PDF/A-2b\"\n elif settings.OCR_OUTPUT_TYPE == \"pdfa-1\":\n data[\"pdfFormat\"] = \"PDF/A-1a\"\n elif settings.OCR_OUTPUT_TYPE == \"pdfa-3\":\n data[\"pdfFormat\"] = \"PDF/A-3b\"\n\n try:\n response = httpx.post(\n url,\n files=files,\n headers=headers,\n data=data,\n timeout=settings.CELERY_TASK_TIME_LIMIT,\n )\n response.raise_for_status() # ensure we notice bad responses\n except Exception as err:\n raise ParseError(\n f\"Error while converting document to PDF: {err}\",\n ) from err\n\n with open(pdf_path, \"wb\") as file:\n file.write(response.content)\n file.close()\n\n return pdf_path\n", "path": "src/paperless_tika/parsers.py"}]}
| 3,658 | 252 |
gh_patches_debug_30852 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1798 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename RPC methods
### Describe the feature
The RPC server currently supports the following endpoints:
- compile
- run
- compile_project
- run_project
- seed_project
- test_project
These endpoints should be remapped to:
- compile_sql
- run_sql
- compile
- run
- seed
- test
This will obviously be a breaking change for anyone using the RPC server, but we're going to have to do it eventually, so we might as well do it now! Parity between the RPC methods and CLI arguments will be drastically less confusing for dbt users in the long run.
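As a sketch of what the rename looks like from a client's point of view (this assumes the server speaks JSON-RPC 2.0 on its default local port — the endpoint URL is an assumption — and the parameter names simply mirror `RPCExecParameters` / `RPCCompileProjectParameters` in the code below):

```python
import base64

import requests  # any JSON-RPC client would do


def rpc(method, params, url="http://localhost:8580/jsonrpc"):
    # url/port are assumptions for illustration, not part of this proposal
    payload = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
    return requests.post(url, json=payload).json()


sql = base64.b64encode(b"select 1 as id").decode()

rpc("compile_sql", {"name": "my_query", "sql": sql})  # previously "compile"
rpc("run_sql", {"name": "my_query", "sql": sql})      # previously "run"
rpc("run", {"models": ["my_model"]})                  # previously "run_project"
rpc("test", {"data": True, "schema": True})           # previously "test_project"
```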
</issue>
<code>
[start of core/dbt/task/remote.py]
1 import signal
2 import threading
3 from dataclasses import dataclass
4 from datetime import datetime
5 from typing import Union, List, Optional
6
7 from hologram import JsonSchemaMixin
8
9 from dbt.adapters.factory import get_adapter
10 from dbt.clients.jinja import extract_toplevel_blocks
11 from dbt.compilation import compile_manifest
12 from dbt.parser.results import ParseResult
13 from dbt.parser.rpc import RPCCallParser, RPCMacroParser
14 from dbt.parser.util import ParserUtils
15 import dbt.ui.printer
16 from dbt.logger import GLOBAL_LOGGER as logger
17 from dbt.rpc.node_runners import RPCCompileRunner, RPCExecuteRunner
18 from dbt.rpc.task import RemoteCallableResult, RPCTask
19
20 from dbt.task.run import RunTask
21 from dbt.task.seed import SeedTask
22 from dbt.task.test import TestTask
23
24
25 @dataclass
26 class RPCExecParameters(JsonSchemaMixin):
27 name: str
28 sql: str
29 macros: Optional[str]
30
31
32 @dataclass
33 class RPCCompileProjectParameters(JsonSchemaMixin):
34 models: Union[None, str, List[str]] = None
35 exclude: Union[None, str, List[str]] = None
36
37
38 @dataclass
39 class RPCTestProjectParameters(RPCCompileProjectParameters):
40 data: bool = False
41 schema: bool = False
42
43
44 @dataclass
45 class RPCSeedProjectParameters(JsonSchemaMixin):
46 show: bool = False
47
48
49 class _RPCExecTask(RPCTask):
50 def __init__(self, args, config, manifest):
51 super().__init__(args, config)
52 self._base_manifest = manifest.deepcopy(config=config)
53
54 def runtime_cleanup(self, selected_uids):
55 """Do some pre-run cleanup that is usually performed in Task __init__.
56 """
57 self.run_count = 0
58 self.num_nodes = len(selected_uids)
59 self.node_results = []
60 self._skipped_children = {}
61 self._skipped_children = {}
62 self._raise_next_tick = None
63
64 def _extract_request_data(self, data):
65 data = self.decode_sql(data)
66 macro_blocks = []
67 data_chunks = []
68 for block in extract_toplevel_blocks(data):
69 if block.block_type_name == 'macro':
70 macro_blocks.append(block.full_block)
71 else:
72 data_chunks.append(block.full_block)
73 macros = '\n'.join(macro_blocks)
74 sql = ''.join(data_chunks)
75 return sql, macros
76
77 def _get_exec_node(self, name, sql, macros):
78 results = ParseResult.rpc()
79 macro_overrides = {}
80 sql, macros = self._extract_request_data(sql)
81
82 if macros:
83 macro_parser = RPCMacroParser(results, self.config)
84 for node in macro_parser.parse_remote(macros):
85 macro_overrides[node.unique_id] = node
86
87 self._base_manifest.macros.update(macro_overrides)
88 rpc_parser = RPCCallParser(
89 results=results,
90 project=self.config,
91 root_project=self.config,
92 macro_manifest=self._base_manifest,
93 )
94 node = rpc_parser.parse_remote(sql, name)
95 self.manifest = ParserUtils.add_new_refs(
96 manifest=self._base_manifest,
97 current_project=self.config,
98 node=node,
99 macros=macro_overrides
100 )
101
102 # don't write our new, weird manifest!
103 self.linker = compile_manifest(self.config, self.manifest, write=False)
104 return node
105
106 def _raise_set_error(self):
107 if self._raise_next_tick is not None:
108 raise self._raise_next_tick
109
110 def _in_thread(self, node, thread_done):
111 runner = self.get_runner(node)
112 try:
113 self.node_results.append(runner.safe_run(self.manifest))
114 except Exception as exc:
115 logger.debug('Got exception {}'.format(exc), exc_info=True)
116 self._raise_next_tick = exc
117 finally:
118 thread_done.set()
119
120 def handle_request(
121 self, params: RPCExecParameters
122 ) -> RemoteCallableResult:
123 # we could get a ctrl+c at any time, including during parsing.
124 thread = None
125 started = datetime.utcnow()
126 try:
127 node = self._get_exec_node(params.name, params.sql, params.macros)
128
129 selected_uids = [node.unique_id]
130 self.runtime_cleanup(selected_uids)
131
132 thread_done = threading.Event()
133 thread = threading.Thread(target=self._in_thread,
134 args=(node, thread_done))
135 thread.start()
136 thread_done.wait()
137 except KeyboardInterrupt:
138 adapter = get_adapter(self.config)
139 if adapter.is_cancelable():
140
141 for conn_name in adapter.cancel_open_connections():
142 logger.debug('canceled query {}'.format(conn_name))
143 if thread:
144 thread.join()
145 else:
146 msg = ("The {} adapter does not support query "
147 "cancellation. Some queries may still be "
148 "running!".format(adapter.type()))
149
150 logger.debug(msg)
151
152 raise dbt.exceptions.RPCKilledException(signal.SIGINT)
153
154 self._raise_set_error()
155
156 ended = datetime.utcnow()
157 elapsed = (ended - started).total_seconds()
158 return self.get_result(
159 results=self.node_results,
160 elapsed_time=elapsed,
161 generated_at=ended,
162 )
163
164
165 class RemoteCompileTask(_RPCExecTask):
166 METHOD_NAME = 'compile'
167
168 def get_runner_type(self):
169 return RPCCompileRunner
170
171
172 class RemoteRunTask(_RPCExecTask, RunTask):
173 METHOD_NAME = 'run'
174
175 def get_runner_type(self):
176 return RPCExecuteRunner
177
178
179 class RemoteCompileProjectTask(RPCTask):
180 METHOD_NAME = 'compile_project'
181
182 def __init__(self, args, config, manifest):
183 super().__init__(args, config)
184 self.manifest = manifest.deepcopy(config=config)
185
186 def load_manifest(self):
187 # we started out with a manifest!
188 pass
189
190 def handle_request(
191 self, params: RPCCompileProjectParameters
192 ) -> RemoteCallableResult:
193 self.args.models = self._listify(params.models)
194 self.args.exclude = self._listify(params.exclude)
195
196 results = self.run()
197 return results
198
199
200 class RemoteRunProjectTask(RPCTask, RunTask):
201 METHOD_NAME = 'run_project'
202
203 def __init__(self, args, config, manifest):
204 super().__init__(args, config)
205 self.manifest = manifest.deepcopy(config=config)
206
207 def load_manifest(self):
208 # we started out with a manifest!
209 pass
210
211 def handle_request(
212 self, params: RPCCompileProjectParameters
213 ) -> RemoteCallableResult:
214 self.args.models = self._listify(params.models)
215 self.args.exclude = self._listify(params.exclude)
216
217 results = self.run()
218 return results
219
220
221 class RemoteSeedProjectTask(RPCTask, SeedTask):
222 METHOD_NAME = 'seed_project'
223
224 def __init__(self, args, config, manifest):
225 super().__init__(args, config)
226 self.manifest = manifest.deepcopy(config=config)
227
228 def load_manifest(self):
229 # we started out with a manifest!
230 pass
231
232 def handle_request(
233 self, params: RPCSeedProjectParameters
234 ) -> RemoteCallableResult:
235 self.args.show = params.show
236
237 results = self.run()
238 return results
239
240
241 class RemoteTestProjectTask(RPCTask, TestTask):
242 METHOD_NAME = 'test_project'
243
244 def __init__(self, args, config, manifest):
245 super().__init__(args, config)
246 self.manifest = manifest.deepcopy(config=config)
247
248 def load_manifest(self):
249 # we started out with a manifest!
250 pass
251
252 def handle_request(
253 self, params: RPCTestProjectParameters,
254 ) -> RemoteCallableResult:
255 self.args.models = self._listify(params.models)
256 self.args.exclude = self._listify(params.exclude)
257 self.args.data = params.data
258 self.args.schema = params.schema
259
260 results = self.run()
261 return results
262
[end of core/dbt/task/remote.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/dbt/task/remote.py b/core/dbt/task/remote.py
--- a/core/dbt/task/remote.py
+++ b/core/dbt/task/remote.py
@@ -163,21 +163,21 @@
class RemoteCompileTask(_RPCExecTask):
- METHOD_NAME = 'compile'
+ METHOD_NAME = 'compile_sql'
def get_runner_type(self):
return RPCCompileRunner
class RemoteRunTask(_RPCExecTask, RunTask):
- METHOD_NAME = 'run'
+ METHOD_NAME = 'run_sql'
def get_runner_type(self):
return RPCExecuteRunner
class RemoteCompileProjectTask(RPCTask):
- METHOD_NAME = 'compile_project'
+ METHOD_NAME = 'compile'
def __init__(self, args, config, manifest):
super().__init__(args, config)
@@ -198,7 +198,7 @@
class RemoteRunProjectTask(RPCTask, RunTask):
- METHOD_NAME = 'run_project'
+ METHOD_NAME = 'run'
def __init__(self, args, config, manifest):
super().__init__(args, config)
@@ -219,7 +219,7 @@
class RemoteSeedProjectTask(RPCTask, SeedTask):
- METHOD_NAME = 'seed_project'
+ METHOD_NAME = 'seed'
def __init__(self, args, config, manifest):
super().__init__(args, config)
@@ -239,7 +239,7 @@
class RemoteTestProjectTask(RPCTask, TestTask):
- METHOD_NAME = 'test_project'
+ METHOD_NAME = 'test'
def __init__(self, args, config, manifest):
super().__init__(args, config)
|
{"golden_diff": "diff --git a/core/dbt/task/remote.py b/core/dbt/task/remote.py\n--- a/core/dbt/task/remote.py\n+++ b/core/dbt/task/remote.py\n@@ -163,21 +163,21 @@\n \n \n class RemoteCompileTask(_RPCExecTask):\n- METHOD_NAME = 'compile'\n+ METHOD_NAME = 'compile_sql'\n \n def get_runner_type(self):\n return RPCCompileRunner\n \n \n class RemoteRunTask(_RPCExecTask, RunTask):\n- METHOD_NAME = 'run'\n+ METHOD_NAME = 'run_sql'\n \n def get_runner_type(self):\n return RPCExecuteRunner\n \n \n class RemoteCompileProjectTask(RPCTask):\n- METHOD_NAME = 'compile_project'\n+ METHOD_NAME = 'compile'\n \n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n@@ -198,7 +198,7 @@\n \n \n class RemoteRunProjectTask(RPCTask, RunTask):\n- METHOD_NAME = 'run_project'\n+ METHOD_NAME = 'run'\n \n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n@@ -219,7 +219,7 @@\n \n \n class RemoteSeedProjectTask(RPCTask, SeedTask):\n- METHOD_NAME = 'seed_project'\n+ METHOD_NAME = 'seed'\n \n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n@@ -239,7 +239,7 @@\n \n \n class RemoteTestProjectTask(RPCTask, TestTask):\n- METHOD_NAME = 'test_project'\n+ METHOD_NAME = 'test'\n \n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n", "issue": "Rename RPC methods\n### Describe the feature\r\nThe RPC server currently supports the following endpoints:\r\n - compile\r\n - run\r\n - compile_project\r\n - run_project\r\n - seed_project\r\n - test_project\r\n\r\nThese endpoints should be remapped to:\r\n - compile_sql\r\n - run_sql\r\n - compile\r\n - run\r\n - seed\r\n - test\r\n\r\nThis will obviously be a breaking change for anyone using the RPC server, but we're going to have to do it eventually, so we might as well do it now! 
Parity between the RPC methods and CLI arguments will be drastically less confusing for dbt users in the long run.\n", "before_files": [{"content": "import signal\nimport threading\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom typing import Union, List, Optional\n\nfrom hologram import JsonSchemaMixin\n\nfrom dbt.adapters.factory import get_adapter\nfrom dbt.clients.jinja import extract_toplevel_blocks\nfrom dbt.compilation import compile_manifest\nfrom dbt.parser.results import ParseResult\nfrom dbt.parser.rpc import RPCCallParser, RPCMacroParser\nfrom dbt.parser.util import ParserUtils\nimport dbt.ui.printer\nfrom dbt.logger import GLOBAL_LOGGER as logger\nfrom dbt.rpc.node_runners import RPCCompileRunner, RPCExecuteRunner\nfrom dbt.rpc.task import RemoteCallableResult, RPCTask\n\nfrom dbt.task.run import RunTask\nfrom dbt.task.seed import SeedTask\nfrom dbt.task.test import TestTask\n\n\n@dataclass\nclass RPCExecParameters(JsonSchemaMixin):\n name: str\n sql: str\n macros: Optional[str]\n\n\n@dataclass\nclass RPCCompileProjectParameters(JsonSchemaMixin):\n models: Union[None, str, List[str]] = None\n exclude: Union[None, str, List[str]] = None\n\n\n@dataclass\nclass RPCTestProjectParameters(RPCCompileProjectParameters):\n data: bool = False\n schema: bool = False\n\n\n@dataclass\nclass RPCSeedProjectParameters(JsonSchemaMixin):\n show: bool = False\n\n\nclass _RPCExecTask(RPCTask):\n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n self._base_manifest = manifest.deepcopy(config=config)\n\n def runtime_cleanup(self, selected_uids):\n \"\"\"Do some pre-run cleanup that is usually performed in Task __init__.\n \"\"\"\n self.run_count = 0\n self.num_nodes = len(selected_uids)\n self.node_results = []\n self._skipped_children = {}\n self._skipped_children = {}\n self._raise_next_tick = None\n\n def _extract_request_data(self, data):\n data = self.decode_sql(data)\n macro_blocks = []\n data_chunks = []\n for block in extract_toplevel_blocks(data):\n if block.block_type_name == 'macro':\n macro_blocks.append(block.full_block)\n else:\n data_chunks.append(block.full_block)\n macros = '\\n'.join(macro_blocks)\n sql = ''.join(data_chunks)\n return sql, macros\n\n def _get_exec_node(self, name, sql, macros):\n results = ParseResult.rpc()\n macro_overrides = {}\n sql, macros = self._extract_request_data(sql)\n\n if macros:\n macro_parser = RPCMacroParser(results, self.config)\n for node in macro_parser.parse_remote(macros):\n macro_overrides[node.unique_id] = node\n\n self._base_manifest.macros.update(macro_overrides)\n rpc_parser = RPCCallParser(\n results=results,\n project=self.config,\n root_project=self.config,\n macro_manifest=self._base_manifest,\n )\n node = rpc_parser.parse_remote(sql, name)\n self.manifest = ParserUtils.add_new_refs(\n manifest=self._base_manifest,\n current_project=self.config,\n node=node,\n macros=macro_overrides\n )\n\n # don't write our new, weird manifest!\n self.linker = compile_manifest(self.config, self.manifest, write=False)\n return node\n\n def _raise_set_error(self):\n if self._raise_next_tick is not None:\n raise self._raise_next_tick\n\n def _in_thread(self, node, thread_done):\n runner = self.get_runner(node)\n try:\n self.node_results.append(runner.safe_run(self.manifest))\n except Exception as exc:\n logger.debug('Got exception {}'.format(exc), exc_info=True)\n self._raise_next_tick = exc\n finally:\n thread_done.set()\n\n def handle_request(\n self, params: RPCExecParameters\n ) -> 
RemoteCallableResult:\n # we could get a ctrl+c at any time, including during parsing.\n thread = None\n started = datetime.utcnow()\n try:\n node = self._get_exec_node(params.name, params.sql, params.macros)\n\n selected_uids = [node.unique_id]\n self.runtime_cleanup(selected_uids)\n\n thread_done = threading.Event()\n thread = threading.Thread(target=self._in_thread,\n args=(node, thread_done))\n thread.start()\n thread_done.wait()\n except KeyboardInterrupt:\n adapter = get_adapter(self.config)\n if adapter.is_cancelable():\n\n for conn_name in adapter.cancel_open_connections():\n logger.debug('canceled query {}'.format(conn_name))\n if thread:\n thread.join()\n else:\n msg = (\"The {} adapter does not support query \"\n \"cancellation. Some queries may still be \"\n \"running!\".format(adapter.type()))\n\n logger.debug(msg)\n\n raise dbt.exceptions.RPCKilledException(signal.SIGINT)\n\n self._raise_set_error()\n\n ended = datetime.utcnow()\n elapsed = (ended - started).total_seconds()\n return self.get_result(\n results=self.node_results,\n elapsed_time=elapsed,\n generated_at=ended,\n )\n\n\nclass RemoteCompileTask(_RPCExecTask):\n METHOD_NAME = 'compile'\n\n def get_runner_type(self):\n return RPCCompileRunner\n\n\nclass RemoteRunTask(_RPCExecTask, RunTask):\n METHOD_NAME = 'run'\n\n def get_runner_type(self):\n return RPCExecuteRunner\n\n\nclass RemoteCompileProjectTask(RPCTask):\n METHOD_NAME = 'compile_project'\n\n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n self.manifest = manifest.deepcopy(config=config)\n\n def load_manifest(self):\n # we started out with a manifest!\n pass\n\n def handle_request(\n self, params: RPCCompileProjectParameters\n ) -> RemoteCallableResult:\n self.args.models = self._listify(params.models)\n self.args.exclude = self._listify(params.exclude)\n\n results = self.run()\n return results\n\n\nclass RemoteRunProjectTask(RPCTask, RunTask):\n METHOD_NAME = 'run_project'\n\n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n self.manifest = manifest.deepcopy(config=config)\n\n def load_manifest(self):\n # we started out with a manifest!\n pass\n\n def handle_request(\n self, params: RPCCompileProjectParameters\n ) -> RemoteCallableResult:\n self.args.models = self._listify(params.models)\n self.args.exclude = self._listify(params.exclude)\n\n results = self.run()\n return results\n\n\nclass RemoteSeedProjectTask(RPCTask, SeedTask):\n METHOD_NAME = 'seed_project'\n\n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n self.manifest = manifest.deepcopy(config=config)\n\n def load_manifest(self):\n # we started out with a manifest!\n pass\n\n def handle_request(\n self, params: RPCSeedProjectParameters\n ) -> RemoteCallableResult:\n self.args.show = params.show\n\n results = self.run()\n return results\n\n\nclass RemoteTestProjectTask(RPCTask, TestTask):\n METHOD_NAME = 'test_project'\n\n def __init__(self, args, config, manifest):\n super().__init__(args, config)\n self.manifest = manifest.deepcopy(config=config)\n\n def load_manifest(self):\n # we started out with a manifest!\n pass\n\n def handle_request(\n self, params: RPCTestProjectParameters,\n ) -> RemoteCallableResult:\n self.args.models = self._listify(params.models)\n self.args.exclude = self._listify(params.exclude)\n self.args.data = params.data\n self.args.schema = params.schema\n\n results = self.run()\n return results\n", "path": "core/dbt/task/remote.py"}]}
| 3,051 | 396 |
gh_patches_debug_5355 | rasdani/github-patches | git_diff | saulpw__visidata-491 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] [v2.-4dev] dup-rows/-deep does not copy rows
I found that if I try to do a `dup-rows-deep` with a DirSheet, I get the following error and no rows in the sheet (the columns are correct and as expected).
This only appears to happen with a DirSheet; I just tried with a TextSheet, which works as expected.
```
Traceback (most recent call last):
File "/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/threads.py", line 201, in _toplevelTryFunc
t.status = func(*args, **kwargs)
File "/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/threads.py", line 80, in _async_deepcopy
newlist.append(deepcopy(r))
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/copy.py", line 281, in _reconstruct
if hasattr(y, '__setstate__'):
File "/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/path.py", line 77, in __getattr__
r = getattr(self._path, k)
File "/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/path.py", line 77, in __getattr__
r = getattr(self._path, k)
File "/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/path.py", line 77, in __getattr__
r = getattr(self._path, k)
[Previous line repeated 492 more times]
RecursionError: maximum recursion depth exceeded
```
### Replicate:
1. Use v2.-4dev branch
2. Open a DirSheet with `vd .`
3. Do a deep copy of the sheet with `dup-rows-deep`
### Result:
A sheet with no rows, columns as expected for a DirSheet, and the error message above.
### Expected:
A deep copy of the original DirSheet with all rows (including those selected)
### cmdlog:
```
sheet col row longname input keystrokes comment
SqliteSheet header set-option 0
UsvSheet delimiter set-option ␞
UsvSheet row_delimiter set-option ␟
override disp_date_fmt set-option %Y-%m-%d %H:%M:%S
open-file . o
_files dup-rows-deep gz" open duplicate sheet with deepcopy of all rows
```
Also, am I correct in understanding that if I make a deep copy, modifications I make to that copy should propagate to the original sheet? And this should include selecting/deselecting rows?
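For what it's worth, the recursion in the traceback can be reproduced with a toy class whose `__getattr__` delegates through an instance attribute (nothing visidata-specific is assumed here): `copy.deepcopy` rebuilds the object with an empty `__dict__` and probes `hasattr(y, '__setstate__')` before restoring state, so the delegation keeps asking itself for `_path`.

```python
import copy

class Delegating:
    """Toy stand-in for the delegation pattern in visidata/path.py."""

    def __init__(self, target):
        self._path = target

    def __getattr__(self, k):
        # While deepcopy's _reconstruct runs, '_path' is not in __dict__
        # yet, so this line re-enters __getattr__ until the stack blows up.
        return getattr(self._path, k)

try:
    copy.deepcopy(Delegating("x"))
except RecursionError as exc:
    print("RecursionError:", exc)
```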
</issue>
<code>
[start of visidata/path.py]
1 import os
2 import os.path
3 import sys
4 import pathlib
5 from urllib.parse import urlparse, urlunparse
6
7 from visidata import *
8
9 option('encoding', 'utf-8', 'encoding passed to codecs.open', replay=True)
10 option('encoding_errors', 'surrogateescape', 'encoding_errors passed to codecs.open', replay=True)
11
12 @functools.lru_cache()
13 def vstat(path, force=False):
14 try:
15 return os.stat(path)
16 except Exception as e:
17 return None
18
19 def filesize(path):
20 if hasattr(path, 'filesize') and path.filesize is not None:
21 return path.filesize
22 if path.fp or path.is_url():
23 return 0
24 st = path.stat() # vstat(path)
25 return st and st.st_size
26
27 def modtime(path):
28 st = path.stat()
29 return st and st.st_mtime
30
31
32 class Path(os.PathLike):
33 'File and path-handling class, modeled on `pathlib.Path`.'
34 def __init__(self, given, fp=None, lines=None, filesize=None):
35 # Resolve pathname shell variables and ~userdir
36 self.given = os.path.expandvars(os.path.expanduser(given))
37 self.fp = fp
38 self.lines = lines or [] # shared among all RepeatFile instances
39 self.filesize = filesize
40 self.rfile = None
41
42 @functools.lru_cache()
43 def stat(self, force=False):
44 return self._path.stat()
45
46 @property
47 def given(self):
48 return self._given
49
50 @given.setter
51 def given(self, given):
52 self._given = given
53 if isinstance(given, os.PathLike):
54 self._path = given
55 else:
56 self._path = pathlib.Path(given)
57
58 self.ext = self.suffix[1:]
59 if self.suffix:
60 self.name = self._path.name[:-len(self.suffix)]
61 else:
62 self.name = self._path.name
63
64 # check if file is compressed
65 if self.suffix in ['.gz', '.bz2', '.xz']:
66 self.compression = self.ext
67 uncompressedpath = Path(self.given[:-len(self.suffix)])
68 self.name = uncompressedpath.name
69 self.ext = uncompressedpath.ext
70 else:
71 self.compression = None
72
73 def __getattr__(self, k):
74 if hasattr(self.__dict__, k):
75 r = getattr(self.__dict__, k)
76 else:
77 r = getattr(self._path, k)
78 if isinstance(r, pathlib.Path):
79 return Path(r)
80 return r
81
82 def __fspath__(self):
83 return self._path.__fspath__()
84
85 def __lt__(self, a):
86 return self._path.__lt__(a)
87
88 def __truediv__(self, a):
89 return Path(self._path.__truediv__(a))
90
91 def open_text(self, mode='rt'):
92 # rfile makes a single-access fp reusable
93
94 if self.rfile:
95 return self.rfile
96
97 if self.fp:
98 self.rfile = RepeatFile(fp=self.fp)
99 return self.rfile
100
101 if 't' not in mode:
102 mode += 't'
103
104 if self.given == '-':
105 if 'r' in mode:
106 return vd._stdin
107 elif 'w' in mode or 'a' in mode:
108 # convert 'a' to 'w' for stdout: https://bugs.python.org/issue27805
109 return open(os.dup(vd._stdout.fileno()), 'wt')
110 else:
111 error('invalid mode "%s" for Path.open_text()' % mode)
112 return sys.stderr
113
114 return self.open(mode=mode, encoding=options.encoding, errors=options.encoding_errors)
115
116 def read_text(self, *args):
117 if self.lines:
118 return RepeatFile(iter_lines=self.lines).read()
119 elif self.fp:
120 return self.fp.read()
121 else:
122 return self._path.read_text(*args)
123
124 def open(self, *args, **kwargs):
125 fn = self
126 if self.compression == 'gz':
127 import gzip
128 return gzip.open(fn, *args, **kwargs)
129 elif self.compression == 'bz2':
130 import bz2
131 return bz2.open(fn, *args, **kwargs)
132 elif self.compression == 'xz':
133 import lzma
134 return lzma.open(fn, *args, **kwargs)
135 else:
136 return self._path.open(*args, **kwargs)
137
138 def __iter__(self):
139 with Progress(total=filesize(self)) as prog:
140 for i, line in enumerate(self.open_text()):
141 prog.addProgress(len(line))
142 yield line[:-1]
143
144 def open_bytes(self, mode='rb'):
145 if 'b' not in mode:
146 mode += 'b'
147 return self.open(mode=mode)
148
149 def read_bytes(self):
150 with self.open(mode='rb') as fp:
151 return fp.read()
152
153 def is_url(self):
154 return '://' in self.given
155
156 def __str__(self):
157 if self.is_url():
158 return self.given
159 return str(self._path)
160
161 @functools.lru_cache()
162 def stat(self, force=False):
163 try:
164 if not self.is_url():
165 return self._path.stat()
166 except Exception as e:
167 return None
168
169 def exists(self):
170 if self.fp or self.is_url():
171 return True
172 return self._path.exists()
173
174 @property
175 def scheme(self):
176 if self.is_url():
177 return urlparse(self.given).scheme
178
179 def with_name(self, name):
180 if self.is_url():
181 urlparts = list(urlparse(self.given))
182 urlparts[2] = '/'.join(Path(urlparts[2])._parts[1:-1] + [name])
183 return Path(urlunparse(urlparts))
184 else:
185 return Path(self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]))
186
187
188 class RepeatFile:
189 def __init__(self, *, fp=None, iter_lines=None):
190 'Provide either fp or iter_lines, and lines will be filled from it.'
191 self.fp = fp
192 self.iter_lines = iter_lines
193 self.lines = []
194 self.iter = RepeatFileIter(self)
195
196 def __enter__(self):
197 self.iter = RepeatFileIter(self)
198 return self
199
200 def __exit__(self, a,b,c):
201 pass
202
203 def read(self, n=None):
204 r = ''
205 if n is None:
206 n = 10**12 # some too huge number
207 while len(r) < n:
208 try:
209 s = next(self.iter)
210 r += s + '\n'
211 n += len(r)
212 except StopIteration:
213 break # end of file
214 return r
215
216 def seek(self, n):
217 assert n == 0, 'RepeatFile can only seek to beginning'
218 self.iter = RepeatFileIter(self)
219
220 def __iter__(self):
221 return RepeatFileIter(self)
222
223 def __next__(self):
224 return next(self.iter)
225
226 def exists(self):
227 return True
228
229
230 class RepeatFileIter:
231 def __init__(self, rf):
232 self.rf = rf
233 self.nextIndex = 0
234
235 def __iter__(self):
236 return RepeatFileIter(self.rf)
237
238 def __next__(self):
239 if self.nextIndex < len(self.rf.lines):
240 r = self.rf.lines[self.nextIndex]
241 elif self.rf.iter_lines:
242 try:
243 r = next(self.rf.iter_lines)
244 self.rf.lines.append(r)
245 except StopIteration:
246 self.rf.iter_lines = None
247 raise
248 elif self.rf.fp:
249 try:
250 r = next(self.rf.fp)
251 self.rf.lines.append(r)
252 except StopIteration:
253 self.rf.fp = None
254 raise
255 else:
256 raise StopIteration()
257
258
259 self.nextIndex += 1
260 return r
261
[end of visidata/path.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/visidata/path.py b/visidata/path.py
--- a/visidata/path.py
+++ b/visidata/path.py
@@ -74,7 +74,10 @@
if hasattr(self.__dict__, k):
r = getattr(self.__dict__, k)
else:
- r = getattr(self._path, k)
+ if self.__dict__.get('_path', None) is not None:
+ r = getattr(self._path, k)
+ else:
+ raise AttributeError(k)
if isinstance(r, pathlib.Path):
return Path(r)
return r
|
{"golden_diff": "diff --git a/visidata/path.py b/visidata/path.py\n--- a/visidata/path.py\n+++ b/visidata/path.py\n@@ -74,7 +74,10 @@\n if hasattr(self.__dict__, k):\n r = getattr(self.__dict__, k)\n else:\n- r = getattr(self._path, k)\n+ if self.__dict__.get('_path', None) is not None:\n+ r = getattr(self._path, k)\n+ else:\n+ raise AttributeError(k)\n if isinstance(r, pathlib.Path):\n return Path(r)\n return r\n", "issue": "[Bug] [v2.-4dev] dup-rows/-deep does not copy rows\nI found that if I try and do a `dup-rows-deep` with a DirSheet I get the following error with no rows in the sheet (columns are correct and as expected).\r\n\r\nThis only appears to happen with a DirSheet, I just tried with a TextSheet which works as expected.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/threads.py\", line 201, in _toplevelTryFunc\r\n t.status = func(*args, **kwargs)\r\n File \"/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/threads.py\", line 80, in _async_deepcopy\r\n newlist.append(deepcopy(r))\r\n File \"/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/copy.py\", line 180, in deepcopy\r\n y = _reconstruct(x, memo, *rv)\r\n File \"/usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/copy.py\", line 281, in _reconstruct\r\n if hasattr(y, '__setstate__'):\r\n File \"/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/path.py\", line 77, in __getattr__\r\n r = getattr(self._path, k)\r\n File \"/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/path.py\", line 77, in __getattr__\r\n r = getattr(self._path, k)\r\n File \"/path/vd_plugins/radare2/r2/lib/python3.7/site-packages/visidata-2._4dev-py3.7.egg/visidata/path.py\", line 77, in __getattr__\r\n r = getattr(self._path, k)\r\n [Previous line repeated 492 more times]\r\nRecursionError: maximum recursion depth exceeded\r\n```\r\n### Replicate:\r\n\r\n1. Use v2.-4dev branch\r\n2. Open a DirSheet with `vd .`\r\n3. Do a deep copy of the sheet with `dup-rows-deep`\r\n\r\n### Result:\r\nA sheet with no rows, columns as expected for a DirSheet, and the error message above.\r\n\r\n### Expected:\r\nA deep copy of the original DirSheet with all rows (including those selected)\r\n\r\n### cmdlog:\r\n```\r\nsheet\tcol\trow\tlongname\tinput\tkeystrokes\tcomment\r\n\tSqliteSheet\theader\tset-option\t0\r\n\tUsvSheet\tdelimiter\tset-option\t\u241e\r\n\tUsvSheet\trow_delimiter\tset-option\t\u241f\r\n\toverride\tdisp_date_fmt\tset-option\t%Y-%m-%d %H:%M:%S\r\n\t\t\topen-file\t.\to\r\n_files\t\t\tdup-rows-deep\t\tgz\"\topen duplicate sheet with deepcopy of all rows\r\n```\r\n\r\n\r\nAlso, am I correct in understanding that if I make a deep copy, modifications I make to that copy should propagate to the original sheet? 
And this should include selecting/deselecting rows?\n", "before_files": [{"content": "import os\nimport os.path\nimport sys\nimport pathlib\nfrom urllib.parse import urlparse, urlunparse\n\nfrom visidata import *\n\noption('encoding', 'utf-8', 'encoding passed to codecs.open', replay=True)\noption('encoding_errors', 'surrogateescape', 'encoding_errors passed to codecs.open', replay=True)\n\[email protected]_cache()\ndef vstat(path, force=False):\n try:\n return os.stat(path)\n except Exception as e:\n return None\n\ndef filesize(path):\n if hasattr(path, 'filesize') and path.filesize is not None:\n return path.filesize\n if path.fp or path.is_url():\n return 0\n st = path.stat() # vstat(path)\n return st and st.st_size\n\ndef modtime(path):\n st = path.stat()\n return st and st.st_mtime\n\n\nclass Path(os.PathLike):\n 'File and path-handling class, modeled on `pathlib.Path`.'\n def __init__(self, given, fp=None, lines=None, filesize=None):\n # Resolve pathname shell variables and ~userdir\n self.given = os.path.expandvars(os.path.expanduser(given))\n self.fp = fp\n self.lines = lines or [] # shared among all RepeatFile instances\n self.filesize = filesize\n self.rfile = None\n\n @functools.lru_cache()\n def stat(self, force=False):\n return self._path.stat()\n\n @property\n def given(self):\n return self._given\n\n @given.setter\n def given(self, given):\n self._given = given\n if isinstance(given, os.PathLike):\n self._path = given\n else:\n self._path = pathlib.Path(given)\n\n self.ext = self.suffix[1:]\n if self.suffix:\n self.name = self._path.name[:-len(self.suffix)]\n else:\n self.name = self._path.name\n\n # check if file is compressed\n if self.suffix in ['.gz', '.bz2', '.xz']:\n self.compression = self.ext\n uncompressedpath = Path(self.given[:-len(self.suffix)])\n self.name = uncompressedpath.name\n self.ext = uncompressedpath.ext\n else:\n self.compression = None\n\n def __getattr__(self, k):\n if hasattr(self.__dict__, k):\n r = getattr(self.__dict__, k)\n else:\n r = getattr(self._path, k)\n if isinstance(r, pathlib.Path):\n return Path(r)\n return r\n\n def __fspath__(self):\n return self._path.__fspath__()\n\n def __lt__(self, a):\n return self._path.__lt__(a)\n\n def __truediv__(self, a):\n return Path(self._path.__truediv__(a))\n\n def open_text(self, mode='rt'):\n # rfile makes a single-access fp reusable\n\n if self.rfile:\n return self.rfile\n\n if self.fp:\n self.rfile = RepeatFile(fp=self.fp)\n return self.rfile\n\n if 't' not in mode:\n mode += 't'\n\n if self.given == '-':\n if 'r' in mode:\n return vd._stdin\n elif 'w' in mode or 'a' in mode:\n # convert 'a' to 'w' for stdout: https://bugs.python.org/issue27805\n return open(os.dup(vd._stdout.fileno()), 'wt')\n else:\n error('invalid mode \"%s\" for Path.open_text()' % mode)\n return sys.stderr\n\n return self.open(mode=mode, encoding=options.encoding, errors=options.encoding_errors)\n\n def read_text(self, *args):\n if self.lines:\n return RepeatFile(iter_lines=self.lines).read()\n elif self.fp:\n return self.fp.read()\n else:\n return self._path.read_text(*args)\n\n def open(self, *args, **kwargs):\n fn = self\n if self.compression == 'gz':\n import gzip\n return gzip.open(fn, *args, **kwargs)\n elif self.compression == 'bz2':\n import bz2\n return bz2.open(fn, *args, **kwargs)\n elif self.compression == 'xz':\n import lzma\n return lzma.open(fn, *args, **kwargs)\n else:\n return self._path.open(*args, **kwargs)\n\n def __iter__(self):\n with Progress(total=filesize(self)) as prog:\n for i, line in 
enumerate(self.open_text()):\n prog.addProgress(len(line))\n yield line[:-1]\n\n def open_bytes(self, mode='rb'):\n if 'b' not in mode:\n mode += 'b'\n return self.open(mode=mode)\n\n def read_bytes(self):\n with self.open(mode='rb') as fp:\n return fp.read()\n\n def is_url(self):\n return '://' in self.given\n\n def __str__(self):\n if self.is_url():\n return self.given\n return str(self._path)\n\n @functools.lru_cache()\n def stat(self, force=False):\n try:\n if not self.is_url():\n return self._path.stat()\n except Exception as e:\n return None\n\n def exists(self):\n if self.fp or self.is_url():\n return True\n return self._path.exists()\n\n @property\n def scheme(self):\n if self.is_url():\n return urlparse(self.given).scheme\n\n def with_name(self, name):\n if self.is_url():\n urlparts = list(urlparse(self.given))\n urlparts[2] = '/'.join(Path(urlparts[2])._parts[1:-1] + [name])\n return Path(urlunparse(urlparts))\n else:\n return Path(self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name]))\n\n\nclass RepeatFile:\n def __init__(self, *, fp=None, iter_lines=None):\n 'Provide either fp or iter_lines, and lines will be filled from it.'\n self.fp = fp\n self.iter_lines = iter_lines\n self.lines = []\n self.iter = RepeatFileIter(self)\n\n def __enter__(self):\n self.iter = RepeatFileIter(self)\n return self\n\n def __exit__(self, a,b,c):\n pass\n\n def read(self, n=None):\n r = ''\n if n is None:\n n = 10**12 # some too huge number\n while len(r) < n:\n try:\n s = next(self.iter)\n r += s + '\\n'\n n += len(r)\n except StopIteration:\n break # end of file\n return r\n\n def seek(self, n):\n assert n == 0, 'RepeatFile can only seek to beginning'\n self.iter = RepeatFileIter(self)\n\n def __iter__(self):\n return RepeatFileIter(self)\n\n def __next__(self):\n return next(self.iter)\n\n def exists(self):\n return True\n\n\nclass RepeatFileIter:\n def __init__(self, rf):\n self.rf = rf\n self.nextIndex = 0\n\n def __iter__(self):\n return RepeatFileIter(self.rf)\n\n def __next__(self):\n if self.nextIndex < len(self.rf.lines):\n r = self.rf.lines[self.nextIndex]\n elif self.rf.iter_lines:\n try:\n r = next(self.rf.iter_lines)\n self.rf.lines.append(r)\n except StopIteration:\n self.rf.iter_lines = None\n raise\n elif self.rf.fp:\n try:\n r = next(self.rf.fp)\n self.rf.lines.append(r)\n except StopIteration:\n self.rf.fp = None\n raise\n else:\n raise StopIteration()\n\n\n self.nextIndex += 1\n return r\n", "path": "visidata/path.py"}]}
| 3,716 | 133 |
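An aside on the failure mode in the visidata record above: the traceback bottoms out in `Path.__getattr__` because `copy.deepcopy` rebuilds the object with `__new__` before restoring its `__dict__`, so the probe `hasattr(y, '__setstate__')` triggers `__getattr__`, which then looks up the not-yet-restored `_path` attribute and re-enters itself. The sketch below reproduces the pitfall with a hypothetical `Proxy` class (the names are illustrative, not part of visidata) and applies the same guard as the golden diff: consult `self.__dict__` directly and raise `AttributeError` while the object is only half-built.

```python
import copy


class BrokenProxy:
    def __init__(self, target):
        self._target = target

    def __getattr__(self, name):
        # Only called when normal lookup fails; if "_target" is not yet in
        # __dict__ (deepcopy creates the new object before copying its state),
        # looking up self._target re-enters __getattr__ without end.
        return getattr(self._target, name)


class FixedProxy(BrokenProxy):
    def __getattr__(self, name):
        target = self.__dict__.get("_target")
        if target is None:
            # Lets hasattr()/pickle probes on a half-built object fail cleanly.
            raise AttributeError(name)
        return getattr(target, name)


try:
    copy.deepcopy(BrokenProxy([1, 2, 3]))
except RecursionError:
    print("BrokenProxy: RecursionError during deepcopy")

print(copy.deepcopy(FixedProxy([1, 2, 3])).count(2))  # -> 1
```

The same pattern is why the patched `visidata/path.py` checks `self.__dict__.get('_path', None)` instead of touching `self._path` inside `__getattr__`.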
gh_patches_debug_12771
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-2242
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
q<Fn> tries to record macro
I'm not sure if this is solved in #2113, but currently when doing `q<Fn>` I get `Recording macro ''...`
cc @blyxxyz
</issue>
<code>
[start of qutebrowser/keyinput/modeparsers.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """KeyChainParser for "hint" and "normal" modes.
21
22 Module attributes:
23 STARTCHARS: Possible chars for starting a commandline input.
24 """
25
26 import traceback
27
28 from PyQt5.QtCore import pyqtSlot, Qt
29
30 from qutebrowser.commands import cmdexc
31 from qutebrowser.config import config
32 from qutebrowser.keyinput import keyparser
33 from qutebrowser.utils import usertypes, log, message, objreg, utils
34
35
36 STARTCHARS = ":/?"
37 LastPress = usertypes.enum('LastPress', ['none', 'filtertext', 'keystring'])
38
39
40 class NormalKeyParser(keyparser.CommandKeyParser):
41
42 """KeyParser for normal mode with added STARTCHARS detection and more.
43
44 Attributes:
45 _partial_timer: Timer to clear partial keypresses.
46 """
47
48 def __init__(self, win_id, parent=None):
49 super().__init__(win_id, parent, supports_count=True,
50 supports_chains=True)
51 self.read_config('normal')
52 self._partial_timer = usertypes.Timer(self, 'partial-match')
53 self._partial_timer.setSingleShot(True)
54 self._inhibited = False
55 self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited')
56 self._inhibited_timer.setSingleShot(True)
57
58 def __repr__(self):
59 return utils.get_repr(self)
60
61 def _handle_single_key(self, e):
62 """Override _handle_single_key to abort if the key is a startchar.
63
64 Args:
65 e: the KeyPressEvent from Qt.
66
67 Return:
68 A self.Match member.
69 """
70 txt = e.text().strip()
71 if self._inhibited:
72 self._debug_log("Ignoring key '{}', because the normal mode is "
73 "currently inhibited.".format(txt))
74 return self.Match.none
75 match = super()._handle_single_key(e)
76 if match == self.Match.partial:
77 timeout = config.get('input', 'partial-timeout')
78 if timeout != 0:
79 self._partial_timer.setInterval(timeout)
80 self._partial_timer.timeout.connect(self._clear_partial_match)
81 self._partial_timer.start()
82 return match
83
84 def set_inhibited_timeout(self, timeout):
85 if timeout != 0:
86 self._debug_log("Inhibiting the normal mode for {}ms.".format(
87 timeout))
88 self._inhibited = True
89 self._inhibited_timer.setInterval(timeout)
90 self._inhibited_timer.timeout.connect(self._clear_inhibited)
91 self._inhibited_timer.start()
92
93 @pyqtSlot()
94 def _clear_partial_match(self):
95 """Clear a partial keystring after a timeout."""
96 self._debug_log("Clearing partial keystring {}".format(
97 self._keystring))
98 self._keystring = ''
99 self.keystring_updated.emit(self._keystring)
100
101 @pyqtSlot()
102 def _clear_inhibited(self):
103 """Reset inhibition state after a timeout."""
104 self._debug_log("Releasing inhibition state of normal mode.")
105 self._inhibited = False
106
107 @pyqtSlot()
108 def _stop_timers(self):
109 super()._stop_timers()
110 self._partial_timer.stop()
111 try:
112 self._partial_timer.timeout.disconnect(self._clear_partial_match)
113 except TypeError:
114 # no connections
115 pass
116 self._inhibited_timer.stop()
117 try:
118 self._inhibited_timer.timeout.disconnect(self._clear_inhibited)
119 except TypeError:
120 # no connections
121 pass
122
123
124 class PromptKeyParser(keyparser.CommandKeyParser):
125
126 """KeyParser for yes/no prompts."""
127
128 def __init__(self, win_id, parent=None):
129 super().__init__(win_id, parent, supports_count=False,
130 supports_chains=True)
131 # We don't want an extra section for this in the config, so we just
132 # abuse the prompt section.
133 self.read_config('prompt')
134
135 def __repr__(self):
136 return utils.get_repr(self)
137
138
139 class HintKeyParser(keyparser.CommandKeyParser):
140
141 """KeyChainParser for hints.
142
143 Attributes:
144 _filtertext: The text to filter with.
145 _last_press: The nature of the last keypress, a LastPress member.
146 """
147
148 def __init__(self, win_id, parent=None):
149 super().__init__(win_id, parent, supports_count=False,
150 supports_chains=True)
151 self._filtertext = ''
152 self._last_press = LastPress.none
153 self.read_config('hint')
154 self.keystring_updated.connect(self.on_keystring_updated)
155
156 def _handle_special_key(self, e):
157 """Override _handle_special_key to handle string filtering.
158
159 Return True if the keypress has been handled, and False if not.
160
161 Args:
162 e: the KeyPressEvent from Qt.
163
164 Return:
165 True if event has been handled, False otherwise.
166 """
167 log.keyboard.debug("Got special key 0x{:x} text {}".format(
168 e.key(), e.text()))
169 hintmanager = objreg.get('hintmanager', scope='tab',
170 window=self._win_id, tab='current')
171 if e.key() == Qt.Key_Backspace:
172 log.keyboard.debug("Got backspace, mode {}, filtertext '{}', "
173 "keystring '{}'".format(self._last_press,
174 self._filtertext,
175 self._keystring))
176 if self._last_press == LastPress.filtertext and self._filtertext:
177 self._filtertext = self._filtertext[:-1]
178 hintmanager.filter_hints(self._filtertext)
179 return True
180 elif self._last_press == LastPress.keystring and self._keystring:
181 self._keystring = self._keystring[:-1]
182 self.keystring_updated.emit(self._keystring)
183 if not self._keystring and self._filtertext:
184 # Switch back to hint filtering mode (this can happen only
185 # in numeric mode after the number has been deleted).
186 hintmanager.filter_hints(self._filtertext)
187 self._last_press = LastPress.filtertext
188 return True
189 else:
190 return super()._handle_special_key(e)
191 elif hintmanager.current_mode() != 'number':
192 return super()._handle_special_key(e)
193 elif not e.text():
194 return super()._handle_special_key(e)
195 else:
196 self._filtertext += e.text()
197 hintmanager.filter_hints(self._filtertext)
198 self._last_press = LastPress.filtertext
199 return True
200
201 def handle(self, e):
202 """Handle a new keypress and call the respective handlers.
203
204 Args:
205 e: the KeyPressEvent from Qt
206
207 Returns:
208 True if the match has been handled, False otherwise.
209 """
210 match = self._handle_single_key(e)
211 if match == self.Match.partial:
212 self.keystring_updated.emit(self._keystring)
213 self._last_press = LastPress.keystring
214 return True
215 elif match == self.Match.definitive:
216 self._last_press = LastPress.none
217 return True
218 elif match == self.Match.other:
219 pass
220 elif match == self.Match.none:
221 # We couldn't find a keychain so we check if it's a special key.
222 return self._handle_special_key(e)
223 else:
224 raise ValueError("Got invalid match type {}!".format(match))
225
226 def execute(self, cmdstr, keytype, count=None):
227 """Handle a completed keychain."""
228 if not isinstance(keytype, self.Type):
229 raise TypeError("Type {} is no Type member!".format(keytype))
230 if keytype == self.Type.chain:
231 hintmanager = objreg.get('hintmanager', scope='tab',
232 window=self._win_id, tab='current')
233 hintmanager.handle_partial_key(cmdstr)
234 else:
235 # execute as command
236 super().execute(cmdstr, keytype, count)
237
238 def update_bindings(self, strings, preserve_filter=False):
239 """Update bindings when the hint strings changed.
240
241 Args:
242 strings: A list of hint strings.
243 preserve_filter: Whether to keep the current value of
244 `self._filtertext`.
245 """
246 self.bindings = {s: s for s in strings}
247 if not preserve_filter:
248 self._filtertext = ''
249
250 @pyqtSlot(str)
251 def on_keystring_updated(self, keystr):
252 """Update hintmanager when the keystring was updated."""
253 hintmanager = objreg.get('hintmanager', scope='tab',
254 window=self._win_id, tab='current')
255 hintmanager.handle_partial_key(keystr)
256
257
258 class CaretKeyParser(keyparser.CommandKeyParser):
259
260 """KeyParser for caret mode."""
261
262 passthrough = True
263
264 def __init__(self, win_id, parent=None):
265 super().__init__(win_id, parent, supports_count=True,
266 supports_chains=True)
267 self.read_config('caret')
268
269
270 class RegisterKeyParser(keyparser.CommandKeyParser):
271
272 """KeyParser for modes that record a register key.
273
274 Attributes:
275 _mode: One of KeyMode.set_mark, KeyMode.jump_mark, KeyMode.record_macro
276 and KeyMode.run_macro.
277 """
278
279 def __init__(self, win_id, mode, parent=None):
280 super().__init__(win_id, parent, supports_count=False,
281 supports_chains=False)
282 self._mode = mode
283 self.read_config('register')
284
285 def handle(self, e):
286 """Override handle to always match the next key and use the register.
287
288 Args:
289 e: the KeyPressEvent from Qt.
290
291 Return:
292 True if event has been handled, False otherwise.
293 """
294 if super().handle(e):
295 return True
296
297 if utils.keyevent_to_string(e) is None:
298 # this is a modifier key, let it pass and keep going
299 return False
300
301 key = e.text()
302
303 tabbed_browser = objreg.get('tabbed-browser', scope='window',
304 window=self._win_id)
305 macro_recorder = objreg.get('macro-recorder')
306
307 try:
308 if self._mode == usertypes.KeyMode.set_mark:
309 tabbed_browser.set_mark(key)
310 elif self._mode == usertypes.KeyMode.jump_mark:
311 tabbed_browser.jump_mark(key)
312 elif self._mode == usertypes.KeyMode.record_macro:
313 macro_recorder.record_macro(key)
314 elif self._mode == usertypes.KeyMode.run_macro:
315 macro_recorder.run_macro(self._win_id, key)
316 else:
317 raise ValueError(
318 "{} is not a valid register mode".format(self._mode))
319 except (cmdexc.CommandMetaError, cmdexc.CommandError) as err:
320 message.error(str(err), stack=traceback.format_exc())
321
322 self.request_leave.emit(self._mode, "valid register key", True)
323
324 return True
325
326 @pyqtSlot(str)
327 def on_keyconfig_changed(self, mode):
328 """RegisterKeyParser has no config section (no bindable keys)."""
329 pass
330
[end of qutebrowser/keyinput/modeparsers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qutebrowser/keyinput/modeparsers.py b/qutebrowser/keyinput/modeparsers.py
--- a/qutebrowser/keyinput/modeparsers.py
+++ b/qutebrowser/keyinput/modeparsers.py
@@ -294,12 +294,12 @@
if super().handle(e):
return True
- if utils.keyevent_to_string(e) is None:
- # this is a modifier key, let it pass and keep going
- return False
-
key = e.text()
+ if key == '' or utils.keyevent_to_string(e) is None:
+ # this is not a proper register key, let it pass and keep going
+ return False
+
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=self._win_id)
macro_recorder = objreg.get('macro-recorder')
|
{"golden_diff": "diff --git a/qutebrowser/keyinput/modeparsers.py b/qutebrowser/keyinput/modeparsers.py\n--- a/qutebrowser/keyinput/modeparsers.py\n+++ b/qutebrowser/keyinput/modeparsers.py\n@@ -294,12 +294,12 @@\n if super().handle(e):\n return True\n \n- if utils.keyevent_to_string(e) is None:\n- # this is a modifier key, let it pass and keep going\n- return False\n-\n key = e.text()\n \n+ if key == '' or utils.keyevent_to_string(e) is None:\n+ # this is not a proper register key, let it pass and keep going\n+ return False\n+\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=self._win_id)\n macro_recorder = objreg.get('macro-recorder')\n", "issue": "q<Fn> tries to record macro\nI'm not sure if this is solved in #2113, but currently when doing `q<Fn>` I get `Recording macro ''...`\r\n\r\ncc @blyxxyz\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"KeyChainParser for \"hint\" and \"normal\" modes.\n\nModule attributes:\n STARTCHARS: Possible chars for starting a commandline input.\n\"\"\"\n\nimport traceback\n\nfrom PyQt5.QtCore import pyqtSlot, Qt\n\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.config import config\nfrom qutebrowser.keyinput import keyparser\nfrom qutebrowser.utils import usertypes, log, message, objreg, utils\n\n\nSTARTCHARS = \":/?\"\nLastPress = usertypes.enum('LastPress', ['none', 'filtertext', 'keystring'])\n\n\nclass NormalKeyParser(keyparser.CommandKeyParser):\n\n \"\"\"KeyParser for normal mode with added STARTCHARS detection and more.\n\n Attributes:\n _partial_timer: Timer to clear partial keypresses.\n \"\"\"\n\n def __init__(self, win_id, parent=None):\n super().__init__(win_id, parent, supports_count=True,\n supports_chains=True)\n self.read_config('normal')\n self._partial_timer = usertypes.Timer(self, 'partial-match')\n self._partial_timer.setSingleShot(True)\n self._inhibited = False\n self._inhibited_timer = usertypes.Timer(self, 'normal-inhibited')\n self._inhibited_timer.setSingleShot(True)\n\n def __repr__(self):\n return utils.get_repr(self)\n\n def _handle_single_key(self, e):\n \"\"\"Override _handle_single_key to abort if the key is a startchar.\n\n Args:\n e: the KeyPressEvent from Qt.\n\n Return:\n A self.Match member.\n \"\"\"\n txt = e.text().strip()\n if self._inhibited:\n self._debug_log(\"Ignoring key '{}', because the normal mode is \"\n \"currently inhibited.\".format(txt))\n return self.Match.none\n match = super()._handle_single_key(e)\n if match == self.Match.partial:\n timeout = config.get('input', 'partial-timeout')\n if timeout != 0:\n self._partial_timer.setInterval(timeout)\n self._partial_timer.timeout.connect(self._clear_partial_match)\n self._partial_timer.start()\n return match\n\n def 
set_inhibited_timeout(self, timeout):\n if timeout != 0:\n self._debug_log(\"Inhibiting the normal mode for {}ms.\".format(\n timeout))\n self._inhibited = True\n self._inhibited_timer.setInterval(timeout)\n self._inhibited_timer.timeout.connect(self._clear_inhibited)\n self._inhibited_timer.start()\n\n @pyqtSlot()\n def _clear_partial_match(self):\n \"\"\"Clear a partial keystring after a timeout.\"\"\"\n self._debug_log(\"Clearing partial keystring {}\".format(\n self._keystring))\n self._keystring = ''\n self.keystring_updated.emit(self._keystring)\n\n @pyqtSlot()\n def _clear_inhibited(self):\n \"\"\"Reset inhibition state after a timeout.\"\"\"\n self._debug_log(\"Releasing inhibition state of normal mode.\")\n self._inhibited = False\n\n @pyqtSlot()\n def _stop_timers(self):\n super()._stop_timers()\n self._partial_timer.stop()\n try:\n self._partial_timer.timeout.disconnect(self._clear_partial_match)\n except TypeError:\n # no connections\n pass\n self._inhibited_timer.stop()\n try:\n self._inhibited_timer.timeout.disconnect(self._clear_inhibited)\n except TypeError:\n # no connections\n pass\n\n\nclass PromptKeyParser(keyparser.CommandKeyParser):\n\n \"\"\"KeyParser for yes/no prompts.\"\"\"\n\n def __init__(self, win_id, parent=None):\n super().__init__(win_id, parent, supports_count=False,\n supports_chains=True)\n # We don't want an extra section for this in the config, so we just\n # abuse the prompt section.\n self.read_config('prompt')\n\n def __repr__(self):\n return utils.get_repr(self)\n\n\nclass HintKeyParser(keyparser.CommandKeyParser):\n\n \"\"\"KeyChainParser for hints.\n\n Attributes:\n _filtertext: The text to filter with.\n _last_press: The nature of the last keypress, a LastPress member.\n \"\"\"\n\n def __init__(self, win_id, parent=None):\n super().__init__(win_id, parent, supports_count=False,\n supports_chains=True)\n self._filtertext = ''\n self._last_press = LastPress.none\n self.read_config('hint')\n self.keystring_updated.connect(self.on_keystring_updated)\n\n def _handle_special_key(self, e):\n \"\"\"Override _handle_special_key to handle string filtering.\n\n Return True if the keypress has been handled, and False if not.\n\n Args:\n e: the KeyPressEvent from Qt.\n\n Return:\n True if event has been handled, False otherwise.\n \"\"\"\n log.keyboard.debug(\"Got special key 0x{:x} text {}\".format(\n e.key(), e.text()))\n hintmanager = objreg.get('hintmanager', scope='tab',\n window=self._win_id, tab='current')\n if e.key() == Qt.Key_Backspace:\n log.keyboard.debug(\"Got backspace, mode {}, filtertext '{}', \"\n \"keystring '{}'\".format(self._last_press,\n self._filtertext,\n self._keystring))\n if self._last_press == LastPress.filtertext and self._filtertext:\n self._filtertext = self._filtertext[:-1]\n hintmanager.filter_hints(self._filtertext)\n return True\n elif self._last_press == LastPress.keystring and self._keystring:\n self._keystring = self._keystring[:-1]\n self.keystring_updated.emit(self._keystring)\n if not self._keystring and self._filtertext:\n # Switch back to hint filtering mode (this can happen only\n # in numeric mode after the number has been deleted).\n hintmanager.filter_hints(self._filtertext)\n self._last_press = LastPress.filtertext\n return True\n else:\n return super()._handle_special_key(e)\n elif hintmanager.current_mode() != 'number':\n return super()._handle_special_key(e)\n elif not e.text():\n return super()._handle_special_key(e)\n else:\n self._filtertext += e.text()\n hintmanager.filter_hints(self._filtertext)\n 
self._last_press = LastPress.filtertext\n return True\n\n def handle(self, e):\n \"\"\"Handle a new keypress and call the respective handlers.\n\n Args:\n e: the KeyPressEvent from Qt\n\n Returns:\n True if the match has been handled, False otherwise.\n \"\"\"\n match = self._handle_single_key(e)\n if match == self.Match.partial:\n self.keystring_updated.emit(self._keystring)\n self._last_press = LastPress.keystring\n return True\n elif match == self.Match.definitive:\n self._last_press = LastPress.none\n return True\n elif match == self.Match.other:\n pass\n elif match == self.Match.none:\n # We couldn't find a keychain so we check if it's a special key.\n return self._handle_special_key(e)\n else:\n raise ValueError(\"Got invalid match type {}!\".format(match))\n\n def execute(self, cmdstr, keytype, count=None):\n \"\"\"Handle a completed keychain.\"\"\"\n if not isinstance(keytype, self.Type):\n raise TypeError(\"Type {} is no Type member!\".format(keytype))\n if keytype == self.Type.chain:\n hintmanager = objreg.get('hintmanager', scope='tab',\n window=self._win_id, tab='current')\n hintmanager.handle_partial_key(cmdstr)\n else:\n # execute as command\n super().execute(cmdstr, keytype, count)\n\n def update_bindings(self, strings, preserve_filter=False):\n \"\"\"Update bindings when the hint strings changed.\n\n Args:\n strings: A list of hint strings.\n preserve_filter: Whether to keep the current value of\n `self._filtertext`.\n \"\"\"\n self.bindings = {s: s for s in strings}\n if not preserve_filter:\n self._filtertext = ''\n\n @pyqtSlot(str)\n def on_keystring_updated(self, keystr):\n \"\"\"Update hintmanager when the keystring was updated.\"\"\"\n hintmanager = objreg.get('hintmanager', scope='tab',\n window=self._win_id, tab='current')\n hintmanager.handle_partial_key(keystr)\n\n\nclass CaretKeyParser(keyparser.CommandKeyParser):\n\n \"\"\"KeyParser for caret mode.\"\"\"\n\n passthrough = True\n\n def __init__(self, win_id, parent=None):\n super().__init__(win_id, parent, supports_count=True,\n supports_chains=True)\n self.read_config('caret')\n\n\nclass RegisterKeyParser(keyparser.CommandKeyParser):\n\n \"\"\"KeyParser for modes that record a register key.\n\n Attributes:\n _mode: One of KeyMode.set_mark, KeyMode.jump_mark, KeyMode.record_macro\n and KeyMode.run_macro.\n \"\"\"\n\n def __init__(self, win_id, mode, parent=None):\n super().__init__(win_id, parent, supports_count=False,\n supports_chains=False)\n self._mode = mode\n self.read_config('register')\n\n def handle(self, e):\n \"\"\"Override handle to always match the next key and use the register.\n\n Args:\n e: the KeyPressEvent from Qt.\n\n Return:\n True if event has been handled, False otherwise.\n \"\"\"\n if super().handle(e):\n return True\n\n if utils.keyevent_to_string(e) is None:\n # this is a modifier key, let it pass and keep going\n return False\n\n key = e.text()\n\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=self._win_id)\n macro_recorder = objreg.get('macro-recorder')\n\n try:\n if self._mode == usertypes.KeyMode.set_mark:\n tabbed_browser.set_mark(key)\n elif self._mode == usertypes.KeyMode.jump_mark:\n tabbed_browser.jump_mark(key)\n elif self._mode == usertypes.KeyMode.record_macro:\n macro_recorder.record_macro(key)\n elif self._mode == usertypes.KeyMode.run_macro:\n macro_recorder.run_macro(self._win_id, key)\n else:\n raise ValueError(\n \"{} is not a valid register mode\".format(self._mode))\n except (cmdexc.CommandMetaError, cmdexc.CommandError) as err:\n 
message.error(str(err), stack=traceback.format_exc())\n\n self.request_leave.emit(self._mode, \"valid register key\", True)\n\n return True\n\n @pyqtSlot(str)\n def on_keyconfig_changed(self, mode):\n \"\"\"RegisterKeyParser has no config section (no bindable keys).\"\"\"\n pass\n", "path": "qutebrowser/keyinput/modeparsers.py"}]}
| 4,036 | 195 |
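For the qutebrowser record above, the underlying problem is that a function key such as `<F1>` is not a modifier, yet its `QKeyEvent.text()` is an empty string, so the modifier-only check let it through and a macro named `''` was recorded. Below is a Qt-free sketch of the guard the golden diff adds; the function name and the boolean flag are made up for illustration and are not qutebrowser APIs.

```python
from typing import Optional


def register_key_from_event(text: str, is_modifier: bool) -> Optional[str]:
    """Return a usable register name, or None if the event should pass through.

    `text` stands in for QKeyEvent.text(); function keys produce "" here even
    though they are not modifiers, so checking for modifiers alone is not enough.
    """
    if is_modifier or text == "":
        return None
    return text


assert register_key_from_event("a", False) == "a"   # q a  -> record macro 'a'
assert register_key_from_event("", False) is None   # q <F1> -> let it pass
assert register_key_from_event("", True) is None    # bare Shift -> let it pass
```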
gh_patches_debug_60894
|
rasdani/github-patches
|
git_diff
|
tiangolo__fastapi-493
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastAPI exceptions module mistakenly references the 'requests' package
**Describe the bug**
Starting up a FastAPI 0.38.0 app displays the following error:
```python
from fastapi import FastAPI
File ".../lib/site-packages/fastapi/__init__.py", line 7, in <module>
from .applications import FastAPI
File ".../lib/site-packages/fastapi/applications.py", line 3, in <module>
from fastapi import routing
File ".../lib/site-packages/fastapi/routing.py", line 7, in <module>
from fastapi.dependencies.models import Dependant
File ".../lib/site-packages/fastapi/dependencies/models.py", line 3, in <module>
from fastapi.security.base import SecurityBase
File ".../lib/site-packages/fastapi/security/__init__.py", line 2, in <module>
from .http import (
File ".../lib/site-packages/fastapi/security/http.py", line 5, in <module>
from fastapi.exceptions import HTTPException
File ".../lib/site-packages/fastapi/exceptions.py", line 5, in <module>
from requests import Request
ModuleNotFoundError: No module named 'requests'
```
**Expected behavior**
The app should start without import errors.
**Environment:**
- OS: Linux, Windows, and macOS
- FastAPI Version: 0.38.0
**Additional context**
It's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`
FastAPI exceptions module mistakenly references the 'requests' package
**Describe the bug**
Starting up a FastAPI 0.38.0 app displays the following error:
```python
from fastapi import FastAPI
File ".../lib/site-packages/fastapi/__init__.py", line 7, in <module>
from .applications import FastAPI
File ".../lib/site-packages/fastapi/applications.py", line 3, in <module>
from fastapi import routing
File ".../lib/site-packages/fastapi/routing.py", line 7, in <module>
from fastapi.dependencies.models import Dependant
File ".../lib/site-packages/fastapi/dependencies/models.py", line 3, in <module>
from fastapi.security.base import SecurityBase
File ".../lib/site-packages/fastapi/security/__init__.py", line 2, in <module>
from .http import (
File ".../lib/site-packages/fastapi/security/http.py", line 5, in <module>
from fastapi.exceptions import HTTPException
File ".../lib/site-packages/fastapi/exceptions.py", line 5, in <module>
from requests import Request
ModuleNotFoundError: No module named 'requests'
```
**Expected behavior**
The app should start without import errors.
**Environment:**
- OS: Linux, Windows, and macOS
- FastAPI Version: 0.38.0
**Additional context**
It's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`
</issue>
<code>
[start of fastapi/exceptions.py]
1 from typing import Any, Sequence
2
3 from pydantic import ValidationError
4 from pydantic.error_wrappers import ErrorList
5 from requests import Request
6 from starlette.exceptions import HTTPException as StarletteHTTPException
7 from starlette.websockets import WebSocket
8
9
10 class HTTPException(StarletteHTTPException):
11 def __init__(
12 self, status_code: int, detail: Any = None, headers: dict = None
13 ) -> None:
14 super().__init__(status_code=status_code, detail=detail)
15 self.headers = headers
16
17
18 class RequestValidationError(ValidationError):
19 def __init__(self, errors: Sequence[ErrorList]) -> None:
20 super().__init__(errors, Request)
21
22
23 class WebSocketRequestValidationError(ValidationError):
24 def __init__(self, errors: Sequence[ErrorList]) -> None:
25 super().__init__(errors, WebSocket)
26
[end of fastapi/exceptions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fastapi/exceptions.py b/fastapi/exceptions.py
--- a/fastapi/exceptions.py
+++ b/fastapi/exceptions.py
@@ -2,8 +2,8 @@
from pydantic import ValidationError
from pydantic.error_wrappers import ErrorList
-from requests import Request
from starlette.exceptions import HTTPException as StarletteHTTPException
+from starlette.requests import Request
from starlette.websockets import WebSocket
|
{"golden_diff": "diff --git a/fastapi/exceptions.py b/fastapi/exceptions.py\n--- a/fastapi/exceptions.py\n+++ b/fastapi/exceptions.py\n@@ -2,8 +2,8 @@\n \n from pydantic import ValidationError\n from pydantic.error_wrappers import ErrorList\n-from requests import Request\n from starlette.exceptions import HTTPException as StarletteHTTPException\n+from starlette.requests import Request\n from starlette.websockets import WebSocket\n", "issue": "FastAPI exceptions module mistakenly references the 'requests' package\n**Describe the bug**\r\nStarting up a FastAPI 0.38.0 app displays the following error:\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\n File \".../lib/site-packages/fastapi/__init__.py\", line 7, in <module>\r\n from .applications import FastAPI\r\n File \".../lib/site-packages/fastapi/applications.py\", line 3, in <module>\r\n from fastapi import routing\r\n File \".../lib/site-packages/fastapi/routing.py\", line 7, in <module>\r\n from fastapi.dependencies.models import Dependant\r\n File \".../lib/site-packages/fastapi/dependencies/models.py\", line 3, in <module>\r\n from fastapi.security.base import SecurityBase\r\n File \".../lib/site-packages/fastapi/security/__init__.py\", line 2, in <module>\r\n from .http import (\r\n File \".../lib/site-packages/fastapi/security/http.py\", line 5, in <module>\r\n from fastapi.exceptions import HTTPException\r\n File \".../lib/site-packages/fastapi/exceptions.py\", line 5, in <module>\r\n from requests import Request\r\nModuleNotFoundError: No module named 'requests'\r\n```\r\n\r\n**Expected behavior**\r\nThe app should start without import errors.\r\n\r\n**Environment:**\r\n - OS: Linux, Windows, and macOS\r\n - FastAPI Version: 0.38.0\r\n\r\n**Additional context**\r\nIt's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`\nFastAPI exceptions module mistakenly references the 'requests' package\n**Describe the bug**\r\nStarting up a FastAPI 0.38.0 app displays the following error:\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\n File \".../lib/site-packages/fastapi/__init__.py\", line 7, in <module>\r\n from .applications import FastAPI\r\n File \".../lib/site-packages/fastapi/applications.py\", line 3, in <module>\r\n from fastapi import routing\r\n File \".../lib/site-packages/fastapi/routing.py\", line 7, in <module>\r\n from fastapi.dependencies.models import Dependant\r\n File \".../lib/site-packages/fastapi/dependencies/models.py\", line 3, in <module>\r\n from fastapi.security.base import SecurityBase\r\n File \".../lib/site-packages/fastapi/security/__init__.py\", line 2, in <module>\r\n from .http import (\r\n File \".../lib/site-packages/fastapi/security/http.py\", line 5, in <module>\r\n from fastapi.exceptions import HTTPException\r\n File \".../lib/site-packages/fastapi/exceptions.py\", line 5, in <module>\r\n from requests import Request\r\nModuleNotFoundError: No module named 'requests'\r\n```\r\n\r\n**Expected behavior**\r\nThe app should start without import errors.\r\n\r\n**Environment:**\r\n - OS: Linux, Windows, and macOS\r\n - FastAPI Version: 0.38.0\r\n\r\n**Additional context**\r\nIt's likely the `from requests import Request` should be replaced with `from starlette.requests import Request` in line 5 of `fastapi/exceptions.py`\n", "before_files": [{"content": "from typing import Any, Sequence\n\nfrom pydantic import ValidationError\nfrom pydantic.error_wrappers import ErrorList\nfrom requests import Request\nfrom 
starlette.exceptions import HTTPException as StarletteHTTPException\nfrom starlette.websockets import WebSocket\n\n\nclass HTTPException(StarletteHTTPException):\n def __init__(\n self, status_code: int, detail: Any = None, headers: dict = None\n ) -> None:\n super().__init__(status_code=status_code, detail=detail)\n self.headers = headers\n\n\nclass RequestValidationError(ValidationError):\n def __init__(self, errors: Sequence[ErrorList]) -> None:\n super().__init__(errors, Request)\n\n\nclass WebSocketRequestValidationError(ValidationError):\n def __init__(self, errors: Sequence[ErrorList]) -> None:\n super().__init__(errors, WebSocket)\n", "path": "fastapi/exceptions.py"}]}
| 1,440 | 95 |
gh_patches_debug_9408
|
rasdani/github-patches
|
git_diff
|
pytorch__pytorch-5108
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
torch.nn.DataParallel supporting unequal sizes
As documented [here](http://pytorch.org/docs/master/_modules/torch/nn/parallel/data_parallel.html):
```
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is the
same size (so that each GPU processes the same number of samples).
```
To use `torch.nn.DataParallel`, people should carefully set the batch size according to the number of gpus they plan to use, otherwise it will pop up errors.
This issue becomes more subtle when using `torch.utils.data.DataLoader` with `drop_last=False` by default. As the total number of training/validation samples varies with the dataset, the size of the last batch of data loaded by `torch.utils.data.DataLoader` is easy to become indivisible by the number of GPUs (e.g., 2,3,4,8,...).
A feature request would be:
supporting `torch.nn.DataParallel` with batch size indivisible by the number of GPUs used.
</issue>
<code>
[start of torch/nn/parallel/data_parallel.py]
1 import torch
2 from ..modules import Module
3 from .scatter_gather import scatter_kwargs, gather
4 from .replicate import replicate
5 from .parallel_apply import parallel_apply
6
7
8 class DataParallel(Module):
9 r"""Implements data parallelism at the module level.
10
11 This container parallelizes the application of the given module by
12 splitting the input across the specified devices by chunking in the batch
13 dimension. In the forward pass, the module is replicated on each device,
14 and each replica handles a portion of the input. During the backwards
15 pass, gradients from each replica are summed into the original module.
16
17 The batch size should be larger than the number of GPUs used. It should
18 also be an integer multiple of the number of GPUs so that each chunk is the
19 same size (so that each GPU processes the same number of samples).
20
21 See also: :ref:`cuda-nn-dataparallel-instead`
22
23 Arbitrary positional and keyword inputs are allowed to be passed into
24 DataParallel EXCEPT Tensors. All variables will be scattered on dim
25 specified (default 0). Primitive types will be broadcasted, but all
26 other types will be a shallow copy and can be corrupted if written to in
27 the model's forward pass.
28
29 .. warning::
30 Forward and backwrad hooks defined on :attr:`module` and its submodules
31 won't be invoked anymore, unless the hooks are initialized in the
32 :meth:`forward` method.
33
34 Args:
35 module: module to be parallelized
36 device_ids: CUDA devices (default: all devices)
37 output_device: device location of output (default: device_ids[0])
38
39 Example::
40
41 >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
42 >>> output = net(input_var)
43 """
44
45 # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
46
47 def __init__(self, module, device_ids=None, output_device=None, dim=0):
48 super(DataParallel, self).__init__()
49
50 if not torch.cuda.is_available():
51 self.module = module
52 self.device_ids = []
53 return
54
55 if device_ids is None:
56 device_ids = list(range(torch.cuda.device_count()))
57 if output_device is None:
58 output_device = device_ids[0]
59 self.dim = dim
60 self.module = module
61 self.device_ids = device_ids
62 self.output_device = output_device
63 if len(self.device_ids) == 1:
64 self.module.cuda(device_ids[0])
65
66 def forward(self, *inputs, **kwargs):
67 if not self.device_ids:
68 return self.module(*inputs, **kwargs)
69 inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
70 if len(self.device_ids) == 1:
71 return self.module(*inputs[0], **kwargs[0])
72 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
73 outputs = self.parallel_apply(replicas, inputs, kwargs)
74 return self.gather(outputs, self.output_device)
75
76 def replicate(self, module, device_ids):
77 return replicate(module, device_ids)
78
79 def scatter(self, inputs, kwargs, device_ids):
80 return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
81
82 def parallel_apply(self, replicas, inputs, kwargs):
83 return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
84
85 def gather(self, outputs, output_device):
86 return gather(outputs, output_device, dim=self.dim)
87
88
89 def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
90 r"""Evaluates module(input) in parallel across the GPUs given in device_ids.
91
92 This is the functional version of the DataParallel module.
93
94 Args:
95 module: the module to evaluate in parallel
96 inputs: inputs to the module
97 device_ids: GPU ids on which to replicate module
98 output_device: GPU location of the output Use -1 to indicate the CPU.
99 (default: device_ids[0])
100 Returns:
101 a Variable containing the result of module(input) located on
102 output_device
103 """
104 if not isinstance(inputs, tuple):
105 inputs = (inputs,)
106
107 if device_ids is None:
108 device_ids = list(range(torch.cuda.device_count()))
109
110 if output_device is None:
111 output_device = device_ids[0]
112
113 inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
114 if len(device_ids) == 1:
115 return module(*inputs[0], **module_kwargs[0])
116 used_device_ids = device_ids[:len(inputs)]
117 replicas = replicate(module, used_device_ids)
118 outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
119 return gather(outputs, output_device, dim)
120
[end of torch/nn/parallel/data_parallel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torch/nn/parallel/data_parallel.py b/torch/nn/parallel/data_parallel.py
--- a/torch/nn/parallel/data_parallel.py
+++ b/torch/nn/parallel/data_parallel.py
@@ -14,9 +14,7 @@
and each replica handles a portion of the input. During the backwards
pass, gradients from each replica are summed into the original module.
- The batch size should be larger than the number of GPUs used. It should
- also be an integer multiple of the number of GPUs so that each chunk is the
- same size (so that each GPU processes the same number of samples).
+ The batch size should be larger than the number of GPUs used.
See also: :ref:`cuda-nn-dataparallel-instead`
|
{"golden_diff": "diff --git a/torch/nn/parallel/data_parallel.py b/torch/nn/parallel/data_parallel.py\n--- a/torch/nn/parallel/data_parallel.py\n+++ b/torch/nn/parallel/data_parallel.py\n@@ -14,9 +14,7 @@\n and each replica handles a portion of the input. During the backwards\n pass, gradients from each replica are summed into the original module.\n \n- The batch size should be larger than the number of GPUs used. It should\n- also be an integer multiple of the number of GPUs so that each chunk is the\n- same size (so that each GPU processes the same number of samples).\n+ The batch size should be larger than the number of GPUs used.\n \n See also: :ref:`cuda-nn-dataparallel-instead`\n", "issue": "torch.nn.DataParallel supporting unequal sizes\nAs documented [here](http://pytorch.org/docs/master/_modules/torch/nn/parallel/data_parallel.html):\r\n```\r\nThe batch size should be larger than the number of GPUs used. It should\r\n also be an integer multiple of the number of GPUs so that each chunk is the\r\n same size (so that each GPU processes the same number of samples).\r\n```\r\nTo use `torch.nn.DataParallel`, people should carefully set the batch size according to the number of gpus they plan to use, otherwise it will pop up errors. \r\n\r\nThis issue becomes more subtle when using `torch.utils.data.DataLoader` with `drop_last=False` by default. As the total number of training/validation samples varies with the dataset, the size of the last batch of data loaded by `torch.utils.data.DataLoader` is easy to become indivisible by the number of GPUs (e.g., 2,3,4,8,...).\r\n\r\nA feature request would be:\r\nsupporting `torch.nn.DataParallel` with batch size indivisible by the number of GPUs used.\n", "before_files": [{"content": "import torch\nfrom ..modules import Module\nfrom .scatter_gather import scatter_kwargs, gather\nfrom .replicate import replicate\nfrom .parallel_apply import parallel_apply\n\n\nclass DataParallel(Module):\n r\"\"\"Implements data parallelism at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. In the forward pass, the module is replicated on each device,\n and each replica handles a portion of the input. During the backwards\n pass, gradients from each replica are summed into the original module.\n\n The batch size should be larger than the number of GPUs used. It should\n also be an integer multiple of the number of GPUs so that each chunk is the\n same size (so that each GPU processes the same number of samples).\n\n See also: :ref:`cuda-nn-dataparallel-instead`\n\n Arbitrary positional and keyword inputs are allowed to be passed into\n DataParallel EXCEPT Tensors. All variables will be scattered on dim\n specified (default 0). Primitive types will be broadcasted, but all\n other types will be a shallow copy and can be corrupted if written to in\n the model's forward pass.\n\n .. 
warning::\n Forward and backwrad hooks defined on :attr:`module` and its submodules\n won't be invoked anymore, unless the hooks are initialized in the\n :meth:`forward` method.\n\n Args:\n module: module to be parallelized\n device_ids: CUDA devices (default: all devices)\n output_device: device location of output (default: device_ids[0])\n\n Example::\n\n >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])\n >>> output = net(input_var)\n \"\"\"\n\n # TODO: update notes/cuda.rst when this class handles 8+ GPUs well\n\n def __init__(self, module, device_ids=None, output_device=None, dim=0):\n super(DataParallel, self).__init__()\n\n if not torch.cuda.is_available():\n self.module = module\n self.device_ids = []\n return\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n if output_device is None:\n output_device = device_ids[0]\n self.dim = dim\n self.module = module\n self.device_ids = device_ids\n self.output_device = output_device\n if len(self.device_ids) == 1:\n self.module.cuda(device_ids[0])\n\n def forward(self, *inputs, **kwargs):\n if not self.device_ids:\n return self.module(*inputs, **kwargs)\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n return self.module(*inputs[0], **kwargs[0])\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = self.parallel_apply(replicas, inputs, kwargs)\n return self.gather(outputs, self.output_device)\n\n def replicate(self, module, device_ids):\n return replicate(module, device_ids)\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def parallel_apply(self, replicas, inputs, kwargs):\n return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n\ndef data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):\n r\"\"\"Evaluates module(input) in parallel across the GPUs given in device_ids.\n\n This is the functional version of the DataParallel module.\n\n Args:\n module: the module to evaluate in parallel\n inputs: inputs to the module\n device_ids: GPU ids on which to replicate module\n output_device: GPU location of the output Use -1 to indicate the CPU.\n (default: device_ids[0])\n Returns:\n a Variable containing the result of module(input) located on\n output_device\n \"\"\"\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n\n if output_device is None:\n output_device = device_ids[0]\n\n inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)\n if len(device_ids) == 1:\n return module(*inputs[0], **module_kwargs[0])\n used_device_ids = device_ids[:len(inputs)]\n replicas = replicate(module, used_device_ids)\n outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)\n return gather(outputs, output_device, dim)\n", "path": "torch/nn/parallel/data_parallel.py"}]}
| 2,062 | 176 |
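A note on why the divisibility sentence could simply be deleted in the record above: the scatter step splits the batch along `dim` into chunks of width roughly `ceil(batch / n_gpus)`, and `DataParallel.forward` then replicates onto `device_ids[:len(inputs)]`, so a short last chunk (or a missing one) is tolerated rather than rejected. The helper below is a plain-Python approximation of that chunking, written under the assumption of ceil-sized chunks; it is an illustration, not the actual scatter code.

```python
import math
from typing import List


def chunk_sizes(batch_size: int, num_devices: int) -> List[int]:
    """Approximate how a batch is split across devices: fixed width of
    ceil(batch_size / num_devices), so the last chunk may be smaller and
    trailing devices may receive no chunk at all."""
    width = math.ceil(batch_size / num_devices)
    sizes = []
    remaining = batch_size
    while remaining > 0:
        take = min(width, remaining)
        sizes.append(take)
        remaining -= take
    return sizes


print(chunk_sizes(10, 3))  # [4, 4, 2] -> every GPU gets work, sizes differ
print(chunk_sizes(5, 4))   # [2, 2, 1] -> only 3 chunks; forward() uses
                           # device_ids[:len(inputs)], so the 4th GPU sits out
```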
gh_patches_debug_965
|
rasdani/github-patches
|
git_diff
|
tiangolo__fastapi-9468
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FastAPI tests in pydantic failing due to flask deprecation
### Privileged issue
- [X] I'm @tiangolo or he asked me directly to create an issue here.
### Issue Content
hope you don't mind me creating an issue, pydantic's 1.10.X tests are failing due to a new issue with running our fastapi tests, see
https://github.com/pydantic/pydantic/actions/runs/4832692304/jobs/8611783607?pr=5628
output from pydantic's tests:
```
==================================== ERRORS ====================================
______ ERROR collecting tests/test_tutorial/test_wsgi/test_tutorial001.py ______
tests/test_tutorial/test_wsgi/test_tutorial001.py:3: in <module>
from docs_src.wsgi.tutorial001 import app
docs_src/wsgi/tutorial001.py:3: in <module>
from flask import Flask, escape, request
<frozen importlib._bootstrap>:1075: in _handle_fromlist
???
/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/flask/__init__.py:71: in __getattr__
warnings.warn(
E DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' instead.
=========================== short test summary info ============================
ERROR tests/test_tutorial/test_wsgi/test_tutorial001.py - DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape'
```
related to https://github.com/pydantic/pydantic/pull/5628
</issue>
<code>
[start of docs_src/wsgi/tutorial001.py]
1 from fastapi import FastAPI
2 from fastapi.middleware.wsgi import WSGIMiddleware
3 from flask import Flask, escape, request
4
5 flask_app = Flask(__name__)
6
7
8 @flask_app.route("/")
9 def flask_main():
10 name = request.args.get("name", "World")
11 return f"Hello, {escape(name)} from Flask!"
12
13
14 app = FastAPI()
15
16
17 @app.get("/v2")
18 def read_main():
19 return {"message": "Hello World"}
20
21
22 app.mount("/v1", WSGIMiddleware(flask_app))
23
[end of docs_src/wsgi/tutorial001.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs_src/wsgi/tutorial001.py b/docs_src/wsgi/tutorial001.py
--- a/docs_src/wsgi/tutorial001.py
+++ b/docs_src/wsgi/tutorial001.py
@@ -1,6 +1,7 @@
from fastapi import FastAPI
from fastapi.middleware.wsgi import WSGIMiddleware
-from flask import Flask, escape, request
+from flask import Flask, request
+from markupsafe import escape
flask_app = Flask(__name__)
|
{"golden_diff": "diff --git a/docs_src/wsgi/tutorial001.py b/docs_src/wsgi/tutorial001.py\n--- a/docs_src/wsgi/tutorial001.py\n+++ b/docs_src/wsgi/tutorial001.py\n@@ -1,6 +1,7 @@\n from fastapi import FastAPI\n from fastapi.middleware.wsgi import WSGIMiddleware\n-from flask import Flask, escape, request\n+from flask import Flask, request\n+from markupsafe import escape\n \n flask_app = Flask(__name__)\n", "issue": "FastAPI tests in pydantic failing due to flask deprecation\n### Privileged issue\n\n- [X] I'm @tiangolo or he asked me directly to create an issue here.\n\n### Issue Content\n\nhope you don't mind me creating an issue, pydantic's 1.10.X tests are failing due to a new issue with running our fastapi tests, see\r\n\r\nhttps://github.com/pydantic/pydantic/actions/runs/4832692304/jobs/8611783607?pr=5628\r\n\r\noutput from pydantic's tests:\r\n\r\n```\r\n==================================== ERRORS ====================================\r\n______ ERROR collecting tests/test_tutorial/test_wsgi/test_tutorial001.py ______\r\ntests/test_tutorial/test_wsgi/test_tutorial001.py:3: in <module>\r\n from docs_src.wsgi.tutorial001 import app\r\ndocs_src/wsgi/tutorial001.py:3: in <module>\r\n from flask import Flask, escape, request\r\n<frozen importlib._bootstrap>:1075: in _handle_fromlist\r\n ???\r\n/opt/hostedtoolcache/Python/3.10.11/x64/lib/python3.10/site-packages/flask/__init__.py:71: in __getattr__\r\n warnings.warn(\r\nE DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' instead.\r\n=========================== short test summary info ============================\r\nERROR tests/test_tutorial/test_wsgi/test_tutorial001.py - DeprecationWarning: 'flask.escape' is deprecated and will be removed in Flask 2.4. Import 'markupsafe.escape' \r\n```\r\n\r\nrelated to https://github.com/pydantic/pydantic/pull/5628\n", "before_files": [{"content": "from fastapi import FastAPI\nfrom fastapi.middleware.wsgi import WSGIMiddleware\nfrom flask import Flask, escape, request\n\nflask_app = Flask(__name__)\n\n\n@flask_app.route(\"/\")\ndef flask_main():\n name = request.args.get(\"name\", \"World\")\n return f\"Hello, {escape(name)} from Flask!\"\n\n\napp = FastAPI()\n\n\[email protected](\"/v2\")\ndef read_main():\n return {\"message\": \"Hello World\"}\n\n\napp.mount(\"/v1\", WSGIMiddleware(flask_app))\n", "path": "docs_src/wsgi/tutorial001.py"}]}
| 1,099 | 109 |
gh_patches_debug_4721
|
rasdani/github-patches
|
git_diff
|
opensearch-project__opensearch-build-3240
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: src/system/os.py does not correctly return architecture for bsd platform
### Describe the bug
Running `uname -m` returns the following on FreeBSD:
```
amd64
```
The code here does not support `amd64` as input:
https://github.com/opensearch-project/opensearch-build/blob/main/src/system/os.py#L12-L19
```
def current_architecture() -> str:
architecture = subprocess.check_output(["uname", "-m"]).decode().strip()
if architecture == "x86_64":
return "x64"
elif architecture == "aarch64" or architecture == "arm64":
return "arm64"
else:
raise ValueError(f"Unsupported architecture: {architecture}")
```
Thanks.
### To reproduce
Run the build process on a freebsd server and see output:
```
$ ./build.sh manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch
Installing dependencies in . ...
Installing dependencies from Pipfile.lock (b36c9c)...
To activate this project's virtualenv, run pipenv shell.
Alternatively, run a command inside the virtualenv with pipenv run.
Running ./src/run_build.py manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch ...
2023-02-23 23:15:47 INFO Building in /tmp/tmpllimwxjs
2023-02-23 23:15:47 INFO Removing /tmp/tmpllimwxjs
Traceback (most recent call last):
File "./src/run_build.py", line 81, in <module>
sys.exit(main())
File "./src/run_build.py", line 55, in main
architecture=args.architecture or manifest.build.architecture,
File "/usr/share/opensearch/opensearch-build/src/build_workflow/build_target.py", line 45, in __init__
self.architecture = architecture or current_architecture()
File "/usr/share/opensearch/opensearch-build/src/system/os.py", line 20, in current_architecture
raise ValueError(f"Unsupported architecture: {architecture}")
ValueError: Unsupported architecture: amd64
```
### Expected behavior
The bsd x64 hosts can run the code without specifying --architecture x64.
### Screenshots
If applicable, add screenshots to help explain your problem.
### Host / Environment
_No response_
### Additional context
_No response_
### Relevant log output
_No response_
</issue>
<code>
[start of src/system/os.py]
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8 import os
9 import subprocess
10
11
12 def current_architecture() -> str:
13 architecture = subprocess.check_output(["uname", "-m"]).decode().strip()
14 if architecture == "x86_64":
15 return "x64"
16 elif architecture == "aarch64" or architecture == "arm64":
17 return "arm64"
18 else:
19 raise ValueError(f"Unsupported architecture: {architecture}")
20
21
22 def current_platform() -> str:
23 if os.name == "nt":
24 return "windows"
25 else:
26 return subprocess.check_output(["uname", "-s"]).decode().strip().lower()
27
28
29 def deb_architecture(architecture: str) -> str:
30 # This would convert arch from "current_architecture" to deb specific architecture alternatives
31
32 deb_architecture_map = {
33 "x64": "amd64",
34 "arm64": "arm64",
35 }
36
37 return deb_architecture_map[architecture]
38
39
40 def rpm_architecture(architecture: str) -> str:
41 # This would convert arch from "current_architecture" to rpm specific architecture alternatives
42
43 rpm_architecture_map = {
44 "x64": "x86_64",
45 "arm64": "aarch64",
46 }
47
48 return rpm_architecture_map[architecture]
49
[end of src/system/os.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/system/os.py b/src/system/os.py
--- a/src/system/os.py
+++ b/src/system/os.py
@@ -11,7 +11,7 @@
def current_architecture() -> str:
architecture = subprocess.check_output(["uname", "-m"]).decode().strip()
- if architecture == "x86_64":
+ if architecture == "x86_64" or architecture == "amd64":
return "x64"
elif architecture == "aarch64" or architecture == "arm64":
return "arm64"
|
{"golden_diff": "diff --git a/src/system/os.py b/src/system/os.py\n--- a/src/system/os.py\n+++ b/src/system/os.py\n@@ -11,7 +11,7 @@\n \n def current_architecture() -> str:\n architecture = subprocess.check_output([\"uname\", \"-m\"]).decode().strip()\n- if architecture == \"x86_64\":\n+ if architecture == \"x86_64\" or architecture == \"amd64\":\n return \"x64\"\n elif architecture == \"aarch64\" or architecture == \"arm64\":\n return \"arm64\"\n", "issue": "[Bug]: src/system/os.py does not correctly return architecture for bsd platform\n### Describe the bug\r\n\r\n\r\nRun `uname -m` will return follow in the freebsd:\r\n```\r\namd64\r\n```\r\n\r\nThe code here does not support `amd64` as input:\r\nhttps://github.com/opensearch-project/opensearch-build/blob/main/src/system/os.py#L12-L19\r\n```\r\ndef current_architecture() -> str:\r\n architecture = subprocess.check_output([\"uname\", \"-m\"]).decode().strip()\r\n if architecture == \"x86_64\":\r\n return \"x64\"\r\n elif architecture == \"aarch64\" or architecture == \"arm64\":\r\n return \"arm64\"\r\n else:\r\n raise ValueError(f\"Unsupported architecture: {architecture}\")\r\n```\r\n\r\n\r\n\r\nThanks.\r\n\r\n\r\n### To reproduce\r\n\r\nRun the build process on a freebsd server and see output:\r\n```\r\n$ ./build.sh manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch\r\nInstalling dependencies in . ...\r\nInstalling dependencies from Pipfile.lock (b36c9c)...\r\nTo activate this project's virtualenv, run pipenv shell.\r\nAlternatively, run a command inside the virtualenv with pipenv run.\r\nRunning ./src/run_build.py manifests/2.4.0/opensearch-2.4.0.yml --component OpenSearch ...\r\n2023-02-23 23:15:47 INFO Building in /tmp/tmpllimwxjs\r\n2023-02-23 23:15:47 INFO Removing /tmp/tmpllimwxjs\r\nTraceback (most recent call last):\r\n File \"./src/run_build.py\", line 81, in <module>\r\n sys.exit(main())\r\n File \"./src/run_build.py\", line 55, in main\r\n architecture=args.architecture or manifest.build.architecture,\r\n File \"/usr/share/opensearch/opensearch-build/src/build_workflow/build_target.py\", line 45, in __init__\r\n self.architecture = architecture or current_architecture()\r\n File \"/usr/share/opensearch/opensearch-build/src/system/os.py\", line 20, in current_architecture\r\n raise ValueError(f\"Unsupported architecture: {architecture}\")\r\nValueError: Unsupported architecture: amd64\r\n\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe bsd x64 hosts can run the code without specifying --architecture x64.\r\n\r\n### Screenshots\r\n\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n### Host / Environment\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\r\n\r\n### Relevant log output\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport os\nimport subprocess\n\n\ndef current_architecture() -> str:\n architecture = subprocess.check_output([\"uname\", \"-m\"]).decode().strip()\n if architecture == \"x86_64\":\n return \"x64\"\n elif architecture == \"aarch64\" or architecture == \"arm64\":\n return \"arm64\"\n else:\n raise ValueError(f\"Unsupported architecture: {architecture}\")\n\n\ndef current_platform() -> str:\n if os.name == \"nt\":\n return \"windows\"\n else:\n return subprocess.check_output([\"uname\", 
\"-s\"]).decode().strip().lower()\n\n\ndef deb_architecture(architecture: str) -> str:\n # This would convert arch from \"current_architecture\" to deb specific architecture alternatives\n\n deb_architecture_map = {\n \"x64\": \"amd64\",\n \"arm64\": \"arm64\",\n }\n\n return deb_architecture_map[architecture]\n\n\ndef rpm_architecture(architecture: str) -> str:\n # This would convert arch from \"current_architecture\" to rpm specific architecture alternatives\n\n rpm_architecture_map = {\n \"x64\": \"x86_64\",\n \"arm64\": \"aarch64\",\n }\n\n return rpm_architecture_map[architecture]\n", "path": "src/system/os.py"}]}
| 1,518 | 131 |
gh_patches_debug_20228
|
rasdani/github-patches
|
git_diff
|
googleapis__google-api-python-client-2152
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`<1dev` is not a valid matcher for Python package versions in `setup.py`!
On the current master branch, you have this matcher for an `install_requires` package version in [setup.py](https://github.com/googleapis/google-api-python-client/blob/110667251e8b2c8852945bfe238f399742148cda/setup.py#L36):
```python
install_requires = [
"httplib2>=0.15.0,<1dev",
```
The `<1dev` part is ~not~ a valid version specifier (edit: it is valid, just doesn't parse correctly with distlib) in the sense of [PEP 440](https://peps.python.org/pep-0440/) and causes problems with a number of packing tools, as seen here:
```shell
$ python -c 'from distlib.version import NormalizedMatcher; NormalizedMatcher("httplib2>=0.15.0,<1dev")'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 125, in __init__
vn, prefix = self.version_class(s), False
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 33, in __init__
self._parts = parts = self.parse(s)
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 267, in parse
result = _normalized_key(s)
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 188, in _pep_440_key
raise UnsupportedVersionError('Not a valid version: %s' % s)
distlib.version.UnsupportedVersionError: Not a valid version: 1dev
```
This is just speculation, but it looks like it was either faultily auto-generated by some tool, is an incomplete copy & paste or you meant `<1.dev0`. However, development versions are not typically considered by tooling and it's considered bad practice to but an upper bound on python dependencies for no good reason. This should be corrected and ideally the artifact files of the effected versions fixed and re-uploaded on pypi.org.
`<1dev` is not a valid matcher for Python package versions in `setup.py`!
On the current master branch, you have this matcher for an `install_requires` package version in [setup.py](https://github.com/googleapis/google-api-python-client/blob/110667251e8b2c8852945bfe238f399742148cda/setup.py#L36):
```python
install_requires = [
"httplib2>=0.15.0,<1dev",
```
The `<1dev` part is ~not~ a valid version specifier (edit: it is valid, just doesn't parse correctly with distlib) in the sense of [PEP 440](https://peps.python.org/pep-0440/) and causes problems with a number of packing tools, as seen here:
```shell
$ python -c 'from distlib.version import NormalizedMatcher; NormalizedMatcher("httplib2>=0.15.0,<1dev")'
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 125, in __init__
vn, prefix = self.version_class(s), False
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 33, in __init__
self._parts = parts = self.parse(s)
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 267, in parse
result = _normalized_key(s)
File "/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py", line 188, in _pep_440_key
raise UnsupportedVersionError('Not a valid version: %s' % s)
distlib.version.UnsupportedVersionError: Not a valid version: 1dev
```
This is just speculation, but it looks like it was either faultily auto-generated by some tool, is an incomplete copy & paste or you meant `<1.dev0`. However, development versions are not typically considered by tooling and it's considered bad practice to but an upper bound on python dependencies for no good reason. This should be corrected and ideally the artifact files of the effected versions fixed and re-uploaded on pypi.org.
</issue>
<code>
[start of setup.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (3, 7):
25 print("google-api-python-client requires python3 version >= 3.7.", file=sys.stderr)
26 sys.exit(1)
27
28 import io
29 import os
30
31 from setuptools import setup
32
33 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
34
35 install_requires = [
36 "httplib2>=0.15.0,<1dev",
37 # NOTE: Maintainers, please do not require google-auth>=2.x.x
38 # Until this issue is closed
39 # https://github.com/googleapis/google-cloud-python/issues/10566
40 "google-auth>=1.19.0,<3.0.0dev",
41 "google-auth-httplib2>=0.1.0",
42 # NOTE: Maintainers, please do not require google-api-core>=2.x.x
43 # Until this issue is closed
44 # https://github.com/googleapis/google-cloud-python/issues/10566
45 "google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
46 "uritemplate>=3.0.1,<5",
47 ]
48
49 package_root = os.path.abspath(os.path.dirname(__file__))
50
51 readme_filename = os.path.join(package_root, "README.md")
52 with io.open(readme_filename, encoding="utf-8") as readme_file:
53 readme = readme_file.read()
54
55 package_root = os.path.abspath(os.path.dirname(__file__))
56
57 version = {}
58 with open(os.path.join(package_root, "googleapiclient/version.py")) as fp:
59 exec(fp.read(), version)
60 version = version["__version__"]
61
62 setup(
63 name="google-api-python-client",
64 version=version,
65 description="Google API Client Library for Python",
66 long_description=readme,
67 long_description_content_type="text/markdown",
68 author="Google LLC",
69 author_email="[email protected]",
70 url="https://github.com/googleapis/google-api-python-client/",
71 install_requires=install_requires,
72 python_requires=">=3.7",
73 packages=packages,
74 package_data={"googleapiclient": ["discovery_cache/documents/*.json"]},
75 license="Apache 2.0",
76 keywords="google api client",
77 classifiers=[
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.7",
80 "Programming Language :: Python :: 3.8",
81 "Programming Language :: Python :: 3.9",
82 "Programming Language :: Python :: 3.10",
83 "Programming Language :: Python :: 3.11",
84 "Development Status :: 5 - Production/Stable",
85 "Intended Audience :: Developers",
86 "License :: OSI Approved :: Apache Software License",
87 "Operating System :: OS Independent",
88 "Topic :: Internet :: WWW/HTTP",
89 ],
90 )
91
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,16 +33,16 @@
packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
install_requires = [
- "httplib2>=0.15.0,<1dev",
+ "httplib2>=0.15.0,<1.dev0",
# NOTE: Maintainers, please do not require google-auth>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
- "google-auth>=1.19.0,<3.0.0dev",
+ "google-auth>=1.19.0,<3.0.0.dev0",
"google-auth-httplib2>=0.1.0",
# NOTE: Maintainers, please do not require google-api-core>=2.x.x
# Until this issue is closed
# https://github.com/googleapis/google-cloud-python/issues/10566
- "google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
+ "google-api-core >= 1.31.5, <3.0.0.dev0,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0",
"uritemplate>=3.0.1,<5",
]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,16 +33,16 @@\n packages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n \n install_requires = [\n- \"httplib2>=0.15.0,<1dev\",\n+ \"httplib2>=0.15.0,<1.dev0\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n- \"google-auth>=1.19.0,<3.0.0dev\",\n+ \"google-auth>=1.19.0,<3.0.0.dev0\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n- \"google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n+ \"google-api-core >= 1.31.5, <3.0.0.dev0,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"uritemplate>=3.0.1,<5\",\n ]\n", "issue": "`<1dev` is not a valid matcher for Python package versions in `setup.py`!\nOn the current master branch, you have this matcher for an `install_requires` package version in [setup.py](https://github.com/googleapis/google-api-python-client/blob/110667251e8b2c8852945bfe238f399742148cda/setup.py#L36):\r\n\r\n```python\r\ninstall_requires = [\r\n \"httplib2>=0.15.0,<1dev\",\r\n```\r\n\r\nThe `<1dev` part is ~not~ a valid version specifier (edit: it is valid, just doesn't parse correctly with distlib) in the sense of [PEP 440](https://peps.python.org/pep-0440/) and causes problems with a number of packing tools, as seen here:\r\n\r\n```shell\r\n$ python -c 'from distlib.version import NormalizedMatcher; NormalizedMatcher(\"httplib2>=0.15.0,<1dev\")'\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 125, in __init__\r\n vn, prefix = self.version_class(s), False\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 33, in __init__\r\n self._parts = parts = self.parse(s)\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 267, in parse\r\n result = _normalized_key(s)\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 188, in _pep_440_key\r\n raise UnsupportedVersionError('Not a valid version: %s' % s)\r\ndistlib.version.UnsupportedVersionError: Not a valid version: 1dev\r\n```\r\n\r\nThis is just speculation, but it looks like it was either faultily auto-generated by some tool, is an incomplete copy & paste or you meant `<1.dev0`. However, development versions are not typically considered by tooling and it's considered bad practice to but an upper bound on python dependencies for no good reason. 
This should be corrected and ideally the artifact files of the effected versions fixed and re-uploaded on pypi.org.\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n`<1dev` is not a valid matcher for Python package versions in `setup.py`!\nOn the current master branch, you have this matcher for an `install_requires` package version in [setup.py](https://github.com/googleapis/google-api-python-client/blob/110667251e8b2c8852945bfe238f399742148cda/setup.py#L36):\r\n\r\n```python\r\ninstall_requires = [\r\n \"httplib2>=0.15.0,<1dev\",\r\n```\r\n\r\nThe `<1dev` part is ~not~ a valid version specifier (edit: it is valid, just doesn't parse correctly with distlib) in the sense of [PEP 440](https://peps.python.org/pep-0440/) and causes problems with a number of packing tools, as seen here:\r\n\r\n```shell\r\n$ python -c 'from distlib.version import NormalizedMatcher; NormalizedMatcher(\"httplib2>=0.15.0,<1dev\")'\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 125, in __init__\r\n vn, prefix = self.version_class(s), False\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 33, in __init__\r\n self._parts = parts = self.parse(s)\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 267, in parse\r\n result = _normalized_key(s)\r\n File \"/home/jan/.local/share/virtualenvs/master_clean-V3vlFZeD/lib/python3.7/site-packages/distlib/version.py\", line 188, in _pep_440_key\r\n raise UnsupportedVersionError('Not a valid version: %s' % s)\r\ndistlib.version.UnsupportedVersionError: Not a valid version: 1dev\r\n```\r\n\r\nThis is just speculation, but it looks like it was either faultily auto-generated by some tool, is an incomplete copy & paste or you meant `<1.dev0`. However, development versions are not typically considered by tooling and it's considered bad practice to but an upper bound on python dependencies for no good reason. This should be corrected and ideally the artifact files of the effected versions fixed and re-uploaded on pypi.org.\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (3, 7):\n print(\"google-api-python-client requires python3 version >= 3.7.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\n\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n # NOTE: Maintainers, please do not require google-auth>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-auth>=1.19.0,<3.0.0dev\",\n \"google-auth-httplib2>=0.1.0\",\n # NOTE: Maintainers, please do not require google-api-core>=2.x.x\n # Until this issue is closed\n # https://github.com/googleapis/google-cloud-python/issues/10566\n \"google-api-core >= 1.31.5, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.0\",\n \"uritemplate>=3.0.1,<5\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"googleapiclient/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=3.7\",\n packages=packages,\n package_data={\"googleapiclient\": [\"discovery_cache/documents/*.json\"]},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
| 2,686 | 349 |
gh_patches_debug_558
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-691
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 1.6.4
On the docket:
+ [x] Restore pex.pex_bootstrapper.is_compressed API #684
+ [ ] Release more flexible pex binaries. #654
+ [x] If an `--interpreter-constraint` is set, it should always be honored. #656
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '1.6.3'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '1.6.3'
+__version__ = '1.6.4'
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.6.3'\n+__version__ = '1.6.4'\n", "issue": "Release 1.6.4\nOn the docket:\r\n+ [x] Restore pex.pex_bootstrapper.is_compressed API #684\r\n+ [ ] Release more flexible pex binaries. #654\r\n + [x] If an `--interpreter-constraint` is set, it should always be honored. #656\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.6.3'\n", "path": "pex/version.py"}]}
| 657 | 94 |
gh_patches_debug_39153
|
rasdani/github-patches
|
git_diff
|
pallets__click-1059
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow sorting of completions based on custom types
Currently completion candidates returned by click are sorted in ZSH based on alphanumeric rules. This means that completions ['1', '2', '10'] would be displayed out of natural sort order as ['1', '10', '2']. Update the completion script to bring the sorting into Python where custom types can be sorted more appropriately. Note that the version of bash that ships with OS X is v3.2. The nosort option for `complete` was introduced in v4.4. This change should support both.
</issue>
<code>
[start of click/_bashcomplete.py]
1 import collections
2 import copy
3 import os
4 import re
5
6 from .utils import echo
7 from .parser import split_arg_string
8 from .core import MultiCommand, Option, Argument
9 from .types import Choice
10
11 WORDBREAK = '='
12
13 COMPLETION_SCRIPT_BASH = '''
14 %(complete_func)s() {
15 local IFS=$'\n'
16 COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
17 COMP_CWORD=$COMP_CWORD \\
18 %(autocomplete_var)s=complete $1 ) )
19 return 0
20 }
21
22 complete -F %(complete_func)s %(script_names)s
23 '''
24
25 COMPLETION_SCRIPT_ZSH = '''
26 %(complete_func)s() {
27 local -a completions
28 local -a completions_with_descriptions
29 local -a response
30 response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
31 COMP_CWORD=$((CURRENT-1)) \\
32 %(autocomplete_var)s=\"complete_zsh\" \\
33 %(script_names)s )}")
34
35 for key descr in ${(kv)response}; do
36 if [[ "$descr" == "_" ]]; then
37 completions+=("$key")
38 else
39 completions_with_descriptions+=("$key":"$descr")
40 fi
41 done
42
43 if [ -n "$completions_with_descriptions" ]; then
44 _describe '' completions_with_descriptions
45 fi
46 if [ -n "$completions" ]; then
47 compadd -M 'r:|=* l:|=* r:|=*' -a completions
48 fi
49 }
50
51 compdef %(complete_func)s %(script_names)s
52 '''
53
54 _invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')
55
56
57 def get_completion_script(prog_name, complete_var, shell):
58 cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))
59 script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH
60 return (script % {
61 'complete_func': '_%s_completion' % cf_name,
62 'script_names': prog_name,
63 'autocomplete_var': complete_var,
64 }).strip() + ';'
65
66
67 def resolve_ctx(cli, prog_name, args):
68 """
69 Parse into a hierarchy of contexts. Contexts are connected through the parent variable.
70 :param cli: command definition
71 :param prog_name: the program that is running
72 :param args: full list of args
73 :return: the final context/command parsed
74 """
75 ctx = cli.make_context(prog_name, args, resilient_parsing=True)
76 args = ctx.protected_args + ctx.args
77 while args:
78 if isinstance(ctx.command, MultiCommand):
79 if not ctx.command.chain:
80 cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
81 if cmd is None:
82 return ctx
83 ctx = cmd.make_context(cmd_name, args, parent=ctx,
84 resilient_parsing=True)
85 args = ctx.protected_args + ctx.args
86 else:
87 # Walk chained subcommand contexts saving the last one.
88 while args:
89 cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
90 if cmd is None:
91 return ctx
92 sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,
93 allow_extra_args=True,
94 allow_interspersed_args=False,
95 resilient_parsing=True)
96 args = sub_ctx.args
97 ctx = sub_ctx
98 args = sub_ctx.protected_args + sub_ctx.args
99 else:
100 break
101 return ctx
102
103
104 def start_of_option(param_str):
105 """
106 :param param_str: param_str to check
107 :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--")
108 """
109 return param_str and param_str[:1] == '-'
110
111
112 def is_incomplete_option(all_args, cmd_param):
113 """
114 :param all_args: the full original list of args supplied
115 :param cmd_param: the current command paramter
116 :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and
117 corresponds to this cmd_param. In other words whether this cmd_param option can still accept
118 values
119 """
120 if not isinstance(cmd_param, Option):
121 return False
122 if cmd_param.is_flag:
123 return False
124 last_option = None
125 for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
126 if index + 1 > cmd_param.nargs:
127 break
128 if start_of_option(arg_str):
129 last_option = arg_str
130
131 return True if last_option and last_option in cmd_param.opts else False
132
133
134 def is_incomplete_argument(current_params, cmd_param):
135 """
136 :param current_params: the current params and values for this argument as already entered
137 :param cmd_param: the current command parameter
138 :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In
139 other words whether or not the this cmd_param argument can still accept values
140 """
141 if not isinstance(cmd_param, Argument):
142 return False
143 current_param_values = current_params[cmd_param.name]
144 if current_param_values is None:
145 return True
146 if cmd_param.nargs == -1:
147 return True
148 if isinstance(current_param_values, collections.Iterable) \
149 and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
150 return True
151 return False
152
153
154 def get_user_autocompletions(ctx, args, incomplete, cmd_param):
155 """
156 :param ctx: context associated with the parsed command
157 :param args: full list of args
158 :param incomplete: the incomplete text to autocomplete
159 :param cmd_param: command definition
160 :return: all the possible user-specified completions for the param
161 """
162 results = []
163 if isinstance(cmd_param.type, Choice):
164 # Choices don't support descriptions.
165 results = [(c, None)
166 for c in cmd_param.type.choices if c.startswith(incomplete)]
167 elif cmd_param.autocompletion is not None:
168 dynamic_completions = cmd_param.autocompletion(ctx=ctx,
169 args=args,
170 incomplete=incomplete)
171 results = [c if isinstance(c, tuple) else (c, None)
172 for c in dynamic_completions]
173 return results
174
175
176 def add_subcommand_completions(ctx, incomplete, completions_out):
177 # Add subcommand completions.
178 if isinstance(ctx.command, MultiCommand):
179 completions_out.extend(
180 [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in ctx.command.list_commands(ctx) if c.startswith(incomplete)])
181
182 # Walk up the context list and add any other completion possibilities from chained commands
183 while ctx.parent is not None:
184 ctx = ctx.parent
185 if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
186 remaining_commands = sorted(
187 set(ctx.command.list_commands(ctx)) - set(ctx.protected_args))
188 completions_out.extend(
189 [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in remaining_commands if c.startswith(incomplete)])
190
191
192 def get_choices(cli, prog_name, args, incomplete):
193 """
194 :param cli: command definition
195 :param prog_name: the program that is running
196 :param args: full list of args
197 :param incomplete: the incomplete text to autocomplete
198 :return: all the possible completions for the incomplete
199 """
200 all_args = copy.deepcopy(args)
201
202 ctx = resolve_ctx(cli, prog_name, args)
203 if ctx is None:
204 return []
205
206 # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
207 # without the '='
208 if start_of_option(incomplete) and WORDBREAK in incomplete:
209 partition_incomplete = incomplete.partition(WORDBREAK)
210 all_args.append(partition_incomplete[0])
211 incomplete = partition_incomplete[2]
212 elif incomplete == WORDBREAK:
213 incomplete = ''
214
215 completions = []
216 if start_of_option(incomplete):
217 # completions for partial options
218 for param in ctx.command.params:
219 if isinstance(param, Option):
220 param_opts = [param_opt for param_opt in param.opts +
221 param.secondary_opts if param_opt not in all_args or param.multiple]
222 completions.extend(
223 [(o, param.help) for o in param_opts if o.startswith(incomplete)])
224 return completions
225 # completion for option values from user supplied values
226 for param in ctx.command.params:
227 if is_incomplete_option(all_args, param):
228 return get_user_autocompletions(ctx, all_args, incomplete, param)
229 # completion for argument values from user supplied values
230 for param in ctx.command.params:
231 if is_incomplete_argument(ctx.params, param):
232 return get_user_autocompletions(ctx, all_args, incomplete, param)
233
234 add_subcommand_completions(ctx, incomplete, completions)
235 return completions
236
237
238 def do_complete(cli, prog_name, include_descriptions):
239 cwords = split_arg_string(os.environ['COMP_WORDS'])
240 cword = int(os.environ['COMP_CWORD'])
241 args = cwords[1:cword]
242 try:
243 incomplete = cwords[cword]
244 except IndexError:
245 incomplete = ''
246
247 for item in get_choices(cli, prog_name, args, incomplete):
248 echo(item[0])
249 if include_descriptions:
250 # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present.
251 echo(item[1] if item[1] else '_')
252
253 return True
254
255
256 def bashcomplete(cli, prog_name, complete_var, complete_instr):
257 if complete_instr.startswith('source'):
258 shell = 'zsh' if complete_instr == 'source_zsh' else 'bash'
259 echo(get_completion_script(prog_name, complete_var, shell))
260 return True
261 elif complete_instr == 'complete' or complete_instr == 'complete_zsh':
262 return do_complete(cli, prog_name, complete_instr == 'complete_zsh')
263 return False
264
[end of click/_bashcomplete.py]
[start of examples/bashcompletion/bashcompletion.py]
1 import click
2 import os
3
4
5 @click.group()
6 def cli():
7 pass
8
9
10 def get_env_vars(ctx, args, incomplete):
11 for key in os.environ.keys():
12 if incomplete in key:
13 yield key
14
15
16 @cli.command(help='A command to print environment variables')
17 @click.argument("envvar", type=click.STRING, autocompletion=get_env_vars)
18 def cmd1(envvar):
19 click.echo('Environment variable: %s' % envvar)
20 click.echo('Value: %s' % os.environ[envvar])
21
22
23 @click.group(help='A group that holds a subcommand')
24 def group():
25 pass
26
27
28 def list_users(ctx, args, incomplete):
29 # Here you can generate completions dynamically
30 users = ['bob', 'alice']
31 for user in users:
32 if user.startswith(incomplete):
33 yield user
34
35
36 @group.command(help='Choose a user')
37 @click.argument("user", type=click.STRING, autocompletion=list_users)
38 def subcmd(user):
39 click.echo('Chosen user is %s' % user)
40
41 cli.add_command(group)
42
[end of examples/bashcompletion/bashcompletion.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/click/_bashcomplete.py b/click/_bashcomplete.py
--- a/click/_bashcomplete.py
+++ b/click/_bashcomplete.py
@@ -10,6 +10,7 @@
WORDBREAK = '='
+# Note, only BASH version 4.4 and later have the nosort option.
COMPLETION_SCRIPT_BASH = '''
%(complete_func)s() {
local IFS=$'\n'
@@ -19,7 +20,17 @@
return 0
}
-complete -F %(complete_func)s %(script_names)s
+%(complete_func)setup() {
+ local COMPLETION_OPTIONS=""
+ local BASH_VERSION_ARR=(${BASH_VERSION//./ })
+ if [ ${BASH_VERSION_ARR[0]} -ge 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ];then
+ COMPLETION_OPTIONS="-o nosort"
+ fi
+
+ complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
+}
+
+%(complete_func)setup
'''
COMPLETION_SCRIPT_ZSH = '''
@@ -41,11 +52,13 @@
done
if [ -n "$completions_with_descriptions" ]; then
- _describe '' completions_with_descriptions
+ _describe -V unsorted completions_with_descriptions -U -Q
fi
+
if [ -n "$completions" ]; then
- compadd -M 'r:|=* l:|=* r:|=*' -a completions
+ compadd -U -V unsorted -Q -a completions
fi
+ compstate[insert]="automenu"
}
compdef %(complete_func)s %(script_names)s
@@ -232,7 +245,8 @@
return get_user_autocompletions(ctx, all_args, incomplete, param)
add_subcommand_completions(ctx, incomplete, completions)
- return completions
+ # Sort before returning so that proper ordering can be enforced in custom types.
+ return sorted(completions)
def do_complete(cli, prog_name, include_descriptions):
diff --git a/examples/bashcompletion/bashcompletion.py b/examples/bashcompletion/bashcompletion.py
--- a/examples/bashcompletion/bashcompletion.py
+++ b/examples/bashcompletion/bashcompletion.py
@@ -8,6 +8,7 @@
def get_env_vars(ctx, args, incomplete):
+ # Completions returned as strings do not have a description displayed.
for key in os.environ.keys():
if incomplete in key:
yield key
@@ -26,11 +27,13 @@
def list_users(ctx, args, incomplete):
- # Here you can generate completions dynamically
- users = ['bob', 'alice']
- for user in users:
- if user.startswith(incomplete):
- yield user
+ # You can generate completions with descriptions by returning
+ # tuples in the form (completion, description).
+ users = [('bob', 'butcher'),
+ ('alice', 'baker'),
+ ('jerry', 'candlestick maker')]
+ # Ths will allow completion matches based on matches within the description string too!
+ return [user for user in users if incomplete in user[0] or incomplete in user[1]]
@group.command(help='Choose a user')
@@ -38,4 +41,5 @@
def subcmd(user):
click.echo('Chosen user is %s' % user)
+
cli.add_command(group)
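Once the shell stops re-sorting (the `nosort` / `-V unsorted` plumbing above), ordering is decided on the Python side. The natural ordering the issue describes - ['1', '2', '10'] rather than ['1', '10', '2'] - can be produced with a split-on-digits key such as this sketch (illustrative, not part of the recorded patch):

```python
import re

def natural_key(value: str):
    # Split into digit and non-digit runs so "10" sorts after "2", not after "1".
    return [int(part) if part.isdigit() else part
            for part in re.split(r"(\d+)", value)]

candidates = ["1", "10", "2"]
print(sorted(candidates))                   # ['1', '10', '2']  (lexicographic)
print(sorted(candidates, key=natural_key))  # ['1', '2', '10']  (natural order)
```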
|
{"golden_diff": "diff --git a/click/_bashcomplete.py b/click/_bashcomplete.py\n--- a/click/_bashcomplete.py\n+++ b/click/_bashcomplete.py\n@@ -10,6 +10,7 @@\n \n WORDBREAK = '='\n \n+# Note, only BASH version 4.4 and later have the nosort option.\n COMPLETION_SCRIPT_BASH = '''\n %(complete_func)s() {\n local IFS=$'\\n'\n@@ -19,7 +20,17 @@\n return 0\n }\n \n-complete -F %(complete_func)s %(script_names)s\n+%(complete_func)setup() {\n+ local COMPLETION_OPTIONS=\"\"\n+ local BASH_VERSION_ARR=(${BASH_VERSION//./ })\n+ if [ ${BASH_VERSION_ARR[0]} -ge 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ];then\n+ COMPLETION_OPTIONS=\"-o nosort\"\n+ fi\n+\n+ complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s\n+}\n+\n+%(complete_func)setup\n '''\n \n COMPLETION_SCRIPT_ZSH = '''\n@@ -41,11 +52,13 @@\n done\n \n if [ -n \"$completions_with_descriptions\" ]; then\n- _describe '' completions_with_descriptions\n+ _describe -V unsorted completions_with_descriptions -U -Q\n fi\n+\n if [ -n \"$completions\" ]; then\n- compadd -M 'r:|=* l:|=* r:|=*' -a completions\n+ compadd -U -V unsorted -Q -a completions\n fi\n+ compstate[insert]=\"automenu\"\n }\n \n compdef %(complete_func)s %(script_names)s\n@@ -232,7 +245,8 @@\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n \n add_subcommand_completions(ctx, incomplete, completions)\n- return completions\n+ # Sort before returning so that proper ordering can be enforced in custom types.\n+ return sorted(completions)\n \n \n def do_complete(cli, prog_name, include_descriptions):\ndiff --git a/examples/bashcompletion/bashcompletion.py b/examples/bashcompletion/bashcompletion.py\n--- a/examples/bashcompletion/bashcompletion.py\n+++ b/examples/bashcompletion/bashcompletion.py\n@@ -8,6 +8,7 @@\n \n \n def get_env_vars(ctx, args, incomplete):\n+ # Completions returned as strings do not have a description displayed.\n for key in os.environ.keys():\n if incomplete in key:\n yield key\n@@ -26,11 +27,13 @@\n \n \n def list_users(ctx, args, incomplete):\n- # Here you can generate completions dynamically\n- users = ['bob', 'alice']\n- for user in users:\n- if user.startswith(incomplete):\n- yield user\n+ # You can generate completions with descriptions by returning\n+ # tuples in the form (completion, description).\n+ users = [('bob', 'butcher'),\n+ ('alice', 'baker'),\n+ ('jerry', 'candlestick maker')]\n+ # Ths will allow completion matches based on matches within the description string too!\n+ return [user for user in users if incomplete in user[0] or incomplete in user[1]]\n \n \n @group.command(help='Choose a user')\n@@ -38,4 +41,5 @@\n def subcmd(user):\n click.echo('Chosen user is %s' % user)\n \n+\n cli.add_command(group)\n", "issue": "Allow sorting of completions based on custom types\nCurrently completion candidates returned by click are sorted in ZSH based on alphanumeric rules. This means that completions ['1', '2', '10'] would be displayed out of natural sort order as ['1', '10', '2']. Update the completion script to bring the sorting into Python where custom types can be sorted more appropriately. Note that the version of bash that ships with OS X is v3.2. 
The nosort option for `complete` was introduced in v4.4 This change should support both.\n", "before_files": [{"content": "import collections\nimport copy\nimport os\nimport re\n\nfrom .utils import echo\nfrom .parser import split_arg_string\nfrom .core import MultiCommand, Option, Argument\nfrom .types import Choice\n\nWORDBREAK = '='\n\nCOMPLETION_SCRIPT_BASH = '''\n%(complete_func)s() {\n local IFS=$'\\n'\n COMPREPLY=( $( env COMP_WORDS=\"${COMP_WORDS[*]}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n %(autocomplete_var)s=complete $1 ) )\n return 0\n}\n\ncomplete -F %(complete_func)s %(script_names)s\n'''\n\nCOMPLETION_SCRIPT_ZSH = '''\n%(complete_func)s() {\n local -a completions\n local -a completions_with_descriptions\n local -a response\n response=(\"${(@f)$( env COMP_WORDS=\\\"${words[*]}\\\" \\\\\n COMP_CWORD=$((CURRENT-1)) \\\\\n %(autocomplete_var)s=\\\"complete_zsh\\\" \\\\\n %(script_names)s )}\")\n\n for key descr in ${(kv)response}; do\n if [[ \"$descr\" == \"_\" ]]; then\n completions+=(\"$key\")\n else\n completions_with_descriptions+=(\"$key\":\"$descr\")\n fi\n done\n\n if [ -n \"$completions_with_descriptions\" ]; then\n _describe '' completions_with_descriptions\n fi\n if [ -n \"$completions\" ]; then\n compadd -M 'r:|=* l:|=* r:|=*' -a completions\n fi\n}\n\ncompdef %(complete_func)s %(script_names)s\n'''\n\n_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')\n\n\ndef get_completion_script(prog_name, complete_var, shell):\n cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))\n script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH\n return (script % {\n 'complete_func': '_%s_completion' % cf_name,\n 'script_names': prog_name,\n 'autocomplete_var': complete_var,\n }).strip() + ';'\n\n\ndef resolve_ctx(cli, prog_name, args):\n \"\"\"\n Parse into a hierarchy of contexts. Contexts are connected through the parent variable.\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :return: the final context/command parsed\n \"\"\"\n ctx = cli.make_context(prog_name, args, resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n while args:\n if isinstance(ctx.command, MultiCommand):\n if not ctx.command.chain:\n cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)\n if cmd is None:\n return ctx\n ctx = cmd.make_context(cmd_name, args, parent=ctx,\n resilient_parsing=True)\n args = ctx.protected_args + ctx.args\n else:\n # Walk chained subcommand contexts saving the last one.\n while args:\n cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)\n if cmd is None:\n return ctx\n sub_ctx = cmd.make_context(cmd_name, args, parent=ctx,\n allow_extra_args=True,\n allow_interspersed_args=False,\n resilient_parsing=True)\n args = sub_ctx.args\n ctx = sub_ctx\n args = sub_ctx.protected_args + sub_ctx.args\n else:\n break\n return ctx\n\n\ndef start_of_option(param_str):\n \"\"\"\n :param param_str: param_str to check\n :return: whether or not this is the start of an option declaration (i.e. starts \"-\" or \"--\")\n \"\"\"\n return param_str and param_str[:1] == '-'\n\n\ndef is_incomplete_option(all_args, cmd_param):\n \"\"\"\n :param all_args: the full original list of args supplied\n :param cmd_param: the current command paramter\n :return: whether or not the last option declaration (i.e. starts \"-\" or \"--\") is incomplete and\n corresponds to this cmd_param. 
In other words whether this cmd_param option can still accept\n values\n \"\"\"\n if not isinstance(cmd_param, Option):\n return False\n if cmd_param.is_flag:\n return False\n last_option = None\n for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):\n if index + 1 > cmd_param.nargs:\n break\n if start_of_option(arg_str):\n last_option = arg_str\n\n return True if last_option and last_option in cmd_param.opts else False\n\n\ndef is_incomplete_argument(current_params, cmd_param):\n \"\"\"\n :param current_params: the current params and values for this argument as already entered\n :param cmd_param: the current command parameter\n :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In\n other words whether or not the this cmd_param argument can still accept values\n \"\"\"\n if not isinstance(cmd_param, Argument):\n return False\n current_param_values = current_params[cmd_param.name]\n if current_param_values is None:\n return True\n if cmd_param.nargs == -1:\n return True\n if isinstance(current_param_values, collections.Iterable) \\\n and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:\n return True\n return False\n\n\ndef get_user_autocompletions(ctx, args, incomplete, cmd_param):\n \"\"\"\n :param ctx: context associated with the parsed command\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :param cmd_param: command definition\n :return: all the possible user-specified completions for the param\n \"\"\"\n results = []\n if isinstance(cmd_param.type, Choice):\n # Choices don't support descriptions.\n results = [(c, None)\n for c in cmd_param.type.choices if c.startswith(incomplete)]\n elif cmd_param.autocompletion is not None:\n dynamic_completions = cmd_param.autocompletion(ctx=ctx,\n args=args,\n incomplete=incomplete)\n results = [c if isinstance(c, tuple) else (c, None)\n for c in dynamic_completions]\n return results\n\n\ndef add_subcommand_completions(ctx, incomplete, completions_out):\n # Add subcommand completions.\n if isinstance(ctx.command, MultiCommand):\n completions_out.extend(\n [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in ctx.command.list_commands(ctx) if c.startswith(incomplete)])\n\n # Walk up the context list and add any other completion possibilities from chained commands\n while ctx.parent is not None:\n ctx = ctx.parent\n if isinstance(ctx.command, MultiCommand) and ctx.command.chain:\n remaining_commands = sorted(\n set(ctx.command.list_commands(ctx)) - set(ctx.protected_args))\n completions_out.extend(\n [(c, ctx.command.get_command(ctx, c).get_short_help_str()) for c in remaining_commands if c.startswith(incomplete)])\n\n\ndef get_choices(cli, prog_name, args, incomplete):\n \"\"\"\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :return: all the possible completions for the incomplete\n \"\"\"\n all_args = copy.deepcopy(args)\n\n ctx = resolve_ctx(cli, prog_name, args)\n if ctx is None:\n return []\n\n # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse\n # without the '='\n if start_of_option(incomplete) and WORDBREAK in incomplete:\n partition_incomplete = incomplete.partition(WORDBREAK)\n all_args.append(partition_incomplete[0])\n incomplete = partition_incomplete[2]\n elif incomplete == WORDBREAK:\n incomplete = ''\n\n completions = []\n 
if start_of_option(incomplete):\n # completions for partial options\n for param in ctx.command.params:\n if isinstance(param, Option):\n param_opts = [param_opt for param_opt in param.opts +\n param.secondary_opts if param_opt not in all_args or param.multiple]\n completions.extend(\n [(o, param.help) for o in param_opts if o.startswith(incomplete)])\n return completions\n # completion for option values from user supplied values\n for param in ctx.command.params:\n if is_incomplete_option(all_args, param):\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n # completion for argument values from user supplied values\n for param in ctx.command.params:\n if is_incomplete_argument(ctx.params, param):\n return get_user_autocompletions(ctx, all_args, incomplete, param)\n\n add_subcommand_completions(ctx, incomplete, completions)\n return completions\n\n\ndef do_complete(cli, prog_name, include_descriptions):\n cwords = split_arg_string(os.environ['COMP_WORDS'])\n cword = int(os.environ['COMP_CWORD'])\n args = cwords[1:cword]\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = ''\n\n for item in get_choices(cli, prog_name, args, incomplete):\n echo(item[0])\n if include_descriptions:\n # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present.\n echo(item[1] if item[1] else '_')\n\n return True\n\n\ndef bashcomplete(cli, prog_name, complete_var, complete_instr):\n if complete_instr.startswith('source'):\n shell = 'zsh' if complete_instr == 'source_zsh' else 'bash'\n echo(get_completion_script(prog_name, complete_var, shell))\n return True\n elif complete_instr == 'complete' or complete_instr == 'complete_zsh':\n return do_complete(cli, prog_name, complete_instr == 'complete_zsh')\n return False\n", "path": "click/_bashcomplete.py"}, {"content": "import click\nimport os\n\n\[email protected]()\ndef cli():\n pass\n\n\ndef get_env_vars(ctx, args, incomplete):\n for key in os.environ.keys():\n if incomplete in key:\n yield key\n\n\[email protected](help='A command to print environment variables')\[email protected](\"envvar\", type=click.STRING, autocompletion=get_env_vars)\ndef cmd1(envvar):\n click.echo('Environment variable: %s' % envvar)\n click.echo('Value: %s' % os.environ[envvar])\n\n\[email protected](help='A group that holds a subcommand')\ndef group():\n pass\n\n\ndef list_users(ctx, args, incomplete):\n # Here you can generate completions dynamically\n users = ['bob', 'alice']\n for user in users:\n if user.startswith(incomplete):\n yield user\n\n\[email protected](help='Choose a user')\[email protected](\"user\", type=click.STRING, autocompletion=list_users)\ndef subcmd(user):\n click.echo('Chosen user is %s' % user)\n\ncli.add_command(group)\n", "path": "examples/bashcompletion/bashcompletion.py"}]}
| 3,867 | 786 |
gh_patches_debug_25746
|
rasdani/github-patches
|
git_diff
|
mito-ds__mito-359
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
spelling mistake during mitoinstaller install
**Describe the bug**
Small issue, very minor: I found a spelling mistake when running `python -m mitoinstaller install`:
Starting install...
Create mito user
Upgrade mitoinstaller
Setting up **enviornment** <--- **environment**
Check dependencies
Remove mitosheet3 if present
Install mitosheet
This might take a few moments...
**To Reproduce**
Steps to reproduce the behavior:
1. Run `python -m mitoinstaller install`
Please include the relevant dataset if the bug you encountered is dataset specific. Make sure to anonymize the data properly.
**Expected behavior**
should be corrected to "environment"
**Screenshots**

**Desktop (please complete the following information):**
N/A
**Additional context**
N/A
</issue>
<code>
[start of mitoinstaller/mitoinstaller/__main__.py]
1 """
2 The Mito Installer package contains utils for installing
3 Mito within your Python enviornment.
4
5 Long term, we aim to meet:
6 1. This package has minimal dependencies, both for speed of download and the ultimate portability.
7 2. The installation attempts to fail as early as possible, and to give the user as much help
8 help as possible while doing so.
9 """
10 from colorama import init
11 from termcolor import colored # type: ignore
12
13 from mitoinstaller.install import do_install
14
15
16 def main() -> None:
17 """
18 The main function of the Mito installer, this function is responsible
19 for installing and upgrading the `mitosheet` package.
20
21 To install Mito:
22 python -m mitoinstaller install
23
24 To upgrade Mito:
25 python -m mitoinstaller upgrade
26
27 To install Mito from TestPyPi
28 python -m mitoinstaller install --test-pypi
29 """
30 import sys
31 init()
32
33 if len(sys.argv) > 1:
34 command = sys.argv[1]
35 else:
36 command = ''
37
38 if command == 'install' or command == 'upgrade':
39 do_install()
40 elif command == 'uninstall':
41 print('To uninstall, run,', colored('`pip uninstall mitosheet`', 'green'))
42 else:
43 # NOTE: we don't add upgrade_to_jupyterlab_3 to the help.
44 # We only send this command to the users who need to know this (namely, those that need to upgrade)
45 print('\nProper usage is', colored('`python -m mitoinstaller install`', 'green'), 'or', colored('`python -m mitoinstaller upgrade`', 'green'), '\n\nTry running the command ', colored('`python -m mitoinstaller install`', 'green'), '\n')
46
47
48 if __name__ == '__main__':
49 main()
50
[end of mitoinstaller/mitoinstaller/__main__.py]
[start of mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py]
1 import importlib
2 import os
3 import sys
4
5 from mitoinstaller import __version__
6 from mitoinstaller.commands import upgrade_mito_installer
7 from mitoinstaller.installer_steps.installer_step import InstallerStep
8 from mitoinstaller.jupyter_utils import set_prefered_jupyter_env_variable
9 from mitoinstaller.log_utils import identify, log
10 from mitoinstaller.user_install import (USER_JSON_PATH, go_pro,
11 try_create_user_json_file)
12
13
14 def initial_install_step_create_user():
15
16 if not os.path.exists(USER_JSON_PATH):
17 try_create_user_json_file(is_pro=('--pro' in sys.argv))
18
19 if not ('--pro' in sys.argv):
20 # Only try and log if we're not pro
21 identify()
22 log('install_started', {
23 'mitoinstaller_version': __version__
24 })
25 else:
26 # If the user is going pro, make sure they are set to pro
27 go_pro()
28
29 def initial_install_step_add_env_for_which_jupyter():
30 """
31 This install steps checks, up front, which very of jupyter we should
32 launch: lab or notebook. It then stores this as an enviornment variable
33 so that the final installer steps can launch it.
34
35 We do this up front, so that we can see which packages that user has
36 installed before installing Mito.
37 """
38 set_prefered_jupyter_env_variable()
39
40
41 INITIAL_INSTALLER_STEPS = [
42 InstallerStep(
43 'Create mito user',
44 initial_install_step_create_user
45 ),
46 InstallerStep(
47 'Upgrade mitoinstaller',
48 upgrade_mito_installer,
49 optional=True
50 ),
51 InstallerStep(
52 'Setting up enviornment',
53 initial_install_step_add_env_for_which_jupyter,
54 ),
55 ]
56
[end of mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitoinstaller/mitoinstaller/__main__.py b/mitoinstaller/mitoinstaller/__main__.py
--- a/mitoinstaller/mitoinstaller/__main__.py
+++ b/mitoinstaller/mitoinstaller/__main__.py
@@ -1,6 +1,6 @@
"""
The Mito Installer package contains utils for installing
-Mito within your Python enviornment.
+Mito within your Python environment.
Long term, we aim to meet:
1. This package has minimal dependencies, both for speed of download and the ultimate portability.
diff --git a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
--- a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
+++ b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
@@ -29,7 +29,7 @@
def initial_install_step_add_env_for_which_jupyter():
"""
This install steps checks, up front, which very of jupyter we should
- launch: lab or notebook. It then stores this as an enviornment variable
+ launch: lab or notebook. It then stores this as an environment variable
so that the final installer steps can launch it.
We do this up front, so that we can see which packages that user has
@@ -49,7 +49,7 @@
optional=True
),
InstallerStep(
- 'Setting up enviornment',
+ 'Setting up environment',
initial_install_step_add_env_for_which_jupyter,
),
]
|
{"golden_diff": "diff --git a/mitoinstaller/mitoinstaller/__main__.py b/mitoinstaller/mitoinstaller/__main__.py\n--- a/mitoinstaller/mitoinstaller/__main__.py\n+++ b/mitoinstaller/mitoinstaller/__main__.py\n@@ -1,6 +1,6 @@\n \"\"\"\n The Mito Installer package contains utils for installing\n-Mito within your Python enviornment.\n+Mito within your Python environment.\n \n Long term, we aim to meet:\n 1. This package has minimal dependencies, both for speed of download and the ultimate portability.\ndiff --git a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n--- a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n+++ b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n@@ -29,7 +29,7 @@\n def initial_install_step_add_env_for_which_jupyter():\n \"\"\"\n This install steps checks, up front, which very of jupyter we should\n- launch: lab or notebook. It then stores this as an enviornment variable\n+ launch: lab or notebook. It then stores this as an environment variable\n so that the final installer steps can launch it. \n \n We do this up front, so that we can see which packages that user has \n@@ -49,7 +49,7 @@\n optional=True\n ),\n InstallerStep(\n- 'Setting up enviornment',\n+ 'Setting up environment',\n initial_install_step_add_env_for_which_jupyter,\n ),\n ]\n", "issue": "spelling mistake during mitoinstaller install\n**Describe the bug**\r\nSmall issue, very minor, found a spelling mistake when running mitoinstaller install, \r\n\r\nStarting install...\r\nCreate mito user\r\nUpgrade mitoinstaller\r\nSetting up **enviornment** <--- **environment**\r\nCheck dependencies\r\nRemove mitosheet3 if present\r\nInstall mitosheet\r\nThis might take a few moments...\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. run python -m mitoinstaller install\r\n\r\nPlease include the relevant dataset if the bug you encountered is dataset specific. Make sure to anonymize the data properly.\r\n\r\n**Expected behavior**\r\nshould be corrected to \"environment\"\r\n\r\n**Screenshots**\r\n\r\n\r\n**Desktop (please complete the following information):**\r\nN/A\r\n\r\n**Additional context**\r\nN/A\r\n\n", "before_files": [{"content": "\"\"\"\nThe Mito Installer package contains utils for installing\nMito within your Python enviornment.\n\nLong term, we aim to meet:\n1. This package has minimal dependencies, both for speed of download and the ultimate portability.\n2. 
The installation attempts to fail as early as possible, and to give the user as much help\n help as possible while doing so.\n\"\"\"\nfrom colorama import init\nfrom termcolor import colored # type: ignore\n\nfrom mitoinstaller.install import do_install\n\n\ndef main() -> None:\n \"\"\"\n The main function of the Mito installer, this function is responsible\n for installing and upgrading the `mitosheet` package.\n\n To install Mito:\n python -m mitoinstaller install\n\n To upgrade Mito:\n python -m mitoinstaller upgrade\n\n To install Mito from TestPyPi\n python -m mitoinstaller install --test-pypi\n \"\"\"\n import sys\n init()\n\n if len(sys.argv) > 1:\n command = sys.argv[1]\n else:\n command = ''\n\n if command == 'install' or command == 'upgrade':\n do_install()\n elif command == 'uninstall':\n print('To uninstall, run,', colored('`pip uninstall mitosheet`', 'green'))\n else:\n # NOTE: we don't add upgrade_to_jupyterlab_3 to the help.\n # We only send this command to the users who need to know this (namely, those that need to upgrade)\n print('\\nProper usage is', colored('`python -m mitoinstaller install`', 'green'), 'or', colored('`python -m mitoinstaller upgrade`', 'green'), '\\n\\nTry running the command ', colored('`python -m mitoinstaller install`', 'green'), '\\n')\n \n\nif __name__ == '__main__':\n main()\n", "path": "mitoinstaller/mitoinstaller/__main__.py"}, {"content": "import importlib\nimport os\nimport sys\n\nfrom mitoinstaller import __version__\nfrom mitoinstaller.commands import upgrade_mito_installer\nfrom mitoinstaller.installer_steps.installer_step import InstallerStep\nfrom mitoinstaller.jupyter_utils import set_prefered_jupyter_env_variable\nfrom mitoinstaller.log_utils import identify, log\nfrom mitoinstaller.user_install import (USER_JSON_PATH, go_pro,\n try_create_user_json_file)\n\n\ndef initial_install_step_create_user():\n\n if not os.path.exists(USER_JSON_PATH):\n try_create_user_json_file(is_pro=('--pro' in sys.argv))\n\n if not ('--pro' in sys.argv):\n # Only try and log if we're not pro\n identify()\n log('install_started', {\n 'mitoinstaller_version': __version__\n })\n else:\n # If the user is going pro, make sure they are set to pro\n go_pro()\n\ndef initial_install_step_add_env_for_which_jupyter():\n \"\"\"\n This install steps checks, up front, which very of jupyter we should\n launch: lab or notebook. It then stores this as an enviornment variable\n so that the final installer steps can launch it. \n\n We do this up front, so that we can see which packages that user has \n installed before installing Mito.\n \"\"\"\n set_prefered_jupyter_env_variable()\n\n\nINITIAL_INSTALLER_STEPS = [\n InstallerStep(\n 'Create mito user',\n initial_install_step_create_user\n ),\n InstallerStep(\n 'Upgrade mitoinstaller',\n upgrade_mito_installer,\n optional=True\n ),\n InstallerStep(\n 'Setting up enviornment',\n initial_install_step_add_env_for_which_jupyter,\n ),\n]\n", "path": "mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py"}]}
| 1,815 | 366 |
gh_patches_debug_55591
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-10633
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Pillow 10.0.0
### Is your proposal related to a problem?
Pillow 10.0.0 [has just been released.](https://github.com/python-pillow/Pillow/releases/tag/10.0.0) Wagtail 5.0.2 [restricts Pillow support to <10.0.0.](https://github.com/wagtail/wagtail/blob/a68f69f2d7f46943cc23b7f65349448b23044869/setup.py#L30)
Adding support for the new Pillow release is desired.
### Describe the solution you'd like
Add support for Pillow 10.0.0
### Describe alternatives you've considered
Not applicable.
### Additional context
This is a relevant dependency to the project, and to sites running it.
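For concreteness, here is how the current pin and a relaxed one treat the new release, sketched with the `packaging` library; the second range is the bound the patch below adopts, shown here only as an illustration:
```python
from packaging.specifiers import SpecifierSet

old_pin = SpecifierSet(">=4.0.0,<10.0.0")   # the bound currently in setup.py
new_pin = SpecifierSet(">=9.1.0,<11.0.0")   # the relaxed bound the patch adopts

print("10.0.0" in old_pin)   # False: pip will not pair Wagtail with Pillow 10
print("10.0.0" in new_pin)   # True: the raised upper bound admits it
```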
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from wagtail import __version__
4 from wagtail.utils.setup import assets, check_bdist_egg, sdist
5
6 try:
7 from setuptools import find_packages, setup
8 except ImportError:
9 from distutils.core import setup
10
11
12 # Hack to prevent "TypeError: 'NoneType' object is not callable" error
13 # in multiprocessing/util.py _exit_function when setup.py exits
14 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
15 try:
16 import multiprocessing # noqa: F401
17 except ImportError:
18 pass
19
20
21 install_requires = [
22 "Django>=3.2,<4.3",
23 "django-modelcluster>=6.0,<7.0",
24 "django-permissionedforms>=0.1,<1.0",
25 "django-taggit>=2.0,<5.0",
26 "django-treebeard>=4.5.1,<5.0",
27 "djangorestframework>=3.11.1,<4.0",
28 "django-filter>=2.2,<24",
29 "draftjs_exporter>=2.1.5,<3.0",
30 "Pillow>=4.0.0,<10.0.0",
31 "beautifulsoup4>=4.8,<4.12",
32 "html5lib>=0.999,<2",
33 "Willow>=1.5,<1.6",
34 "requests>=2.11.1,<3.0",
35 "l18n>=2018.5",
36 "openpyxl>=3.0.10,<4.0",
37 "anyascii>=0.1.5",
38 "telepath>=0.1.1,<1",
39 ]
40
41 # Testing dependencies
42 testing_extras = [
43 # Required for running the tests
44 "python-dateutil>=2.7",
45 "pytz>=2014.7",
46 "elasticsearch>=5.0,<6.0",
47 "Jinja2>=3.0,<3.2",
48 "boto3>=1.16,<1.17",
49 "freezegun>=0.3.8",
50 "azure-mgmt-cdn>=12.0,<13.0",
51 "azure-mgmt-frontdoor>=1.0,<1.1",
52 "django-pattern-library>=0.7,<0.8",
53 # For coverage and PEP8 linting
54 "coverage>=3.7.0",
55 "black==22.3.0",
56 "doc8==0.8.1",
57 "ruff==0.0.272",
58 # For enforcing string formatting mechanism in source files
59 "semgrep==1.3.0",
60 # For templates linting
61 "curlylint==0.13.1",
62 # For template indenting
63 "djhtml==1.5.2",
64 # for validating string formats in .po translation files
65 "polib>=1.1,<2.0",
66 # For wagtail.test.utils.wagtail_factories (used for streamfield migration toolkit)
67 "factory-boy>=3.2",
68 ]
69
70 # Documentation dependencies
71 documentation_extras = [
72 "pyenchant>=3.1.1,<4",
73 "sphinxcontrib-spelling>=5.4.0,<6",
74 "Sphinx>=1.5.2",
75 "sphinx-autobuild>=0.6.0",
76 "sphinx-wagtail-theme==6.0.0",
77 "myst_parser==0.18.1",
78 "sphinx_copybutton>=0.5,<1.0",
79 ]
80
81 setup(
82 name="wagtail",
83 version=__version__,
84 description="A Django content management system.",
85 author="Wagtail core team + contributors",
86 author_email="[email protected]", # For support queries, please see https://docs.wagtail.org/en/stable/support.html
87 url="https://wagtail.org/",
88 project_urls={
89 "Documentation": "https://docs.wagtail.org",
90 "Source": "https://github.com/wagtail/wagtail",
91 },
92 packages=find_packages(),
93 include_package_data=True,
94 license="BSD",
95 long_description="Wagtail is an open source content management \
96 system built on Django, with a strong community and commercial support. \
97 It’s focused on user experience, and offers precise control for \
98 designers and developers.\n\n\
99 For more details, see https://wagtail.org, https://docs.wagtail.org and \
100 https://github.com/wagtail/wagtail/.",
101 classifiers=[
102 "Development Status :: 5 - Production/Stable",
103 "Environment :: Web Environment",
104 "Intended Audience :: Developers",
105 "License :: OSI Approved :: BSD License",
106 "Operating System :: OS Independent",
107 "Programming Language :: Python",
108 "Programming Language :: Python :: 3",
109 "Programming Language :: Python :: 3.7",
110 "Programming Language :: Python :: 3.8",
111 "Programming Language :: Python :: 3.9",
112 "Programming Language :: Python :: 3.10",
113 "Programming Language :: Python :: 3.11",
114 "Framework :: Django",
115 "Framework :: Django :: 3.2",
116 "Framework :: Django :: 4.1",
117 "Framework :: Django :: 4.2",
118 "Framework :: Wagtail",
119 "Topic :: Internet :: WWW/HTTP :: Site Management",
120 ],
121 python_requires=">=3.7",
122 install_requires=install_requires,
123 extras_require={"testing": testing_extras, "docs": documentation_extras},
124 entry_points="""
125 [console_scripts]
126 wagtail=wagtail.bin.wagtail:main
127 """,
128 zip_safe=False,
129 cmdclass={
130 "sdist": sdist,
131 "bdist_egg": check_bdist_egg,
132 "assets": assets,
133 },
134 )
135
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@
"djangorestframework>=3.11.1,<4.0",
"django-filter>=2.2,<24",
"draftjs_exporter>=2.1.5,<3.0",
- "Pillow>=4.0.0,<10.0.0",
+ "Pillow>=9.1.0,<11.0.0",
"beautifulsoup4>=4.8,<4.12",
"html5lib>=0.999,<2",
"Willow>=1.5,<1.6",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,7 +27,7 @@\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=2.2,<24\",\n \"draftjs_exporter>=2.1.5,<3.0\",\n- \"Pillow>=4.0.0,<10.0.0\",\n+ \"Pillow>=9.1.0,<11.0.0\",\n \"beautifulsoup4>=4.8,<4.12\",\n \"html5lib>=0.999,<2\",\n \"Willow>=1.5,<1.6\",\n", "issue": "Add support for Pillow 10.0.0\n### Is your proposal related to a problem?\r\n\r\nPillow 10.0.0 [has just been released.](https://github.com/python-pillow/Pillow/releases/tag/10.0.0) Wagtail 5.0.2 [restricts Pillow support to <10.0.0.](https://github.com/wagtail/wagtail/blob/a68f69f2d7f46943cc23b7f65349448b23044869/setup.py#L30)\r\n\r\nAdding support for the new Pillow release is desired.\r\n\r\n### Describe the solution you'd like\r\n\r\nAdd support for Pillow 10.0.0\r\n\r\n\r\n### Describe alternatives you've considered\r\n\r\nNot applicable.\r\n\r\n### Additional context\r\n\r\nThis is a relevant dependency to the project, and to sites running it.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, check_bdist_egg, sdist\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa: F401\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=3.2,<4.3\",\n \"django-modelcluster>=6.0,<7.0\",\n \"django-permissionedforms>=0.1,<1.0\",\n \"django-taggit>=2.0,<5.0\",\n \"django-treebeard>=4.5.1,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=2.2,<24\",\n \"draftjs_exporter>=2.1.5,<3.0\",\n \"Pillow>=4.0.0,<10.0.0\",\n \"beautifulsoup4>=4.8,<4.12\",\n \"html5lib>=0.999,<2\",\n \"Willow>=1.5,<1.6\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"openpyxl>=3.0.10,<4.0\",\n \"anyascii>=0.1.5\",\n \"telepath>=0.1.1,<1\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n \"python-dateutil>=2.7\",\n \"pytz>=2014.7\",\n \"elasticsearch>=5.0,<6.0\",\n \"Jinja2>=3.0,<3.2\",\n \"boto3>=1.16,<1.17\",\n \"freezegun>=0.3.8\",\n \"azure-mgmt-cdn>=12.0,<13.0\",\n \"azure-mgmt-frontdoor>=1.0,<1.1\",\n \"django-pattern-library>=0.7,<0.8\",\n # For coverage and PEP8 linting\n \"coverage>=3.7.0\",\n \"black==22.3.0\",\n \"doc8==0.8.1\",\n \"ruff==0.0.272\",\n # For enforcing string formatting mechanism in source files\n \"semgrep==1.3.0\",\n # For templates linting\n \"curlylint==0.13.1\",\n # For template indenting\n \"djhtml==1.5.2\",\n # for validating string formats in .po translation files\n \"polib>=1.1,<2.0\",\n # For wagtail.test.utils.wagtail_factories (used for streamfield migration toolkit)\n \"factory-boy>=3.2\",\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n \"pyenchant>=3.1.1,<4\",\n \"sphinxcontrib-spelling>=5.4.0,<6\",\n \"Sphinx>=1.5.2\",\n \"sphinx-autobuild>=0.6.0\",\n \"sphinx-wagtail-theme==6.0.0\",\n \"myst_parser==0.18.1\",\n \"sphinx_copybutton>=0.5,<1.0\",\n]\n\nsetup(\n name=\"wagtail\",\n version=__version__,\n description=\"A Django content management system.\",\n author=\"Wagtail core team + contributors\",\n author_email=\"[email protected]\", # For support queries, please see https://docs.wagtail.org/en/stable/support.html\n url=\"https://wagtail.org/\",\n 
project_urls={\n \"Documentation\": \"https://docs.wagtail.org\",\n \"Source\": \"https://github.com/wagtail/wagtail\",\n },\n packages=find_packages(),\n include_package_data=True,\n license=\"BSD\",\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. \\\nIt\u2019s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.org, https://docs.wagtail.org and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Framework :: Django\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.1\",\n \"Framework :: Django :: 4.2\",\n \"Framework :: Wagtail\",\n \"Topic :: Internet :: WWW/HTTP :: Site Management\",\n ],\n python_requires=\">=3.7\",\n install_requires=install_requires,\n extras_require={\"testing\": testing_extras, \"docs\": documentation_extras},\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n \"sdist\": sdist,\n \"bdist_egg\": check_bdist_egg,\n \"assets\": assets,\n },\n)\n", "path": "setup.py"}]}
| 2,340 | 159 |
gh_patches_debug_26313
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-439
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New Bernoulli parameterization lead to subtle bug in AIR model
When I merged #416 into a local branch it broke the AIR example.
Here's a simplified instance of the problem:
```python
import torch
from torch.autograd import Variable
from pyro.distributions import Bernoulli
from torch.nn.functional import sigmoid
p = Variable(torch.Tensor([0]), requires_grad=True)
b = Bernoulli(sigmoid(p) * 0.0)
log_pdf = b.batch_log_pdf(Variable(torch.Tensor([0])))
log_pdf.sum().backward()
print(p.grad)
```
Prior to #416 this returned a `grad` of zero as expected, but it now returns `nan`:
```
pyro$ git rev-parse --short HEAD
71bca18
pyro$ python3 bern.py
Variable containing:
-0
[torch.FloatTensor of size 1]
pyro$ git rev-parse --short HEAD
a85525a
pyro$ python3 bern.py
Variable containing:
nan
[torch.FloatTensor of size 1]
```
I suspect that the problem is that converting between probabilities and log odds introduces an intermediate `-inf`, which messes up autograd.
I may be able to adjust the model to work around this, but either way, should this be considered a bug? (It seems like it could trip other people up, and chasing down the source of the `nan`s is tricky.)
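A minimal sketch of the clamping idea, written with the present-day tensor API instead of the repro's `Variable` wrapper; the `1e-6` buffer matches what the eventual fix uses for single-precision tensors:
```python
import torch

p = torch.zeros(1, requires_grad=True)
ps = torch.sigmoid(p) * 0.0                 # probability collapses to exactly 0
eps = 1e-6                                  # clamping buffer for float tensors
ps_safe = ps.clamp(min=eps, max=1 - eps)

# log(0) = -inf is what poisons the backward pass; log(eps) stays finite.
logits = torch.log(ps_safe) - torch.log1p(-ps_safe)
logits.sum().backward()
print(p.grad)                               # finite zero instead of nan
```
Without the clamp, the backward pass multiplies the infinite local gradient of `log(0)` by the zero coming from `* 0.0`, and `inf * 0` is exactly the `nan` above.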
</issue>
<code>
[start of pyro/distributions/util.py]
1 import torch
2 import torch.nn.functional as F
3 from torch.autograd import Variable
4
5
6 def log_gamma(xx):
7 if isinstance(xx, Variable):
8 ttype = xx.data.type()
9 elif isinstance(xx, torch.Tensor):
10 ttype = xx.type()
11 gamma_coeff = [
12 76.18009172947146,
13 -86.50532032941677,
14 24.01409824083091,
15 -1.231739572450155,
16 0.1208650973866179e-2,
17 -0.5395239384953e-5,
18 ]
19 magic1 = 1.000000000190015
20 magic2 = 2.5066282746310005
21 x = xx - 1.0
22 t = x + 5.5
23 t = t - (x + 0.5) * torch.log(t)
24 ser = Variable(torch.ones(x.size()).type(ttype)) * magic1
25 for c in gamma_coeff:
26 x = x + 1.0
27 ser = ser + torch.pow(x / c, -1)
28 return torch.log(ser * magic2) - t
29
30
31 def log_beta(t):
32 """
33 Computes log Beta function.
34
35 :param t:
36 :type t: torch.autograd.Variable of dimension 1 or 2
37 :rtype: torch.autograd.Variable of float (if t.dim() == 1) or torch.Tensor (if t.dim() == 2)
38 """
39 assert t.dim() in (1, 2)
40 if t.dim() == 1:
41 numer = torch.sum(log_gamma(t))
42 denom = log_gamma(torch.sum(t))
43 else:
44 numer = torch.sum(log_gamma(t), 1)
45 denom = log_gamma(torch.sum(t, 1))
46 return numer - denom
47
48
49 def move_to_same_host_as(source, destin):
50 """
51 Returns source or a copy of `source` such that `source.is_cuda == `destin.is_cuda`.
52 """
53 return source.cuda() if destin.is_cuda else source.cpu()
54
55
56 def torch_zeros_like(x):
57 """
58 Polyfill for `torch.zeros_like()`.
59 """
60 # Work around https://github.com/pytorch/pytorch/issues/2906
61 if isinstance(x, Variable):
62 return Variable(torch_zeros_like(x.data))
63 # Support Pytorch before https://github.com/pytorch/pytorch/pull/2489
64 try:
65 return torch.zeros_like(x)
66 except AttributeError:
67 return torch.zeros(x.size()).type_as(x)
68
69
70 def torch_ones_like(x):
71 """
72 Polyfill for `torch.ones_like()`.
73 """
74 # Work around https://github.com/pytorch/pytorch/issues/2906
75 if isinstance(x, Variable):
76 return Variable(torch_ones_like(x.data))
77 # Support Pytorch before https://github.com/pytorch/pytorch/pull/2489
78 try:
79 return torch.ones_like(x)
80 except AttributeError:
81 return torch.ones(x.size()).type_as(x)
82
83
84 def torch_eye(n, m=None, out=None):
85 """
86 Like `torch.eye()`, but works with cuda tensors.
87 """
88 if m is None:
89 m = n
90 try:
91 return torch.eye(n, m, out=out)
92 except TypeError:
93 # Only catch errors due to torch.eye() not being availble for cuda tensors.
94 module = torch.Tensor.__module__ if out is None else type(out).__module__
95 if module != 'torch.cuda':
96 raise
97 Tensor = getattr(torch, torch.Tensor.__name__)
98 cpu_out = Tensor(n, m)
99 cuda_out = torch.eye(m, n, out=cpu_out).cuda()
100 return cuda_out if out is None else out.copy_(cuda_out)
101
102
103 def torch_multinomial(input, num_samples, replacement=False):
104 """
105 Like `torch.multinomial()` but works with cuda tensors.
106 Does not support keyword argument `out`.
107 """
108 if input.is_cuda:
109 return torch_multinomial(input.cpu(), num_samples, replacement).cuda()
110 else:
111 return torch.multinomial(input, num_samples, replacement)
112
113
114 def softmax(x, dim=-1):
115 """
116 TODO: change to use the default pyTorch implementation when available
117 Source: https://discuss.pytorch.org/t/why-softmax-function-cant-specify-the-dimension-to-operate/2637
118 :param x: tensor
119 :param dim: Dimension to apply the softmax function to. The elements of the tensor in this
120 dimension must sum to 1.
121 :return: tensor having the same dimension as `x` rescaled along dim
122 """
123 input_size = x.size()
124
125 trans_input = x.transpose(dim, len(input_size) - 1)
126 trans_size = trans_input.size()
127
128 input_2d = trans_input.contiguous().view(-1, trans_size[-1])
129
130 soft_max_2d = F.softmax(input_2d)
131
132 soft_max_nd = soft_max_2d.view(*trans_size)
133 return soft_max_nd.transpose(dim, len(input_size) - 1)
134
135
136 def get_probs_and_logits(ps=None, logits=None, is_multidimensional=True):
137 """
138 Convert probability values to logits, or vice-versa. Either `ps` or
139 `logits` should be specified, but not both.
140
141 :param ps: tensor of probabilities. Should be in the interval *[0, 1]*.
142 If, `is_multidimensional = True`, then must be normalized along
143 axis -1.
144 :param logits: tensor of logit values.
145 :param is_multidimensional: determines the computation of ps from logits,
146 and vice-versa. For the multi-dimensional case, logit values are
147 assumed to be non-normalized log probabilities, whereas for the uni-
148 dimensional case, it specifically refers to log odds.
149 :return: tuple containing raw probabilities and logits as tensors
150 """
151 assert (ps is None) != (logits is None)
152 if is_multidimensional:
153 if ps is None:
154 ps = softmax(logits, -1)
155 else:
156 logits = torch.log(ps)
157 else:
158 if ps is None:
159 ps = F.sigmoid(logits)
160 else:
161 logits = torch.log(ps) - torch.log1p(-ps)
162 return ps, logits
163
[end of pyro/distributions/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/distributions/util.py b/pyro/distributions/util.py
--- a/pyro/distributions/util.py
+++ b/pyro/distributions/util.py
@@ -133,6 +133,15 @@
return soft_max_nd.transpose(dim, len(input_size) - 1)
+def _get_clamping_buffer(tensor):
+ clamp_eps = 1e-6
+ if isinstance(tensor, Variable):
+ tensor = tensor.data
+ if isinstance(tensor, (torch.DoubleTensor, torch.cuda.DoubleTensor)):
+ clamp_eps = 1e-15
+ return clamp_eps
+
+
def get_probs_and_logits(ps=None, logits=None, is_multidimensional=True):
"""
Convert probability values to logits, or vice-versa. Either `ps` or
@@ -149,14 +158,17 @@
:return: tuple containing raw probabilities and logits as tensors
"""
assert (ps is None) != (logits is None)
+ if ps is not None:
+ eps = _get_clamping_buffer(ps)
+ ps_clamped = ps.clamp(min=eps, max=1 - eps)
if is_multidimensional:
if ps is None:
ps = softmax(logits, -1)
else:
- logits = torch.log(ps)
+ logits = torch.log(ps_clamped)
else:
if ps is None:
ps = F.sigmoid(logits)
else:
- logits = torch.log(ps) - torch.log1p(-ps)
+ logits = torch.log(ps_clamped) - torch.log1p(-ps_clamped)
return ps, logits
|
{"golden_diff": "diff --git a/pyro/distributions/util.py b/pyro/distributions/util.py\n--- a/pyro/distributions/util.py\n+++ b/pyro/distributions/util.py\n@@ -133,6 +133,15 @@\n return soft_max_nd.transpose(dim, len(input_size) - 1)\n \n \n+def _get_clamping_buffer(tensor):\n+ clamp_eps = 1e-6\n+ if isinstance(tensor, Variable):\n+ tensor = tensor.data\n+ if isinstance(tensor, (torch.DoubleTensor, torch.cuda.DoubleTensor)):\n+ clamp_eps = 1e-15\n+ return clamp_eps\n+\n+\n def get_probs_and_logits(ps=None, logits=None, is_multidimensional=True):\n \"\"\"\n Convert probability values to logits, or vice-versa. Either `ps` or\n@@ -149,14 +158,17 @@\n :return: tuple containing raw probabilities and logits as tensors\n \"\"\"\n assert (ps is None) != (logits is None)\n+ if ps is not None:\n+ eps = _get_clamping_buffer(ps)\n+ ps_clamped = ps.clamp(min=eps, max=1 - eps)\n if is_multidimensional:\n if ps is None:\n ps = softmax(logits, -1)\n else:\n- logits = torch.log(ps)\n+ logits = torch.log(ps_clamped)\n else:\n if ps is None:\n ps = F.sigmoid(logits)\n else:\n- logits = torch.log(ps) - torch.log1p(-ps)\n+ logits = torch.log(ps_clamped) - torch.log1p(-ps_clamped)\n return ps, logits\n", "issue": "New Bernoulli parameterization lead to subtle bug in AIR model\nWhen I merged #416 into a local branch it broke the AIR example.\r\n\r\nHere's a simplified instance of the problem:\r\n\r\n```python\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom pyro.distributions import Bernoulli\r\nfrom torch.nn.functional import sigmoid\r\n\r\np = Variable(torch.Tensor([0]), requires_grad=True)\r\nb = Bernoulli(sigmoid(p) * 0.0)\r\nlog_pdf = b.batch_log_pdf(Variable(torch.Tensor([0])))\r\nlog_pdf.sum().backward()\r\nprint(p.grad)\r\n```\r\n\r\nPrior to #416 this returned a `grad` of zero as expected, but it now returns `nan`:\r\n\r\n```\r\npyro$ git rev-parse --short HEAD\r\n71bca18\r\npyro$ python3 bern.py \r\nVariable containing:\r\n-0\r\n[torch.FloatTensor of size 1]\r\n\r\npyro$ git rev-parse --short HEAD\r\na85525a\r\npyro$ python3 bern.py \r\nVariable containing:\r\nnan\r\n[torch.FloatTensor of size 1]\r\n```\r\n\r\nI suspect that the problem is that converting between probabilities and log odds introduces an intemediate `-inf`, which messes up autograd.\r\n\r\nI may be able to adjust the model to work around this, but either way, should this be considered a bug? 
(It seems like it could trip other people up, and chasing down the source of the `nan`s is tricky.)\r\n\n", "before_files": [{"content": "import torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\ndef log_gamma(xx):\n if isinstance(xx, Variable):\n ttype = xx.data.type()\n elif isinstance(xx, torch.Tensor):\n ttype = xx.type()\n gamma_coeff = [\n 76.18009172947146,\n -86.50532032941677,\n 24.01409824083091,\n -1.231739572450155,\n 0.1208650973866179e-2,\n -0.5395239384953e-5,\n ]\n magic1 = 1.000000000190015\n magic2 = 2.5066282746310005\n x = xx - 1.0\n t = x + 5.5\n t = t - (x + 0.5) * torch.log(t)\n ser = Variable(torch.ones(x.size()).type(ttype)) * magic1\n for c in gamma_coeff:\n x = x + 1.0\n ser = ser + torch.pow(x / c, -1)\n return torch.log(ser * magic2) - t\n\n\ndef log_beta(t):\n \"\"\"\n Computes log Beta function.\n\n :param t:\n :type t: torch.autograd.Variable of dimension 1 or 2\n :rtype: torch.autograd.Variable of float (if t.dim() == 1) or torch.Tensor (if t.dim() == 2)\n \"\"\"\n assert t.dim() in (1, 2)\n if t.dim() == 1:\n numer = torch.sum(log_gamma(t))\n denom = log_gamma(torch.sum(t))\n else:\n numer = torch.sum(log_gamma(t), 1)\n denom = log_gamma(torch.sum(t, 1))\n return numer - denom\n\n\ndef move_to_same_host_as(source, destin):\n \"\"\"\n Returns source or a copy of `source` such that `source.is_cuda == `destin.is_cuda`.\n \"\"\"\n return source.cuda() if destin.is_cuda else source.cpu()\n\n\ndef torch_zeros_like(x):\n \"\"\"\n Polyfill for `torch.zeros_like()`.\n \"\"\"\n # Work around https://github.com/pytorch/pytorch/issues/2906\n if isinstance(x, Variable):\n return Variable(torch_zeros_like(x.data))\n # Support Pytorch before https://github.com/pytorch/pytorch/pull/2489\n try:\n return torch.zeros_like(x)\n except AttributeError:\n return torch.zeros(x.size()).type_as(x)\n\n\ndef torch_ones_like(x):\n \"\"\"\n Polyfill for `torch.ones_like()`.\n \"\"\"\n # Work around https://github.com/pytorch/pytorch/issues/2906\n if isinstance(x, Variable):\n return Variable(torch_ones_like(x.data))\n # Support Pytorch before https://github.com/pytorch/pytorch/pull/2489\n try:\n return torch.ones_like(x)\n except AttributeError:\n return torch.ones(x.size()).type_as(x)\n\n\ndef torch_eye(n, m=None, out=None):\n \"\"\"\n Like `torch.eye()`, but works with cuda tensors.\n \"\"\"\n if m is None:\n m = n\n try:\n return torch.eye(n, m, out=out)\n except TypeError:\n # Only catch errors due to torch.eye() not being availble for cuda tensors.\n module = torch.Tensor.__module__ if out is None else type(out).__module__\n if module != 'torch.cuda':\n raise\n Tensor = getattr(torch, torch.Tensor.__name__)\n cpu_out = Tensor(n, m)\n cuda_out = torch.eye(m, n, out=cpu_out).cuda()\n return cuda_out if out is None else out.copy_(cuda_out)\n\n\ndef torch_multinomial(input, num_samples, replacement=False):\n \"\"\"\n Like `torch.multinomial()` but works with cuda tensors.\n Does not support keyword argument `out`.\n \"\"\"\n if input.is_cuda:\n return torch_multinomial(input.cpu(), num_samples, replacement).cuda()\n else:\n return torch.multinomial(input, num_samples, replacement)\n\n\ndef softmax(x, dim=-1):\n \"\"\"\n TODO: change to use the default pyTorch implementation when available\n Source: https://discuss.pytorch.org/t/why-softmax-function-cant-specify-the-dimension-to-operate/2637\n :param x: tensor\n :param dim: Dimension to apply the softmax function to. 
The elements of the tensor in this\n dimension must sum to 1.\n :return: tensor having the same dimension as `x` rescaled along dim\n \"\"\"\n input_size = x.size()\n\n trans_input = x.transpose(dim, len(input_size) - 1)\n trans_size = trans_input.size()\n\n input_2d = trans_input.contiguous().view(-1, trans_size[-1])\n\n soft_max_2d = F.softmax(input_2d)\n\n soft_max_nd = soft_max_2d.view(*trans_size)\n return soft_max_nd.transpose(dim, len(input_size) - 1)\n\n\ndef get_probs_and_logits(ps=None, logits=None, is_multidimensional=True):\n \"\"\"\n Convert probability values to logits, or vice-versa. Either `ps` or\n `logits` should be specified, but not both.\n\n :param ps: tensor of probabilities. Should be in the interval *[0, 1]*.\n If, `is_multidimensional = True`, then must be normalized along\n axis -1.\n :param logits: tensor of logit values.\n :param is_multidimensional: determines the computation of ps from logits,\n and vice-versa. For the multi-dimensional case, logit values are\n assumed to be non-normalized log probabilities, whereas for the uni-\n dimensional case, it specifically refers to log odds.\n :return: tuple containing raw probabilities and logits as tensors\n \"\"\"\n assert (ps is None) != (logits is None)\n if is_multidimensional:\n if ps is None:\n ps = softmax(logits, -1)\n else:\n logits = torch.log(ps)\n else:\n if ps is None:\n ps = F.sigmoid(logits)\n else:\n logits = torch.log(ps) - torch.log1p(-ps)\n return ps, logits\n", "path": "pyro/distributions/util.py"}]}
| 2,684 | 363 |
gh_patches_debug_1231
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-583
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Python 3.9
Python 3.9 will be released 2020-10-05.
Here are some steps before its release:
* Start testing with prerelease
After release:
* Ensure tests run with released version
* Add 3.9 PyPI classifier
* Enable PYthon wheel building in release
</issue>
<code>
[start of setup.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import os
5 import sys
6
7 from setuptools import Extension, find_packages, setup
8
9 with open("README.md", "r") as fp:
10 long_description = fp.read()
11
12 packages = find_packages("src")
13 if sys.version_info < (3, 6):
14 packages = [p for p in packages if not p.startswith("scout_apm.async_")]
15
16 compile_extensions = (
17 # Python 3+
18 sys.version_info >= (3,)
19 # Not Jython
20 and not sys.platform.startswith("java")
21 # Not PyPy
22 and "__pypy__" not in sys.builtin_module_names
23 # Not explicitly disabled
24 and (os.environ.get("SCOUT_DISABLE_EXTENSIONS", "") == "")
25 )
26 if compile_extensions:
27 ext_modules = [
28 Extension(
29 name=str("scout_apm.core._objtrace"),
30 sources=[str("src/scout_apm/core/_objtrace.c")],
31 optional=True,
32 )
33 ]
34 else:
35 ext_modules = []
36
37 setup(
38 name="scout_apm",
39 version="2.16.2",
40 description="Scout Application Performance Monitoring Agent",
41 long_description=long_description,
42 long_description_content_type="text/markdown",
43 url="https://github.com/scoutapp/scout_apm_python",
44 project_urls={
45 "Documentation": "https://docs.scoutapm.com/#python-agent",
46 "Changelog": (
47 "https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md"
48 ),
49 },
50 author="Scout",
51 author_email="[email protected]",
52 license="MIT",
53 zip_safe=False,
54 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
55 packages=packages,
56 package_dir={str(""): str("src")},
57 ext_modules=ext_modules,
58 entry_points={
59 "console_scripts": [
60 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
61 ]
62 },
63 install_requires=[
64 'asgiref ; python_version >= "3.5"',
65 'importlib-metadata ; python_version < "3.8"',
66 "psutil>=5,<6",
67 'urllib3[secure] < 1.25 ; python_version < "3.5"',
68 'urllib3[secure] < 2 ; python_version >= "3.5"',
69 "wrapt>=1.10,<2.0",
70 ],
71 keywords="apm performance monitoring development",
72 classifiers=[
73 "Development Status :: 5 - Production/Stable",
74 "Framework :: Bottle",
75 "Framework :: Django",
76 "Framework :: Django :: 1.8",
77 "Framework :: Django :: 1.9",
78 "Framework :: Django :: 1.10",
79 "Framework :: Django :: 1.11",
80 "Framework :: Django :: 2.0",
81 "Framework :: Django :: 2.1",
82 "Framework :: Django :: 2.2",
83 "Framework :: Django :: 3.0",
84 "Framework :: Django :: 3.1",
85 "Framework :: Flask",
86 "Framework :: Pyramid",
87 "Intended Audience :: Developers",
88 "Topic :: System :: Monitoring",
89 "License :: OSI Approved :: MIT License",
90 "Operating System :: MacOS",
91 "Operating System :: POSIX",
92 "Operating System :: POSIX :: Linux",
93 "Programming Language :: Python :: 2",
94 "Programming Language :: Python :: 2.7",
95 "Programming Language :: Python :: 3",
96 "Programming Language :: Python :: 3.4",
97 "Programming Language :: Python :: 3.5",
98 "Programming Language :: Python :: 3.6",
99 "Programming Language :: Python :: 3.7",
100 "Programming Language :: Python :: 3.8",
101 ],
102 )
103
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -98,5 +98,6 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
],
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -98,5 +98,6 @@\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n ],\n )\n", "issue": "Support Python 3.9\nPython 3.9 will be released 2020-10-05.\r\n\r\nHere are some steps before its release:\r\n\r\n* Start testing with prerelease\r\n\r\nAfter release:\r\n* Ensure tests run with released version\r\n* Add 3.9 PyPI classifier\r\n* Enable PYthon wheel building in release\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.16.2\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=\"apm performance monitoring development\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 
3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
| 1,660 | 84 |
gh_patches_debug_37571
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-4133
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
What should `pip install dbt` do after v1?
Split off from https://github.com/dbt-labs/dbt-core/issues/3968
Everyone will be able and encouraged to do the following after v1:
- `pip install dbt-core`
- `pip install dbt-<adapter>`
The big question is: what to do with the PyPI package named `dbt`, defined in this repo as a thing _separate_ from `dbt-core`? Note that this is just a cosmetic consideration—it shouldn't change anything about how we're packaging/distributing the underlying components—we just know that a lot of people are still using `pip install dbt`. Starting in v1, this could:
1. **Full backwards compatibility, with warning:** Raise a deprecation warning, then install `dbt-core`, `dbt-postgres`, `dbt-redshift`, `dbt-bigquery`. This might be tricky if we've released a newer patch/prerelease of dbt-core than the others, but I think our compatibility operator (`~=`) could work here
2. **Very limited backwards compatibility, with warning**: Raise a deprecation warning, then install only the code contained in this repository (`dbt-core` + `dbt-postgres`, or `dbt-core` only), knowing that most people will encounter errors and have to switch
3. **Raise an explicit error:** If all of that is too tricky to figure out, we should keep it simple and just raise a good old-fashioned error message when someone tries `pip install dbt` (unqualified) or `pip install dbt==1.0.0`: "Going forward, you must install `dbt-<adapter>`"
I'm leaning toward the first option right now. I'm very open to other opinions.
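For reference, a sketch of how the compatible-release operator from option 1 behaves, using the `packaging` library and purely illustrative version numbers:
```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=1.0.0")   # shorthand for >=1.0.0, ==1.0.*

print("1.0.4" in spec)   # True: a later dbt-core patch release still satisfies it
print("1.1.0" in spec)   # False: the next minor release falls outside the range
```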
</issue>
<code>
[start of plugins/postgres/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 PSYCOPG2_MESSAGE = '''
23 No package name override was set.
24 Using 'psycopg2-binary' package to satisfy 'psycopg2'
25
26 If you experience segmentation faults, silent crashes, or installation errors,
27 consider retrying with the 'DBT_PSYCOPG2_NAME' environment variable set to
28 'psycopg2'. It may require a compiler toolchain and development libraries!
29 '''.strip()
30
31
32 def _dbt_psycopg2_name():
33 # if the user chose something, use that
34 package_name = os.getenv('DBT_PSYCOPG2_NAME', '')
35 if package_name:
36 return package_name
37
38 # default to psycopg2-binary for all OSes/versions
39 print(PSYCOPG2_MESSAGE)
40 return 'psycopg2-binary'
41
42
43 package_name = "dbt-postgres"
44 package_version = "1.0.0b2"
45 description = """The postgres adpter plugin for dbt (data build tool)"""
46
47 this_directory = os.path.abspath(os.path.dirname(__file__))
48 with open(os.path.join(this_directory, 'README.md')) as f:
49 long_description = f.read()
50
51 DBT_PSYCOPG2_NAME = _dbt_psycopg2_name()
52
53 setup(
54 name=package_name,
55 version=package_version,
56 description=description,
57 long_description=description,
58 long_description_content_type='text/markdown',
59 author="dbt Labs",
60 author_email="[email protected]",
61 url="https://github.com/dbt-labs/dbt-core",
62 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
63 package_data={
64 'dbt': [
65 'include/postgres/dbt_project.yml',
66 'include/postgres/sample_profiles.yml',
67 'include/postgres/macros/*.sql',
68 'include/postgres/macros/**/*.sql',
69 ]
70 },
71 install_requires=[
72 'dbt-core=={}'.format(package_version),
73 '{}~=2.8'.format(DBT_PSYCOPG2_NAME),
74 ],
75 zip_safe=False,
76 classifiers=[
77 'Development Status :: 5 - Production/Stable',
78
79 'License :: OSI Approved :: Apache Software License',
80
81 'Operating System :: Microsoft :: Windows',
82 'Operating System :: MacOS :: MacOS X',
83 'Operating System :: POSIX :: Linux',
84
85 'Programming Language :: Python :: 3.6',
86 'Programming Language :: Python :: 3.7',
87 'Programming Language :: Python :: 3.8',
88 'Programming Language :: Python :: 3.9',
89 ],
90 python_requires=">=3.6.2",
91 )
92
[end of plugins/postgres/setup.py]
[start of setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21 this_directory = os.path.abspath(os.path.dirname(__file__))
22 with open(os.path.join(this_directory, 'README.md')) as f:
23 long_description = f.read()
24
25
26 package_name = "dbt"
27 package_version = "1.0.0b2"
28 description = """With dbt, data analysts and engineers can build analytics \
29 the way engineers build applications."""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35
36 description=description,
37 long_description=long_description,
38 long_description_content_type='text/markdown',
39
40 author="dbt Labs",
41 author_email="[email protected]",
42 url="https://github.com/dbt-labs/dbt-core",
43 packages=[],
44 install_requires=[
45 'dbt-core=={}'.format(package_version),
46 'dbt-postgres=={}'.format(package_version),
47 ],
48 zip_safe=False,
49 classifiers=[
50 'Development Status :: 5 - Production/Stable',
51
52 'License :: OSI Approved :: Apache Software License',
53
54 'Operating System :: Microsoft :: Windows',
55 'Operating System :: MacOS :: MacOS X',
56 'Operating System :: POSIX :: Linux',
57
58 'Programming Language :: Python :: 3.6',
59 'Programming Language :: Python :: 3.7',
60 'Programming Language :: Python :: 3.8',
61 'Programming Language :: Python :: 3.9',
62 ],
63 python_requires=">=3.6.2",
64 )
65
[end of setup.py]
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 6):
6 print('Error: dbt does not support this version of Python.')
7 print('Please upgrade to Python 3.6 or higher.')
8 sys.exit(1)
9
10
11 from setuptools import setup
12 try:
13 from setuptools import find_namespace_packages
14 except ImportError:
15 # the user has a downlevel version of setuptools.
16 print('Error: dbt requires setuptools v40.1.0 or higher.')
17 print('Please upgrade setuptools with "pip install --upgrade setuptools" '
18 'and try again')
19 sys.exit(1)
20
21
22 def read(fname):
23 return open(os.path.join(os.path.dirname(__file__), fname)).read()
24
25
26 package_name = "dbt-core"
27 package_version = "1.0.0b2"
28 description = """dbt (data build tool) is a command line tool that helps \
29 analysts and engineers transform data in their warehouse more effectively"""
30
31
32 setup(
33 name=package_name,
34 version=package_version,
35 description=description,
36 long_description=description,
37 author="dbt Labs",
38 author_email="[email protected]",
39 url="https://github.com/dbt-labs/dbt-core",
40 packages=find_namespace_packages(include=['dbt', 'dbt.*']),
41 include_package_data = True,
42 test_suite='test',
43 entry_points={
44 'console_scripts': [
45 'dbt = dbt.main:main',
46 ],
47 },
48 scripts=[
49 'scripts/dbt',
50 ],
51 install_requires=[
52 'Jinja2==2.11.3',
53 'agate>=1.6,<1.6.2',
54 'click>=8,<9',
55 'colorama>=0.3.9,<0.4.5',
56 'dataclasses>=0.6,<0.9;python_version<"3.7"',
57 'hologram==0.0.14',
58 'isodate>=0.6,<0.7',
59 'logbook>=1.5,<1.6',
60 'mashumaro==2.5',
61 'minimal-snowplow-tracker==0.0.2',
62 'networkx>=2.3,<3',
63 'packaging>=20.9,<22.0',
64 'sqlparse>=0.2.3,<0.5',
65 'dbt-extractor==0.4.0',
66 'typing-extensions>=3.7.4,<3.11',
67 'werkzeug>=1,<3',
68 # the following are all to match snowflake-connector-python
69 'requests<3.0.0',
70 'idna>=2.5,<4',
71 'cffi>=1.9,<2.0.0',
72 ],
73 zip_safe=False,
74 classifiers=[
75 'Development Status :: 5 - Production/Stable',
76
77 'License :: OSI Approved :: Apache Software License',
78
79 'Operating System :: Microsoft :: Windows',
80 'Operating System :: MacOS :: MacOS X',
81 'Operating System :: POSIX :: Linux',
82
83 'Programming Language :: Python :: 3.6',
84 'Programming Language :: Python :: 3.7',
85 'Programming Language :: Python :: 3.8',
86 'Programming Language :: Python :: 3.9',
87 ],
88 python_requires=">=3.6.3",
89 )
90
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -19,21 +19,23 @@
sys.exit(1)
-def read(fname):
- return open(os.path.join(os.path.dirname(__file__), fname)).read()
+this_directory = os.path.abspath(os.path.dirname(__file__))
+with open(os.path.join(this_directory, 'README.md')) as f:
+ long_description = f.read()
package_name = "dbt-core"
package_version = "1.0.0b2"
-description = """dbt (data build tool) is a command line tool that helps \
-analysts and engineers transform data in their warehouse more effectively"""
+description = """With dbt, data analysts and engineers can build analytics \
+the way engineers build applications."""
setup(
name=package_name,
version=package_version,
description=description,
- long_description=description,
+ long_description=long_description,
+ long_description_content_type='text/markdown',
author="dbt Labs",
author_email="[email protected]",
url="https://github.com/dbt-labs/dbt-core",
diff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py
--- a/plugins/postgres/setup.py
+++ b/plugins/postgres/setup.py
@@ -54,7 +54,7 @@
name=package_name,
version=package_version,
description=description,
- long_description=description,
+ long_description=long_description,
long_description_content_type='text/markdown',
author="dbt Labs",
author_email="[email protected]",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -2,6 +2,18 @@
import os
import sys
+
+if 'sdist' not in sys.argv:
+ print('')
+ print('As of v1.0.0, `pip install dbt` is no longer supported.')
+ print('Instead, please use either:')
+ print(' - `pip install dbt-core`, for core functionality')
+ print(' - `pip install dbt-<adapter>`, to use dbt with your database, platform, or query engine')
+ print('See full list: https://docs.getdbt.com/docs/available-adapters')
+ print('')
+ sys.exit(1)
+
+
if sys.version_info < (3, 6):
print('Error: dbt does not support this version of Python.')
print('Please upgrade to Python 3.6 or higher.')
@@ -40,14 +52,9 @@
author="dbt Labs",
author_email="[email protected]",
url="https://github.com/dbt-labs/dbt-core",
- packages=[],
- install_requires=[
- 'dbt-core=={}'.format(package_version),
- 'dbt-postgres=={}'.format(package_version),
- ],
zip_safe=False,
classifiers=[
- 'Development Status :: 5 - Production/Stable',
+ 'Development Status :: 7 - Inactive',
'License :: OSI Approved :: Apache Software License',
|
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -19,21 +19,23 @@\n sys.exit(1)\n \n \n-def read(fname):\n- return open(os.path.join(os.path.dirname(__file__), fname)).read()\n+this_directory = os.path.abspath(os.path.dirname(__file__))\n+with open(os.path.join(this_directory, 'README.md')) as f:\n+ long_description = f.read()\n \n \n package_name = \"dbt-core\"\n package_version = \"1.0.0b2\"\n-description = \"\"\"dbt (data build tool) is a command line tool that helps \\\n-analysts and engineers transform data in their warehouse more effectively\"\"\"\n+description = \"\"\"With dbt, data analysts and engineers can build analytics \\\n+the way engineers build applications.\"\"\"\n \n \n setup(\n name=package_name,\n version=package_version,\n description=description,\n- long_description=description,\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\ndiff --git a/plugins/postgres/setup.py b/plugins/postgres/setup.py\n--- a/plugins/postgres/setup.py\n+++ b/plugins/postgres/setup.py\n@@ -54,7 +54,7 @@\n name=package_name,\n version=package_version,\n description=description,\n- long_description=description,\n+ long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -2,6 +2,18 @@\n import os\n import sys\n \n+\n+if 'sdist' not in sys.argv:\n+ print('')\n+ print('As of v1.0.0, `pip install dbt` is no longer supported.')\n+ print('Instead, please use either:')\n+ print(' - `pip install dbt-core`, for core functionality')\n+ print(' - `pip install dbt-<adapter>`, to use dbt with your database, platform, or query engine')\n+ print('See full list: https://docs.getdbt.com/docs/available-adapters')\n+ print('')\n+ sys.exit(1)\n+\n+\n if sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n@@ -40,14 +52,9 @@\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n- packages=[],\n- install_requires=[\n- 'dbt-core=={}'.format(package_version),\n- 'dbt-postgres=={}'.format(package_version),\n- ],\n zip_safe=False,\n classifiers=[\n- 'Development Status :: 5 - Production/Stable',\n+ 'Development Status :: 7 - Inactive',\n \n 'License :: OSI Approved :: Apache Software License',\n", "issue": "What should `pip install dbt` do after v1?\nSplit off from https://github.com/dbt-labs/dbt-core/issues/3968\r\n\r\nEveryone will be able and encouraged to do the following after v1:\r\n- `pip install dbt-core`\r\n- `pip install dbt-<adapter>`\r\n\r\nThe big question is, what to do with the PyPi package named `dbt`, defined in this repo as a thing _separate_ from `dbt-core`? Note that this is just a cosmetic consideration\u2014it shouldn't change anything about how we're packaging/distributing the underlying components\u2014we just know that a lot of people are still using `pip install dbt`. Starting in v1, this could:\r\n\r\n1. **Full backwards compatibility, with warning:** Raise a deprecation warning, then install `dbt-core`, `dbt-postgres`, `dbt-redshift`, `dbt-bigquery`. 
This might be tricky if we've released a newer patch/prerelease of dbt-core than the others, but I think our compatibility operator (`~=`) could work here\r\n2. **Very limited backwards compatibility, with warning**: Raise a deprecation warning, then install only the code contained in this repository (`dbt-core` + `dbt-postgres`, or `dbt-core` only), knowing that most people will encounter errors and have to switch)\r\n3. **Raise an explicit error:** If all of that is too tricky to figure out, we should keep it simple and just raise a good old fashioned error message when someone tries `pip install dbt` (unqualified) or `pip install dbt==1.0.0`: \"Going forward, you must install dbt-<adapter`\"\r\n\r\nI'm leaning toward the first option right now. I'm very open to other opinions.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\nPSYCOPG2_MESSAGE = '''\nNo package name override was set.\nUsing 'psycopg2-binary' package to satisfy 'psycopg2'\n\nIf you experience segmentation faults, silent crashes, or installation errors,\nconsider retrying with the 'DBT_PSYCOPG2_NAME' environment variable set to\n'psycopg2'. It may require a compiler toolchain and development libraries!\n'''.strip()\n\n\ndef _dbt_psycopg2_name():\n # if the user chose something, use that\n package_name = os.getenv('DBT_PSYCOPG2_NAME', '')\n if package_name:\n return package_name\n\n # default to psycopg2-binary for all OSes/versions\n print(PSYCOPG2_MESSAGE)\n return 'psycopg2-binary'\n\n\npackage_name = \"dbt-postgres\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"The postgres adpter plugin for dbt (data build tool)\"\"\"\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\nDBT_PSYCOPG2_NAME = _dbt_psycopg2_name()\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n long_description_content_type='text/markdown',\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n package_data={\n 'dbt': [\n 'include/postgres/dbt_project.yml',\n 'include/postgres/sample_profiles.yml',\n 'include/postgres/macros/*.sql',\n 'include/postgres/macros/**/*.sql',\n ]\n },\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n '{}~=2.8'.format(DBT_PSYCOPG2_NAME),\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "plugins/postgres/setup.py"}, {"content": "#!/usr/bin/env python\nimport 
os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, 'README.md')) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n\n description=description,\n long_description=long_description,\n long_description_content_type='text/markdown',\n\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=[],\n install_requires=[\n 'dbt-core=={}'.format(package_version),\n 'dbt-postgres=={}'.format(package_version),\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.2\",\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 6):\n print('Error: dbt does not support this version of Python.')\n print('Please upgrade to Python 3.6 or higher.')\n sys.exit(1)\n\n\nfrom setuptools import setup\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print('Error: dbt requires setuptools v40.1.0 or higher.')\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" '\n 'and try again')\n sys.exit(1)\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.0.0b2\"\ndescription = \"\"\"dbt (data build tool) is a command line tool that helps \\\nanalysts and engineers transform data in their warehouse more effectively\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=description,\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=['dbt', 'dbt.*']),\n include_package_data = True,\n test_suite='test',\n entry_points={\n 'console_scripts': [\n 'dbt = dbt.main:main',\n ],\n },\n scripts=[\n 'scripts/dbt',\n ],\n install_requires=[\n 'Jinja2==2.11.3',\n 'agate>=1.6,<1.6.2',\n 'click>=8,<9',\n 'colorama>=0.3.9,<0.4.5',\n 'dataclasses>=0.6,<0.9;python_version<\"3.7\"',\n 'hologram==0.0.14',\n 'isodate>=0.6,<0.7',\n 'logbook>=1.5,<1.6',\n 'mashumaro==2.5',\n 'minimal-snowplow-tracker==0.0.2',\n 'networkx>=2.3,<3',\n 'packaging>=20.9,<22.0',\n 'sqlparse>=0.2.3,<0.5',\n 'dbt-extractor==0.4.0',\n 'typing-extensions>=3.7.4,<3.11',\n 'werkzeug>=1,<3',\n # the following 
are all to match snowflake-connector-python\n 'requests<3.0.0',\n 'idna>=2.5,<4',\n 'cffi>=1.9,<2.0.0',\n ],\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n\n 'License :: OSI Approved :: Apache Software License',\n\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX :: Linux',\n\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n python_requires=\">=3.6.3\",\n)\n", "path": "core/setup.py"}]}
| 3,280 | 698 |
gh_patches_debug_34386
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-1678
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use function annotation syntax for Type Hints.
After dropping Python 2.7 support at #710, we can define type hints with function annotation syntax.
~~Do you have a plan to update the coding style guideline?~~
https://github.com/optuna/optuna/wiki/Coding-Style-Conventions
## Progress
- [x] `optuna/integration/sklearn.py` (#1735)
- [x] `optuna/study.py` - assigned to harpy
## Note to the questioner
We still cannot use the variable annotation syntax introduced by [PEP 526](https://www.python.org/dev/peps/pep-0526/) because Python 3.5 is still supported.
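For reference, a minimal before/after sketch of the conversion this issue asks for (the class and method below are illustrative only; the signature shape is modelled on `RandomSampler.__init__`, but this is not optuna code):
```python
from typing import Optional


class Example:
    # Type-comment style, needed while Python 2.7 was still supported:
    def set_seed_old(self, seed=None):
        # type: (Optional[int]) -> None
        self._seed = seed

    # Function annotation syntax, usable now that only Python 3 is supported:
    def set_seed_new(self, seed: Optional[int] = None) -> None:
        self._seed = seed
```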
</issue>
<code>
[start of optuna/samplers/_random.py]
1 import numpy
2
3 from optuna import distributions
4 from optuna.samplers import BaseSampler
5 from optuna import type_checking
6
7 if type_checking.TYPE_CHECKING:
8 from typing import Any # NOQA
9 from typing import Dict # NOQA
10 from typing import Optional # NOQA
11
12 from optuna.distributions import BaseDistribution # NOQA
13 from optuna.study import Study # NOQA
14 from optuna.trial import FrozenTrial # NOQA
15
16
17 class RandomSampler(BaseSampler):
18 """Sampler using random sampling.
19
20 This sampler is based on *independent sampling*.
21 See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.
22
23 Example:
24
25 .. testcode::
26
27 import optuna
28 from optuna.samplers import RandomSampler
29
30 def objective(trial):
31 x = trial.suggest_uniform('x', -5, 5)
32 return x**2
33
34 study = optuna.create_study(sampler=RandomSampler())
35 study.optimize(objective, n_trials=10)
36
37 Args:
38 seed: Seed for random number generator.
39 """
40
41 def __init__(self, seed=None):
42 # type: (Optional[int]) -> None
43
44 self._rng = numpy.random.RandomState(seed)
45
46 def reseed_rng(self) -> None:
47
48 self._rng = numpy.random.RandomState()
49
50 def infer_relative_search_space(self, study, trial):
51 # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
52
53 return {}
54
55 def sample_relative(self, study, trial, search_space):
56 # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
57
58 return {}
59
60 def sample_independent(self, study, trial, param_name, param_distribution):
61 # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any
62
63 if isinstance(param_distribution, distributions.UniformDistribution):
64 return self._rng.uniform(param_distribution.low, param_distribution.high)
65 elif isinstance(param_distribution, distributions.LogUniformDistribution):
66 log_low = numpy.log(param_distribution.low)
67 log_high = numpy.log(param_distribution.high)
68 return float(numpy.exp(self._rng.uniform(log_low, log_high)))
69 elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):
70 q = param_distribution.q
71 r = param_distribution.high - param_distribution.low
72 # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
73 low = 0 - 0.5 * q
74 high = r + 0.5 * q
75 s = self._rng.uniform(low, high)
76 v = numpy.round(s / q) * q + param_distribution.low
77 # v may slightly exceed range due to round-off errors.
78 return float(min(max(v, param_distribution.low), param_distribution.high))
79 elif isinstance(param_distribution, distributions.IntUniformDistribution):
80 # [low, high] is shifted to [0, r] to align sampled values at regular intervals.
81 r = (param_distribution.high - param_distribution.low) / param_distribution.step
82 # numpy.random.randint includes low but excludes high.
83 s = self._rng.randint(0, r + 1)
84 v = s * param_distribution.step + param_distribution.low
85 return int(v)
86 elif isinstance(param_distribution, distributions.IntLogUniformDistribution):
87 log_low = numpy.log(param_distribution.low - 0.5)
88 log_high = numpy.log(param_distribution.high + 0.5)
89 s = numpy.exp(self._rng.uniform(log_low, log_high))
90 v = numpy.round(s)
91 return int(min(max(v, param_distribution.low), param_distribution.high))
92 elif isinstance(param_distribution, distributions.CategoricalDistribution):
93 choices = param_distribution.choices
94 index = self._rng.randint(0, len(choices))
95 return choices[index]
96 else:
97 raise NotImplementedError
98
[end of optuna/samplers/_random.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optuna/samplers/_random.py b/optuna/samplers/_random.py
--- a/optuna/samplers/_random.py
+++ b/optuna/samplers/_random.py
@@ -1,17 +1,14 @@
+from typing import Any
+from typing import Dict
+from typing import Optional
+
import numpy
from optuna import distributions
+from optuna.distributions import BaseDistribution
from optuna.samplers import BaseSampler
-from optuna import type_checking
-
-if type_checking.TYPE_CHECKING:
- from typing import Any # NOQA
- from typing import Dict # NOQA
- from typing import Optional # NOQA
-
- from optuna.distributions import BaseDistribution # NOQA
- from optuna.study import Study # NOQA
- from optuna.trial import FrozenTrial # NOQA
+from optuna.study import Study
+from optuna.trial import FrozenTrial
class RandomSampler(BaseSampler):
@@ -38,8 +35,7 @@
seed: Seed for random number generator.
"""
- def __init__(self, seed=None):
- # type: (Optional[int]) -> None
+ def __init__(self, seed: Optional[int] = None) -> None:
self._rng = numpy.random.RandomState(seed)
@@ -47,18 +43,25 @@
self._rng = numpy.random.RandomState()
- def infer_relative_search_space(self, study, trial):
- # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]
+ def infer_relative_search_space(
+ self, study: Study, trial: FrozenTrial
+ ) -> Dict[str, BaseDistribution]:
return {}
- def sample_relative(self, study, trial, search_space):
- # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]
+ def sample_relative(
+ self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
+ ) -> Dict[str, Any]:
return {}
- def sample_independent(self, study, trial, param_name, param_distribution):
- # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any
+ def sample_independent(
+ self,
+ study: Study,
+ trial: FrozenTrial,
+ param_name: str,
+ param_distribution: distributions.BaseDistribution,
+ ) -> Any:
if isinstance(param_distribution, distributions.UniformDistribution):
return self._rng.uniform(param_distribution.low, param_distribution.high)
|
{"golden_diff": "diff --git a/optuna/samplers/_random.py b/optuna/samplers/_random.py\n--- a/optuna/samplers/_random.py\n+++ b/optuna/samplers/_random.py\n@@ -1,17 +1,14 @@\n+from typing import Any\n+from typing import Dict\n+from typing import Optional\n+\n import numpy\n \n from optuna import distributions\n+from optuna.distributions import BaseDistribution\n from optuna.samplers import BaseSampler\n-from optuna import type_checking\n-\n-if type_checking.TYPE_CHECKING:\n- from typing import Any # NOQA\n- from typing import Dict # NOQA\n- from typing import Optional # NOQA\n-\n- from optuna.distributions import BaseDistribution # NOQA\n- from optuna.study import Study # NOQA\n- from optuna.trial import FrozenTrial # NOQA\n+from optuna.study import Study\n+from optuna.trial import FrozenTrial\n \n \n class RandomSampler(BaseSampler):\n@@ -38,8 +35,7 @@\n seed: Seed for random number generator.\n \"\"\"\n \n- def __init__(self, seed=None):\n- # type: (Optional[int]) -> None\n+ def __init__(self, seed: Optional[int] = None) -> None:\n \n self._rng = numpy.random.RandomState(seed)\n \n@@ -47,18 +43,25 @@\n \n self._rng = numpy.random.RandomState()\n \n- def infer_relative_search_space(self, study, trial):\n- # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]\n+ def infer_relative_search_space(\n+ self, study: Study, trial: FrozenTrial\n+ ) -> Dict[str, BaseDistribution]:\n \n return {}\n \n- def sample_relative(self, study, trial, search_space):\n- # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n+ def sample_relative(\n+ self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n+ ) -> Dict[str, Any]:\n \n return {}\n \n- def sample_independent(self, study, trial, param_name, param_distribution):\n- # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any\n+ def sample_independent(\n+ self,\n+ study: Study,\n+ trial: FrozenTrial,\n+ param_name: str,\n+ param_distribution: distributions.BaseDistribution,\n+ ) -> Any:\n \n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._rng.uniform(param_distribution.low, param_distribution.high)\n", "issue": "Use function annotation syntax for Type Hints.\nAfter dropping Python 2.7 support at #710, we can define type hints with function annotation syntax. \r\n~~Do you have a plan to update the coding style guideline?~~\r\nhttps://github.com/optuna/optuna/wiki/Coding-Style-Conventions\r\n\r\n## Progress\r\n\r\n- [x] `optuna/integration/sklearn.py` (#1735)\r\n- [x] `optuna/study.py` - assigned to harpy\r\n\r\n## Note to the questioner\r\n\r\nWe still cannot use variable annotation syntax introduced by [PEP 526](https://www.python.org/dev/peps/pep-0526/) because we supports Python 3.5.\n", "before_files": [{"content": "import numpy\n\nfrom optuna import distributions\nfrom optuna.samplers import BaseSampler\nfrom optuna import type_checking\n\nif type_checking.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Dict # NOQA\n from typing import Optional # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.study import Study # NOQA\n from optuna.trial import FrozenTrial # NOQA\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"Sampler using random sampling.\n\n This sampler is based on *independent sampling*.\n See also :class:`~optuna.samplers.BaseSampler` for more details of 'independent sampling'.\n\n Example:\n\n .. 
testcode::\n\n import optuna\n from optuna.samplers import RandomSampler\n\n def objective(trial):\n x = trial.suggest_uniform('x', -5, 5)\n return x**2\n\n study = optuna.create_study(sampler=RandomSampler())\n study.optimize(objective, n_trials=10)\n\n Args:\n seed: Seed for random number generator.\n \"\"\"\n\n def __init__(self, seed=None):\n # type: (Optional[int]) -> None\n\n self._rng = numpy.random.RandomState(seed)\n\n def reseed_rng(self) -> None:\n\n self._rng = numpy.random.RandomState()\n\n def infer_relative_search_space(self, study, trial):\n # type: (Study, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (Study, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (Study, FrozenTrial, str, distributions.BaseDistribution) -> Any\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self._rng.uniform(param_distribution.low, param_distribution.high)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n log_low = numpy.log(param_distribution.low)\n log_high = numpy.log(param_distribution.high)\n return float(numpy.exp(self._rng.uniform(log_low, log_high)))\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n q = param_distribution.q\n r = param_distribution.high - param_distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n s = self._rng.uniform(low, high)\n v = numpy.round(s / q) * q + param_distribution.low\n # v may slightly exceed range due to round-off errors.\n return float(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n r = (param_distribution.high - param_distribution.low) / param_distribution.step\n # numpy.random.randint includes low but excludes high.\n s = self._rng.randint(0, r + 1)\n v = s * param_distribution.step + param_distribution.low\n return int(v)\n elif isinstance(param_distribution, distributions.IntLogUniformDistribution):\n log_low = numpy.log(param_distribution.low - 0.5)\n log_high = numpy.log(param_distribution.high + 0.5)\n s = numpy.exp(self._rng.uniform(log_low, log_high))\n v = numpy.round(s)\n return int(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n index = self._rng.randint(0, len(choices))\n return choices[index]\n else:\n raise NotImplementedError\n", "path": "optuna/samplers/_random.py"}]}
| 1,726 | 583 |
gh_patches_debug_41921
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-1862
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_url_for_time_range function in stereo.py in dataretriever not working correctly.
The following query:
``` python
from sunpy.time.timerange import TimeRange
from sunpy.net.vso.attrs import Time, Instrument
from sunpy.net.dataretriever.client import QueryResponse
import sunpy.net.dataretriever.sources.stereo as stereo
import astropy.units as u
LCClient = stereo.HETClient()
urls = LCClient._get_url_for_timerange(TimeRange('2008/12/01','2010/12/01'),'ahead', 15*u.min)
```
This should return a non-empty list of URLs but instead returns an empty list. The problem probably stems from the implementation of scraper.py in sunpy.util: the scraper does not work as intended on http://www.srl.caltech.edu/STEREO/DATA/HET.
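One way to narrow this down (a diagnostic sketch, not part of the original report; the pattern below is only an illustrative guess at the HET archive layout, not the one `HETClient` actually uses) is to point `sunpy.util.scraper.Scraper` at the archive directly and compare what `range` and `filelist` return for the same interval:
```python
from sunpy.time import TimeRange
from sunpy.util.scraper import Scraper

# Illustrative pattern only -- the real pattern used by HETClient may differ.
het_pattern = ('http://www.srl.caltech.edu/STEREO/DATA/HET/'
               'Ahead/15minute/AeH%y%b.15m')
scraper = Scraper(het_pattern)
timerange = TimeRange('2008/12/01', '2010/12/01')
print(scraper.range(timerange))     # candidate directories for the interval
print(scraper.filelist(timerange))  # files actually matched; an empty list reproduces the symptom
```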
</issue>
<code>
[start of sunpy/util/scraper.py]
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import datetime
5 import re
6
7 from bs4 import BeautifulSoup
8 from sunpy.extern import six
9 from sunpy.extern.six.moves import range, zip
10 from sunpy.extern.six.moves.urllib.request import urlopen
11
12 __all__ = ['Scraper']
13
14 # regular expressions to convert datetime format
15 TIME_CONVERSIONS = {'%Y': '\d{4}', '%y': '\d{2}',
16 '%b': '[A-Z]..', '%B': '\W', '%m': '\d{2}',
17 '%d': '\d{2}', '%j': '\d{3}',
18 '%H': '\d{2}', '%I': '\d{2}',
19 '%M': '\d{2}',
20 '%S': '\d{2}'}
21
22 class Scraper(object):
23 """
24 A Scraper to scrap web data archives based on dates.
25
26 Parameters
27 ----------
28 pattern : string
29 A string containing the url with the date encoded as
30 datetime formats, and any other parameter as kwargs
31 as string format.
32
33 Attributes
34 ----------
35 pattern : string
36 A converted string with the kwargs.
37 now : datetime.datetime
38 The pattern with the actual date.
39
40 Examples
41 --------
42 >>> # Downloading data from SolarMonitor.org
43 >>> from sunpy.util.scraper import Scraper
44 >>> solmon_pattern = ('http://solarmonitor.org/data/'
45 '%Y/%m/%d/fits/{instrument}/'
46 '{instrument}_{wave:05d}_fd_%Y%m%d_%H%M%S.fts.gz')
47 >>> solmon = Scraper(solmon_pattern, instrument = 'swap', wave = 174)
48 >>> print(solmon.pattern)
49 http://solarmonitor.org/data/%Y/%m/%d/fits/swap/swap_00174_fd_%Y%m%d_%H%M%S.fts.gz
50 >>> print(solmon.now)
51 http://solarmonitor.org/data/2012/01/25/fits/swap/swap_00174_fd_20120125_173301.fts.gz
52
53 Notes
54 -----
55 The now attribute does not return an existent file, but just how the
56 pattern looks with the actual time.
57 """
58 def __init__(self, pattern, **kwargs):
59 self.pattern = pattern.format(**kwargs)
60 self.now = datetime.datetime.now().strftime(self.pattern)
61
62 def matches(self, filepath, date):
63 return date.strftime(self.pattern) == filepath
64
65 def range(self, timerange):
66 """
67 Gets the directories for a certain range of time
68 (i.e. using `~sunpy.time.TimeRange`).
69
70 Parameters
71 ----------
72
73 timerange : `~sunpy.time.timerange.TimeRange`
74 Time interval where to find the directories for a given
75 pattern.
76
77 Returns
78 -------
79
80 directories : list of strings
81 List of all the possible directories valid for the time
82 range given. Notice that these directories may not exist
83 in the archive.
84 """
85 #find directory structure - without file names
86 directorypattern = os.path.dirname(self.pattern) + '/'
87 #TODO what if there's not slashes?
88 rangedelta = timerange.dt
89 timestep = self._smallerPattern(directorypattern)
90 if timestep is None:
91 return [directorypattern]
92 else:
93 # Number of elements in the time range (including end)
94 n_steps = rangedelta.total_seconds()/timestep.total_seconds()
95 TotalTimeElements = int(round(n_steps)) + 1
96 directories = [(timerange.start + n * timestep).strftime(directorypattern)
97 for n in range(TotalTimeElements)] #todo if date <= endate
98 return directories
99
100 def _URL_followsPattern(self, url):
101 """Check whether the url provided follows the pattern"""
102 pattern = self.pattern
103 for k,v in six.iteritems(TIME_CONVERSIONS):
104 pattern = pattern.replace(k, v)
105 matches = re.match(pattern, url)
106 if matches:
107 return matches.end() == matches.endpos == len(self.now)
108 return False
109
110 def _extractDateURL(self, url):
111 """Extracts the date from a particular url following the pattern"""
112 # url_to_list substitutes '.' and '_' for '/' to then create
113 # a list of all the blocks in times - assuming they are all
114 # separated with either '.', '_' or '/'
115 url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
116 pattern_list = url_to_list(self.pattern)
117 url_list = url_to_list(url)
118
119 time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',
120 '%H', '%I', '%M', '%S']
121 final_date = []
122 final_pattern = []
123 # Find in directory and filename
124 for pattern_elem, url_elem in zip(pattern_list, url_list):
125 time_formats = [x for x in time_order if x in pattern_elem]
126 if len(time_formats) > 0:
127 final_date.append(url_elem)
128 final_pattern.append(pattern_elem)
129 for time_bit in time_formats:
130 time_order.remove(time_bit)
131 # Find and remove repeated elements eg: %Y in ['%Y', '%Y%m%d']
132 # Make all as single strings
133 date_together = ''.join(final_date)
134 pattern_together = ''.join(final_pattern)
135 re_together = pattern_together
136 for k, v in six.iteritems(TIME_CONVERSIONS):
137 re_together = re_together.replace(k, v)
138
139 # Create new empty lists
140 final_date = list()
141 final_pattern = list()
142 for p,r in zip(pattern_together.split('%')[1:], re_together.split('\\')[1:]):
143 regexp = '\\{}'.format(r)
144 pattern = '%{}'.format(p)
145 date_part = re.match(regexp, date_together)
146 date_together = date_together[:date_part.start()] + \
147 date_together[date_part.end():]
148 if pattern not in final_pattern:
149 final_pattern.append('%{}'.format(p))
150 final_date.append(date_part.group())
151 return datetime.datetime.strptime(' '.join(final_date),
152 ' '.join(final_pattern))
153
154 def filelist(self, timerange):
155 """
156 Returns the list of existent files in the archive for the
157 given time range.
158
159 Parameters
160 ----------
161
162 timerange : `~sunpy.time.TimeRange`
163 Time interval where to find the directories for a given
164 pattern.
165
166 Returns
167 -------
168
169 filesurls : list of strings
170 List of all the files found between the time range given.
171
172 Examples
173 --------
174 >>> from sunpy.time import TimeRange
175 >>> timerange = TimeRange('2015-01-01','2015-01-01T16:00:00')
176 >>> print(solmon.filelist(timerange))
177 ['http://solarmonitor.org/data/2015/01/01/fits/swap/swap_00174_fd_20150101_025423.fts.gz']
178 """
179 directories = self.range(timerange)
180 filesurls = []
181 for directory in directories:
182 try:
183 opn = urlopen(directory)
184 try:
185 soup = BeautifulSoup(opn)
186 for link in soup.find_all("a"):
187 href = link.get("href")
188 if href.endswith(self.pattern.split('.')[-1]):
189 fullpath = directory + href
190 if self._URL_followsPattern(fullpath):
191 datehref = self._extractDateURL(fullpath)
192 if (datehref >= timerange.start and
193 datehref <= timerange.end):
194 filesurls.append(fullpath)
195 finally:
196 opn.close()
197 except:
198 pass
199 return filesurls
200
201 def _smallerPattern(self, directoryPattern):
202 """Obtain the smaller time step for the given pattern"""
203 try:
204 if "%S" in directoryPattern:
205 return datetime.timedelta(seconds=1)
206 elif "%M" in directoryPattern:
207 return datetime.timedelta(minutes=1)
208 elif any(hour in directoryPattern for hour in ["%H", "%I"]):
209 return datetime.timedelta(hours=1)
210 elif any(day in directoryPattern for day in ["%d", "%j"]):
211 return datetime.timedelta(days=1)
212 elif any(month in directoryPattern for month in ["%b","%B","%m"]):
213 return datetime.timedelta(days=31)
214 elif any(year in directoryPattern for year in ["%Y", "%y"]):
215 return datetime.timedelta(days=365)
216 else:
217 return None
218 except:
219 raise
220
221
[end of sunpy/util/scraper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sunpy/util/scraper.py b/sunpy/util/scraper.py
--- a/sunpy/util/scraper.py
+++ b/sunpy/util/scraper.py
@@ -12,12 +12,13 @@
__all__ = ['Scraper']
# regular expressions to convert datetime format
+# added `%e` as for milliseconds `%f/1000`
TIME_CONVERSIONS = {'%Y': '\d{4}', '%y': '\d{2}',
- '%b': '[A-Z]..', '%B': '\W', '%m': '\d{2}',
+ '%b': '[A-Z][a-z]{2}', '%B': '\W', '%m': '\d{2}',
'%d': '\d{2}', '%j': '\d{3}',
'%H': '\d{2}', '%I': '\d{2}',
'%M': '\d{2}',
- '%S': '\d{2}'}
+ '%S': '\d{2}', '%e': '\d{3}', '%f': '\d{6}'}
class Scraper(object):
"""
@@ -57,7 +58,13 @@
"""
def __init__(self, pattern, **kwargs):
self.pattern = pattern.format(**kwargs)
- self.now = datetime.datetime.now().strftime(self.pattern)
+ milliseconds = re.search('\%e', self.pattern)
+ if not milliseconds:
+ self.now = datetime.datetime.now().strftime(self.pattern)
+ else:
+ now = datetime.datetime.now()
+ milliseconds_ = int(now.microsecond / 1000.)
+ self.now = now.strftime(self.pattern[0:milliseconds.start()] + str(milliseconds_) + self.pattern[milliseconds.end():])
def matches(self, filepath, date):
return date.strftime(self.pattern) == filepath
@@ -115,9 +122,8 @@
url_to_list = lambda txt: re.sub(r'\.|_', '/', txt).split('/')
pattern_list = url_to_list(self.pattern)
url_list = url_to_list(url)
-
time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',
- '%H', '%I', '%M', '%S']
+ '%H', '%I', '%M', '%S', '%e', '%f']
final_date = []
final_pattern = []
# Find in directory and filename
@@ -139,10 +145,13 @@
# Create new empty lists
final_date = list()
final_pattern = list()
+ re_together = re_together.replace('[A-Z]', '\\[A-Z]')
for p,r in zip(pattern_together.split('%')[1:], re_together.split('\\')[1:]):
- regexp = '\\{}'.format(r)
+ if p == 'e':
+ continue
+ regexp = '\\{}'.format(r) if not r.startswith('[') else r
pattern = '%{}'.format(p)
- date_part = re.match(regexp, date_together)
+ date_part = re.search(regexp, date_together)
date_together = date_together[:date_part.start()] + \
date_together[date_part.end():]
if pattern not in final_pattern:
@@ -182,7 +191,7 @@
try:
opn = urlopen(directory)
try:
- soup = BeautifulSoup(opn)
+ soup = BeautifulSoup(opn, "lxml")
for link in soup.find_all("a"):
href = link.get("href")
if href.endswith(self.pattern.split('.')[-1]):
|
{"golden_diff": "diff --git a/sunpy/util/scraper.py b/sunpy/util/scraper.py\n--- a/sunpy/util/scraper.py\n+++ b/sunpy/util/scraper.py\n@@ -12,12 +12,13 @@\n __all__ = ['Scraper']\n \n # regular expressions to convert datetime format\n+# added `%e` as for milliseconds `%f/1000`\n TIME_CONVERSIONS = {'%Y': '\\d{4}', '%y': '\\d{2}',\n- '%b': '[A-Z]..', '%B': '\\W', '%m': '\\d{2}',\n+ '%b': '[A-Z][a-z]{2}', '%B': '\\W', '%m': '\\d{2}',\n '%d': '\\d{2}', '%j': '\\d{3}',\n '%H': '\\d{2}', '%I': '\\d{2}',\n '%M': '\\d{2}',\n- '%S': '\\d{2}'}\n+ '%S': '\\d{2}', '%e': '\\d{3}', '%f': '\\d{6}'}\n \n class Scraper(object):\n \"\"\"\n@@ -57,7 +58,13 @@\n \"\"\"\n def __init__(self, pattern, **kwargs):\n self.pattern = pattern.format(**kwargs)\n- self.now = datetime.datetime.now().strftime(self.pattern)\n+ milliseconds = re.search('\\%e', self.pattern)\n+ if not milliseconds:\n+ self.now = datetime.datetime.now().strftime(self.pattern)\n+ else:\n+ now = datetime.datetime.now()\n+ milliseconds_ = int(now.microsecond / 1000.)\n+ self.now = now.strftime(self.pattern[0:milliseconds.start()] + str(milliseconds_) + self.pattern[milliseconds.end():])\n \n def matches(self, filepath, date):\n return date.strftime(self.pattern) == filepath\n@@ -115,9 +122,8 @@\n url_to_list = lambda txt: re.sub(r'\\.|_', '/', txt).split('/')\n pattern_list = url_to_list(self.pattern)\n url_list = url_to_list(url)\n-\n time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',\n- '%H', '%I', '%M', '%S']\n+ '%H', '%I', '%M', '%S', '%e', '%f']\n final_date = []\n final_pattern = []\n # Find in directory and filename\n@@ -139,10 +145,13 @@\n # Create new empty lists\n final_date = list()\n final_pattern = list()\n+ re_together = re_together.replace('[A-Z]', '\\\\[A-Z]')\n for p,r in zip(pattern_together.split('%')[1:], re_together.split('\\\\')[1:]):\n- regexp = '\\\\{}'.format(r)\n+ if p == 'e':\n+ continue\n+ regexp = '\\\\{}'.format(r) if not r.startswith('[') else r\n pattern = '%{}'.format(p)\n- date_part = re.match(regexp, date_together)\n+ date_part = re.search(regexp, date_together)\n date_together = date_together[:date_part.start()] + \\\n date_together[date_part.end():]\n if pattern not in final_pattern:\n@@ -182,7 +191,7 @@\n try:\n opn = urlopen(directory)\n try:\n- soup = BeautifulSoup(opn)\n+ soup = BeautifulSoup(opn, \"lxml\")\n for link in soup.find_all(\"a\"):\n href = link.get(\"href\")\n if href.endswith(self.pattern.split('.')[-1]):\n", "issue": "get_url_for_time_range function in stereo.py in dataretriever not working correctly.\nThe following query:-\n\n``` python\nfrom sunpy.time.timerange import TimeRange\nfrom sunpy.net.vso.attrs import Time, Instrument\nfrom sunpy.net.dataretriever.client import QueryResponse\nimport sunpy.net.dataretriever.sources.stereo as stereo\n\nLCClient = stereo.HETClient()\nurls = LCClient._get_url_for_timerange(TimeRange('2008/12/01','2010/12/01'),'ahead', 15*u.min)\n\n```\n\nShould return a non-empty list of urls but instead returns an empty list. Possible problem stems from the implementation of scraper.py in sunpy.util. 
The scraper doesn't work as intended on http://www.srl.caltech.edu/STEREO/DATA/HET.\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport datetime\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom sunpy.extern import six\nfrom sunpy.extern.six.moves import range, zip\nfrom sunpy.extern.six.moves.urllib.request import urlopen\n\n__all__ = ['Scraper']\n\n# regular expressions to convert datetime format\nTIME_CONVERSIONS = {'%Y': '\\d{4}', '%y': '\\d{2}',\n '%b': '[A-Z]..', '%B': '\\W', '%m': '\\d{2}',\n '%d': '\\d{2}', '%j': '\\d{3}',\n '%H': '\\d{2}', '%I': '\\d{2}',\n '%M': '\\d{2}',\n '%S': '\\d{2}'}\n\nclass Scraper(object):\n \"\"\"\n A Scraper to scrap web data archives based on dates.\n\n Parameters\n ----------\n pattern : string\n A string containing the url with the date encoded as\n datetime formats, and any other parameter as kwargs\n as string format.\n\n Attributes\n ----------\n pattern : string\n A converted string with the kwargs.\n now : datetime.datetime\n The pattern with the actual date.\n\n Examples\n --------\n >>> # Downloading data from SolarMonitor.org\n >>> from sunpy.util.scraper import Scraper\n >>> solmon_pattern = ('http://solarmonitor.org/data/'\n '%Y/%m/%d/fits/{instrument}/'\n '{instrument}_{wave:05d}_fd_%Y%m%d_%H%M%S.fts.gz')\n >>> solmon = Scraper(solmon_pattern, instrument = 'swap', wave = 174)\n >>> print(solmon.pattern)\n http://solarmonitor.org/data/%Y/%m/%d/fits/swap/swap_00174_fd_%Y%m%d_%H%M%S.fts.gz\n >>> print(solmon.now)\n http://solarmonitor.org/data/2012/01/25/fits/swap/swap_00174_fd_20120125_173301.fts.gz\n\n Notes\n -----\n The now attribute does not return an existent file, but just how the\n pattern looks with the actual time.\n \"\"\"\n def __init__(self, pattern, **kwargs):\n self.pattern = pattern.format(**kwargs)\n self.now = datetime.datetime.now().strftime(self.pattern)\n\n def matches(self, filepath, date):\n return date.strftime(self.pattern) == filepath\n\n def range(self, timerange):\n \"\"\"\n Gets the directories for a certain range of time\n (i.e. using `~sunpy.time.TimeRange`).\n\n Parameters\n ----------\n\n timerange : `~sunpy.time.timerange.TimeRange`\n Time interval where to find the directories for a given\n pattern.\n\n Returns\n -------\n\n directories : list of strings\n List of all the possible directories valid for the time\n range given. 
Notice that these directories may not exist\n in the archive.\n \"\"\"\n #find directory structure - without file names\n directorypattern = os.path.dirname(self.pattern) + '/'\n #TODO what if there's not slashes?\n rangedelta = timerange.dt\n timestep = self._smallerPattern(directorypattern)\n if timestep is None:\n return [directorypattern]\n else:\n # Number of elements in the time range (including end)\n n_steps = rangedelta.total_seconds()/timestep.total_seconds()\n TotalTimeElements = int(round(n_steps)) + 1\n directories = [(timerange.start + n * timestep).strftime(directorypattern)\n for n in range(TotalTimeElements)] #todo if date <= endate\n return directories\n\n def _URL_followsPattern(self, url):\n \"\"\"Check whether the url provided follows the pattern\"\"\"\n pattern = self.pattern\n for k,v in six.iteritems(TIME_CONVERSIONS):\n pattern = pattern.replace(k, v)\n matches = re.match(pattern, url)\n if matches:\n return matches.end() == matches.endpos == len(self.now)\n return False\n\n def _extractDateURL(self, url):\n \"\"\"Extracts the date from a particular url following the pattern\"\"\"\n # url_to_list substitutes '.' and '_' for '/' to then create\n # a list of all the blocks in times - assuming they are all\n # separated with either '.', '_' or '/'\n url_to_list = lambda txt: re.sub(r'\\.|_', '/', txt).split('/')\n pattern_list = url_to_list(self.pattern)\n url_list = url_to_list(url)\n\n time_order = ['%Y', '%y', '%b', '%B', '%m', '%d', '%j',\n '%H', '%I', '%M', '%S']\n final_date = []\n final_pattern = []\n # Find in directory and filename\n for pattern_elem, url_elem in zip(pattern_list, url_list):\n time_formats = [x for x in time_order if x in pattern_elem]\n if len(time_formats) > 0:\n final_date.append(url_elem)\n final_pattern.append(pattern_elem)\n for time_bit in time_formats:\n time_order.remove(time_bit)\n # Find and remove repeated elements eg: %Y in ['%Y', '%Y%m%d']\n # Make all as single strings\n date_together = ''.join(final_date)\n pattern_together = ''.join(final_pattern)\n re_together = pattern_together\n for k, v in six.iteritems(TIME_CONVERSIONS):\n re_together = re_together.replace(k, v)\n\n # Create new empty lists\n final_date = list()\n final_pattern = list()\n for p,r in zip(pattern_together.split('%')[1:], re_together.split('\\\\')[1:]):\n regexp = '\\\\{}'.format(r)\n pattern = '%{}'.format(p)\n date_part = re.match(regexp, date_together)\n date_together = date_together[:date_part.start()] + \\\n date_together[date_part.end():]\n if pattern not in final_pattern:\n final_pattern.append('%{}'.format(p))\n final_date.append(date_part.group())\n return datetime.datetime.strptime(' '.join(final_date),\n ' '.join(final_pattern))\n\n def filelist(self, timerange):\n \"\"\"\n Returns the list of existent files in the archive for the\n given time range.\n\n Parameters\n ----------\n\n timerange : `~sunpy.time.TimeRange`\n Time interval where to find the directories for a given\n pattern.\n\n Returns\n -------\n\n filesurls : list of strings\n List of all the files found between the time range given.\n\n Examples\n --------\n >>> from sunpy.time import TimeRange\n >>> timerange = TimeRange('2015-01-01','2015-01-01T16:00:00')\n >>> print(solmon.filelist(timerange))\n ['http://solarmonitor.org/data/2015/01/01/fits/swap/swap_00174_fd_20150101_025423.fts.gz']\n \"\"\"\n directories = self.range(timerange)\n filesurls = []\n for directory in directories:\n try:\n opn = urlopen(directory)\n try:\n soup = BeautifulSoup(opn)\n for link in 
soup.find_all(\"a\"):\n href = link.get(\"href\")\n if href.endswith(self.pattern.split('.')[-1]):\n fullpath = directory + href\n if self._URL_followsPattern(fullpath):\n datehref = self._extractDateURL(fullpath)\n if (datehref >= timerange.start and\n datehref <= timerange.end):\n filesurls.append(fullpath)\n finally:\n opn.close()\n except:\n pass\n return filesurls\n\n def _smallerPattern(self, directoryPattern):\n \"\"\"Obtain the smaller time step for the given pattern\"\"\"\n try:\n if \"%S\" in directoryPattern:\n return datetime.timedelta(seconds=1)\n elif \"%M\" in directoryPattern:\n return datetime.timedelta(minutes=1)\n elif any(hour in directoryPattern for hour in [\"%H\", \"%I\"]):\n return datetime.timedelta(hours=1)\n elif any(day in directoryPattern for day in [\"%d\", \"%j\"]):\n return datetime.timedelta(days=1)\n elif any(month in directoryPattern for month in [\"%b\",\"%B\",\"%m\"]):\n return datetime.timedelta(days=31)\n elif any(year in directoryPattern for year in [\"%Y\", \"%y\"]):\n return datetime.timedelta(days=365)\n else:\n return None\n except:\n raise\n\n", "path": "sunpy/util/scraper.py"}]}
| 3,189 | 805 |
gh_patches_debug_3298
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-887
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
utils.convert_tensor considers `device = 0` to be no device
## 🐛 Bug description
In utils.convert_tensor, this line appears:
`return tensor.to(device=device, non_blocking=non_blocking) if device else tensor`
This means that for `device = 0` (as returned by `torch.cuda.current_device()`) no conversion is applied, because `0` is falsy, which can be very confusing. I might add a PR for that tomorrow, unless people tell me there's a reason to leave that line as it is.
For reproduction:
```python
import torch
from ignite.utils import convert_tensor
mytens = torch.zeros(2)
device = torch.cuda.current_device()
converted_tens = convert_tensor(mytens, device)
assert converted_tens.device == device
```
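To make the truthiness pitfall explicit, here is a minimal self-contained sketch (the `move`/`move_fixed` helpers are hypothetical and only mirror the check inside `convert_tensor`; they are not ignite's API):
```python
import torch


def move(tensor, device=None, non_blocking=False):
    # Same shape as the current check: `0` is falsy, so device index 0 is silently skipped.
    return tensor.to(device=device, non_blocking=non_blocking) if device else tensor


def move_fixed(tensor, device=None, non_blocking=False):
    # Explicit None check, so the integer device 0 is honoured like any other device spec.
    return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor


if torch.cuda.is_available():
    t = torch.zeros(2)
    print(move(t, 0).device)        # cpu -- the transfer was silently skipped
    print(move_fixed(t, 0).device)  # cuda:0
```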
## Environment
- PyTorch Version (e.g., 1.4): 1.4
- Ignite Version (e.g., 0.3.0): 0.3
- OS (e.g., Linux): Windows 10
- How you installed Ignite (`conda`, `pip`, source): conda
- Python version: 3.7.6
</issue>
<code>
[start of ignite/utils.py]
1 import collections.abc as collections
2 import logging
3 from typing import Union, Optional, Callable, Any, Type, Tuple
4
5 import torch
6
7 __all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger"]
8
9
10 def convert_tensor(
11 input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],
12 device: Optional[Union[str, torch.device]] = None,
13 non_blocking: bool = False,
14 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
15 """Move tensors to relevant device."""
16
17 def _func(tensor: torch.Tensor) -> torch.Tensor:
18 return tensor.to(device=device, non_blocking=non_blocking) if device else tensor
19
20 return apply_to_tensor(input_, _func)
21
22
23 def apply_to_tensor(
24 input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable
25 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
26 """Apply a function on a tensor or mapping, or sequence of tensors.
27 """
28 return apply_to_type(input_, torch.Tensor, func)
29
30
31 def apply_to_type(
32 input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes],
33 input_type: Union[Type, Tuple[Type[Any], Any]],
34 func: Callable,
35 ) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:
36 """Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`.
37 """
38 if isinstance(input_, input_type):
39 return func(input_)
40 elif isinstance(input_, (str, bytes)):
41 return input_
42 elif isinstance(input_, collections.Mapping):
43 return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})
44 elif isinstance(input_, tuple) and hasattr(input_, "_fields"): # namedtuple
45 return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))
46 elif isinstance(input_, collections.Sequence):
47 return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])
48 else:
49 raise TypeError(("input must contain {}, dicts or lists; found {}".format(input_type, type(input_))))
50
51
52 def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:
53 """Convert a tensor of indices of any shape `(N, ...)` to a
54 tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the
55 input's device`.
56 """
57 onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device)
58 return onehot.scatter_(1, indices.unsqueeze(1), 1)
59
60
61 def setup_logger(
62 name: str,
63 level: int = logging.INFO,
64 format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
65 filepath: Optional[str] = None,
66 distributed_rank: int = 0,
67 ) -> logging.Logger:
68 """Setups logger: name, level, format etc.
69
70 Args:
71 name (str): new name for the logger.
72 level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG
73 format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`
74 filepath (str, optional): Optional logging file path. If not None, logs are written to the file.
75 distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.
76
77 Returns:
78 logging.Logger
79
80 For example, to improve logs readability when training with a trainer and evaluator:
81
82 .. code-block:: python
83
84 from ignite.utils import setup_logger
85
86 trainer = ...
87 evaluator = ...
88
89 trainer.logger = setup_logger("trainer")
90 evaluator.logger = setup_logger("evaluator")
91
92 trainer.run(data, max_epochs=10)
93
94 # Logs will look like
95 # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
96 # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
97 # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
98 # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
99 # ...
100
101 """
102 logger = logging.getLogger(name)
103
104 if distributed_rank > 0:
105 return logger
106
107 logger.setLevel(level)
108
109 # Remove previous handlers
110 if logger.hasHandlers():
111 for h in list(logger.handlers):
112 logger.removeHandler(h)
113
114 formatter = logging.Formatter(format)
115
116 ch = logging.StreamHandler()
117 ch.setLevel(level)
118 ch.setFormatter(formatter)
119 logger.addHandler(ch)
120
121 if filepath is not None:
122 fh = logging.FileHandler(filepath)
123 fh.setLevel(level)
124 fh.setFormatter(formatter)
125 logger.addHandler(fh)
126
127 return logger
128
[end of ignite/utils.py]
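# Illustrative usage sketch of the helpers above (not part of ignite/utils.py; it assumes
# torch and ignite are importable, and the values shown are arbitrary).
from ignite.utils import apply_to_tensor, to_onehot
import torch

labels = torch.tensor([0, 2, 1])
print(to_onehot(labels, num_classes=3))
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]], dtype=torch.uint8)

# apply_to_tensor recurses through mappings/sequences and applies the function to every tensor.
batch = {"x": torch.ones(2), "y": [torch.zeros(2), torch.zeros(2)]}
doubled = apply_to_tensor(batch, lambda t: t * 2)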
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/utils.py b/ignite/utils.py
--- a/ignite/utils.py
+++ b/ignite/utils.py
@@ -15,7 +15,7 @@
"""Move tensors to relevant device."""
def _func(tensor: torch.Tensor) -> torch.Tensor:
- return tensor.to(device=device, non_blocking=non_blocking) if device else tensor
+ return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor
return apply_to_tensor(input_, _func)
|
{"golden_diff": "diff --git a/ignite/utils.py b/ignite/utils.py\n--- a/ignite/utils.py\n+++ b/ignite/utils.py\n@@ -15,7 +15,7 @@\n \"\"\"Move tensors to relevant device.\"\"\"\n \n def _func(tensor: torch.Tensor) -> torch.Tensor:\n- return tensor.to(device=device, non_blocking=non_blocking) if device else tensor\n+ return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor\n \n return apply_to_tensor(input_, _func)\n", "issue": "utils.convert_tensor considers `device = 0` to be no device\n## \ud83d\udc1b Bug description\r\nIn utils.convert_tensor, this line appears:\r\n`return tensor.to(device=device, non_blocking=non_blocking) if device else tensor`\r\n\r\nThis means that for `device = 0` (as returned from `torch.cuda.current_device`) no conversion is applied, which can be very confusing. I might add a PR for that tomorrow, unless people tell me there's a reason to leave that line as it is.\r\n\r\nFor reproduction:\r\n```python\r\nimport torch\r\nfrom ignite.utils import convert_tensor\r\n\r\nmytens = torch.zeros(2)\r\ndevice = torch.cuda.current_device()\r\nconverted_tens = convert_tensor(mytens, device)\r\nassert converted_tens.device == device\r\n```\r\n\r\n## Environment\r\n\r\n - PyTorch Version (e.g., 1.4): 1.4\r\n - Ignite Version (e.g., 0.3.0): 0.3\r\n - OS (e.g., Linux): Windows 10\r\n - How you installed Ignite (`conda`, `pip`, source): conda\r\n - Python version: 3.7.6\r\n\r\n\n", "before_files": [{"content": "import collections.abc as collections\nimport logging\nfrom typing import Union, Optional, Callable, Any, Type, Tuple\n\nimport torch\n\n__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\"]\n\n\ndef convert_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Move tensors to relevant device.\"\"\"\n\n def _func(tensor: torch.Tensor) -> torch.Tensor:\n return tensor.to(device=device, non_blocking=non_blocking) if device else tensor\n\n return apply_to_tensor(input_, _func)\n\n\ndef apply_to_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\n \"\"\"\n return apply_to_type(input_, torch.Tensor, func)\n\n\ndef apply_to_type(\n input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes],\n input_type: Union[Type, Tuple[Type[Any], Any]],\n func: Callable,\n) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`.\n \"\"\"\n if isinstance(input_, input_type):\n return func(input_)\n elif isinstance(input_, (str, bytes)):\n return input_\n elif isinstance(input_, collections.Mapping):\n return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})\n elif isinstance(input_, tuple) and hasattr(input_, \"_fields\"): # namedtuple\n return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))\n elif isinstance(input_, collections.Sequence):\n return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])\n else:\n raise TypeError((\"input must contain {}, dicts or lists; 
found {}\".format(input_type, type(input_))))\n\n\ndef to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:\n \"\"\"Convert a tensor of indices of any shape `(N, ...)` to a\n tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the\n input's device`.\n \"\"\"\n onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device)\n return onehot.scatter_(1, indices.unsqueeze(1), 1)\n\n\ndef setup_logger(\n name: str,\n level: int = logging.INFO,\n format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n filepath: Optional[str] = None,\n distributed_rank: int = 0,\n) -> logging.Logger:\n \"\"\"Setups logger: name, level, format etc.\n\n Args:\n name (str): new name for the logger.\n level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG\n format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`\n filepath (str, optional): Optional logging file path. If not None, logs are written to the file.\n distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.\n\n Returns:\n logging.Logger\n\n For example, to improve logs readability when training with a trainer and evaluator:\n\n .. code-block:: python\n\n from ignite.utils import setup_logger\n\n trainer = ...\n evaluator = ...\n\n trainer.logger = setup_logger(\"trainer\")\n evaluator.logger = setup_logger(\"evaluator\")\n\n trainer.run(data, max_epochs=10)\n\n # Logs will look like\n # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.\n # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23\n # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.\n # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02\n # ...\n\n \"\"\"\n logger = logging.getLogger(name)\n\n if distributed_rank > 0:\n return logger\n\n logger.setLevel(level)\n\n # Remove previous handlers\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n formatter = logging.Formatter(format)\n\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if filepath is not None:\n fh = logging.FileHandler(filepath)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n", "path": "ignite/utils.py"}]}
| 2,235 | 113 |
gh_patches_debug_27528
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__pytorch-lightning-2356
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Trainer(precision=16) fails with optim.lr_scheduler.ReduceLROnPlateau
<!--
### Common bugs:
1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79).
2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq)
-->
## 🐛 Bug
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
Steps to reproduce the behavior:
1. Create a `pl.LightningModule` that returns your optimizer along with a `optim.lr_scheduler.ReduceLROnPlateau` scheduler from `configure_optimizers`
2. Create a `pl.Trainer` wit `precision=16`
3. Run your training (i.e., `trainer.fit(model)`)
4. See error
```console
Traceback (most recent call last):
File "main.py", line 65, in <module>
main()
File "main.py", line 61, in main
trainer.fit(model)
File "/workspace/pytorch-lightning/pytorch_lightning/trainer/trainer.py", line 889, in fit
self.dp_train(model)
File "/workspace/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py", line 223, in dp_train
self.reinit_scheduler_properties(optimizers, self.lr_schedulers)
File "/workspace/pytorch-lightning/pytorch_lightning/trainer/optimizers.py", line 122, in reinit_scheduler_properties
scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)
UnboundLocalError: local variable 'idx' referenced before assignment
```
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<!-- #### Code sample -->
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
<!-- ### Expected behavior -->
<!-- A clear and concise description of what you expected to happen. -->
<!-- ### Environment
Please copy and paste the output from our
[environment collection script](https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py)
(or fill out the checklist below manually).
You can get the script and run it with:
```
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py
# For security purposes, please check the contents of collect_env_details.py before running it.
python collect_env_details.py
```
- PyTorch Version (1.5):
- OS (Linux):
### Additional context
-->
<!-- Add any other context about the problem here. -->
The error occurs in `pytorch-lightning/pytorch_lightning/trainer/optimizers.py", line 122`.
```python
def reinit_scheduler_properties(self, optimizers: list, schedulers: list):
# Reinitialize optimizer.step properties added by schedulers
for scheduler in schedulers:
for optimizer in optimizers:
scheduler = scheduler['scheduler']
# check that we dont mix users optimizers and schedulers
if scheduler.optimizer == optimizer:
# Find the mro belonging to the base lr scheduler class
for i, mro in enumerate(scheduler.__class__.__mro__):
if mro == optim.lr_scheduler._LRScheduler:
idx = i
scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)
```
The `idx` local variable is unassigned because `optim.lr_scheduler.ReduceLROnPlateau` is not a subclass of `optim.lr_scheduler._LRScheduler`.
I could work around the error by adding a specific check for `optim.lr_scheduler.ReduceLROnPlateau` but I'm not sure if this is a good solution.
```python
def reinit_scheduler_properties(self, optimizers: list, schedulers: list):
# Reinitialize optimizer.step properties added by schedulers
for scheduler in schedulers:
for optimizer in optimizers:
scheduler = scheduler['scheduler']
# check that we dont mix users optimizers and schedulers
if scheduler.optimizer == optimizer:
# Find the mro belonging to the base lr scheduler class
for i, mro in enumerate(scheduler.__class__.__mro__):
if mro == optim.lr_scheduler._LRScheduler:
idx = i
elif mro == optim.lr_scheduler.ReduceLROnPlateau:
idx = i
scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)
```
### Related issue in PyTorch:
ReduceLROnPlateau parent class is not _LRScheduler #21981
https://github.com/pytorch/pytorch/issues/21981
</issue>
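For reference, a minimal check (assuming the PyTorch 1.5 era referenced above) shows why `idx` stays unbound: `_LRScheduler` does not appear anywhere in `ReduceLROnPlateau`'s MRO, so the comparison inside the loop never matches.

```python
from torch import optim

print(optim.lr_scheduler.ReduceLROnPlateau.__mro__)
# (<class 'torch.optim.lr_scheduler.ReduceLROnPlateau'>, <class 'object'>)
print(issubclass(optim.lr_scheduler.ReduceLROnPlateau, optim.lr_scheduler._LRScheduler))
# False, hence `mro == optim.lr_scheduler._LRScheduler` is never True and `idx` is never assigned
```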
<code>
[start of pytorch_lightning/trainer/optimizers.py]
1 from abc import ABC
2 from typing import List, Tuple
3
4 import torch
5 from torch import optim
6 from torch.optim.optimizer import Optimizer
7
8 from pytorch_lightning.core.lightning import LightningModule
9 from pytorch_lightning.utilities import rank_zero_warn
10
11
12 class TrainerOptimizersMixin(ABC):
13
14 def init_optimizers(
15 self,
16 model: LightningModule
17 ) -> Tuple[List, List, List]:
18 optim_conf = model.configure_optimizers()
19
20 if optim_conf is None:
21 rank_zero_warn('`LightningModule.configure_optimizers` returned `None`, '
22 'this fit will run with no optimizer', UserWarning)
23 optim_conf = _MockOptimizer()
24
25 # single output, single optimizer
26 if isinstance(optim_conf, Optimizer):
27 return [optim_conf], [], []
28
29 # two lists, optimizer + lr schedulers
30 elif isinstance(optim_conf, (list, tuple)) and len(optim_conf) == 2 \
31 and isinstance(optim_conf[0], list):
32 optimizers, lr_schedulers = optim_conf
33 lr_schedulers = self.configure_schedulers(lr_schedulers)
34 return optimizers, lr_schedulers, []
35
36 # single dictionary
37 elif isinstance(optim_conf, dict):
38 optimizer = optim_conf["optimizer"]
39 lr_scheduler = optim_conf.get("lr_scheduler", [])
40 if lr_scheduler:
41 lr_schedulers = self.configure_schedulers([lr_scheduler])
42 else:
43 lr_schedulers = []
44 return [optimizer], lr_schedulers, []
45
46 # multiple dictionaries
47 elif isinstance(optim_conf, (list, tuple)) and isinstance(optim_conf[0], dict):
48 optimizers = [opt_dict["optimizer"] for opt_dict in optim_conf]
49 # take only lr wif exists and ot they are defined - not None
50 lr_schedulers = [
51 opt_dict["lr_scheduler"] for opt_dict in optim_conf if opt_dict.get("lr_scheduler")
52 ]
53 # take only freq wif exists and ot they are defined - not None
54 optimizer_frequencies = [
55 opt_dict["frequency"] for opt_dict in optim_conf if opt_dict.get("frequency") is not None
56 ]
57
58 # clean scheduler list
59 if lr_schedulers:
60 lr_schedulers = self.configure_schedulers(lr_schedulers)
61 # assert that if frequencies are present, they are given for all optimizers
62 if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):
63 raise ValueError("A frequency must be given to each optimizer.")
64 return optimizers, lr_schedulers, optimizer_frequencies
65
66 # single list or tuple, multiple optimizer
67 elif isinstance(optim_conf, (list, tuple)):
68 return list(optim_conf), [], []
69
70 # unknown configuration
71 else:
72 raise ValueError(
73 'Unknown configuration for model optimizers.'
74 ' Output from `model.configure_optimizers()` should either be:'
75 ' * single output, single `torch.optim.Optimizer`'
76 ' * single output, list of `torch.optim.Optimizer`'
77 ' * single output, a dictionary with `optimizer` key (`torch.optim.Optimizer`)'
78 ' and an optional `lr_scheduler` key (`torch.optim.lr_scheduler`)'
79 ' * two outputs, first being a list of `torch.optim.Optimizer` second being'
80 ' a list of `torch.optim.lr_scheduler`'
81 ' * multiple outputs, dictionaries as described with an optional `frequency` key (int)')
82
83 def configure_schedulers(self, schedulers: list):
84 # Convert each scheduler into dict structure with relevant information
85 lr_schedulers = []
86 default_config = {'interval': 'epoch', # default every epoch
87 'frequency': 1, # default every epoch/batch
88 'reduce_on_plateau': False, # most often not ReduceLROnPlateau scheduler
89 'monitor': 'val_loss'} # default value to monitor for ReduceLROnPlateau
90 for scheduler in schedulers:
91 if isinstance(scheduler, dict):
92 if 'scheduler' not in scheduler:
93 raise ValueError('Lr scheduler should have key `scheduler`',
94 ' with item being a lr scheduler')
95 scheduler['reduce_on_plateau'] = isinstance(
96 scheduler['scheduler'], optim.lr_scheduler.ReduceLROnPlateau)
97
98 lr_schedulers.append({**default_config, **scheduler})
99
100 elif isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):
101 lr_schedulers.append({**default_config, 'scheduler': scheduler,
102 'reduce_on_plateau': True})
103
104 elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):
105 lr_schedulers.append({**default_config, 'scheduler': scheduler})
106 else:
107 raise ValueError(f'Input {scheduler} to lr schedulers '
108 'is a invalid input.')
109 return lr_schedulers
110
111 def reinit_scheduler_properties(self, optimizers: list, schedulers: list):
112 # Reinitialize optimizer.step properties added by schedulers
113 for scheduler in schedulers:
114 for optimizer in optimizers:
115 scheduler = scheduler['scheduler']
116 # check that we dont mix users optimizers and schedulers
117 if scheduler.optimizer == optimizer:
118 # Find the mro belonging to the base lr scheduler class
119 for i, mro in enumerate(scheduler.__class__.__mro__):
120 if mro == optim.lr_scheduler._LRScheduler:
121 idx = i
122 scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)
123
124
125 class _MockOptimizer(Optimizer):
126 """The `_MockOptimizer` will be used inplace of an optimizer in the event that `None`
127 is returned from `configure_optimizers`.
128 """
129
130 def __init__(self):
131 super().__init__([torch.zeros(1)], {})
132
133 def add_param_group(self, param_group):
134 pass # Do Nothing
135
136 def load_state_dict(self, state_dict):
137 pass # Do Nothing
138
139 def state_dict(self):
140 return {} # Return Empty
141
142 def step(self, closure=None):
143 if closure is not None:
144 closure()
145
146 def zero_grad(self):
147 pass # Do Nothing
148
149 def __repr__(self):
150 return 'No Optimizer'
151
[end of pytorch_lightning/trainer/optimizers.py]
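# Hypothetical usage sketch (not part of the file above): one `configure_optimizers` return
# shape accepted by the single-dictionary branch of `init_optimizers`; the nested scheduler
# dict is merged over `default_config` inside `configure_schedulers`. The module name and
# hyperparameters here are made up.
from torch import optim
from pytorch_lightning import LightningModule

class ExampleModule(LightningModule):
    def configure_optimizers(self):
        optimizer = optim.Adam(self.parameters(), lr=1e-3)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3)
        return {
            'optimizer': optimizer,
            'lr_scheduler': {'scheduler': scheduler, 'monitor': 'val_loss'},
        }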
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pytorch_lightning/trainer/optimizers.py b/pytorch_lightning/trainer/optimizers.py
--- a/pytorch_lightning/trainer/optimizers.py
+++ b/pytorch_lightning/trainer/optimizers.py
@@ -111,15 +111,25 @@
def reinit_scheduler_properties(self, optimizers: list, schedulers: list):
# Reinitialize optimizer.step properties added by schedulers
for scheduler in schedulers:
+ scheduler = scheduler['scheduler']
+
for optimizer in optimizers:
- scheduler = scheduler['scheduler']
# check that we dont mix users optimizers and schedulers
if scheduler.optimizer == optimizer:
# Find the mro belonging to the base lr scheduler class
for i, mro in enumerate(scheduler.__class__.__mro__):
- if mro == optim.lr_scheduler._LRScheduler:
+ if (
+ mro == optim.lr_scheduler._LRScheduler
+ or mro == optim.lr_scheduler.ReduceLROnPlateau
+ ):
idx = i
- scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)
+ state = scheduler.state_dict()
+ else:
+ state = None
+
+ scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)
+ if state is not None:
+ scheduler.load_state_dict(state)
class _MockOptimizer(Optimizer):
|
{"golden_diff": "diff --git a/pytorch_lightning/trainer/optimizers.py b/pytorch_lightning/trainer/optimizers.py\n--- a/pytorch_lightning/trainer/optimizers.py\n+++ b/pytorch_lightning/trainer/optimizers.py\n@@ -111,15 +111,25 @@\n def reinit_scheduler_properties(self, optimizers: list, schedulers: list):\n # Reinitialize optimizer.step properties added by schedulers\n for scheduler in schedulers:\n+ scheduler = scheduler['scheduler']\n+\n for optimizer in optimizers:\n- scheduler = scheduler['scheduler']\n # check that we dont mix users optimizers and schedulers\n if scheduler.optimizer == optimizer:\n # Find the mro belonging to the base lr scheduler class\n for i, mro in enumerate(scheduler.__class__.__mro__):\n- if mro == optim.lr_scheduler._LRScheduler:\n+ if (\n+ mro == optim.lr_scheduler._LRScheduler\n+ or mro == optim.lr_scheduler.ReduceLROnPlateau\n+ ):\n idx = i\n- scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)\n+ state = scheduler.state_dict()\n+ else:\n+ state = None\n+\n+ scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)\n+ if state is not None:\n+ scheduler.load_state_dict(state)\n \n \n class _MockOptimizer(Optimizer):\n", "issue": "Trainer(precision=16) fails with optim.lr_scheduler.ReduceLROnPlateau\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Create a `pl.LightningModule` that returns your optimizer along with a `optim.lr_scheduler.ReduceLROnPlateau` scheduler from `configure_optimizers`\r\n2. Create a `pl.Trainer` wit `precision=16`\r\n3. Run your training (i.e., `trainer.fit(model)`)\r\n4. See error\r\n\r\n```console\r\nTraceback (most recent call last): \r\n File \"main.py\", line 65, in <module> \r\n main() \r\n File \"main.py\", line 61, in main \r\n trainer.fit(model) \r\n File \"/workspace/pytorch-lightning/pytorch_lightning/trainer/trainer.py\", line 889, in fit \r\n self.dp_train(model) \r\n File \"/workspace/pytorch-lightning/pytorch_lightning/trainer/distrib_parts.py\", line 223, in dp_train \r\n self.reinit_scheduler_properties(optimizers, self.lr_schedulers) \r\n File \"/workspace/pytorch-lightning/pytorch_lightning/trainer/optimizers.py\", line 122, in reinit_scheduler_properties \r\n scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer) \r\nUnboundLocalError: local variable 'idx' referenced before assignment \r\n```\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\n<!-- #### Code sample -->\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n<!-- ### Expected behavior -->\r\n\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n\r\n<!-- ### Environment\r\n\r\nPlease copy and paste the output from our\r\n[environment collection script](https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py)\r\n(or fill out the checklist below manually).\r\n\r\nYou can get the script and run it with:\r\n```\r\nwget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py\r\n# For security purposes, please check the contents of collect_env_details.py before running it.\r\npython collect_env_details.py\r\n```\r\n - PyTorch Version (1.5):\r\n - OS (Linux):\r\n\r\n### Additional context\r\n-->\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\r\nThe error occurs in `pytorch-lightning/pytorch_lightning/trainer/optimizers.py\", line 122`.\r\n\r\n```python\r\ndef reinit_scheduler_properties(self, optimizers: list, schedulers: list):\r\n # Reinitialize optimizer.step properties added by schedulers\r\n for scheduler in schedulers:\r\n for optimizer in optimizers:\r\n scheduler = scheduler['scheduler']\r\n # check that we dont mix users optimizers and schedulers\r\n if scheduler.optimizer == optimizer:\r\n # Find the mro belonging to the base lr scheduler class\r\n for i, mro in enumerate(scheduler.__class__.__mro__):\r\n if mro == optim.lr_scheduler._LRScheduler:\r\n idx = i\r\n scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)\r\n```\r\nThe `idx` local variable is unassigned because `optim.lr_scheduler.ReduceLROnPlateau` is not a subclass of `optim.lr_scheduler._LRScheduler`.\r\n\r\nI could work around the error by adding a specific check for `optim.lr_scheduler.ReduceLROnPlateau` but I'm not sure if this is a good solution.\r\n\r\n```python\r\ndef reinit_scheduler_properties(self, optimizers: list, schedulers: list):\r\n # Reinitialize optimizer.step properties added by schedulers\r\n for scheduler in schedulers:\r\n for optimizer in optimizers:\r\n scheduler = scheduler['scheduler']\r\n # check that we dont mix users optimizers and schedulers\r\n if scheduler.optimizer == optimizer:\r\n # Find the mro belonging to the base lr scheduler class\r\n for i, mro in enumerate(scheduler.__class__.__mro__):\r\n if mro == optim.lr_scheduler._LRScheduler:\r\n idx = i\r\n elif mro == optim.lr_scheduler.ReduceLROnPlateau:\r\n idx = i\r\n scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)\r\n```\r\n\r\n### Related issue in PyTorch:\r\nReduceLROnPlateau parent class is not _LRScheduler #21981\r\nhttps://github.com/pytorch/pytorch/issues/21981\n", "before_files": [{"content": "from abc import ABC\nfrom typing import List, Tuple\n\nimport torch\nfrom torch import optim\nfrom torch.optim.optimizer import Optimizer\n\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.utilities import rank_zero_warn\n\n\nclass TrainerOptimizersMixin(ABC):\n\n def init_optimizers(\n self,\n model: LightningModule\n ) -> Tuple[List, List, List]:\n optim_conf = model.configure_optimizers()\n\n if optim_conf is None:\n rank_zero_warn('`LightningModule.configure_optimizers` returned `None`, '\n 'this fit will run with no optimizer', UserWarning)\n optim_conf = _MockOptimizer()\n\n # single output, single optimizer\n if isinstance(optim_conf, Optimizer):\n return [optim_conf], [], []\n\n # two lists, optimizer + lr schedulers\n elif isinstance(optim_conf, (list, tuple)) and len(optim_conf) == 2 \\\n and isinstance(optim_conf[0], list):\n optimizers, lr_schedulers = optim_conf\n lr_schedulers = 
self.configure_schedulers(lr_schedulers)\n return optimizers, lr_schedulers, []\n\n # single dictionary\n elif isinstance(optim_conf, dict):\n optimizer = optim_conf[\"optimizer\"]\n lr_scheduler = optim_conf.get(\"lr_scheduler\", [])\n if lr_scheduler:\n lr_schedulers = self.configure_schedulers([lr_scheduler])\n else:\n lr_schedulers = []\n return [optimizer], lr_schedulers, []\n\n # multiple dictionaries\n elif isinstance(optim_conf, (list, tuple)) and isinstance(optim_conf[0], dict):\n optimizers = [opt_dict[\"optimizer\"] for opt_dict in optim_conf]\n # take only lr wif exists and ot they are defined - not None\n lr_schedulers = [\n opt_dict[\"lr_scheduler\"] for opt_dict in optim_conf if opt_dict.get(\"lr_scheduler\")\n ]\n # take only freq wif exists and ot they are defined - not None\n optimizer_frequencies = [\n opt_dict[\"frequency\"] for opt_dict in optim_conf if opt_dict.get(\"frequency\") is not None\n ]\n\n # clean scheduler list\n if lr_schedulers:\n lr_schedulers = self.configure_schedulers(lr_schedulers)\n # assert that if frequencies are present, they are given for all optimizers\n if optimizer_frequencies and len(optimizer_frequencies) != len(optimizers):\n raise ValueError(\"A frequency must be given to each optimizer.\")\n return optimizers, lr_schedulers, optimizer_frequencies\n\n # single list or tuple, multiple optimizer\n elif isinstance(optim_conf, (list, tuple)):\n return list(optim_conf), [], []\n\n # unknown configuration\n else:\n raise ValueError(\n 'Unknown configuration for model optimizers.'\n ' Output from `model.configure_optimizers()` should either be:'\n ' * single output, single `torch.optim.Optimizer`'\n ' * single output, list of `torch.optim.Optimizer`'\n ' * single output, a dictionary with `optimizer` key (`torch.optim.Optimizer`)'\n ' and an optional `lr_scheduler` key (`torch.optim.lr_scheduler`)'\n ' * two outputs, first being a list of `torch.optim.Optimizer` second being'\n ' a list of `torch.optim.lr_scheduler`'\n ' * multiple outputs, dictionaries as described with an optional `frequency` key (int)')\n\n def configure_schedulers(self, schedulers: list):\n # Convert each scheduler into dict structure with relevant information\n lr_schedulers = []\n default_config = {'interval': 'epoch', # default every epoch\n 'frequency': 1, # default every epoch/batch\n 'reduce_on_plateau': False, # most often not ReduceLROnPlateau scheduler\n 'monitor': 'val_loss'} # default value to monitor for ReduceLROnPlateau\n for scheduler in schedulers:\n if isinstance(scheduler, dict):\n if 'scheduler' not in scheduler:\n raise ValueError('Lr scheduler should have key `scheduler`',\n ' with item being a lr scheduler')\n scheduler['reduce_on_plateau'] = isinstance(\n scheduler['scheduler'], optim.lr_scheduler.ReduceLROnPlateau)\n\n lr_schedulers.append({**default_config, **scheduler})\n\n elif isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):\n lr_schedulers.append({**default_config, 'scheduler': scheduler,\n 'reduce_on_plateau': True})\n\n elif isinstance(scheduler, optim.lr_scheduler._LRScheduler):\n lr_schedulers.append({**default_config, 'scheduler': scheduler})\n else:\n raise ValueError(f'Input {scheduler} to lr schedulers '\n 'is a invalid input.')\n return lr_schedulers\n\n def reinit_scheduler_properties(self, optimizers: list, schedulers: list):\n # Reinitialize optimizer.step properties added by schedulers\n for scheduler in schedulers:\n for optimizer in optimizers:\n scheduler = scheduler['scheduler']\n # check that we dont mix users 
optimizers and schedulers\n if scheduler.optimizer == optimizer:\n # Find the mro belonging to the base lr scheduler class\n for i, mro in enumerate(scheduler.__class__.__mro__):\n if mro == optim.lr_scheduler._LRScheduler:\n idx = i\n scheduler.__class__.__mro__[idx].__init__(scheduler, optimizer)\n\n\nclass _MockOptimizer(Optimizer):\n \"\"\"The `_MockOptimizer` will be used inplace of an optimizer in the event that `None`\n is returned from `configure_optimizers`.\n \"\"\"\n\n def __init__(self):\n super().__init__([torch.zeros(1)], {})\n\n def add_param_group(self, param_group):\n pass # Do Nothing\n\n def load_state_dict(self, state_dict):\n pass # Do Nothing\n\n def state_dict(self):\n return {} # Return Empty\n\n def step(self, closure=None):\n if closure is not None:\n closure()\n\n def zero_grad(self):\n pass # Do Nothing\n\n def __repr__(self):\n return 'No Optimizer'\n", "path": "pytorch_lightning/trainer/optimizers.py"}]}
| 3,324 | 317 |
gh_patches_debug_37113
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-472
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LS always starts in first folder of workspace
LSP always starts a language server in the first project of your workspace, regardless of which one you're working on. For example, with the following workspace:

When I open any Rust files in `bserver`, RLS is still started in `LSP`, since it appears first in the list. This causes RLS to throw a warning:

and effectively breaks all useful functionality of the LSP plugin--nothing works, because RLS is staring at the wrong directory.
I'm still digging as to why this is, but it looks like the issue is [an oversight with branching right here](https://github.com/tomv564/LSP/blob/master/plugin/core/workspace.py#L16). I'll submit a PR shortly.
</issue>
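One possible direction, sketched only for illustration (not necessarily the change the plugin ends up making): resolve the project path by walking up from the active view's file until one of the window's folders matches, rather than unconditionally returning the first folder.

```python
import os

def folder_for_view(file_path, folders):
    """Return the workspace folder that contains file_path, or None (illustrative helper)."""
    folders = [os.path.realpath(f) for f in folders]
    current = os.path.realpath(file_path)
    while current not in folders:
        parent = os.path.dirname(current)
        if parent == current:  # reached the filesystem root without finding a match
            return None
        current = parent
    return current
```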
<code>
[start of plugin/core/workspace.py]
1 import os
2 try:
3 from typing import List, Optional, Any
4 assert List and Optional and Any
5 except ImportError:
6 pass
7
8 from .logging import debug
9 # from .types import WindowLike
10
11
12 def get_project_path(window: 'Any') -> 'Optional[str]':
13 """
14 Returns the first project folder or the parent folder of the active view
15 """
16 if len(window.folders()):
17 folder_paths = window.folders()
18 return folder_paths[0]
19 else:
20 view = window.active_view()
21 if view:
22 filename = view.file_name()
23 if filename:
24 project_path = os.path.dirname(filename)
25 debug("Couldn't determine project directory since no folders are open!",
26 "Using", project_path, "as a fallback.")
27 return project_path
28 else:
29 debug("Couldn't determine project directory since no folders are open",
30 "and the current file isn't saved on the disk.")
31 return None
32 else:
33 debug("No view is active in current window")
34 return None # https://github.com/tomv564/LSP/issues/219
35
36
37 def get_common_parent(paths: 'List[str]') -> str:
38 """
39 Get the common parent directory of multiple paths.
40
41 Python 3.5+ includes os.path.commonpath which does this, however Sublime
42 currently embeds Python 3.3.
43 """
44 return os.path.commonprefix([path + '/' for path in paths]).rstrip('/')
45
46
47 def is_in_workspace(window: 'Any', file_path: str) -> bool:
48 workspace_path = get_project_path(window)
49 if workspace_path is None:
50 return False
51
52 common_dir = get_common_parent([workspace_path, file_path])
53 return workspace_path == common_dir
54
55
56 def enable_in_project(window, config_name: str) -> None:
57 project_data = window.project_data()
58 if isinstance(project_data, dict):
59 project_settings = project_data.setdefault('settings', dict())
60 project_lsp_settings = project_settings.setdefault('LSP', dict())
61 project_client_settings = project_lsp_settings.setdefault(config_name, dict())
62 project_client_settings['enabled'] = True
63 window.set_project_data(project_data)
64 else:
65 debug('non-dict returned in project_settings: ', project_data)
66
67
68 def disable_in_project(window, config_name: str) -> None:
69 project_data = window.project_data()
70 if isinstance(project_data, dict):
71 project_settings = project_data.setdefault('settings', dict())
72 project_lsp_settings = project_settings.setdefault('LSP', dict())
73 project_client_settings = project_lsp_settings.setdefault(config_name, dict())
74 project_client_settings['enabled'] = False
75 window.set_project_data(project_data)
76 else:
77 debug('non-dict returned in project_settings: ', project_data)
78
79
80 def get_project_config(window: 'Any') -> dict:
81 project_data = window.project_data() or dict()
82 if isinstance(project_data, dict):
83 project_settings = project_data.setdefault('settings', dict())
84 project_lsp_settings = project_settings.setdefault('LSP', dict())
85 return project_lsp_settings
86 else:
87 debug('non-dict returned in project_settings: ', project_data)
88 return dict()
89
[end of plugin/core/workspace.py]
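# Illustrative behaviour of the helpers above (not part of the file; paths are made up):
#
# >>> get_common_parent(['/work/LSP', '/work/LSP/plugin/core'])
# '/work/LSP'
# >>> get_common_parent(['/work/LSP', '/work/bserver/src/main.rs'])
# '/work'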
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/core/workspace.py b/plugin/core/workspace.py
--- a/plugin/core/workspace.py
+++ b/plugin/core/workspace.py
@@ -1,37 +1,69 @@
import os
try:
- from typing import List, Optional, Any
- assert List and Optional and Any
+ from typing import List, Optional, Any, Iterable
+ assert List and Optional and Any and Iterable
except ImportError:
pass
from .logging import debug
-# from .types import WindowLike
+from .types import ViewLike
+
+
+def get_filename_from_view(view: ViewLike) -> 'Optional[str]':
+ if not view:
+ debug("No view is active in current window")
+ return None # https://github.com/tomv564/LSP/issues/219
+ filename = view.file_name()
+ if not filename:
+ debug("Couldn't determine project directory since no folders are open",
+ "and the current file isn't saved on the disk.")
+ return filename
+
+
+def get_directory_name(view: ViewLike) -> 'Optional[str]':
+ filename = get_filename_from_view(view)
+ if filename:
+ project_path = os.path.dirname(filename)
+ return project_path
+ return None
+
+
+def find_path_among_multi_folders(folders: 'Iterable[str]',
+ view: ViewLike) -> 'Optional[str]':
+ filename = get_filename_from_view(view)
+ if not filename:
+ return None
+ folders = [os.path.realpath(f) for f in folders]
+ file = view.file_name()
+ if not file:
+ return None
+ file = os.path.realpath(file)
+ while file not in folders:
+ file = os.path.dirname(file)
+ if os.path.dirname(file) == file:
+ # We're at the root of the filesystem.
+ file = None
+ break
+ debug('project path is', file)
+ return file
def get_project_path(window: 'Any') -> 'Optional[str]':
"""
- Returns the first project folder or the parent folder of the active view
+ Returns the project folder or the parent folder of the active view
"""
- if len(window.folders()):
+ if not window:
+ return None
+ num_folders = len(window.folders())
+ if num_folders == 0:
+ return get_directory_name(window.active_view())
+ elif num_folders == 1:
folder_paths = window.folders()
return folder_paths[0]
- else:
- view = window.active_view()
- if view:
- filename = view.file_name()
- if filename:
- project_path = os.path.dirname(filename)
- debug("Couldn't determine project directory since no folders are open!",
- "Using", project_path, "as a fallback.")
- return project_path
- else:
- debug("Couldn't determine project directory since no folders are open",
- "and the current file isn't saved on the disk.")
- return None
- else:
- debug("No view is active in current window")
- return None # https://github.com/tomv564/LSP/issues/219
+ else: # num_folders > 1
+ return find_path_among_multi_folders(
+ window.folders(),
+ window.active_view())
def get_common_parent(paths: 'List[str]') -> str:
|
{"golden_diff": "diff --git a/plugin/core/workspace.py b/plugin/core/workspace.py\n--- a/plugin/core/workspace.py\n+++ b/plugin/core/workspace.py\n@@ -1,37 +1,69 @@\n import os\n try:\n- from typing import List, Optional, Any\n- assert List and Optional and Any\n+ from typing import List, Optional, Any, Iterable\n+ assert List and Optional and Any and Iterable\n except ImportError:\n pass\n \n from .logging import debug\n-# from .types import WindowLike\n+from .types import ViewLike\n+\n+\n+def get_filename_from_view(view: ViewLike) -> 'Optional[str]':\n+ if not view:\n+ debug(\"No view is active in current window\")\n+ return None # https://github.com/tomv564/LSP/issues/219\n+ filename = view.file_name()\n+ if not filename:\n+ debug(\"Couldn't determine project directory since no folders are open\",\n+ \"and the current file isn't saved on the disk.\")\n+ return filename\n+\n+\n+def get_directory_name(view: ViewLike) -> 'Optional[str]':\n+ filename = get_filename_from_view(view)\n+ if filename:\n+ project_path = os.path.dirname(filename)\n+ return project_path\n+ return None\n+\n+\n+def find_path_among_multi_folders(folders: 'Iterable[str]',\n+ view: ViewLike) -> 'Optional[str]':\n+ filename = get_filename_from_view(view)\n+ if not filename:\n+ return None\n+ folders = [os.path.realpath(f) for f in folders]\n+ file = view.file_name()\n+ if not file:\n+ return None\n+ file = os.path.realpath(file)\n+ while file not in folders:\n+ file = os.path.dirname(file)\n+ if os.path.dirname(file) == file:\n+ # We're at the root of the filesystem.\n+ file = None\n+ break\n+ debug('project path is', file)\n+ return file\n \n \n def get_project_path(window: 'Any') -> 'Optional[str]':\n \"\"\"\n- Returns the first project folder or the parent folder of the active view\n+ Returns the project folder or the parent folder of the active view\n \"\"\"\n- if len(window.folders()):\n+ if not window:\n+ return None\n+ num_folders = len(window.folders())\n+ if num_folders == 0:\n+ return get_directory_name(window.active_view())\n+ elif num_folders == 1:\n folder_paths = window.folders()\n return folder_paths[0]\n- else:\n- view = window.active_view()\n- if view:\n- filename = view.file_name()\n- if filename:\n- project_path = os.path.dirname(filename)\n- debug(\"Couldn't determine project directory since no folders are open!\",\n- \"Using\", project_path, \"as a fallback.\")\n- return project_path\n- else:\n- debug(\"Couldn't determine project directory since no folders are open\",\n- \"and the current file isn't saved on the disk.\")\n- return None\n- else:\n- debug(\"No view is active in current window\")\n- return None # https://github.com/tomv564/LSP/issues/219\n+ else: # num_folders > 1\n+ return find_path_among_multi_folders(\n+ window.folders(),\n+ window.active_view())\n \n \n def get_common_parent(paths: 'List[str]') -> str:\n", "issue": "LS always starts in first folder of workspace\nLSP always starts a language server in the first project of your workspace, regardless of which one you're working on. For example, with the following workspace:\r\n\r\n\r\n\r\nWhen I open any Rust files in `bserver`, RLS is still started in `LSP`, since it appears first in the list. 
This causes RLS to throw a warning:\r\n\r\n\r\n\r\nand effectively breaks all useful functionality of the LSP plugin--nothing works, because RLS is staring at the wrong directory.\r\n\r\nI'm still digging as to why this is, but it looks like the issue is [an oversight with branching right here](https://github.com/tomv564/LSP/blob/master/plugin/core/workspace.py#L16). I'll submit a PR shortly.\n", "before_files": [{"content": "import os\ntry:\n from typing import List, Optional, Any\n assert List and Optional and Any\nexcept ImportError:\n pass\n\nfrom .logging import debug\n# from .types import WindowLike\n\n\ndef get_project_path(window: 'Any') -> 'Optional[str]':\n \"\"\"\n Returns the first project folder or the parent folder of the active view\n \"\"\"\n if len(window.folders()):\n folder_paths = window.folders()\n return folder_paths[0]\n else:\n view = window.active_view()\n if view:\n filename = view.file_name()\n if filename:\n project_path = os.path.dirname(filename)\n debug(\"Couldn't determine project directory since no folders are open!\",\n \"Using\", project_path, \"as a fallback.\")\n return project_path\n else:\n debug(\"Couldn't determine project directory since no folders are open\",\n \"and the current file isn't saved on the disk.\")\n return None\n else:\n debug(\"No view is active in current window\")\n return None # https://github.com/tomv564/LSP/issues/219\n\n\ndef get_common_parent(paths: 'List[str]') -> str:\n \"\"\"\n Get the common parent directory of multiple paths.\n\n Python 3.5+ includes os.path.commonpath which does this, however Sublime\n currently embeds Python 3.3.\n \"\"\"\n return os.path.commonprefix([path + '/' for path in paths]).rstrip('/')\n\n\ndef is_in_workspace(window: 'Any', file_path: str) -> bool:\n workspace_path = get_project_path(window)\n if workspace_path is None:\n return False\n\n common_dir = get_common_parent([workspace_path, file_path])\n return workspace_path == common_dir\n\n\ndef enable_in_project(window, config_name: str) -> None:\n project_data = window.project_data()\n if isinstance(project_data, dict):\n project_settings = project_data.setdefault('settings', dict())\n project_lsp_settings = project_settings.setdefault('LSP', dict())\n project_client_settings = project_lsp_settings.setdefault(config_name, dict())\n project_client_settings['enabled'] = True\n window.set_project_data(project_data)\n else:\n debug('non-dict returned in project_settings: ', project_data)\n\n\ndef disable_in_project(window, config_name: str) -> None:\n project_data = window.project_data()\n if isinstance(project_data, dict):\n project_settings = project_data.setdefault('settings', dict())\n project_lsp_settings = project_settings.setdefault('LSP', dict())\n project_client_settings = project_lsp_settings.setdefault(config_name, dict())\n project_client_settings['enabled'] = False\n window.set_project_data(project_data)\n else:\n debug('non-dict returned in project_settings: ', project_data)\n\n\ndef get_project_config(window: 'Any') -> dict:\n project_data = window.project_data() or dict()\n if isinstance(project_data, dict):\n project_settings = project_data.setdefault('settings', dict())\n project_lsp_settings = project_settings.setdefault('LSP', dict())\n return project_lsp_settings\n else:\n debug('non-dict returned in project_settings: ', project_data)\n return dict()\n", "path": "plugin/core/workspace.py"}]}
| 1,705 | 764 |
gh_patches_debug_27524
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1202
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
iminuit v1.5.0 breaks optimization tests
# Description
With the release of [`iminuit` `v1.5.0`](https://github.com/scikit-hep/iminuit/releases/tag/v1.5.0) on 2020-09-17 the nightly tests are failing in `test_optim.py`. Specifically
https://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/tests/test_optim.py#L47
is failing with errors of
```pytb
try:
assert result.success
except AssertionError:
log.error(result)
> raise exceptions.FailedMinimization(result)
E pyhf.exceptions.FailedMinimization: Optimization failed. Estimated distance to minimum too large.
src/pyhf/optimize/mixins.py:52: FailedMinimization
------------------------------ Captured log call -------------------------------
ERROR pyhf.optimize.mixins:mixins.py:51 fun: 15.5887451171875
hess_inv: array([[1., 1.],
[1., 1.]])
message: 'Optimization failed. Estimated distance to minimum too large.'
minuit: <iminuit._libiminuit.Minuit object at 0x5619c82f90a0>
nfev: 110
njev: 0
success: False
unc: None
x: array([0.97325551, 0.91712703])
```
where the `pyhf.exceptions.FailedMinimization` that gets raised comes from the `raise exceptions.FailedMinimization(result)` in
https://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/src/pyhf/optimize/mixins.py#L31-L53
which are of course coming from
https://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/src/pyhf/optimize/opt_minuit.py#L122-L132
in
https://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/src/pyhf/optimize/opt_minuit.py#L69
# Steps to Reproduce
Run the tests using current master.

To show that this is definitely an issue with `iminuit` `v1.5.0+`
```
$ python -m pip install --upgrade "iminuit<1.5.0"
$ pip list | grep iminuit
iminuit 1.4.9
$ python -m pytest -sx tests/test_optim.py
```
passes but
```
$ python -m pip install --upgrade iminuit
$ pip list | grep iminuit
iminuit 1.5.1
$ python -m pytest -sx tests/test_optim.py
```
fails.
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
</issue>
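As a quick, purely illustrative sanity check, confirming which iminuit the test environment actually imports separates the passing and failing setups described above:

```python
import iminuit

# Per the report above, the optimizer tests pass with 1.4.x and fail once 1.5.0+ is installed.
print(iminuit.__version__)
```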
<code>
[start of src/pyhf/optimize/opt_minuit.py]
1 """Minuit Optimizer Class."""
2 from .. import default_backend, exceptions
3 from .mixins import OptimizerMixin
4 import scipy
5 import iminuit
6
7
8 class minuit_optimizer(OptimizerMixin):
9 """
10 Optimizer that uses iminuit.Minuit.migrad.
11 """
12
13 __slots__ = ['name', 'errordef', 'steps', 'strategy', 'tolerance']
14
15 def __init__(self, *args, **kwargs):
16 """
17 Create MINUIT Optimizer.
18
19 .. note::
20
21 ``errordef`` should be 1.0 for a least-squares cost function and 0.5
22 for negative log-likelihood function. See page 37 of
23 http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes
24 called ``UP`` in the ``MINUIT`` docs.
25
26
27 Args:
28 errordef (:obj:`float`): See minuit docs. Default is 1.0.
29 steps (:obj:`int`): Number of steps for the bounds. Default is 1000.
30 strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.
31 tolerance (:obj:`float`): tolerance for termination. See specific optimizer for detailed meaning. Default is 0.1.
32 """
33 self.name = 'minuit'
34 self.errordef = kwargs.pop('errordef', 1)
35 self.steps = kwargs.pop('steps', 1000)
36 self.strategy = kwargs.pop('strategy', None)
37 self.tolerance = kwargs.pop('tolerance', 0.1)
38 super().__init__(*args, **kwargs)
39
40 def _get_minimizer(
41 self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False
42 ):
43
44 step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]
45 fixed_vals = fixed_vals or []
46 # Minuit wants True/False for each parameter
47 fixed_bools = [False] * len(init_pars)
48 for index, val in fixed_vals:
49 fixed_bools[index] = True
50 init_pars[index] = val
51 step_sizes[index] = 0.0
52
53 # Minuit requires jac=callable
54 if do_grad:
55 wrapped_objective = lambda pars: objective_and_grad(pars)[0] # noqa: E731
56 jac = lambda pars: objective_and_grad(pars)[1] # noqa: E731
57 else:
58 wrapped_objective = objective_and_grad
59 jac = None
60
61 kwargs = dict(
62 fcn=wrapped_objective,
63 grad=jac,
64 start=init_pars,
65 error=step_sizes,
66 limit=init_bounds,
67 fix=fixed_bools,
68 print_level=self.verbose,
69 errordef=self.errordef,
70 )
71 return iminuit.Minuit.from_array_func(**kwargs)
72
73 def _minimize(
74 self,
75 minimizer,
76 func,
77 x0,
78 do_grad=False,
79 bounds=None,
80 fixed_vals=None,
81 return_uncertainties=False,
82 options={},
83 ):
84
85 """
86 Same signature as :func:`scipy.optimize.minimize`.
87
88 Note: an additional `minuit` is injected into the fitresult to get the
89 underlying minimizer.
90
91 Minimizer Options:
92 maxiter (:obj:`int`): maximum number of iterations. Default is 100000.
93 return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.
94 strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.
95
96 Returns:
97 fitresult (scipy.optimize.OptimizeResult): the fit result
98 """
99 maxiter = options.pop('maxiter', self.maxiter)
100 return_uncertainties = options.pop('return_uncertainties', False)
101 # 0: Fast, user-provided gradient
102 # 1: Default, no user-provided gradient
103 strategy = options.pop(
104 'strategy', self.strategy if self.strategy else not do_grad
105 )
106 tolerance = options.pop('tolerance', self.tolerance)
107 if options:
108 raise exceptions.Unsupported(
109 f"Unsupported options were passed in: {list(options.keys())}."
110 )
111
112 minimizer.strategy = strategy
113 minimizer.tol = tolerance
114 minimizer.migrad(ncall=maxiter)
115 # Following lines below come from:
116 # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
117 message = "Optimization terminated successfully."
118 if not minimizer.valid:
119 message = "Optimization failed."
120 fmin = minimizer.fmin
121 if fmin.has_reached_call_limit:
122 message += " Call limit was reached."
123 if fmin.is_above_max_edm:
124 message += " Estimated distance to minimum too large."
125
126 n = len(x0)
127 hess_inv = default_backend.ones((n, n))
128 if minimizer.valid:
129 # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.
130 minimizer.hesse()
131 hess_inv = minimizer.np_covariance()
132
133 unc = None
134 if return_uncertainties:
135 unc = minimizer.np_errors()
136
137 return scipy.optimize.OptimizeResult(
138 x=minimizer.np_values(),
139 unc=unc,
140 success=minimizer.valid,
141 fun=minimizer.fval,
142 hess_inv=hess_inv,
143 message=message,
144 nfev=minimizer.ncalls,
145 njev=minimizer.ngrads,
146 minuit=minimizer,
147 )
148
[end of src/pyhf/optimize/opt_minuit.py]
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
11 'xmlio': ['uproot3~=3.14'], # Future proof against uproot4 API changes
12 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted({'matplotlib', 'requests'})
23 extras_require['lint'] = sorted({'flake8', 'black'})
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'pytest~=6.0',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'jupyter',
43 'graphviz',
44 'jsonpatch',
45 ]
46 )
47 )
48 extras_require['docs'] = sorted(
49 {
50 'sphinx>=3.1.2',
51 'sphinxcontrib-bibtex',
52 'sphinx-click',
53 'sphinx_rtd_theme',
54 'nbsphinx',
55 'ipywidgets',
56 'sphinx-issues',
57 'sphinx-copybutton>0.2.9',
58 }
59 )
60 extras_require['develop'] = sorted(
61 set(
62 extras_require['docs']
63 + extras_require['lint']
64 + extras_require['test']
65 + [
66 'nbdime',
67 'bump2version',
68 'ipython',
69 'pre-commit',
70 'check-manifest',
71 'codemetapy>=0.3.4',
72 'twine',
73 ]
74 )
75 )
76 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
77
78
79 setup(
80 extras_require=extras_require,
81 use_scm_version=lambda: {'local_scheme': lambda version: ''},
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
'torch': ['torch~=1.2'],
'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],
'xmlio': ['uproot3~=3.14'], # Future proof against uproot4 API changes
- 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch
+ 'minuit': ['iminuit~=1.5.3'],
}
extras_require['backends'] = sorted(
set(
diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -113,7 +113,7 @@
minimizer.tol = tolerance
minimizer.migrad(ncall=maxiter)
# Following lines below come from:
- # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125
+ # https://github.com/scikit-hep/iminuit/blob/64acac11cfa2fb91ccbd02d1b3c51f8a9e2cc484/src/iminuit/_minimize.py#L102-L121
message = "Optimization terminated successfully."
if not minimizer.valid:
message = "Optimization failed."
@@ -141,7 +141,7 @@
fun=minimizer.fval,
hess_inv=hess_inv,
message=message,
- nfev=minimizer.ncalls,
- njev=minimizer.ngrads,
+ nfev=minimizer.ncalls_total,
+ njev=minimizer.ngrads_total,
minuit=minimizer,
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': ['uproot3~=3.14'], # Future proof against uproot4 API changes\n- 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n+ 'minuit': ['iminuit~=1.5.3'],\n }\n extras_require['backends'] = sorted(\n set(\ndiff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py\n--- a/src/pyhf/optimize/opt_minuit.py\n+++ b/src/pyhf/optimize/opt_minuit.py\n@@ -113,7 +113,7 @@\n minimizer.tol = tolerance\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n- # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n+ # https://github.com/scikit-hep/iminuit/blob/64acac11cfa2fb91ccbd02d1b3c51f8a9e2cc484/src/iminuit/_minimize.py#L102-L121\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n@@ -141,7 +141,7 @@\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n- nfev=minimizer.ncalls,\n- njev=minimizer.ngrads,\n+ nfev=minimizer.ncalls_total,\n+ njev=minimizer.ngrads_total,\n minuit=minimizer,\n )\n", "issue": "iminuit v1.5.0 breaks optimization tests\n# Description\r\n\r\nWith the release of [`iminuit` `v1.5.0`](https://github.com/scikit-hep/iminuit/releases/tag/v1.5.0) on 2020-09-17 the nightly tests are failing in `test_optim.py`. Specifically\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/tests/test_optim.py#L47\r\n\r\nis failing with errors of \r\n\r\n```pytb\r\n try:\r\n assert result.success\r\n except AssertionError:\r\n log.error(result)\r\n> raise exceptions.FailedMinimization(result)\r\nE pyhf.exceptions.FailedMinimization: Optimization failed. Estimated distance to minimum too large.\r\n\r\nsrc/pyhf/optimize/mixins.py:52: FailedMinimization\r\n------------------------------ Captured log call -------------------------------\r\nERROR pyhf.optimize.mixins:mixins.py:51 fun: 15.5887451171875\r\n hess_inv: array([[1., 1.],\r\n [1., 1.]])\r\n message: 'Optimization failed. 
Estimated distance to minimum too large.'\r\n minuit: <iminuit._libiminuit.Minuit object at 0x5619c82f90a0>\r\n nfev: 110\r\n njev: 0\r\n success: False\r\n unc: None\r\n x: array([0.97325551, 0.91712703])\r\n```\r\n\r\nwhere the `pyhf.exceptions.FailedMinimization` being raised comes from the `raise exceptions.FailedMinimization(result)` in\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/src/pyhf/optimize/mixins.py#L31-L53\r\n\r\nwhich are of course coming from\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/src/pyhf/optimize/opt_minuit.py#L122-L132\r\n\r\nin\r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/8a6ee36da4f566d8a37df01e20201098aa1f8a54/src/pyhf/optimize/opt_minuit.py#L69\r\n\r\n# Steps to Reproduce\r\n\r\nRun the tests using current master.\r\n\r\n\r\n\r\nTo show that this is definitley an issue with `iminuit` `v1.5.0+`\r\n\r\n```\r\n$ python -m pip install --upgrade \"iminuit<1.5.0\"\r\n$ pip list | grep iminuit\r\niminuit 1.4.9\r\n$ python -m pytest -sx tests/test_optim.py\r\n```\r\n\r\npasses but\r\n\r\n```\r\n$ python -m pip install --upgrade iminuit\r\n$ pip list | grep iminuit\r\niminuit 1.5.1\r\n$ python -m pytest -sx tests/test_optim.py\r\n```\r\n\r\nfails.\r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom .. import default_backend, exceptions\nfrom .mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that uses iminuit.Minuit.migrad.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps', 'strategy', 'tolerance']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create MINUIT Optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.5\n for negative log-likelihood function. See page 37 of\n http://hep.fi.infn.it/minuit.pdf. This parameter is sometimes\n called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is 1.0.\n steps (:obj:`int`): Number of steps for the bounds. Default is 1000.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is None.\n tolerance (:obj:`float`): tolerance for termination. See specific optimizer for detailed meaning. 
Default is 0.1.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n self.strategy = kwargs.pop('strategy', None)\n self.tolerance = kwargs.pop('tolerance', 0.1)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self, objective_and_grad, init_pars, init_bounds, fixed_vals=None, do_grad=False\n ):\n\n step_sizes = [(b[1] - b[0]) / float(self.steps) for b in init_bounds]\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n step_sizes[index] = 0.0\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0] # noqa: E731\n jac = lambda pars: objective_and_grad(pars)[1] # noqa: E731\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n kwargs = dict(\n fcn=wrapped_objective,\n grad=jac,\n start=init_pars,\n error=step_sizes,\n limit=init_bounds,\n fix=fixed_bools,\n print_level=self.verbose,\n errordef=self.errordef,\n )\n return iminuit.Minuit.from_array_func(**kwargs)\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n return_uncertainties=False,\n options={},\n ):\n\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n maxiter (:obj:`int`): maximum number of iterations. Default is 100000.\n return_uncertainties (:obj:`bool`): Return uncertainties on the fitted parameters. Default is off.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is to configure in response to `do_grad`.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n return_uncertainties = options.pop('return_uncertainties', False)\n # 0: Fast, user-provided gradient\n # 1: Default, no user-provided gradient\n strategy = options.pop(\n 'strategy', self.strategy if self.strategy else not do_grad\n )\n tolerance = options.pop('tolerance', self.tolerance)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options.keys())}.\"\n )\n\n minimizer.strategy = strategy\n minimizer.tol = tolerance\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/22f6ed7146c1d1f3274309656d8c04461dde5ba3/src/iminuit/_minimize.py#L106-L125\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n n = len(x0)\n hess_inv = default_backend.ones((n, n))\n if minimizer.valid:\n # Extra call to hesse() after migrad() is always needed for good error estimates. 
If you pass a user-provided gradient to MINUIT, convergence is faster.\n minimizer.hesse()\n hess_inv = minimizer.np_covariance()\n\n unc = None\n if return_uncertainties:\n unc = minimizer.np_errors()\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.np_values(),\n unc=unc,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.ncalls,\n njev=minimizer.ngrads,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}, {"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.2.4', 'jaxlib~=0.1.56'],\n 'xmlio': ['uproot3~=3.14'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['lint'] = sorted({'flake8', 'black'})\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n {\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n }\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + [\n 'nbdime',\n 'bump2version',\n 'ipython',\n 'pre-commit',\n 'check-manifest',\n 'codemetapy>=0.3.4',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 3,838 | 493 |
gh_patches_debug_13955
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-3337
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The fetch vat rates button should not be a GET method
### What I'm trying to achieve
Not to allow GET methods to fetch vat rates.
### Steps to reproduce the problem
1. Go to configuration -> Taxes ;
2. The fetch tax rates button, is a GET button.
### What I expected to happen
Get a POST instead of a GET, which is safer against attacks.
### Describe a proposed solution
Drop the button link on the dashboard for a submit button or a modal.
</issue>
<code>
[start of saleor/dashboard/taxes/views.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib import messages
5 from django.contrib.auth.decorators import permission_required
6 from django.core.exceptions import ImproperlyConfigured
7 from django.core.management import call_command
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.response import TemplateResponse
10 from django.utils.translation import pgettext_lazy
11 from django_countries.fields import Country
12 from django_prices_vatlayer.models import VAT
13
14 from ...core import TaxRateType
15 from ...core.utils import get_paginator_items
16 from ...core.utils.taxes import get_taxes_for_country
17 from ...dashboard.taxes.filters import TaxFilter
18 from ...dashboard.taxes.forms import TaxesConfigurationForm
19 from ...dashboard.views import staff_member_required
20
21 logger = logging.getLogger(__name__)
22
23
24 @staff_member_required
25 def tax_list(request):
26 taxes = VAT.objects.order_by('country_code')
27 tax_filter = TaxFilter(request.GET, queryset=taxes)
28 taxes = get_paginator_items(
29 tax_filter.qs, settings.DASHBOARD_PAGINATE_BY, request.GET.get('page'))
30 ctx = {
31 'taxes': taxes, 'filter_set': tax_filter,
32 'is_empty': not tax_filter.queryset.exists()}
33 return TemplateResponse(request, 'dashboard/taxes/list.html', ctx)
34
35
36 @staff_member_required
37 def tax_details(request, country_code):
38 tax = get_object_or_404(VAT, country_code=country_code)
39 tax_rates = get_taxes_for_country(Country(country_code))
40 translations = dict(TaxRateType.CHOICES)
41 tax_rates = [
42 (translations.get(rate_name, rate_name), tax['value'])
43 for rate_name, tax in tax_rates.items()]
44 ctx = {'tax': tax, 'tax_rates': sorted(tax_rates)}
45 return TemplateResponse(request, 'dashboard/taxes/details.html', ctx)
46
47
48 @staff_member_required
49 @permission_required('site.manage_settings')
50 def configure_taxes(request):
51 site_settings = request.site.settings
52 taxes_form = TaxesConfigurationForm(
53 request.POST or None, instance=site_settings)
54 if taxes_form.is_valid():
55 taxes_form.save()
56 msg = pgettext_lazy('Dashboard message', 'Updated taxes settings')
57 messages.success(request, msg)
58 return redirect('dashboard:taxes')
59 ctx = {'site': site_settings, 'taxes_form': taxes_form}
60 return TemplateResponse(request, 'dashboard/taxes/form.html', ctx)
61
62
63 @staff_member_required
64 @permission_required('site.manage_settings')
65 def fetch_tax_rates(request):
66 try:
67 call_command('get_vat_rates')
68 msg = pgettext_lazy(
69 'Dashboard message', 'Tax rates updated successfully')
70 messages.success(request, msg)
71 except ImproperlyConfigured as exc:
72 logger.exception(exc)
73 msg = pgettext_lazy(
74 'Dashboard message',
75 'Could not fetch tax rates. '
76 'Make sure you have supplied a valid API Access Key.<br/>'
77 'Check the server logs for more information about this error.')
78 messages.warning(request, msg)
79 return redirect('dashboard:taxes')
80
[end of saleor/dashboard/taxes/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/dashboard/taxes/views.py b/saleor/dashboard/taxes/views.py
--- a/saleor/dashboard/taxes/views.py
+++ b/saleor/dashboard/taxes/views.py
@@ -8,6 +8,7 @@
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
+from django.views.decorators.http import require_POST
from django_countries.fields import Country
from django_prices_vatlayer.models import VAT
@@ -61,6 +62,7 @@
@staff_member_required
+@require_POST
@permission_required('site.manage_settings')
def fetch_tax_rates(request):
try:
|
{"golden_diff": "diff --git a/saleor/dashboard/taxes/views.py b/saleor/dashboard/taxes/views.py\n--- a/saleor/dashboard/taxes/views.py\n+++ b/saleor/dashboard/taxes/views.py\n@@ -8,6 +8,7 @@\n from django.shortcuts import get_object_or_404, redirect\n from django.template.response import TemplateResponse\n from django.utils.translation import pgettext_lazy\n+from django.views.decorators.http import require_POST\n from django_countries.fields import Country\n from django_prices_vatlayer.models import VAT\n \n@@ -61,6 +62,7 @@\n \n \n @staff_member_required\n+@require_POST\n @permission_required('site.manage_settings')\n def fetch_tax_rates(request):\n try:\n", "issue": "The fetch vat rates button should not be a GET method\n### What I'm trying to achieve\r\nNot to allow GET methods to fetch vat rates.\r\n\r\n### Steps to reproduce the problem\r\n1. Go to configuration -> Taxes ;\r\n2. The fetch tax rates button, is a GET button.\r\n\r\n### What I expected to happen\r\nGet a POST instead of a GET, which is safer against attacks.\r\n\r\n### Describe a proposed solution\r\nDrop the button link on the dashboard for a submit button or a modal.\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.management import call_command\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import pgettext_lazy\nfrom django_countries.fields import Country\nfrom django_prices_vatlayer.models import VAT\n\nfrom ...core import TaxRateType\nfrom ...core.utils import get_paginator_items\nfrom ...core.utils.taxes import get_taxes_for_country\nfrom ...dashboard.taxes.filters import TaxFilter\nfrom ...dashboard.taxes.forms import TaxesConfigurationForm\nfrom ...dashboard.views import staff_member_required\n\nlogger = logging.getLogger(__name__)\n\n\n@staff_member_required\ndef tax_list(request):\n taxes = VAT.objects.order_by('country_code')\n tax_filter = TaxFilter(request.GET, queryset=taxes)\n taxes = get_paginator_items(\n tax_filter.qs, settings.DASHBOARD_PAGINATE_BY, request.GET.get('page'))\n ctx = {\n 'taxes': taxes, 'filter_set': tax_filter,\n 'is_empty': not tax_filter.queryset.exists()}\n return TemplateResponse(request, 'dashboard/taxes/list.html', ctx)\n\n\n@staff_member_required\ndef tax_details(request, country_code):\n tax = get_object_or_404(VAT, country_code=country_code)\n tax_rates = get_taxes_for_country(Country(country_code))\n translations = dict(TaxRateType.CHOICES)\n tax_rates = [\n (translations.get(rate_name, rate_name), tax['value'])\n for rate_name, tax in tax_rates.items()]\n ctx = {'tax': tax, 'tax_rates': sorted(tax_rates)}\n return TemplateResponse(request, 'dashboard/taxes/details.html', ctx)\n\n\n@staff_member_required\n@permission_required('site.manage_settings')\ndef configure_taxes(request):\n site_settings = request.site.settings\n taxes_form = TaxesConfigurationForm(\n request.POST or None, instance=site_settings)\n if taxes_form.is_valid():\n taxes_form.save()\n msg = pgettext_lazy('Dashboard message', 'Updated taxes settings')\n messages.success(request, msg)\n return redirect('dashboard:taxes')\n ctx = {'site': site_settings, 'taxes_form': taxes_form}\n return TemplateResponse(request, 'dashboard/taxes/form.html', 
ctx)\n\n\n@staff_member_required\n@permission_required('site.manage_settings')\ndef fetch_tax_rates(request):\n try:\n call_command('get_vat_rates')\n msg = pgettext_lazy(\n 'Dashboard message', 'Tax rates updated successfully')\n messages.success(request, msg)\n except ImproperlyConfigured as exc:\n logger.exception(exc)\n msg = pgettext_lazy(\n 'Dashboard message',\n 'Could not fetch tax rates. '\n 'Make sure you have supplied a valid API Access Key.<br/>'\n 'Check the server logs for more information about this error.')\n messages.warning(request, msg)\n return redirect('dashboard:taxes')\n", "path": "saleor/dashboard/taxes/views.py"}]}
| 1,449 | 152 |
gh_patches_debug_8446
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-18583
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extractor for yourporn.sexy is broken
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)
- Use the *Preview* tab to see what your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.12.03*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.12.03**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
```
$ youtube-dl -v https://yourporn.sexy/post/5bf56573616c2.html
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: [u'-v', u'https://yourporn.sexy/post/5bf56573616c2.html']
[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8
[debug] youtube-dl version 2018.12.03
[debug] Python version 2.7.10 (CPython) - Darwin-17.7.0-x86_64-i386-64bit
[debug] exe versions: ffmpeg 4.0.2, ffprobe 4.0.2
[debug] Proxy map: {}
[YourPorn] 5bf56573616c2: Downloading webpage
[debug] Default format spec: bestvideo+bestaudio/best
[debug] Invoking downloader on u'https://yourporn.sexy/cdn/c11/ldjJi9usRy26gVwhgzEn9w/1544086469/hk5sajembx0dd41hcp09ah8m3s2/25qb3fr5d605l7m316y1969c42k.mp4'
ERROR: Did not get any data blocks
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 162, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/Users/v-delta/.local/bin/youtube-dl/__main__.py", line 19, in <module>
youtube_dl.main()
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/__init__.py", line 472, in main
_real_main(argv)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/__init__.py", line 462, in _real_main
retcode = ydl.download(all_urls)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 2001, in download
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 803, in extract_info
return self.process_ie_result(ie_result, download, extra_info)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 857, in process_ie_result
return self.process_video_result(ie_result, download=download)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1635, in process_video_result
self.process_info(new_info)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1908, in process_info
success = dl(filename, info_dict)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 1847, in dl
return fd.download(name, info)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/common.py", line 364, in download
return self.real_download(filename, info_dict)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/http.py", line 342, in real_download
return download()
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/http.py", line 312, in download
self.report_error('Did not get any data blocks')
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/common.py", line 165, in report_error
self.ydl.report_error(*args, **kargs)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 620, in report_error
self.trouble(error_message, tb)
File "/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 582, in trouble
tb_data = traceback.format_list(traceback.extract_stack())
```
### Description of your *issue*, suggested solution and other information
The videos play fine in any browser, that is because somewehre the URL the extractor delivers is changed from
```
https://yourporn.sexy/cdn/c11/tlRIwnitpU4dxFtCUK1OMQ/1544087142/fx5xahe3b40kda1sc709a98q342/e51bafd56655a7m356z1s6tcv2i.mp4
```
to
```
https://yourporn.sexy/cdn2/c11/tlRIwnitpU4dxFtCUK1OMQ/1544087142/fx5xahe3b40kda1sc709a98q342/e51bafd56655a7m356z1s6tcv2i.mp4
```
A `2` is inserted after `/cdn`. I will create a pull request fixing this bug soon.
</issue>
<code>
[start of youtube_dl/extractor/yourporn.py]
1 from __future__ import unicode_literals
2
3 from .common import InfoExtractor
4 from ..utils import urljoin
5
6
7 class YourPornIE(InfoExtractor):
8 _VALID_URL = r'https?://(?:www\.)?yourporn\.sexy/post/(?P<id>[^/?#&.]+)'
9 _TEST = {
10 'url': 'https://yourporn.sexy/post/57ffcb2e1179b.html',
11 'md5': '6f8682b6464033d87acaa7a8ff0c092e',
12 'info_dict': {
13 'id': '57ffcb2e1179b',
14 'ext': 'mp4',
15 'title': 'md5:c9f43630bd968267672651ba905a7d35',
16 'thumbnail': r're:^https?://.*\.jpg$',
17 },
18 }
19
20 def _real_extract(self, url):
21 video_id = self._match_id(url)
22
23 webpage = self._download_webpage(url, video_id)
24
25 video_url = urljoin(url, self._parse_json(
26 self._search_regex(
27 r'data-vnfo=(["\'])(?P<data>{.+?})\1', webpage, 'data info',
28 group='data'),
29 video_id)[video_id]).replace('/cdn/', '/cdn2/')
30
31 title = (self._search_regex(
32 r'<[^>]+\bclass=["\']PostEditTA[^>]+>([^<]+)', webpage, 'title',
33 default=None) or self._og_search_description(webpage)).strip()
34 thumbnail = self._og_search_thumbnail(webpage)
35
36 return {
37 'id': video_id,
38 'url': video_url,
39 'title': title,
40 'thumbnail': thumbnail,
41 }
42
[end of youtube_dl/extractor/yourporn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/youtube_dl/extractor/yourporn.py b/youtube_dl/extractor/yourporn.py
--- a/youtube_dl/extractor/yourporn.py
+++ b/youtube_dl/extractor/yourporn.py
@@ -26,7 +26,7 @@
self._search_regex(
r'data-vnfo=(["\'])(?P<data>{.+?})\1', webpage, 'data info',
group='data'),
- video_id)[video_id]).replace('/cdn/', '/cdn2/')
+ video_id)[video_id]).replace('/cdn/', '/cdn3/')
title = (self._search_regex(
r'<[^>]+\bclass=["\']PostEditTA[^>]+>([^<]+)', webpage, 'title',
|
{"golden_diff": "diff --git a/youtube_dl/extractor/yourporn.py b/youtube_dl/extractor/yourporn.py\n--- a/youtube_dl/extractor/yourporn.py\n+++ b/youtube_dl/extractor/yourporn.py\n@@ -26,7 +26,7 @@\n self._search_regex(\n r'data-vnfo=([\"\\'])(?P<data>{.+?})\\1', webpage, 'data info',\n group='data'),\n- video_id)[video_id]).replace('/cdn/', '/cdn2/')\n+ video_id)[video_id]).replace('/cdn/', '/cdn3/')\n \n title = (self._search_regex(\n r'<[^>]+\\bclass=[\"\\']PostEditTA[^>]+>([^<]+)', webpage, 'title',\n", "issue": "Extractor for yourporn.sexy is broken\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like this: `[x]`)\r\n- Use the *Preview* tab to see what your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2018.12.03*. If it's not, read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2018.12.03**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through the [README](https://github.com/rg3/youtube-dl/blob/master/README.md), **most notably** the [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n- [x] Checked that provided video/audio/playlist URLs (if any) are alive and playable in a browser\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n```\r\n$ youtube-dl -v https://yourporn.sexy/post/5bf56573616c2.html\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: [u'-v', u'https://yourporn.sexy/post/5bf56573616c2.html']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out UTF-8, pref UTF-8\r\n[debug] youtube-dl version 2018.12.03\r\n[debug] Python version 2.7.10 (CPython) - Darwin-17.7.0-x86_64-i386-64bit\r\n[debug] exe versions: ffmpeg 4.0.2, ffprobe 4.0.2\r\n[debug] Proxy map: {}\r\n[YourPorn] 5bf56573616c2: Downloading webpage\r\n[debug] Default format spec: bestvideo+bestaudio/best\r\n[debug] Invoking downloader on u'https://yourporn.sexy/cdn/c11/ldjJi9usRy26gVwhgzEn9w/1544086469/hk5sajembx0dd41hcp09ah8m3s2/25qb3fr5d605l7m316y1969c42k.mp4'\r\n\r\n\r\nERROR: Did not get any data blocks\r\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 162, in _run_module_as_main\r\n \"__main__\", fname, loader, pkg_name)\r\n File \"/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/runpy.py\", line 72, in _run_code\r\n exec code in run_globals\r\n File \"/Users/v-delta/.local/bin/youtube-dl/__main__.py\", line 19, in <module>\r\n youtube_dl.main()\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/__init__.py\", line 472, in main\r\n _real_main(argv)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/__init__.py\", line 462, in _real_main\r\n retcode 
= ydl.download(all_urls)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 2001, in download\r\n url, force_generic_extractor=self.params.get('force_generic_extractor', False))\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 803, in extract_info\r\n return self.process_ie_result(ie_result, download, extra_info)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 857, in process_ie_result\r\n return self.process_video_result(ie_result, download=download)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1635, in process_video_result\r\n self.process_info(new_info)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1908, in process_info\r\n success = dl(filename, info_dict)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 1847, in dl\r\n return fd.download(name, info)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/common.py\", line 364, in download\r\n return self.real_download(filename, info_dict)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/http.py\", line 342, in real_download\r\n return download()\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/http.py\", line 312, in download\r\n self.report_error('Did not get any data blocks')\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/downloader/common.py\", line 165, in report_error\r\n self.ydl.report_error(*args, **kargs)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 620, in report_error\r\n self.trouble(error_message, tb)\r\n File \"/Users/v-delta/.local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 582, in trouble\r\n tb_data = traceback.format_list(traceback.extract_stack())\r\n```\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nThe videos play fine in any browser, that is because somewehre the URL the extractor delivers is changed from\r\n\r\n```\r\nhttps://yourporn.sexy/cdn/c11/tlRIwnitpU4dxFtCUK1OMQ/1544087142/fx5xahe3b40kda1sc709a98q342/e51bafd56655a7m356z1s6tcv2i.mp4\r\n```\r\n\r\nto\r\n\r\n```\r\nhttps://yourporn.sexy/cdn2/c11/tlRIwnitpU4dxFtCUK1OMQ/1544087142/fx5xahe3b40kda1sc709a98q342/e51bafd56655a7m356z1s6tcv2i.mp4\r\n```\r\n\r\nA `2` is inserted after `/cdn`. 
I will create a pull request fixing this bug soon.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import urljoin\n\n\nclass YourPornIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?yourporn\\.sexy/post/(?P<id>[^/?#&.]+)'\n _TEST = {\n 'url': 'https://yourporn.sexy/post/57ffcb2e1179b.html',\n 'md5': '6f8682b6464033d87acaa7a8ff0c092e',\n 'info_dict': {\n 'id': '57ffcb2e1179b',\n 'ext': 'mp4',\n 'title': 'md5:c9f43630bd968267672651ba905a7d35',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(url, video_id)\n\n video_url = urljoin(url, self._parse_json(\n self._search_regex(\n r'data-vnfo=([\"\\'])(?P<data>{.+?})\\1', webpage, 'data info',\n group='data'),\n video_id)[video_id]).replace('/cdn/', '/cdn2/')\n\n title = (self._search_regex(\n r'<[^>]+\\bclass=[\"\\']PostEditTA[^>]+>([^<]+)', webpage, 'title',\n default=None) or self._og_search_description(webpage)).strip()\n thumbnail = self._og_search_thumbnail(webpage)\n\n return {\n 'id': video_id,\n 'url': video_url,\n 'title': title,\n 'thumbnail': thumbnail,\n }\n", "path": "youtube_dl/extractor/yourporn.py"}]}
| 2,760 | 172 |
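
Each row above pairs a `prompt` (the issue statement plus the partial code base and patch-format instructions) with a reference patch in `golden_diff` and a `verification_info` payload that repeats the diff together with the pre-patch file contents (`before_files`), followed by the two token-count columns. The snippet below is a minimal illustrative sketch of loading and inspecting such rows with the `datasets` library; the Hub id (`rasdani/github-patches`, taken from each row's `source` field) and the split name are assumptions rather than values stated in the dump itself.

```python
# Illustrative sketch only -- not part of the dataset dump above.
# Assumes the dataset is published on the Hugging Face Hub under the id shown
# in the `source` column ("rasdani/github-patches") and exposes a "train"
# split; adjust both if the actual repository differs.
import json

from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:300])       # issue statement plus the partial code base
print(row["golden_diff"][:300])  # reference patch in unified diff format

# `verification_info` duplicates the golden diff, the issue text, and the
# original file contents ("before_files"); parse it if it is stored as a
# JSON-encoded string rather than a nested dict.
info = row["verification_info"]
if isinstance(info, str):
    info = json.loads(info)
print(sorted(info.keys()))
```

Bundling the reference diff and the pre-patch file contents inside `verification_info` presumably lets a generated patch be checked against the golden one without cloning the upstream repositories.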