problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-18.9k | golden_diff stringlengths 145-5.13k | verification_info stringlengths 465-23.6k | num_tokens_prompt int64 556-4.1k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_5568
|
rasdani/github-patches
|
git_diff
|
pyodide__pyodide-689
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo?
Wondering what "wv" means or if this should read "more complex things."
https://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79
</issue>
<code>
[start of pyodide_build/mkpkg.py]
1 #!/usr/bin/env python3
2
3 import argparse
4 import json
5 import os
6 import shutil
7 import urllib.request
8 from pathlib import Path
9
10 PACKAGES_ROOT = Path(__file__).parent.parent / 'packages'
11
12 SDIST_EXTENSIONS = []
13
14
15 def get_sdist_extensions():
16 if SDIST_EXTENSIONS:
17 return SDIST_EXTENSIONS
18
19 for format in shutil.get_unpack_formats():
20 for ext in format[1]:
21 SDIST_EXTENSIONS.append(ext)
22
23 return SDIST_EXTENSIONS
24
25
26 def get_sdist_url_entry(json_content):
27 sdist_extensions_tuple = tuple(get_sdist_extensions())
28
29 for entry in json_content['urls']:
30 if entry['filename'].endswith(sdist_extensions_tuple):
31 return entry
32
33 raise Exception('No sdist URL found for package %s (%s)' % (
34 json_content['info'].get('name'),
35 json_content['info'].get('package_url'),
36 ))
37
38
39 def make_package(package, version=None):
40 import yaml
41
42 version = ('/' + version) if version is not None else ''
43 url = f"https://pypi.org/pypi/{package}{version}/json"
44
45 with urllib.request.urlopen(url) as fd:
46 json_content = json.load(fd)
47
48 entry = get_sdist_url_entry(json_content)
49 download_url = entry['url']
50 sha256 = entry['digests']['sha256']
51 version = json_content['info']['version']
52
53 yaml_content = {
54 'package': {
55 'name': package,
56 'version': version
57 },
58 'source': {
59 'url': download_url,
60 'sha256': sha256
61 },
62 'test': {
63 'imports': [
64 package
65 ]
66 }
67 }
68
69 if not (PACKAGES_ROOT / package).is_dir():
70 os.makedirs(PACKAGES_ROOT / package)
71 with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:
72 yaml.dump(yaml_content, fd, default_flow_style=False)
73
74
75 def make_parser(parser):
76 parser.description = '''
77 Make a new pyodide package. Creates a simple template that will work
78 for most pure Python packages, but will have to be edited for more wv
79 complex things.'''.strip()
80 parser.add_argument(
81 'package', type=str, nargs=1,
82 help="The package name on PyPI")
83 parser.add_argument(
84 '--version', type=str, default=None,
85 help="Package version string, "
86 "e.g. v1.2.1 (defaults to latest stable release)")
87 return parser
88
89
90 def main(args):
91 package = args.package[0]
92 make_package(package, args.version)
93
94
95 if __name__ == '__main__':
96 parser = make_parser(argparse.ArgumentParser())
97 args = parser.parse_args()
98 main(args)
99
[end of pyodide_build/mkpkg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py
--- a/pyodide_build/mkpkg.py
+++ b/pyodide_build/mkpkg.py
@@ -75,7 +75,7 @@
def make_parser(parser):
parser.description = '''
Make a new pyodide package. Creates a simple template that will work
-for most pure Python packages, but will have to be edited for more wv
+for most pure Python packages, but will have to be edited for more
complex things.'''.strip()
parser.add_argument(
'package', type=str, nargs=1,
|
{"golden_diff": "diff --git a/pyodide_build/mkpkg.py b/pyodide_build/mkpkg.py\n--- a/pyodide_build/mkpkg.py\n+++ b/pyodide_build/mkpkg.py\n@@ -75,7 +75,7 @@\n def make_parser(parser):\n parser.description = '''\n Make a new pyodide package. Creates a simple template that will work\n-for most pure Python packages, but will have to be edited for more wv\n+for most pure Python packages, but will have to be edited for more\n complex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n", "issue": "Typo?\nWondering what \"wv\" means or if this should read \"more complex things.\"\r\n\r\nhttps://github.com/iodide-project/pyodide/blob/163ab43b64180223d010cdcdcdecd17307cc5a45/pyodide_build/mkpkg.py#L77-L79\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport os\nimport shutil\nimport urllib.request\nfrom pathlib import Path\n\nPACKAGES_ROOT = Path(__file__).parent.parent / 'packages'\n\nSDIST_EXTENSIONS = []\n\n\ndef get_sdist_extensions():\n if SDIST_EXTENSIONS:\n return SDIST_EXTENSIONS\n\n for format in shutil.get_unpack_formats():\n for ext in format[1]:\n SDIST_EXTENSIONS.append(ext)\n\n return SDIST_EXTENSIONS\n\n\ndef get_sdist_url_entry(json_content):\n sdist_extensions_tuple = tuple(get_sdist_extensions())\n\n for entry in json_content['urls']:\n if entry['filename'].endswith(sdist_extensions_tuple):\n return entry\n\n raise Exception('No sdist URL found for package %s (%s)' % (\n json_content['info'].get('name'),\n json_content['info'].get('package_url'),\n ))\n\n\ndef make_package(package, version=None):\n import yaml\n\n version = ('/' + version) if version is not None else ''\n url = f\"https://pypi.org/pypi/{package}{version}/json\"\n\n with urllib.request.urlopen(url) as fd:\n json_content = json.load(fd)\n\n entry = get_sdist_url_entry(json_content)\n download_url = entry['url']\n sha256 = entry['digests']['sha256']\n version = json_content['info']['version']\n\n yaml_content = {\n 'package': {\n 'name': package,\n 'version': version\n },\n 'source': {\n 'url': download_url,\n 'sha256': sha256\n },\n 'test': {\n 'imports': [\n package\n ]\n }\n }\n\n if not (PACKAGES_ROOT / package).is_dir():\n os.makedirs(PACKAGES_ROOT / package)\n with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:\n yaml.dump(yaml_content, fd, default_flow_style=False)\n\n\ndef make_parser(parser):\n parser.description = '''\nMake a new pyodide package. Creates a simple template that will work\nfor most pure Python packages, but will have to be edited for more wv\ncomplex things.'''.strip()\n parser.add_argument(\n 'package', type=str, nargs=1,\n help=\"The package name on PyPI\")\n parser.add_argument(\n '--version', type=str, default=None,\n help=\"Package version string, \"\n \"e.g. v1.2.1 (defaults to latest stable release)\")\n return parser\n\n\ndef main(args):\n package = args.package[0]\n make_package(package, args.version)\n\n\nif __name__ == '__main__':\n parser = make_parser(argparse.ArgumentParser())\n args = parser.parse_args()\n main(args)\n", "path": "pyodide_build/mkpkg.py"}]}
| 1,424 | 137 |
gh_patches_debug_30846
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-1877
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider tmobile_us is broken
During the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))
</issue>
<code>
[start of locations/spiders/tmobile_us.py]
1 # -*- coding: utf-8 -*-
2 import json
3 from urllib.parse import urlencode
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10 DAY_MAPPING = {'Monday': 'Mo',
11 'Tuesday': 'Tu',
12 'Wednesday': 'We',
13 'Thursday': 'Th',
14 'Friday': 'Fr',
15 'Saturday': 'Sa',
16 'Sunday': 'Su'}
17
18 BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
19
20
21 class TMobileUSSpider(scrapy.Spider):
22 name = "tmobile_us"
23 item_attributes = { 'brand': "T-Mobile" }
24 allowed_domains = ["www.t-mobile.com"]
25 download_delay = 0.2
26
27 def parse_hours(self, store_hours):
28 opening_hours = OpeningHours()
29 if store_hours is None:
30 return
31
32 for store_day in store_hours:
33 day = DAY_MAPPING[store_day.get("day")]
34 open_time = store_day.get("opens")
35 close_time = store_day.get("closes")
36 if open_time is None and close_time is None:
37 continue
38 opening_hours.add_range(day=day,
39 open_time=open_time,
40 close_time=close_time,
41 time_format='%H:%M'
42 )
43
44 return opening_hours.as_opening_hours()
45
46 def start_requests(self):
47 url = BASE_URL
48
49 with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:
50
51 next(points) # Ignore the header
52 for point in points:
53 _, lat, lon = point.strip().split(',')
54
55 params = {
56 'latitude': '{}'.format(lat),
57 'longitude': '{}'.format(lon),
58 'count': '1000',
59 'radius': '25',
60 'ignoreLoadingBar': 'false'
61 }
62
63 yield scrapy.http.Request(url + urlencode(params), callback=self.parse)
64
65 def parse(self, response):
66 data = json.loads(response.body_as_unicode())
67
68 for store in data:
69 properties = {
70 'name': store["name"],
71 'ref': store["id"],
72 'addr_full': store["location"]["address"]["streetAddress"],
73 'city': store["location"]["address"]["addressLocality"],
74 'state': store["location"]["address"]["addressRegion"],
75 'postcode': store["location"]["address"]["postalCode"],
76 'phone': store.get("telephone"),
77 'website': store.get("url") or response.url,
78 'lat': float(store["location"]["latitude"]),
79 'lon': float(store["location"]["longitude"]),
80 }
81
82 hours = self.parse_hours(store.get("hours", []))
83 if hours:
84 properties["opening_hours"] = hours
85
86 yield GeojsonPointItem(**properties)
87
[end of locations/spiders/tmobile_us.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py
--- a/locations/spiders/tmobile_us.py
+++ b/locations/spiders/tmobile_us.py
@@ -15,12 +15,12 @@
'Saturday': 'Sa',
'Sunday': 'Su'}
-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'
+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'
class TMobileUSSpider(scrapy.Spider):
name = "tmobile_us"
- item_attributes = { 'brand': "T-Mobile" }
+ item_attributes = {'brand': "T-Mobile"}
allowed_domains = ["www.t-mobile.com"]
download_delay = 0.2
@@ -67,7 +67,7 @@
for store in data:
properties = {
- 'name': store["name"],
+ 'name': store.get("name"),
'ref': store["id"],
'addr_full': store["location"]["address"]["streetAddress"],
'city': store["location"]["address"]["addressLocality"],
@@ -75,8 +75,8 @@
'postcode': store["location"]["address"]["postalCode"],
'phone': store.get("telephone"),
'website': store.get("url") or response.url,
- 'lat': float(store["location"]["latitude"]),
- 'lon': float(store["location"]["longitude"]),
+ 'lat': store["location"]["latitude"],
+ 'lon': store["location"]["longitude"],
}
hours = self.parse_hours(store.get("hours", []))
|
{"golden_diff": "diff --git a/locations/spiders/tmobile_us.py b/locations/spiders/tmobile_us.py\n--- a/locations/spiders/tmobile_us.py\n+++ b/locations/spiders/tmobile_us.py\n@@ -15,12 +15,12 @@\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n \n-BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n+BASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/v2.1/getStoresByCoordinates?'\n \n \n class TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n- item_attributes = { 'brand': \"T-Mobile\" }\n+ item_attributes = {'brand': \"T-Mobile\"}\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n \n@@ -67,7 +67,7 @@\n \n for store in data:\n properties = {\n- 'name': store[\"name\"],\n+ 'name': store.get(\"name\"),\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n@@ -75,8 +75,8 @@\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n- 'lat': float(store[\"location\"][\"latitude\"]),\n- 'lon': float(store[\"location\"][\"longitude\"]),\n+ 'lat': store[\"location\"][\"latitude\"],\n+ 'lon': store[\"location\"][\"longitude\"],\n }\n \n hours = self.parse_hours(store.get(\"hours\", []))\n", "issue": "Spider tmobile_us is broken\nDuring the global build at 2021-05-26-14-42-23, spider **tmobile_us** failed with **7356 features** and **2 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/tmobile_us.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/tmobile_us.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nDAY_MAPPING = {'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\nBASE_URL = 'https://onmyj41p3c.execute-api.us-west-2.amazonaws.com/prod/getStoresByCoordinates?'\n\n\nclass TMobileUSSpider(scrapy.Spider):\n name = \"tmobile_us\"\n item_attributes = { 'brand': \"T-Mobile\" }\n allowed_domains = [\"www.t-mobile.com\"]\n download_delay = 0.2\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n if store_hours is None:\n return\n\n for store_day in store_hours:\n day = DAY_MAPPING[store_day.get(\"day\")]\n open_time = store_day.get(\"opens\")\n close_time = store_day.get(\"closes\")\n if open_time is None and close_time is None:\n continue\n opening_hours.add_range(day=day,\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M'\n )\n\n return opening_hours.as_opening_hours()\n\n def start_requests(self):\n url = BASE_URL\n\n with open('./locations/searchable_points/us_centroids_25mile_radius.csv') as points:\n\n next(points) # Ignore the header\n for point in points:\n _, lat, lon = point.strip().split(',')\n\n params = {\n 'latitude': '{}'.format(lat),\n 'longitude': '{}'.format(lon),\n 'count': '1000',\n 'radius': '25',\n 'ignoreLoadingBar': 'false'\n }\n\n yield scrapy.http.Request(url + urlencode(params), callback=self.parse)\n\n def parse(self, response):\n data = json.loads(response.body_as_unicode())\n\n for store in data:\n 
properties = {\n 'name': store[\"name\"],\n 'ref': store[\"id\"],\n 'addr_full': store[\"location\"][\"address\"][\"streetAddress\"],\n 'city': store[\"location\"][\"address\"][\"addressLocality\"],\n 'state': store[\"location\"][\"address\"][\"addressRegion\"],\n 'postcode': store[\"location\"][\"address\"][\"postalCode\"],\n 'phone': store.get(\"telephone\"),\n 'website': store.get(\"url\") or response.url,\n 'lat': float(store[\"location\"][\"latitude\"]),\n 'lon': float(store[\"location\"][\"longitude\"]),\n }\n\n hours = self.parse_hours(store.get(\"hours\", []))\n if hours:\n properties[\"opening_hours\"] = hours\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/tmobile_us.py"}]}
| 1,505 | 392 |
gh_patches_debug_16999
|
rasdani/github-patches
|
git_diff
|
microsoft__torchgeo-939
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Trainers: predict step
### Summary
This issue is to track progress on adding a `predict` step to all Trainers.
- [x] BYOLTask (#819)
- [x] ClassificationTask (#790)
- [x] MultiLabelClassificationTask (#792)
- [x] ObjectDetectionTask (#758)
- [x] RegressionTask (#818)
- [x] SemanticSegmentationTask (#939)
### Rationale
The default `predict` step does not know how to handle our batch dicts.
### Implementation
See implementations that have already been finished.
### Alternatives
_No response_
### Additional information
_No response_
</issue>
<code>
[start of torchgeo/trainers/segmentation.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Segmentation tasks."""
5
6 import warnings
7 from typing import Any, Dict, cast
8
9 import matplotlib.pyplot as plt
10 import pytorch_lightning as pl
11 import segmentation_models_pytorch as smp
12 import torch
13 import torch.nn as nn
14 from torch import Tensor
15 from torch.optim.lr_scheduler import ReduceLROnPlateau
16 from torch.utils.data import DataLoader
17 from torchmetrics import MetricCollection
18 from torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex
19
20 from ..datasets.utils import unbind_samples
21 from ..models import FCN
22
23 # https://github.com/pytorch/pytorch/issues/60979
24 # https://github.com/pytorch/pytorch/pull/61045
25 DataLoader.__module__ = "torch.utils.data"
26
27
28 class SemanticSegmentationTask(pl.LightningModule):
29 """LightningModule for semantic segmentation of images.
30
31 Supports `Segmentation Models Pytorch
32 <https://github.com/qubvel/segmentation_models.pytorch>`_
33 as an architecture choice in combination with any of these
34 `TIMM encoders <https://smp.readthedocs.io/en/latest/encoders_timm.html>`_.
35 """
36
37 def config_task(self) -> None:
38 """Configures the task based on kwargs parameters passed to the constructor."""
39 if self.hyperparams["segmentation_model"] == "unet":
40 self.model = smp.Unet(
41 encoder_name=self.hyperparams["encoder_name"],
42 encoder_weights=self.hyperparams["encoder_weights"],
43 in_channels=self.hyperparams["in_channels"],
44 classes=self.hyperparams["num_classes"],
45 )
46 elif self.hyperparams["segmentation_model"] == "deeplabv3+":
47 self.model = smp.DeepLabV3Plus(
48 encoder_name=self.hyperparams["encoder_name"],
49 encoder_weights=self.hyperparams["encoder_weights"],
50 in_channels=self.hyperparams["in_channels"],
51 classes=self.hyperparams["num_classes"],
52 )
53 elif self.hyperparams["segmentation_model"] == "fcn":
54 self.model = FCN(
55 in_channels=self.hyperparams["in_channels"],
56 classes=self.hyperparams["num_classes"],
57 num_filters=self.hyperparams["num_filters"],
58 )
59 else:
60 raise ValueError(
61 f"Model type '{self.hyperparams['segmentation_model']}' is not valid. "
62 f"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'."
63 )
64
65 if self.hyperparams["loss"] == "ce":
66 ignore_value = -1000 if self.ignore_index is None else self.ignore_index
67 self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)
68 elif self.hyperparams["loss"] == "jaccard":
69 self.loss = smp.losses.JaccardLoss(
70 mode="multiclass", classes=self.hyperparams["num_classes"]
71 )
72 elif self.hyperparams["loss"] == "focal":
73 self.loss = smp.losses.FocalLoss(
74 "multiclass", ignore_index=self.ignore_index, normalized=True
75 )
76 else:
77 raise ValueError(
78 f"Loss type '{self.hyperparams['loss']}' is not valid. "
79 f"Currently, supports 'ce', 'jaccard' or 'focal' loss."
80 )
81
82 def __init__(self, **kwargs: Any) -> None:
83 """Initialize the LightningModule with a model and loss function.
84
85 Keyword Args:
86 segmentation_model: Name of the segmentation model type to use
87 encoder_name: Name of the encoder model backbone to use
88 encoder_weights: None or "imagenet" to use imagenet pretrained weights in
89 the encoder model
90 in_channels: Number of channels in input image
91 num_classes: Number of semantic classes to predict
92 loss: Name of the loss function
93 ignore_index: Optional integer class index to ignore in the loss and metrics
94 learning_rate: Learning rate for optimizer
95 learning_rate_schedule_patience: Patience for learning rate scheduler
96
97 Raises:
98 ValueError: if kwargs arguments are invalid
99
100 .. versionchanged:: 0.3
101 The *ignore_zeros* parameter was renamed to *ignore_index*.
102 """
103 super().__init__()
104
105 # Creates `self.hparams` from kwargs
106 self.save_hyperparameters() # type: ignore[operator]
107 self.hyperparams = cast(Dict[str, Any], self.hparams)
108
109 if not isinstance(kwargs["ignore_index"], (int, type(None))):
110 raise ValueError("ignore_index must be an int or None")
111 if (kwargs["ignore_index"] is not None) and (kwargs["loss"] == "jaccard"):
112 warnings.warn(
113 "ignore_index has no effect on training when loss='jaccard'",
114 UserWarning,
115 )
116 self.ignore_index = kwargs["ignore_index"]
117 self.config_task()
118
119 self.train_metrics = MetricCollection(
120 [
121 MulticlassAccuracy(
122 num_classes=self.hyperparams["num_classes"],
123 ignore_index=self.ignore_index,
124 mdmc_average="global",
125 ),
126 MulticlassJaccardIndex(
127 num_classes=self.hyperparams["num_classes"],
128 ignore_index=self.ignore_index,
129 ),
130 ],
131 prefix="train_",
132 )
133 self.val_metrics = self.train_metrics.clone(prefix="val_")
134 self.test_metrics = self.train_metrics.clone(prefix="test_")
135
136 def forward(self, *args: Any, **kwargs: Any) -> Any:
137 """Forward pass of the model.
138
139 Args:
140 x: tensor of data to run through the model
141
142 Returns:
143 output from the model
144 """
145 return self.model(*args, **kwargs)
146
147 def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
148 """Compute and return the training loss.
149
150 Args:
151 batch: the output of your DataLoader
152
153 Returns:
154 training loss
155 """
156 batch = args[0]
157 x = batch["image"]
158 y = batch["mask"]
159 y_hat = self(x)
160 y_hat_hard = y_hat.argmax(dim=1)
161
162 loss = self.loss(y_hat, y)
163
164 # by default, the train step logs every `log_every_n_steps` steps where
165 # `log_every_n_steps` is a parameter to the `Trainer` object
166 self.log("train_loss", loss, on_step=True, on_epoch=False)
167 self.train_metrics(y_hat_hard, y)
168
169 return cast(Tensor, loss)
170
171 def training_epoch_end(self, outputs: Any) -> None:
172 """Logs epoch level training metrics.
173
174 Args:
175 outputs: list of items returned by training_step
176 """
177 self.log_dict(self.train_metrics.compute())
178 self.train_metrics.reset()
179
180 def validation_step(self, *args: Any, **kwargs: Any) -> None:
181 """Compute validation loss and log example predictions.
182
183 Args:
184 batch: the output of your DataLoader
185 batch_idx: the index of this batch
186 """
187 batch = args[0]
188 batch_idx = args[1]
189 x = batch["image"]
190 y = batch["mask"]
191 y_hat = self(x)
192 y_hat_hard = y_hat.argmax(dim=1)
193
194 loss = self.loss(y_hat, y)
195
196 self.log("val_loss", loss, on_step=False, on_epoch=True)
197 self.val_metrics(y_hat_hard, y)
198
199 if batch_idx < 10:
200 try:
201 datamodule = self.trainer.datamodule # type: ignore[attr-defined]
202 batch["prediction"] = y_hat_hard
203 for key in ["image", "mask", "prediction"]:
204 batch[key] = batch[key].cpu()
205 sample = unbind_samples(batch)[0]
206 fig = datamodule.plot(sample)
207 summary_writer = self.logger.experiment # type: ignore[union-attr]
208 summary_writer.add_figure(
209 f"image/{batch_idx}", fig, global_step=self.global_step
210 )
211 plt.close()
212 except AttributeError:
213 pass
214
215 def validation_epoch_end(self, outputs: Any) -> None:
216 """Logs epoch level validation metrics.
217
218 Args:
219 outputs: list of items returned by validation_step
220 """
221 self.log_dict(self.val_metrics.compute())
222 self.val_metrics.reset()
223
224 def test_step(self, *args: Any, **kwargs: Any) -> None:
225 """Compute test loss.
226
227 Args:
228 batch: the output of your DataLoader
229 """
230 batch = args[0]
231 x = batch["image"]
232 y = batch["mask"]
233 y_hat = self(x)
234 y_hat_hard = y_hat.argmax(dim=1)
235
236 loss = self.loss(y_hat, y)
237
238 # by default, the test and validation steps only log per *epoch*
239 self.log("test_loss", loss, on_step=False, on_epoch=True)
240 self.test_metrics(y_hat_hard, y)
241
242 def test_epoch_end(self, outputs: Any) -> None:
243 """Logs epoch level test metrics.
244
245 Args:
246 outputs: list of items returned by test_step
247 """
248 self.log_dict(self.test_metrics.compute())
249 self.test_metrics.reset()
250
251 def configure_optimizers(self) -> Dict[str, Any]:
252 """Initialize the optimizer and learning rate scheduler.
253
254 Returns:
255 a "lr dict" according to the pytorch lightning documentation --
256 https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
257 """
258 optimizer = torch.optim.Adam(
259 self.model.parameters(), lr=self.hyperparams["learning_rate"]
260 )
261 return {
262 "optimizer": optimizer,
263 "lr_scheduler": {
264 "scheduler": ReduceLROnPlateau(
265 optimizer,
266 patience=self.hyperparams["learning_rate_schedule_patience"],
267 ),
268 "monitor": "val_loss",
269 },
270 }
271
[end of torchgeo/trainers/segmentation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py
--- a/torchgeo/trainers/segmentation.py
+++ b/torchgeo/trainers/segmentation.py
@@ -248,6 +248,25 @@
self.log_dict(self.test_metrics.compute())
self.test_metrics.reset()
+ def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:
+ """Compute and return the predictions.
+
+ By default, this will loop over images in a dataloader and aggregate
+ predictions into a list. This may not be desirable if you have many images
+ or large images which could cause out of memory errors. In this case
+ it's recommended to override this with a custom predict_step.
+
+ Args:
+ batch: the output of your DataLoader
+
+ Returns:
+ predicted softmax probabilities
+ """
+ batch = args[0]
+ x = batch["image"]
+ y_hat: Tensor = self(x).softmax(dim=1)
+ return y_hat
+
def configure_optimizers(self) -> Dict[str, Any]:
"""Initialize the optimizer and learning rate scheduler.
|
{"golden_diff": "diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py\n--- a/torchgeo/trainers/segmentation.py\n+++ b/torchgeo/trainers/segmentation.py\n@@ -248,6 +248,25 @@\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n \n+ def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n+ \"\"\"Compute and return the predictions.\n+\n+ By default, this will loop over images in a dataloader and aggregate\n+ predictions into a list. This may not be desirable if you have many images\n+ or large images which could cause out of memory errors. In this case\n+ it's recommended to override this with a custom predict_step.\n+\n+ Args:\n+ batch: the output of your DataLoader\n+\n+ Returns:\n+ predicted softmax probabilities\n+ \"\"\"\n+ batch = args[0]\n+ x = batch[\"image\"]\n+ y_hat: Tensor = self(x).softmax(dim=1)\n+ return y_hat\n+\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n", "issue": "Trainers: predict step\n### Summary\r\n\r\nThis issue is to track progress on adding a `predict` step to all Trainers.\r\n\r\n- [x] BYOLTask (#819)\r\n- [x] ClassificationTask (#790)\r\n- [x] MultiLabelClassificationTask (#792)\r\n- [x] ObjectDetectionTask (#758)\r\n- [x] RegressionTask (#818)\r\n- [x] SemanticSegmentationTask (#939)\r\n\r\n### Rationale\r\n\r\nThe default `predict` step does not know how to handle our batch dicts.\r\n\r\n### Implementation\r\n\r\nSee implementations that have already been finished.\r\n\r\n### Alternatives\r\n\r\n_No response_\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Segmentation tasks.\"\"\"\n\nimport warnings\nfrom typing import Any, Dict, cast\n\nimport matplotlib.pyplot as plt\nimport pytorch_lightning as pl\nimport segmentation_models_pytorch as smp\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torchmetrics import MetricCollection\nfrom torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex\n\nfrom ..datasets.utils import unbind_samples\nfrom ..models import FCN\n\n# https://github.com/pytorch/pytorch/issues/60979\n# https://github.com/pytorch/pytorch/pull/61045\nDataLoader.__module__ = \"torch.utils.data\"\n\n\nclass SemanticSegmentationTask(pl.LightningModule):\n \"\"\"LightningModule for semantic segmentation of images.\n\n Supports `Segmentation Models Pytorch\n <https://github.com/qubvel/segmentation_models.pytorch>`_\n as an architecture choice in combination with any of these\n `TIMM encoders <https://smp.readthedocs.io/en/latest/encoders_timm.html>`_.\n \"\"\"\n\n def config_task(self) -> None:\n \"\"\"Configures the task based on kwargs parameters passed to the constructor.\"\"\"\n if self.hyperparams[\"segmentation_model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"encoder_name\"],\n encoder_weights=self.hyperparams[\"encoder_weights\"],\n in_channels=self.hyperparams[\"in_channels\"],\n 
classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"segmentation_model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['segmentation_model']}' is not valid. \"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n self.loss = nn.CrossEntropyLoss(ignore_index=ignore_value)\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. \"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n def __init__(self, **kwargs: Any) -> None:\n \"\"\"Initialize the LightningModule with a model and loss function.\n\n Keyword Args:\n segmentation_model: Name of the segmentation model type to use\n encoder_name: Name of the encoder model backbone to use\n encoder_weights: None or \"imagenet\" to use imagenet pretrained weights in\n the encoder model\n in_channels: Number of channels in input image\n num_classes: Number of semantic classes to predict\n loss: Name of the loss function\n ignore_index: Optional integer class index to ignore in the loss and metrics\n learning_rate: Learning rate for optimizer\n learning_rate_schedule_patience: Patience for learning rate scheduler\n\n Raises:\n ValueError: if kwargs arguments are invalid\n\n .. 
versionchanged:: 0.3\n The *ignore_zeros* parameter was renamed to *ignore_index*.\n \"\"\"\n super().__init__()\n\n # Creates `self.hparams` from kwargs\n self.save_hyperparameters() # type: ignore[operator]\n self.hyperparams = cast(Dict[str, Any], self.hparams)\n\n if not isinstance(kwargs[\"ignore_index\"], (int, type(None))):\n raise ValueError(\"ignore_index must be an int or None\")\n if (kwargs[\"ignore_index\"] is not None) and (kwargs[\"loss\"] == \"jaccard\"):\n warnings.warn(\n \"ignore_index has no effect on training when loss='jaccard'\",\n UserWarning,\n )\n self.ignore_index = kwargs[\"ignore_index\"]\n self.config_task()\n\n self.train_metrics = MetricCollection(\n [\n MulticlassAccuracy(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n mdmc_average=\"global\",\n ),\n MulticlassJaccardIndex(\n num_classes=self.hyperparams[\"num_classes\"],\n ignore_index=self.ignore_index,\n ),\n ],\n prefix=\"train_\",\n )\n self.val_metrics = self.train_metrics.clone(prefix=\"val_\")\n self.test_metrics = self.train_metrics.clone(prefix=\"test_\")\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward pass of the model.\n\n Args:\n x: tensor of data to run through the model\n\n Returns:\n output from the model\n \"\"\"\n return self.model(*args, **kwargs)\n\n def training_step(self, *args: Any, **kwargs: Any) -> Tensor:\n \"\"\"Compute and return the training loss.\n\n Args:\n batch: the output of your DataLoader\n\n Returns:\n training loss\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the train step logs every `log_every_n_steps` steps where\n # `log_every_n_steps` is a parameter to the `Trainer` object\n self.log(\"train_loss\", loss, on_step=True, on_epoch=False)\n self.train_metrics(y_hat_hard, y)\n\n return cast(Tensor, loss)\n\n def training_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level training metrics.\n\n Args:\n outputs: list of items returned by training_step\n \"\"\"\n self.log_dict(self.train_metrics.compute())\n self.train_metrics.reset()\n\n def validation_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute validation loss and log example predictions.\n\n Args:\n batch: the output of your DataLoader\n batch_idx: the index of this batch\n \"\"\"\n batch = args[0]\n batch_idx = args[1]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n self.log(\"val_loss\", loss, on_step=False, on_epoch=True)\n self.val_metrics(y_hat_hard, y)\n\n if batch_idx < 10:\n try:\n datamodule = self.trainer.datamodule # type: ignore[attr-defined]\n batch[\"prediction\"] = y_hat_hard\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n fig = datamodule.plot(sample)\n summary_writer = self.logger.experiment # type: ignore[union-attr]\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n except AttributeError:\n pass\n\n def validation_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level validation metrics.\n\n Args:\n outputs: list of items returned by validation_step\n \"\"\"\n self.log_dict(self.val_metrics.compute())\n self.val_metrics.reset()\n\n def test_step(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Compute test loss.\n\n Args:\n batch: the output of your 
DataLoader\n \"\"\"\n batch = args[0]\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n y_hat_hard = y_hat.argmax(dim=1)\n\n loss = self.loss(y_hat, y)\n\n # by default, the test and validation steps only log per *epoch*\n self.log(\"test_loss\", loss, on_step=False, on_epoch=True)\n self.test_metrics(y_hat_hard, y)\n\n def test_epoch_end(self, outputs: Any) -> None:\n \"\"\"Logs epoch level test metrics.\n\n Args:\n outputs: list of items returned by test_step\n \"\"\"\n self.log_dict(self.test_metrics.compute())\n self.test_metrics.reset()\n\n def configure_optimizers(self) -> Dict[str, Any]:\n \"\"\"Initialize the optimizer and learning rate scheduler.\n\n Returns:\n a \"lr dict\" according to the pytorch lightning documentation --\n https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers\n \"\"\"\n optimizer = torch.optim.Adam(\n self.model.parameters(), lr=self.hyperparams[\"learning_rate\"]\n )\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": {\n \"scheduler\": ReduceLROnPlateau(\n optimizer,\n patience=self.hyperparams[\"learning_rate_schedule_patience\"],\n ),\n \"monitor\": \"val_loss\",\n },\n }\n", "path": "torchgeo/trainers/segmentation.py"}]}
| 3,542 | 267 |
gh_patches_debug_4330
|
rasdani/github-patches
|
git_diff
|
LMFDB__lmfdb-5664
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dirichlet character browse page thinks all characters are primitive
On a Dir Char browse page, such as
https://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20
all characters are shown by default, and shown as "primitive".
On loading, only the primitive characters should be shown.
The underlying problem is that the `td` holding the character
index has `class="primitive"` in all cases. (So, the Imprimitive/Primitive
toggles in the upper right are working correctly, it's just that the
page is being generated with incorrect markup).
Dirichlet character browse page thinks all characters are primitive
On a Dir Char browse page, such as
https://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20
all characters are shown by default, and shown as "primitive".
On loading, only the primitive characters should be shown.
The underlying problem is that the `td` holding the character
index has `class="primitive"` in all cases. (So, the Imprimitive/Primitive
toggles in the upper right are working correctly, it's just that the
page is being generated with incorrect markup).
</issue>
<code>
[start of lmfdb/characters/ListCharacters.py]
1 # -*- coding: utf-8 -*-
2 # ListCharacters.py
3 import re
4 from sage.all import lcm, factor, Integers
5 from sage.databases.cremona import cremona_letter_code
6 from lmfdb.characters.web_character import WebDirichlet, parity_string
7 from lmfdb.characters.TinyConrey import ConreyCharacter
8 from lmfdb.utils import flash_error, integer_divisors
9
10 # utility functions #
11
12
13 def modn_exponent(n):
14 """ given a nonzero integer n, returns the group exponent of (Z/nZ)* """
15 return lcm( [ (p-1)*p**(e-1) for (p,e) in factor(n) ] ) // (1 if n%8 else 2)
16
17 def divisors_in_interval(n, a, b):
18 """ given a nonzero integer n and an interval [a,b] returns a list of the divisors of n in [a,b] """
19 return [d for d in integer_divisors(n) if a <= d and d <= b]
20
21 def parse_interval(arg, name):
22 """ parses a user specified interval of positive integers (or a single integer), flashes errors and raises exceptions """
23 a,b = 0,0
24 arg = arg.replace (' ','')
25 if re.match('^[0-9]+$', arg):
26 a,b = (int(arg), int(arg))
27 elif re.match('^[0-9]+-[0-9]+$', arg):
28 s = arg.split('-')
29 a,b = (int(s[0]), int(s[1]))
30 elif re.match('^[0-9]+..[0-9]+$', arg):
31 s = arg.split('..')
32 a,b = (int(s[0]), int(s[1]))
33 elif re.match(r'^\[[0-9]+..[0-9]+\]$', arg):
34 s = arg[1:-1].split('..')
35 a,b = (int(s[0]), int(s[1]))
36 if a <= 0 or b < a:
37 flash_error("%s is not a valid value for %s. It should be a positive integer (e.g. 7) or a nonempty range of positive integers (e.g. 1-10 or 1..10)", arg, name)
38 raise ValueError("invalid " + name)
39 return a,b
40
41 def parse_limit(arg):
42 if not arg:
43 return 50
44 limit = -1
45 arg = arg.replace (' ','')
46 if re.match('^[0-9]+$', arg):
47 limit = int(arg)
48 if limit > 100:
49 flash_error("%s is not a valid limit on the number of results to display. It should be a positive integer no greater than 100.", arg)
50 raise ValueError("limit")
51 return limit
52
53 def get_character_modulus(a, b, limit=7):
54 """ this function is also used by lfunctions/LfunctionPlot.py """
55 headers = list(range(1, limit))
56 headers.append("more")
57 entries = {}
58 rows = list(range(a, b + 1))
59 for row in rows:
60 if row != 1:
61 G = Integers(row).list_of_elements_of_multiplicative_group()
62 else:
63 G = [1]
64 for chi_n in G:
65 chi = ConreyCharacter(row, chi_n)
66 multorder = chi.order
67 if multorder <= limit:
68 el = chi
69 col = multorder
70 entry = entries.get((row, col), [])
71 entry.append(el)
72 entries[(row, col)] = entry
73 entries2 = {}
74 def out(chi): return (chi.number, chi.is_primitive,
75 chi.order, chi.is_even())
76 for k, v in entries.items():
77 l = []
78 v = sorted(v, key=lambda x: x.number)
79 while v:
80 e1 = v.pop(0)
81 e1_num = e1.number
82 inv_num = 1 if e1_num == 1 else e1_num.inverse_mod(e1.modulus)
83
84 inv = ConreyCharacter(e1.modulus, inv_num)
85
86 if e1_num == inv_num:
87 l.append((out(e1),))
88 else:
89 l.append((out(e1), out(inv)))
90 v = [x for x in v if (x.modulus, x.number) != (inv.modulus, inv.number)]
91 if k[1] == "more":
92 l = sorted(l, key=lambda e: e[0][2])
93 entries2[k] = l
94 cols = headers
95 return headers, entries2, rows, cols
96
97
98 def info_from_db_orbit(orbit):
99 mod = orbit['modulus']
100 conductor = orbit['conductor']
101 orbit_index = orbit['orbit_index']
102 orbit_letter = cremona_letter_code(orbit_index - 1)
103 orbit_label = "{}.{}".format(mod, orbit_letter)
104 order = orbit['order']
105 is_odd = parity_string(orbit['parity'])
106 is_prim = _is_primitive(orbit['is_primitive'])
107 results = []
108 for num in orbit['galois_orbit']:
109 results.append((
110 mod,
111 num,
112 conductor,
113 orbit_label,
114 order,
115 is_odd,
116 is_prim,
117 WebDirichlet.char2tex(mod, num)
118 ))
119 return results
120
121
122 def _is_primitive(db_primitive):
123 """
124 Translate db's primitive entry to boolean.
125 """
126 if str(db_primitive) == "True":
127 return True
128 return False
129
130
131 def _is_odd(db_parity):
132 """
133 Translate db's parity entry to boolean.
134 """
135 _parity = int(db_parity)
136 if _parity == -1:
137 return True
138 return False
139
[end of lmfdb/characters/ListCharacters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lmfdb/characters/ListCharacters.py b/lmfdb/characters/ListCharacters.py
--- a/lmfdb/characters/ListCharacters.py
+++ b/lmfdb/characters/ListCharacters.py
@@ -71,7 +71,7 @@
entry.append(el)
entries[(row, col)] = entry
entries2 = {}
- def out(chi): return (chi.number, chi.is_primitive,
+ def out(chi): return (chi.number, chi.is_primitive(),
chi.order, chi.is_even())
for k, v in entries.items():
l = []
|
{"golden_diff": "diff --git a/lmfdb/characters/ListCharacters.py b/lmfdb/characters/ListCharacters.py\n--- a/lmfdb/characters/ListCharacters.py\n+++ b/lmfdb/characters/ListCharacters.py\n@@ -71,7 +71,7 @@\n entry.append(el)\n entries[(row, col)] = entry\n entries2 = {}\n- def out(chi): return (chi.number, chi.is_primitive,\n+ def out(chi): return (chi.number, chi.is_primitive(),\n chi.order, chi.is_even())\n for k, v in entries.items():\n l = []\n", "issue": "Dirichlet character browse page thinks all characters are primitive \nOn a Dir Char browse page, such as\r\n\r\nhttps://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20\r\n\r\nall characters are shown by default, and shown as \"primitive\".\r\nOn loading, only the primitive characters should be shown.\r\n\r\nThe underlying problem is that the `td` holding the character\r\nindex has `class=\"primitive\"` in all cases. (So, the Imprimitive/Primitive\r\ntoggles in the upper right are working correctly, it's just that the\r\npage is being generated with incorrect markup).\r\n\r\n\r\n\r\n\nDirichlet character browse page thinks all characters are primitive \nOn a Dir Char browse page, such as\r\n\r\nhttps://beta.lmfdb.org/Character/Dirichlet/?modbrowse=1-20\r\n\r\nall characters are shown by default, and shown as \"primitive\".\r\nOn loading, only the primitive characters should be shown.\r\n\r\nThe underlying problem is that the `td` holding the character\r\nindex has `class=\"primitive\"` in all cases. (So, the Imprimitive/Primitive\r\ntoggles in the upper right are working correctly, it's just that the\r\npage is being generated with incorrect markup).\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# ListCharacters.py\nimport re\nfrom sage.all import lcm, factor, Integers\nfrom sage.databases.cremona import cremona_letter_code\nfrom lmfdb.characters.web_character import WebDirichlet, parity_string\nfrom lmfdb.characters.TinyConrey import ConreyCharacter\nfrom lmfdb.utils import flash_error, integer_divisors\n\n# utility functions #\n\n\ndef modn_exponent(n):\n \"\"\" given a nonzero integer n, returns the group exponent of (Z/nZ)* \"\"\"\n return lcm( [ (p-1)*p**(e-1) for (p,e) in factor(n) ] ) // (1 if n%8 else 2)\n\ndef divisors_in_interval(n, a, b):\n \"\"\" given a nonzero integer n and an interval [a,b] returns a list of the divisors of n in [a,b] \"\"\"\n return [d for d in integer_divisors(n) if a <= d and d <= b]\n\ndef parse_interval(arg, name):\n \"\"\" parses a user specified interval of positive integers (or a single integer), flashes errors and raises exceptions \"\"\"\n a,b = 0,0\n arg = arg.replace (' ','')\n if re.match('^[0-9]+$', arg):\n a,b = (int(arg), int(arg))\n elif re.match('^[0-9]+-[0-9]+$', arg):\n s = arg.split('-')\n a,b = (int(s[0]), int(s[1]))\n elif re.match('^[0-9]+..[0-9]+$', arg):\n s = arg.split('..')\n a,b = (int(s[0]), int(s[1]))\n elif re.match(r'^\\[[0-9]+..[0-9]+\\]$', arg):\n s = arg[1:-1].split('..')\n a,b = (int(s[0]), int(s[1]))\n if a <= 0 or b < a:\n flash_error(\"%s is not a valid value for %s. It should be a positive integer (e.g. 7) or a nonempty range of positive integers (e.g. 1-10 or 1..10)\", arg, name)\n raise ValueError(\"invalid \" + name)\n return a,b\n\ndef parse_limit(arg):\n if not arg:\n return 50\n limit = -1\n arg = arg.replace (' ','')\n if re.match('^[0-9]+$', arg):\n limit = int(arg)\n if limit > 100:\n flash_error(\"%s is not a valid limit on the number of results to display. 
It should be a positive integer no greater than 100.\", arg)\n raise ValueError(\"limit\")\n return limit\n\ndef get_character_modulus(a, b, limit=7):\n \"\"\" this function is also used by lfunctions/LfunctionPlot.py \"\"\"\n headers = list(range(1, limit))\n headers.append(\"more\")\n entries = {}\n rows = list(range(a, b + 1))\n for row in rows:\n if row != 1:\n G = Integers(row).list_of_elements_of_multiplicative_group()\n else:\n G = [1]\n for chi_n in G:\n chi = ConreyCharacter(row, chi_n)\n multorder = chi.order\n if multorder <= limit:\n el = chi\n col = multorder\n entry = entries.get((row, col), [])\n entry.append(el)\n entries[(row, col)] = entry\n entries2 = {}\n def out(chi): return (chi.number, chi.is_primitive,\n chi.order, chi.is_even())\n for k, v in entries.items():\n l = []\n v = sorted(v, key=lambda x: x.number)\n while v:\n e1 = v.pop(0)\n e1_num = e1.number\n inv_num = 1 if e1_num == 1 else e1_num.inverse_mod(e1.modulus)\n\n inv = ConreyCharacter(e1.modulus, inv_num)\n\n if e1_num == inv_num:\n l.append((out(e1),))\n else:\n l.append((out(e1), out(inv)))\n v = [x for x in v if (x.modulus, x.number) != (inv.modulus, inv.number)]\n if k[1] == \"more\":\n l = sorted(l, key=lambda e: e[0][2])\n entries2[k] = l\n cols = headers\n return headers, entries2, rows, cols\n\n\ndef info_from_db_orbit(orbit):\n mod = orbit['modulus']\n conductor = orbit['conductor']\n orbit_index = orbit['orbit_index']\n orbit_letter = cremona_letter_code(orbit_index - 1)\n orbit_label = \"{}.{}\".format(mod, orbit_letter)\n order = orbit['order']\n is_odd = parity_string(orbit['parity'])\n is_prim = _is_primitive(orbit['is_primitive'])\n results = []\n for num in orbit['galois_orbit']:\n results.append((\n mod,\n num,\n conductor,\n orbit_label,\n order,\n is_odd,\n is_prim,\n WebDirichlet.char2tex(mod, num)\n ))\n return results\n\n\ndef _is_primitive(db_primitive):\n \"\"\"\n Translate db's primitive entry to boolean.\n \"\"\"\n if str(db_primitive) == \"True\":\n return True\n return False\n\n\ndef _is_odd(db_parity):\n \"\"\"\n Translate db's parity entry to boolean.\n \"\"\"\n _parity = int(db_parity)\n if _parity == -1:\n return True\n return False\n", "path": "lmfdb/characters/ListCharacters.py"}]}
| 2,355 | 129 |
gh_patches_debug_54784
|
rasdani/github-patches
|
git_diff
|
encode__httpx-589
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module 'rfc3986.api' has no attribute 'iri_reference'
Just did a pip install of httpx, and it appeared to meet all the pre-reqs. However, I'd encounter the error in the title.
(python37) $ pip install httpx
Collecting httpx
Using cached https://files.pythonhosted.org/packages/15/fb/81861a0dbe87a0de57491b1b587419845f9a99e5b3d48e56cd440b1fcb68/httpx-0.7.6-py2.py3-none-any.whl
Requirement already satisfied: certifi in .../envs/python37/lib/python3.7/site-packages (from httpx) (2019.6.16)
Requirement already satisfied: h2==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.1.0)
Requirement already satisfied: chardet==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.0.4)
Requirement already satisfied: h11==0.8.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (0.8.1)
Requirement already satisfied: rfc3986==1.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (1.2.0)
Requirement already satisfied: idna==2.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (2.8)
Collecting hstspreload>=2019.8.27 (from httpx)
Downloading https://files.pythonhosted.org/packages/55/77/e0031fbb2930891db135af5a53afdf5567ca6f1b1ff55bf231ed5330f66d/hstspreload-2019.11.4.tar.gz (701kB)
|████████████████████████████████| 706kB 740kB/s
Requirement already satisfied: hpack<4,>=2.3 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (3.0.0)
Requirement already satisfied: hyperframe<6,>=5.2.0 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (5.2.0)
Building wheels for collected packages: hstspreload
Building wheel for hstspreload (setup.py) ... done
Created wheel for hstspreload: filename=hstspreload-2019.11.4-cp37-none-any.whl size=704190 sha256=ac82e6b8abe795fa1a179318d64d73660fa3024715b04b7db0746971c3e5a87b
Stored in directory: .../Library/Caches/pip/wheels/49/f1/9d/dc01d2c108593d8d2c32d00cdd9daf595e6ad4a727d57b7033
Successfully built hstspreload
Installing collected packages: hstspreload, httpx
Successfully installed hstspreload-2019.11.4 httpx-0.7.6
Manually upgrading rfc3986 to 1.3.2 resolved the issue. Seems the installer should be checking for rfc3986==1.3.2 or greater?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import re
5 from pathlib import Path
6
7 from setuptools import setup
8
9
10 def get_version(package):
11 """
12 Return package version as listed in `__version__` in `init.py`.
13 """
14 version = Path(package, "__version__.py").read_text()
15 return re.search("__version__ = ['\"]([^'\"]+)['\"]", version).group(1)
16
17
18 def get_long_description():
19 """
20 Return the README.
21 """
22 long_description = ""
23 with open("README.md", encoding="utf8") as f:
24 long_description += f.read()
25 long_description += "\n\n"
26 with open("CHANGELOG.md", encoding="utf8") as f:
27 long_description += f.read()
28 return long_description
29
30
31 def get_packages(package):
32 """
33 Return root package and all sub-packages.
34 """
35 return [str(path.parent) for path in Path(package).glob("**/__init__.py")]
36
37
38 setup(
39 name="httpx",
40 python_requires=">=3.6",
41 version=get_version("httpx"),
42 url="https://github.com/encode/httpx",
43 license="BSD",
44 description="The next generation HTTP client.",
45 long_description=get_long_description(),
46 long_description_content_type="text/markdown",
47 author="Tom Christie",
48 author_email="[email protected]",
49 package_data={"httpx": ["py.typed"]},
50 packages=get_packages("httpx"),
51 include_package_data=True,
52 zip_safe=False,
53 install_requires=[
54 "certifi",
55 "hstspreload",
56 "chardet==3.*",
57 "h11==0.8.*",
58 "h2==3.*",
59 "idna==2.*",
60 "rfc3986==1.*",
61 "sniffio==1.*",
62 ],
63 classifiers=[
64 "Development Status :: 3 - Alpha",
65 "Environment :: Web Environment",
66 "Intended Audience :: Developers",
67 "License :: OSI Approved :: BSD License",
68 "Operating System :: OS Independent",
69 "Topic :: Internet :: WWW/HTTP",
70 "Framework :: AsyncIO",
71 "Framework :: Trio",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.6",
74 "Programming Language :: Python :: 3.7",
75 "Programming Language :: Python :: 3.8",
76 ],
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@
"h11==0.8.*",
"h2==3.*",
"idna==2.*",
- "rfc3986==1.*",
+ "rfc3986>=1.3,<2",
"sniffio==1.*",
],
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,7 +57,7 @@\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n- \"rfc3986==1.*\",\n+ \"rfc3986>=1.3,<2\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n", "issue": "AttributeError: module 'rfc3986.api' has no attribute 'iri_reference'\nJust did a pip install of httpx, and it appeared to meet all the pre-reqs. However, I'd encounter the error in the title.\r\n\r\n\r\n(python37) $ pip install httpx\r\nCollecting httpx\r\n Using cached https://files.pythonhosted.org/packages/15/fb/81861a0dbe87a0de57491b1b587419845f9a99e5b3d48e56cd440b1fcb68/httpx-0.7.6-py2.py3-none-any.whl\r\nRequirement already satisfied: certifi in .../envs/python37/lib/python3.7/site-packages (from httpx) (2019.6.16)\r\nRequirement already satisfied: h2==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.1.0)\r\nRequirement already satisfied: chardet==3.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (3.0.4)\r\nRequirement already satisfied: h11==0.8.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (0.8.1)\r\nRequirement already satisfied: rfc3986==1.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (1.2.0)\r\nRequirement already satisfied: idna==2.* in .../envs/python37/lib/python3.7/site-packages (from httpx) (2.8)\r\nCollecting hstspreload>=2019.8.27 (from httpx)\r\n Downloading https://files.pythonhosted.org/packages/55/77/e0031fbb2930891db135af5a53afdf5567ca6f1b1ff55bf231ed5330f66d/hstspreload-2019.11.4.tar.gz (701kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 706kB 740kB/s \r\nRequirement already satisfied: hpack<4,>=2.3 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (3.0.0)\r\nRequirement already satisfied: hyperframe<6,>=5.2.0 in .../envs/python37/lib/python3.7/site-packages (from h2==3.*->httpx) (5.2.0)\r\nBuilding wheels for collected packages: hstspreload\r\n Building wheel for hstspreload (setup.py) ... done\r\n Created wheel for hstspreload: filename=hstspreload-2019.11.4-cp37-none-any.whl size=704190 sha256=ac82e6b8abe795fa1a179318d64d73660fa3024715b04b7db0746971c3e5a87b\r\n Stored in directory: .../Library/Caches/pip/wheels/49/f1/9d/dc01d2c108593d8d2c32d00cdd9daf595e6ad4a727d57b7033\r\nSuccessfully built hstspreload\r\nInstalling collected packages: hstspreload, httpx\r\nSuccessfully installed hstspreload-2019.11.4 httpx-0.7.6\r\n\r\nManually upgrading rfc3986 to 1.3.2 resolved the issue. Seems the installer should be checking for rfc3986==1.3.2 or greater? 
\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n version = Path(package, \"__version__.py\").read_text()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", version).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n long_description = \"\"\n with open(\"README.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n long_description += \"\\n\\n\"\n with open(\"CHANGELOG.md\", encoding=\"utf8\") as f:\n long_description += f.read()\n return long_description\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]\n\n\nsetup(\n name=\"httpx\",\n python_requires=\">=3.6\",\n version=get_version(\"httpx\"),\n url=\"https://github.com/encode/httpx\",\n license=\"BSD\",\n description=\"The next generation HTTP client.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n package_data={\"httpx\": [\"py.typed\"]},\n packages=get_packages(\"httpx\"),\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n \"certifi\",\n \"hstspreload\",\n \"chardet==3.*\",\n \"h11==0.8.*\",\n \"h2==3.*\",\n \"idna==2.*\",\n \"rfc3986==1.*\",\n \"sniffio==1.*\",\n ],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n \"Framework :: Trio\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n)\n", "path": "setup.py"}]}
| 2,087 | 102 |
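A quick way to see why the golden diff above tightens the rfc3986 specifier is to compare which versions each constraint admits. The sketch below is an editorial illustration, not part of the httpx repository; it assumes the `packaging` library is available and shows that `==1.*` still accepts 1.2.0 (which predates `iri_reference`), while `>=1.3,<2` rejects it.

```python
# Minimal sketch (not httpx code): compare the old and new rfc3986 specifiers
# from the golden diff above using the `packaging` library.
from packaging.specifiers import SpecifierSet

old_spec = SpecifierSet("==1.*")     # constraint in the original setup.py
new_spec = SpecifierSet(">=1.3,<2")  # constraint introduced by the patch

for version in ["1.2.0", "1.3.2", "1.4.0"]:
    # 1.2.0 satisfies the old constraint (so the AttributeError can occur),
    # but fails the new one, which forces an upgrade at install time.
    print(version, "old:", old_spec.contains(version), "new:", new_spec.contains(version))
```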
gh_patches_debug_11881
|
rasdani/github-patches
|
git_diff
|
spack__spack-5816
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
libxml2 fails with Intel 16.0.2
I am now trying to build `amrex` on another machine. I setup spack for intel compilers (to load the proper module, etc.). Lots of packages build just fine (including, e.g. `m4` which failed in #5705 and we had to fix it in spack). But the `libxml2` packages fails with:
```
$ spack install --restage amrex%[email protected]
...
==> Executing phase: 'autoreconf'
==> Executing phase: 'configure'
==> Executing phase: 'build'
==> Error: ProcessError: Command exited with status 2:
'make' '-j32'
4 errors found in build log:
[ ... ]
328 icc: command line warning #10006: ignoring unknown option '-Wnested-externs'
329 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls'
330 HTMLparser.c(5409): warning #2338: this switch statement does not have a default clause
331 switch (ctxt->instate) {
332 ^
333
>> 334 xmlIO.c(1450): error: identifier "LZMA_OK" is undefined
335 ret = (__libxml2_xzclose((xzFile) context) == LZMA_OK ) ? 0 : -1;
336 ^
337
338 xmlIO.c(2044): warning #3179: deprecated conversion of string literal to char* (should be const char*)
339 char * content_type = (char *) "text/xml";
340 ^
[ ... ]
342 xmlIO.c(2056): warning #3179: deprecated conversion of string literal to char* (should be const char*)
343 content_encoding = (char *) "Content-Encoding: gzip";
344 ^
345
346 compilation aborted for xmlIO.c (code 2)
347 Makefile:1248: recipe for target 'xmlIO.lo' failed
>> 348 make[2]: *** [xmlIO.lo] Error 1
349 make[2]: *** Waiting for unfinished jobs....
350 icc: command line warning #10006: ignoring unknown option '-Wno-format-extra-args'
351 icc: command line warning #10006: ignoring unknown option '-Wcast-align'
352 icc: command line warning #10006: ignoring unknown option '-Waggregate-return'
353 icc: command line warning #10006: ignoring unknown option '-Wnested-externs'
354 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls'
[ ... ]
981 trionan.c(218): warning #177: function "trio_is_negative" was declared but never referenced
982 trio_is_negative
983 ^
984
985 make[2]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4'
986 Makefile:1440: recipe for target 'all-recursive' failed
>> 987 make[1]: *** [all-recursive] Error 1
988 make[1]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4'
989 Makefile:859: recipe for target 'all' failed
>> 990 make: *** [all] Error 2
See build log for details:
/home/certik/repos/spack/var/spack/stage/libxml2-2.9.4-j26m2sopxpelbb2vhdtbfdb7gdhczbl2/libxml2-2.9.4/spack-build.out
```
The missing `LZMA_OK` symbol I think comes from the `xz` package, that was installed successfully by spack already. Here is the spec:
```
$ spack spec amrex%[email protected]
Input spec
--------------------------------
amrex%[email protected]
Normalized
--------------------------------
amrex%[email protected]
^cmake
Concretized
--------------------------------
[email protected]%[email protected] build_type=RelWithDebInfo ~debug dims=3 +fortran+mpi~openmp~particles prec=DOUBLE arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected]~doc+ncurses+openssl+ownlibs~qt arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] patches=f84b2708a42777aadcc7f502a261afe10ca5646a51c1ef8b5e60d2070d926b57 ~symlinks arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected]+internal_glib arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected]+pic+shared arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected]~cuda fabrics= ~java schedulers= ~sqlite3~thread_multiple+vt arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected]~cuda+libxml2+pci arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00 +sigsegv arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected]~python arch=linux-ubuntu16.04-x86_64
^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64
```
Note that `libxml2` builds just fine with Intel 17.0.1 on another machine.
</issue>
<code>
[start of var/spack/repos/builtin/packages/libxml2/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 from spack import *
26
27
28 class Libxml2(AutotoolsPackage):
29 """Libxml2 is the XML C parser and toolkit developed for the Gnome
30 project (but usable outside of the Gnome platform), it is free
31 software available under the MIT License."""
32 homepage = "http://xmlsoft.org"
33 url = "http://xmlsoft.org/sources/libxml2-2.9.2.tar.gz"
34
35 version('2.9.4', 'ae249165c173b1ff386ee8ad676815f5')
36 version('2.9.2', '9e6a9aca9d155737868b3dc5fd82f788')
37 version('2.7.8', '8127a65e8c3b08856093099b52599c86')
38
39 variant('python', default=False, description='Enable Python support')
40
41 extends('python', when='+python',
42 ignore=r'(bin.*$)|(include.*$)|(share.*$)|(lib/libxml2.*$)|'
43 '(lib/xml2.*$)|(lib/cmake.*$)')
44 depends_on('zlib')
45 depends_on('xz')
46
47 depends_on('[email protected]:', type='build')
48
49 def configure_args(self):
50 spec = self.spec
51 if '+python' in spec:
52 python_args = [
53 '--with-python={0}'.format(spec['python'].home),
54 '--with-python-install-dir={0}'.format(site_packages_dir)
55 ]
56 else:
57 python_args = ['--without-python']
58
59 return python_args
60
[end of var/spack/repos/builtin/packages/libxml2/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/libxml2/package.py b/var/spack/repos/builtin/packages/libxml2/package.py
--- a/var/spack/repos/builtin/packages/libxml2/package.py
+++ b/var/spack/repos/builtin/packages/libxml2/package.py
@@ -48,12 +48,15 @@
def configure_args(self):
spec = self.spec
+
+ args = ["--with-lzma=%s" % spec['xz'].prefix]
+
if '+python' in spec:
- python_args = [
+ args.extend([
'--with-python={0}'.format(spec['python'].home),
'--with-python-install-dir={0}'.format(site_packages_dir)
- ]
+ ])
else:
- python_args = ['--without-python']
+ args.append('--without-python')
- return python_args
+ return args
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/libxml2/package.py b/var/spack/repos/builtin/packages/libxml2/package.py\n--- a/var/spack/repos/builtin/packages/libxml2/package.py\n+++ b/var/spack/repos/builtin/packages/libxml2/package.py\n@@ -48,12 +48,15 @@\n \n def configure_args(self):\n spec = self.spec\n+\n+ args = [\"--with-lzma=%s\" % spec['xz'].prefix]\n+\n if '+python' in spec:\n- python_args = [\n+ args.extend([\n '--with-python={0}'.format(spec['python'].home),\n '--with-python-install-dir={0}'.format(site_packages_dir)\n- ]\n+ ])\n else:\n- python_args = ['--without-python']\n+ args.append('--without-python')\n \n- return python_args\n+ return args\n", "issue": "libxml2 fails with Intel 16.0.2\nI am now trying to build `amrex` on another machine. I setup spack for intel compilers (to load the proper module, etc.). Lots of packages build just fine (including, e.g. `m4` which failed in #5705 and we had to fix it in spack). But the `libxml2` packages fails with:\r\n```\r\n$ spack install --restage amrex%[email protected]\r\n...\r\n==> Executing phase: 'autoreconf'\r\n==> Executing phase: 'configure'\r\n==> Executing phase: 'build'\r\n==> Error: ProcessError: Command exited with status 2:\r\n 'make' '-j32'\r\n\r\n4 errors found in build log:\r\n [ ... ]\r\n 328 icc: command line warning #10006: ignoring unknown option '-Wnested-externs'\r\n 329 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls'\r\n 330 HTMLparser.c(5409): warning #2338: this switch statement does not have a default clause\r\n 331 switch (ctxt->instate) {\r\n 332 ^\r\n 333 \r\n >> 334 xmlIO.c(1450): error: identifier \"LZMA_OK\" is undefined\r\n 335 ret = (__libxml2_xzclose((xzFile) context) == LZMA_OK ) ? 0 : -1;\r\n 336 ^\r\n 337 \r\n 338 xmlIO.c(2044): warning #3179: deprecated conversion of string literal to char* (should be const char*)\r\n 339 char * content_type = (char *) \"text/xml\";\r\n 340 ^\r\n [ ... ]\r\n 342 xmlIO.c(2056): warning #3179: deprecated conversion of string literal to char* (should be const char*)\r\n 343 content_encoding = (char *) \"Content-Encoding: gzip\";\r\n 344 ^\r\n 345 \r\n 346 compilation aborted for xmlIO.c (code 2)\r\n 347 Makefile:1248: recipe for target 'xmlIO.lo' failed\r\n >> 348 make[2]: *** [xmlIO.lo] Error 1\r\n 349 make[2]: *** Waiting for unfinished jobs....\r\n 350 icc: command line warning #10006: ignoring unknown option '-Wno-format-extra-args'\r\n 351 icc: command line warning #10006: ignoring unknown option '-Wcast-align'\r\n 352 icc: command line warning #10006: ignoring unknown option '-Waggregate-return'\r\n 353 icc: command line warning #10006: ignoring unknown option '-Wnested-externs'\r\n 354 icc: command line warning #10006: ignoring unknown option '-Wredundant-decls'\r\n [ ... 
]\r\n 981 trionan.c(218): warning #177: function \"trio_is_negative\" was declared but never referenced\r\n 982 trio_is_negative\r\n 983 ^\r\n 984 \r\n 985 make[2]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4'\r\n 986 Makefile:1440: recipe for target 'all-recursive' failed\r\n >> 987 make[1]: *** [all-recursive] Error 1\r\n 988 make[1]: Leaving directory '/home/certik/tmp/spack-stage/spack-stage-dmGFaF/libxml2-2.9.4'\r\n 989 Makefile:859: recipe for target 'all' failed\r\n >> 990 make: *** [all] Error 2\r\n\r\nSee build log for details:\r\n /home/certik/repos/spack/var/spack/stage/libxml2-2.9.4-j26m2sopxpelbb2vhdtbfdb7gdhczbl2/libxml2-2.9.4/spack-build.out\r\n```\r\nThe missing `LZMA_OK` symbol I think comes from the `xz` package, that was installed successfully by spack already. Here is the spec:\r\n```\r\n$ spack spec amrex%[email protected]\r\nInput spec\r\n--------------------------------\r\namrex%[email protected]\r\n\r\nNormalized\r\n--------------------------------\r\namrex%[email protected]\r\n ^cmake\r\n\r\nConcretized\r\n--------------------------------\r\[email protected]%[email protected] build_type=RelWithDebInfo ~debug dims=3 +fortran+mpi~openmp~particles prec=DOUBLE arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~doc+ncurses+openssl+ownlibs~qt arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] patches=f84b2708a42777aadcc7f502a261afe10ca5646a51c1ef8b5e60d2070d926b57 ~symlinks arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]+internal_glib arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]+pic+shared arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~cuda fabrics= ~java schedulers= ~sqlite3~thread_multiple+vt arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~cuda+libxml2+pci arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] patches=3877ab548f88597ab2327a2230ee048d2d07ace1062efe81fc92e91b7f39cd00 +sigsegv arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected]~python arch=linux-ubuntu16.04-x86_64 \r\n ^[email protected]%[email protected] arch=linux-ubuntu16.04-x86_64 \r\n```\r\nNote that `libxml2` builds just fine with Intel 17.0.1 on another machine.\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A 
PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\n\n\nclass Libxml2(AutotoolsPackage):\n \"\"\"Libxml2 is the XML C parser and toolkit developed for the Gnome\n project (but usable outside of the Gnome platform), it is free\n software available under the MIT License.\"\"\"\n homepage = \"http://xmlsoft.org\"\n url = \"http://xmlsoft.org/sources/libxml2-2.9.2.tar.gz\"\n\n version('2.9.4', 'ae249165c173b1ff386ee8ad676815f5')\n version('2.9.2', '9e6a9aca9d155737868b3dc5fd82f788')\n version('2.7.8', '8127a65e8c3b08856093099b52599c86')\n\n variant('python', default=False, description='Enable Python support')\n\n extends('python', when='+python',\n ignore=r'(bin.*$)|(include.*$)|(share.*$)|(lib/libxml2.*$)|'\n '(lib/xml2.*$)|(lib/cmake.*$)')\n depends_on('zlib')\n depends_on('xz')\n\n depends_on('[email protected]:', type='build')\n\n def configure_args(self):\n spec = self.spec\n if '+python' in spec:\n python_args = [\n '--with-python={0}'.format(spec['python'].home),\n '--with-python-install-dir={0}'.format(site_packages_dir)\n ]\n else:\n python_args = ['--without-python']\n\n return python_args\n", "path": "var/spack/repos/builtin/packages/libxml2/package.py"}]}
| 3,205 | 195 |
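The patch above adds a `--with-lzma` flag so libxml2's configure script is pointed at the xz installation Spack already built, rather than whatever lzma headers the Intel 16.0.2 toolchain happens to find. The sketch below only mirrors the logic of the golden diff as it would appear inside the package class; it assumes the standard Spack `AutotoolsPackage` API (`self.spec['xz'].prefix`, and the `site_packages_dir` global that Spack injects for Python extensions) and is not the full package file.

```python
# Sketch of the patched configure_args(), assuming Spack's AutotoolsPackage API.
def configure_args(self):
    spec = self.spec

    # Hand configure the Spack-built xz prefix explicitly so xmlIO.c is
    # compiled against a complete lzma installation (defines LZMA_OK).
    args = ['--with-lzma=%s' % spec['xz'].prefix]

    if '+python' in spec:
        args.extend([
            '--with-python={0}'.format(spec['python'].home),
            # site_packages_dir is provided by Spack's build environment.
            '--with-python-install-dir={0}'.format(site_packages_dir),
        ])
    else:
        args.append('--without-python')

    return args
```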
gh_patches_debug_6596
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3023
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove Sphinx as a dependency
As mentioned in https://github.com/DataBiosphere/toil/pull/3011#issuecomment-601349848 we depend on Sphinx, but we only use it for generating documentation. We never use it at runtime.
We should remove the Sphinx dependency, and just make sure it is installed by the `Makefile` when running the `docs` target. Maybe it could be one of the things installed by `make prepare`.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-528)
┆Issue Number: TOIL-528
</issue>
<code>
[start of setup.py]
1 # Copyright (C) 2015-2016 Regents of the University of California
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from setuptools import find_packages, setup
15 import os
16
17 # setting the 'CPPFLAGS' flag specifies the necessary cython dependency for "http-parser", for more info:
18 # toil issue: https://github.com/DataBiosphere/toil/issues/2924
19 # very similar to this issue: https://github.com/mcfletch/pyopengl/issues/11
20 # the "right way" is waiting for a fix from "http-parser", but this fixes things in the meantime since that might take a while
21 cppflags = os.environ.get('CPPFLAGS')
22 if cppflags:
23 # note, duplicate options don't affect things here so we don't check - Mark D
24 os.environ['CPPFLAGS'] = ' '.join([cppflags, '-DPYPY_VERSION'])
25 else:
26 os.environ['CPPFLAGS'] = '-DPYPY_VERSION'
27
28
29 def runSetup():
30 """
31 Calls setup(). This function exists so the setup() invocation preceded more internal
32 functionality. The `version` module is imported dynamically by importVersion() below.
33 """
34 boto = 'boto==2.48.0'
35 boto3 = 'boto3>=1.7.50, <2.0'
36 futures = 'futures==3.1.1'
37 pycryptodome = 'pycryptodome==3.5.1'
38 pymesos = 'pymesos==0.3.7'
39 psutil = 'psutil >= 3.0.1, <6'
40 pynacl = 'pynacl==1.1.2'
41 gcs = 'google-cloud-storage==1.6.0'
42 gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'
43 apacheLibcloud = 'apache-libcloud==2.2.1'
44 cwltool = 'cwltool<=2.0.20200126090152'
45 galaxyToolUtil = 'galaxy-tool-util'
46 htcondor = 'htcondor>=8.6.0'
47 kubernetes = 'kubernetes>=10, <11'
48 pytz = 'pytz>=2012'
49 dill = 'dill==0.2.7.1'
50 six = 'six>=1.10.0'
51 future = 'future'
52 requests = 'requests>=2, <3'
53 docker = 'docker==2.5.1'
54 dateutil = 'python-dateutil'
55 addict = 'addict<=2.2.0'
56 sphinx = 'sphinx>=2.4.4, <3'
57 pathlib2 = 'pathlib2==2.3.2'
58
59 core_reqs = [
60 dill,
61 six,
62 future,
63 requests,
64 docker,
65 dateutil,
66 psutil,
67 addict,
68 sphinx,
69 pathlib2,
70 pytz]
71
72 aws_reqs = [
73 boto,
74 boto3,
75 futures,
76 pycryptodome]
77 cwl_reqs = [
78 cwltool,
79 galaxyToolUtil]
80 encryption_reqs = [
81 pynacl]
82 google_reqs = [
83 gcs_oauth2_boto_plugin, # is this being used??
84 apacheLibcloud,
85 gcs]
86 htcondor_reqs = [
87 htcondor]
88 kubernetes_reqs = [
89 kubernetes]
90 mesos_reqs = [
91 pymesos,
92 psutil]
93 wdl_reqs = []
94
95
96 # htcondor is not supported by apple
97 # this is tricky to conditionally support in 'all' due
98 # to how wheels work, so it is not included in all and
99 # must be explicitly installed as an extra
100 all_reqs = \
101 aws_reqs + \
102 cwl_reqs + \
103 encryption_reqs + \
104 google_reqs + \
105 kubernetes_reqs + \
106 mesos_reqs
107
108
109 setup(
110 name='toil',
111 version=version.distVersion,
112 description='Pipeline management software for clusters.',
113 author='Benedict Paten',
114 author_email='[email protected]',
115 url="https://github.com/DataBiosphere/toil",
116 classifiers=[
117 'Development Status :: 5 - Production/Stable',
118 'Environment :: Console',
119 'Intended Audience :: Developers',
120 'Intended Audience :: Science/Research',
121 'Intended Audience :: Healthcare Industry',
122 'License :: OSI Approved :: Apache Software License',
123 'Natural Language :: English',
124 'Operating System :: MacOS :: MacOS X',
125 'Operating System :: POSIX',
126 'Operating System :: POSIX :: Linux',
127 'Programming Language :: Python :: 3.6',
128 'Topic :: Scientific/Engineering',
129 'Topic :: Scientific/Engineering :: Bio-Informatics',
130 'Topic :: Scientific/Engineering :: Astronomy',
131 'Topic :: Scientific/Engineering :: Atmospheric Science',
132 'Topic :: Scientific/Engineering :: Information Analysis',
133 'Topic :: Scientific/Engineering :: Medical Science Apps.',
134 'Topic :: System :: Distributed Computing',
135 'Topic :: Utilities'],
136 license="Apache License v2.0",
137 install_requires=core_reqs,
138 extras_require={
139 'aws': aws_reqs,
140 'cwl': cwl_reqs,
141 'encryption': encryption_reqs,
142 'google': google_reqs,
143 'htcondor:sys_platform!="darwin"': htcondor_reqs,
144 'kubernetes': kubernetes_reqs,
145 'mesos': mesos_reqs,
146 'wdl': wdl_reqs,
147 'all': all_reqs},
148 package_dir={'': 'src'},
149 packages=find_packages(where='src',
150 # Note that we intentionally include the top-level `test` package for
151 # functionality like the @experimental and @integrative decoratorss:
152 exclude=['*.test.*']),
153 package_data = {
154 '': ['*.yml', 'cloud-config'],
155 },
156 # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so
157 # you can't just change them here. Luckily, most of them are pretty unique strings, and thus
158 # easy to search for.
159 entry_points={
160 'console_scripts': [
161 'toil = toil.utils.toilMain:main',
162 '_toil_worker = toil.worker:main',
163 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',
164 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',
165 'toil-wdl-runner = toil.wdl.toilwdl:main',
166 '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',
167 '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})
168
169
170 def importVersion():
171 """
172 Load and return the module object for src/toil/version.py, generating it from the template if
173 required.
174 """
175 import imp
176 try:
177 # Attempt to load the template first. It only exists in a working copy cloned via git.
178 import version_template
179 except ImportError:
180 # If loading the template fails we must be in a unpacked source distribution and
181 # src/toil/version.py will already exist.
182 pass
183 else:
184 # Use the template to generate src/toil/version.py
185 import os
186 import errno
187 from tempfile import NamedTemporaryFile
188
189 new = version_template.expand_()
190 try:
191 with open('src/toil/version.py') as f:
192 old = f.read()
193 except IOError as e:
194 if e.errno == errno.ENOENT:
195 old = None
196 else:
197 raise
198
199 if old != new:
200 with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:
201 f.write(new)
202 os.rename(f.name, 'src/toil/version.py')
203 # Unfortunately, we can't use a straight import here because that would also load the stuff
204 # defined in src/toil/__init__.py which imports modules from external dependencies that may
205 # yet to be installed when setup.py is invoked.
206 return imp.load_source('toil.version', 'src/toil/version.py')
207
208
209 version = importVersion()
210 runSetup()
211
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,6 @@
docker = 'docker==2.5.1'
dateutil = 'python-dateutil'
addict = 'addict<=2.2.0'
- sphinx = 'sphinx>=2.4.4, <3'
pathlib2 = 'pathlib2==2.3.2'
core_reqs = [
@@ -65,7 +64,6 @@
dateutil,
psutil,
addict,
- sphinx,
pathlib2,
pytz]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,6 @@\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n- sphinx = 'sphinx>=2.4.4, <3'\n pathlib2 = 'pathlib2==2.3.2'\n \n core_reqs = [\n@@ -65,7 +64,6 @@\n dateutil,\n psutil,\n addict,\n- sphinx,\n pathlib2,\n pytz]\n", "issue": "Remove Sphinx as a dependency\nAs mentioned in https://github.com/DataBiosphere/toil/pull/3011#issuecomment-601349848 we depend on Sphinx, but we only use it for generating documentation. We never use it at runtime.\n\nWe should remove the Sphinx dependency, and just make sure it is installed by the `Makefile` when running the `docs` target. Maybe it could be one of the things installed by `make prepare`.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-528)\n\u2506Issue Number: TOIL-528\n\n", "before_files": [{"content": "# Copyright (C) 2015-2016 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom setuptools import find_packages, setup\nimport os\n\n# setting the 'CPPFLAGS' flag specifies the necessary cython dependency for \"http-parser\", for more info:\n# toil issue: https://github.com/DataBiosphere/toil/issues/2924\n# very similar to this issue: https://github.com/mcfletch/pyopengl/issues/11\n# the \"right way\" is waiting for a fix from \"http-parser\", but this fixes things in the meantime since that might take a while\ncppflags = os.environ.get('CPPFLAGS')\nif cppflags:\n # note, duplicate options don't affect things here so we don't check - Mark D\n os.environ['CPPFLAGS'] = ' '.join([cppflags, '-DPYPY_VERSION'])\nelse:\n os.environ['CPPFLAGS'] = '-DPYPY_VERSION'\n\n\ndef runSetup():\n \"\"\"\n Calls setup(). This function exists so the setup() invocation preceded more internal\n functionality. 
The `version` module is imported dynamically by importVersion() below.\n \"\"\"\n boto = 'boto==2.48.0'\n boto3 = 'boto3>=1.7.50, <2.0'\n futures = 'futures==3.1.1'\n pycryptodome = 'pycryptodome==3.5.1'\n pymesos = 'pymesos==0.3.7'\n psutil = 'psutil >= 3.0.1, <6'\n pynacl = 'pynacl==1.1.2'\n gcs = 'google-cloud-storage==1.6.0'\n gcs_oauth2_boto_plugin = 'gcs_oauth2_boto_plugin==1.14'\n apacheLibcloud = 'apache-libcloud==2.2.1'\n cwltool = 'cwltool<=2.0.20200126090152'\n galaxyToolUtil = 'galaxy-tool-util'\n htcondor = 'htcondor>=8.6.0'\n kubernetes = 'kubernetes>=10, <11'\n pytz = 'pytz>=2012'\n dill = 'dill==0.2.7.1'\n six = 'six>=1.10.0'\n future = 'future'\n requests = 'requests>=2, <3'\n docker = 'docker==2.5.1'\n dateutil = 'python-dateutil'\n addict = 'addict<=2.2.0'\n sphinx = 'sphinx>=2.4.4, <3'\n pathlib2 = 'pathlib2==2.3.2'\n\n core_reqs = [\n dill,\n six,\n future,\n requests,\n docker,\n dateutil,\n psutil,\n addict,\n sphinx,\n pathlib2,\n pytz]\n\n aws_reqs = [\n boto,\n boto3,\n futures,\n pycryptodome]\n cwl_reqs = [\n cwltool,\n galaxyToolUtil]\n encryption_reqs = [\n pynacl]\n google_reqs = [\n gcs_oauth2_boto_plugin, # is this being used??\n apacheLibcloud,\n gcs]\n htcondor_reqs = [\n htcondor]\n kubernetes_reqs = [\n kubernetes]\n mesos_reqs = [\n pymesos,\n psutil]\n wdl_reqs = []\n \n\n # htcondor is not supported by apple\n # this is tricky to conditionally support in 'all' due\n # to how wheels work, so it is not included in all and\n # must be explicitly installed as an extra\n all_reqs = \\\n aws_reqs + \\\n cwl_reqs + \\\n encryption_reqs + \\\n google_reqs + \\\n kubernetes_reqs + \\\n mesos_reqs\n\n\n setup(\n name='toil',\n version=version.distVersion,\n description='Pipeline management software for clusters.',\n author='Benedict Paten',\n author_email='[email protected]',\n url=\"https://github.com/DataBiosphere/toil\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: POSIX',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Bio-Informatics',\n 'Topic :: Scientific/Engineering :: Astronomy',\n 'Topic :: Scientific/Engineering :: Atmospheric Science',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Scientific/Engineering :: Medical Science Apps.',\n 'Topic :: System :: Distributed Computing',\n 'Topic :: Utilities'],\n license=\"Apache License v2.0\",\n install_requires=core_reqs,\n extras_require={\n 'aws': aws_reqs,\n 'cwl': cwl_reqs,\n 'encryption': encryption_reqs,\n 'google': google_reqs,\n 'htcondor:sys_platform!=\"darwin\"': htcondor_reqs,\n 'kubernetes': kubernetes_reqs,\n 'mesos': mesos_reqs,\n 'wdl': wdl_reqs,\n 'all': all_reqs},\n package_dir={'': 'src'},\n packages=find_packages(where='src',\n # Note that we intentionally include the top-level `test` package for\n # functionality like the @experimental and @integrative decoratorss:\n exclude=['*.test.*']),\n package_data = {\n '': ['*.yml', 'cloud-config'],\n },\n # Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so\n # you can't just change them here. 
Luckily, most of them are pretty unique strings, and thus\n # easy to search for.\n entry_points={\n 'console_scripts': [\n 'toil = toil.utils.toilMain:main',\n '_toil_worker = toil.worker:main',\n 'cwltoil = toil.cwl.cwltoil:cwltoil_was_removed [cwl]',\n 'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',\n 'toil-wdl-runner = toil.wdl.toilwdl:main',\n '_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]',\n '_toil_kubernetes_executor = toil.batchSystems.kubernetes:executor [kubernetes]']})\n\n\ndef importVersion():\n \"\"\"\n Load and return the module object for src/toil/version.py, generating it from the template if\n required.\n \"\"\"\n import imp\n try:\n # Attempt to load the template first. It only exists in a working copy cloned via git.\n import version_template\n except ImportError:\n # If loading the template fails we must be in a unpacked source distribution and\n # src/toil/version.py will already exist.\n pass\n else:\n # Use the template to generate src/toil/version.py\n import os\n import errno\n from tempfile import NamedTemporaryFile\n\n new = version_template.expand_()\n try:\n with open('src/toil/version.py') as f:\n old = f.read()\n except IOError as e:\n if e.errno == errno.ENOENT:\n old = None\n else:\n raise\n\n if old != new:\n with NamedTemporaryFile(mode='w', dir='src/toil', prefix='version.py.', delete=False) as f:\n f.write(new)\n os.rename(f.name, 'src/toil/version.py')\n # Unfortunately, we can't use a straight import here because that would also load the stuff\n # defined in src/toil/__init__.py which imports modules from external dependencies that may\n # yet to be installed when setup.py is invoked.\n return imp.load_source('toil.version', 'src/toil/version.py')\n\n\nversion = importVersion()\nrunSetup()\n", "path": "setup.py"}]}
| 3,156 | 142 |
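The golden diff above only removes Sphinx from toil's core `install_requires`; per the issue, installing it is meant to move into the Makefile's docs target. One common way a project might still keep the pin discoverable from setup.py without making it a runtime dependency is an `extras_require` entry. The sketch below illustrates that general pattern only — it is not what the patch does, and the project name and `docs` extra name are assumptions.

```python
# Sketch only: declare Sphinx as an optional "docs" extra instead of a core
# requirement, so a plain `pip install` stays Sphinx-free while
# `pip install example-project[docs]` (or a Makefile docs target) can pull it in.
from setuptools import setup

setup(
    name='example-project',            # placeholder, not toil's real setup()
    install_requires=[
        'dill==0.2.7.1',               # runtime dependencies stay here
    ],
    extras_require={
        'docs': ['sphinx>=2.4.4, <3'],  # documentation-build-only dependency
    },
)
```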
gh_patches_debug_36379
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-1175
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Datetime timezone stuff gives runtime warnings in server log
/home/vagrant/.virtualenvs/onlineweb/local/lib/python2.7/site-packages/django/db/models/fields/**init**.py:827: RuntimeWarning: DateTimeField received a naive datetime (2013-09-05 00:00:00) while time zone support is active.
RuntimeWarning)
humdum.
</issue>
<code>
[start of apps/events/utils.py]
1 #-*- coding: utf-8 -*-
2 from datetime import timedelta
3
4 from django.conf import settings
5 from django.core.mail import send_mail
6 from django.utils import timezone
7
8 from django.core.signing import Signer, BadSignature
9 from django.http import HttpResponse
10 from django.utils import timezone
11 from filebrowser.base import FileObject
12 from filebrowser.settings import VERSIONS
13
14 from apps.authentication.models import OnlineUser as User
15 from apps.events.models import Event
16 from apps.splash.models import SplashYear
17
18 import icalendar
19
20
21 def get_group_restricted_events(user):
22 """ Returns a queryset of events with attendance_event that a user has access to """
23 types_allowed = []
24
25 groups = user.groups.all()
26
27 if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False):
28 return Event.objects.filter(attendance_event__isnull=False)
29
30 for group in groups:
31 if group.name == 'arrKom':
32 types_allowed.append(1) # sosialt
33 types_allowed.append(4) # utflukt
34
35 if group.name == 'bedKom':
36 types_allowed.append(2) # bedriftspresentasjon
37
38 if group.name == 'fagKom':
39 types_allowed.append(3) # kurs
40
41 return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)
42
43
44 def handle_waitlist_bump(event, host, attendees, payment=None):
45
46 title = u'Du har fått plass på %s' % (event.title)
47
48 extended_deadline = timezone.now() + timedelta(days=2)
49 message = u'Du har stått på venteliste for arrangementet "%s" og har nå fått plass.\n' % (unicode(event.title))
50
51 if payment:
52 if payment.payment_type == 1: #Instant
53 for attendee in attendees:
54 payment.create_payment_delay(attendee.user, extended_deadline)
55 message += u"Dette arrangementet krever betaling og du må betale innen 48 timer."
56
57 elif payment.payment_type == 2: #Deadline
58 if payment.deadline > extended_deadline: #More than 2 days left of payment deadline
59 message += u"Dette arrangementet krever betaling og fristen for og betale er %s" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))
60 else: #The deadline is in less than 2 days
61 for attendee in attendees:
62 payment.create_payment_delay(attendee.user, extended_deadline)
63 message += u"Dette arrangementet krever betaling og du har 48 timer på å betale"
64
65 elif payment.payment_type == 3: #Delay
66 deadline = timezone.now() + timedelta(days=payment.delay)
67 for attendee in attendees:
68 payment.create_payment_delay(attendee.user, deadline)
69 message += u"Dette arrangementet krever betaling og du må betale innen %d dager." % (payment.delay)
70 if len(payment.prices()) == 1:
71 message += u"\nPrisen for dette arrangementet er %skr." % (payment.prices()[0].price)
72 # elif len(payment.prices()) >= 2:
73 # message += u"\nDette arrangementet har flere prisklasser:"
74 # for payment_price in payment.prices():
75 # message += "\n%s: %skr" % (payment_price.description, payment_price.price)
76 else:
77 message += u"Det kreves ingen ekstra handling fra deg med mindre du vil melde deg av."
78
79 message += u"\n\nFor mer info:"
80 message += u"\nhttp://%s%s" % (host, event.get_absolute_url())
81
82 for attendee in attendees:
83 send_mail(title, message, settings.DEFAULT_FROM_EMAIL, [attendee.user.email])
84
85
86 class Calendar(object):
87 def __init__(self):
88 self.cal = icalendar.Calendar()
89 # Filename served by webserver
90 self.filename = 'online'
91 # Required ical info
92 self.cal.add('prodid', '-//Online//Onlineweb//EN')
93 self.cal.add('version', '2.0')
94
95 def add_event(self, event):
96 raise NotImplementedError
97
98 def add_events(self, events):
99 for event in events:
100 self.add_event(event)
101
102 def output(self):
103 """Return icalendar as text"""
104 return self.cal.to_ical()
105
106 def response(self):
107 """Returns a response object"""
108 response = HttpResponse(self.cal.to_ical(), content_type='text/calendar')
109 response['Content-Type'] = 'text/calendar; charset=utf-8'
110 response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics'
111 return response
112
113
114 class EventCalendar(Calendar):
115 def user(self, user):
116 """
117 Personalized calendar
118 This calendar is publicly available, but the url is not guessable so data should not be leaked to everyone
119 """
120 signer = Signer()
121 try:
122 username = signer.unsign(user)
123 user = User.objects.get(username=username)
124 except (BadSignature, User.DoesNotExist):
125 user = None
126 if user:
127 # Getting all events that the user has/is participating in
128 self.add_events(Event.objects.filter(
129 attendance_event__attendees__user=user
130 ).order_by('event_start').prefetch_related(
131 'attendance_event', 'attendance_event__attendees'
132 ))
133 self.filename = username
134
135 def event(self, event_id):
136 """Single event"""
137 try:
138 self.add_event(Event.objects.get(id=event_id))
139 except Event.DoesNotExist:
140 pass
141 self.filename = str(event_id)
142
143 def events(self):
144 """All events that haven't ended yet"""
145 self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))
146 self.filename = 'events'
147
148 def add_event(self, event):
149 cal_event = icalendar.Event()
150
151 cal_event.add('dtstart', event.event_start)
152 cal_event.add('dtend', event.event_end)
153 cal_event.add('location', event.location)
154 cal_event.add('summary', event.title)
155 cal_event.add('description', event.ingress_short)
156 cal_event.add('uid', 'event-' + str(event.id) + '@online.ntnu.no')
157
158 self.cal.add_component(cal_event)
159
160
161 class SplashCalendar(Calendar):
162 def add_event(self, event):
163 cal_event = icalendar.Event()
164 cal_event.add('dtstart', event.start_time)
165 cal_event.add('dtend', event.end_time)
166 cal_event.add('summary', event.title)
167 cal_event.add('description', event.content)
168 cal_event.add('uid', 'splash-' + str(event.id) + '@online.ntnu.no')
169
170 self.cal.add_component(cal_event)
171
172 def events(self):
173 self.add_events(SplashYear.objects.current().splash_events.all())
174 self.filename = 'events'
175
176
177 def find_image_versions(event):
178 img = event.image
179 img_strings = []
180 print("gnna find sm imgs")
181
182 for ver in VERSIONS.keys():
183 if ver.startswith('events_'):
184 print(ver)
185 img_strings.append(img.version_generate(ver).url)
186
187 return img_strings
188
[end of apps/events/utils.py]
[start of apps/companyprofile/utils.py]
1 from filebrowser.settings import VERSIONS
2
3
4 def find_image_versions(company):
5 img = company.old_image
6 img_strings = []
7 print("gnna find sm imgs")
8
9 for ver in VERSIONS.keys():
10 if ver.startswith('companies_'):
11 print(ver)
12 img_strings.append(img.version_generate(ver).url)
13
14 return img_strings
15
[end of apps/companyprofile/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/companyprofile/utils.py b/apps/companyprofile/utils.py
--- a/apps/companyprofile/utils.py
+++ b/apps/companyprofile/utils.py
@@ -4,11 +4,9 @@
def find_image_versions(company):
img = company.old_image
img_strings = []
- print("gnna find sm imgs")
for ver in VERSIONS.keys():
if ver.startswith('companies_'):
- print(ver)
img_strings.append(img.version_generate(ver).url)
return img_strings
diff --git a/apps/events/utils.py b/apps/events/utils.py
--- a/apps/events/utils.py
+++ b/apps/events/utils.py
@@ -53,7 +53,7 @@
for attendee in attendees:
payment.create_payment_delay(attendee.user, extended_deadline)
message += u"Dette arrangementet krever betaling og du må betale innen 48 timer."
-
+
elif payment.payment_type == 2: #Deadline
if payment.deadline > extended_deadline: #More than 2 days left of payment deadline
message += u"Dette arrangementet krever betaling og fristen for og betale er %s" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))
@@ -61,7 +61,7 @@
for attendee in attendees:
payment.create_payment_delay(attendee.user, extended_deadline)
message += u"Dette arrangementet krever betaling og du har 48 timer på å betale"
-
+
elif payment.payment_type == 3: #Delay
deadline = timezone.now() + timedelta(days=payment.delay)
for attendee in attendees:
@@ -177,11 +177,9 @@
def find_image_versions(event):
img = event.image
img_strings = []
- print("gnna find sm imgs")
for ver in VERSIONS.keys():
if ver.startswith('events_'):
- print(ver)
img_strings.append(img.version_generate(ver).url)
return img_strings
|
{"golden_diff": "diff --git a/apps/companyprofile/utils.py b/apps/companyprofile/utils.py\n--- a/apps/companyprofile/utils.py\n+++ b/apps/companyprofile/utils.py\n@@ -4,11 +4,9 @@\n def find_image_versions(company):\n img = company.old_image\n img_strings = []\n- print(\"gnna find sm imgs\")\n \n for ver in VERSIONS.keys():\n if ver.startswith('companies_'):\n- print(ver)\n img_strings.append(img.version_generate(ver).url)\n \n return img_strings\ndiff --git a/apps/events/utils.py b/apps/events/utils.py\n--- a/apps/events/utils.py\n+++ b/apps/events/utils.py\n@@ -53,7 +53,7 @@\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen 48 timer.\"\n- \n+\n elif payment.payment_type == 2: #Deadline\n if payment.deadline > extended_deadline: #More than 2 days left of payment deadline\n message += u\"Dette arrangementet krever betaling og fristen for og betale er %s\" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))\n@@ -61,7 +61,7 @@\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du har 48 timer p\u00e5 \u00e5 betale\"\n- \n+\n elif payment.payment_type == 3: #Delay\n deadline = timezone.now() + timedelta(days=payment.delay)\n for attendee in attendees:\n@@ -177,11 +177,9 @@\n def find_image_versions(event):\n img = event.image\n img_strings = []\n- print(\"gnna find sm imgs\")\n \n for ver in VERSIONS.keys():\n if ver.startswith('events_'):\n- print(ver)\n img_strings.append(img.version_generate(ver).url)\n \n return img_strings\n", "issue": "Datetime timezone stuff gives runtime warnings in server log\n/home/vagrant/.virtualenvs/onlineweb/local/lib/python2.7/site-packages/django/db/models/fields/**init**.py:827: RuntimeWarning: DateTimeField received a naive datetime (2013-09-05 00:00:00) while time zone support is active.\n RuntimeWarning)\n\nhumdum.\n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.core.mail import send_mail\nfrom django.utils import timezone\n\nfrom django.core.signing import Signer, BadSignature\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\n\nfrom apps.authentication.models import OnlineUser as User\nfrom apps.events.models import Event\nfrom apps.splash.models import SplashYear\n\nimport icalendar\n\n\ndef get_group_restricted_events(user):\n \"\"\" Returns a queryset of events with attendance_event that a user has access to \"\"\"\n types_allowed = []\n\n groups = user.groups.all()\n\n if reduce(lambda r, g: g.name in ['Hovedstyret', 'dotKom'] or r, groups, False):\n return Event.objects.filter(attendance_event__isnull=False)\n\n for group in groups:\n if group.name == 'arrKom':\n types_allowed.append(1) # sosialt\n types_allowed.append(4) # utflukt\n\n if group.name == 'bedKom':\n types_allowed.append(2) # bedriftspresentasjon\n\n if group.name == 'fagKom':\n types_allowed.append(3) # kurs\n\n return Event.objects.filter(attendance_event__isnull=False, event_type__in=types_allowed)\n\n\ndef handle_waitlist_bump(event, host, attendees, payment=None):\n\n title = u'Du har f\u00e5tt plass p\u00e5 %s' % (event.title)\n\n extended_deadline = timezone.now() + timedelta(days=2)\n message = u'Du har st\u00e5tt p\u00e5 venteliste for arrangementet \"%s\" og har 
n\u00e5 f\u00e5tt plass.\\n' % (unicode(event.title))\n\n if payment:\n if payment.payment_type == 1: #Instant\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen 48 timer.\"\n \n elif payment.payment_type == 2: #Deadline\n if payment.deadline > extended_deadline: #More than 2 days left of payment deadline\n message += u\"Dette arrangementet krever betaling og fristen for og betale er %s\" % (payment.deadline.strftime('%-d %B %Y kl: %H:%M'))\n else: #The deadline is in less than 2 days\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, extended_deadline)\n message += u\"Dette arrangementet krever betaling og du har 48 timer p\u00e5 \u00e5 betale\"\n \n elif payment.payment_type == 3: #Delay\n deadline = timezone.now() + timedelta(days=payment.delay)\n for attendee in attendees:\n payment.create_payment_delay(attendee.user, deadline)\n message += u\"Dette arrangementet krever betaling og du m\u00e5 betale innen %d dager.\" % (payment.delay)\n if len(payment.prices()) == 1:\n message += u\"\\nPrisen for dette arrangementet er %skr.\" % (payment.prices()[0].price)\n # elif len(payment.prices()) >= 2:\n # message += u\"\\nDette arrangementet har flere prisklasser:\"\n # for payment_price in payment.prices():\n # message += \"\\n%s: %skr\" % (payment_price.description, payment_price.price)\n else:\n message += u\"Det kreves ingen ekstra handling fra deg med mindre du vil melde deg av.\"\n\n message += u\"\\n\\nFor mer info:\"\n message += u\"\\nhttp://%s%s\" % (host, event.get_absolute_url())\n\n for attendee in attendees:\n send_mail(title, message, settings.DEFAULT_FROM_EMAIL, [attendee.user.email])\n\n\nclass Calendar(object):\n def __init__(self):\n self.cal = icalendar.Calendar()\n # Filename served by webserver\n self.filename = 'online'\n # Required ical info\n self.cal.add('prodid', '-//Online//Onlineweb//EN')\n self.cal.add('version', '2.0')\n\n def add_event(self, event):\n raise NotImplementedError\n\n def add_events(self, events):\n for event in events:\n self.add_event(event)\n\n def output(self):\n \"\"\"Return icalendar as text\"\"\"\n return self.cal.to_ical()\n\n def response(self):\n \"\"\"Returns a response object\"\"\"\n response = HttpResponse(self.cal.to_ical(), content_type='text/calendar')\n response['Content-Type'] = 'text/calendar; charset=utf-8'\n response['Content-Disposition'] = 'attachment; filename=' + self.filename + '.ics'\n return response\n\n\nclass EventCalendar(Calendar):\n def user(self, user):\n \"\"\"\n Personalized calendar\n This calendar is publicly available, but the url is not guessable so data should not be leaked to everyone\n \"\"\"\n signer = Signer()\n try:\n username = signer.unsign(user)\n user = User.objects.get(username=username)\n except (BadSignature, User.DoesNotExist):\n user = None\n if user:\n # Getting all events that the user has/is participating in\n self.add_events(Event.objects.filter(\n attendance_event__attendees__user=user\n ).order_by('event_start').prefetch_related(\n 'attendance_event', 'attendance_event__attendees'\n ))\n self.filename = username\n\n def event(self, event_id):\n \"\"\"Single event\"\"\"\n try:\n self.add_event(Event.objects.get(id=event_id))\n except Event.DoesNotExist:\n pass\n self.filename = str(event_id)\n\n def events(self):\n \"\"\"All events that haven't ended yet\"\"\"\n 
self.add_events(Event.objects.filter(event_end__gt=timezone.now()).order_by('event_start'))\n self.filename = 'events'\n\n def add_event(self, event):\n cal_event = icalendar.Event()\n\n cal_event.add('dtstart', event.event_start)\n cal_event.add('dtend', event.event_end)\n cal_event.add('location', event.location)\n cal_event.add('summary', event.title)\n cal_event.add('description', event.ingress_short)\n cal_event.add('uid', 'event-' + str(event.id) + '@online.ntnu.no')\n\n self.cal.add_component(cal_event)\n\n\nclass SplashCalendar(Calendar):\n def add_event(self, event):\n cal_event = icalendar.Event()\n cal_event.add('dtstart', event.start_time)\n cal_event.add('dtend', event.end_time)\n cal_event.add('summary', event.title)\n cal_event.add('description', event.content)\n cal_event.add('uid', 'splash-' + str(event.id) + '@online.ntnu.no')\n\n self.cal.add_component(cal_event)\n\n def events(self):\n self.add_events(SplashYear.objects.current().splash_events.all())\n self.filename = 'events'\n\n\ndef find_image_versions(event):\n img = event.image\n img_strings = []\n print(\"gnna find sm imgs\")\n\n for ver in VERSIONS.keys():\n if ver.startswith('events_'):\n print(ver)\n img_strings.append(img.version_generate(ver).url)\n\n return img_strings\n", "path": "apps/events/utils.py"}, {"content": "from filebrowser.settings import VERSIONS\n\n\ndef find_image_versions(company):\n img = company.old_image\n img_strings = []\n print(\"gnna find sm imgs\")\n\n for ver in VERSIONS.keys():\n if ver.startswith('companies_'):\n print(ver)\n img_strings.append(img.version_generate(ver).url)\n\n return img_strings\n", "path": "apps/companyprofile/utils.py"}]}
| 2,774 | 444 |
gh_patches_debug_44285 | rasdani/github-patches | git_diff | lisa-lab__pylearn2-579 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update default training algorithm to support multiple monitoring datasets
Just follow SGD or BGD for an example
</issue>
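For context, the requested behaviour mirrors the convention SGD and BGD already use: `monitoring_dataset` may be a single `Dataset` or a dictionary mapping name prefixes to datasets. A minimal sketch of the intended usage, with small illustrative datasets that are not part of the original issue:

```python
import numpy as np
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.training_algorithms.default import DefaultTrainingAlgorithm

# Tiny stand-in datasets, for illustration only.
train_set = DenseDesignMatrix(X=np.random.rand(20, 5).astype('float32'))
valid_set = DenseDesignMatrix(X=np.random.rand(10, 5).astype('float32'))

# After the change, a dict of named monitoring datasets should be accepted,
# just as SGD and BGD already allow; channels then get a per-dataset prefix.
algorithm = DefaultTrainingAlgorithm(
    batch_size=5,
    monitoring_dataset={'train': train_set, 'valid': valid_set},
)
```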
<code>
[start of pylearn2/training_algorithms/default.py]
1 """
2 .. todo::
3
4 WRITEME
5 """
6 from pylearn2.monitor import Monitor
7 from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
8 from pylearn2.utils import safe_zip
9 from pylearn2.utils.data_specs import DataSpecsMapping
10 import theano.tensor as T
11
12
13 class DefaultTrainingAlgorithm(TrainingAlgorithm):
14 """
15 .. todo::
16
17 WRITEME
18 """
19 def __init__(self, batch_size=None, batches_per_iter=1000,
20 monitoring_batches=-1, monitoring_dataset=None,
21 termination_criterion=None):
22 """
23 Parameters
24 ----------
25 batch_size : int
26 If batch_size is None, reverts to the `force_batch_size` field of \
27 the model
28 batches_per_iter : int
29 WRITEME
30 monitoring_batches : int
31 WRITEME
32 monitoring_dataset : WRITEME
33 termination_criterion : WRITEME
34 If specified, can cause the algorithm to terminate before \
35 `model.learn_batch` says to
36 """
37 self.batch_size, self.batches_per_iter = batch_size, batches_per_iter
38 if monitoring_dataset is None:
39 assert monitoring_batches == -1
40 self.monitoring_dataset = monitoring_dataset
41 self.monitoring_batches = monitoring_batches
42 self.bSetup = False
43 self.termination_criterion = termination_criterion
44
45 def setup(self, model, dataset):
46 """
47 Allows the training algorithm to do some preliminary configuration
48 *before* we actually start training the model. The dataset is provided
49 in case other derived training algorithms need to modify model based on
50 the dataset.
51
52 Parameters
53 ----------
54 model : object
55 Python object representing the model to train loosely \
56 implementing the interface of models.model.Model.
57
58 dataset : pylearn2.datasets.dataset.Dataset
59 Dataset object used to draw training data
60 """
61 self.model = model
62
63 self.monitor = Monitor.get_monitor(model)
64
65 if self.monitoring_dataset is not None:
66 # Get the data specifications needed by the model
67 space, source = model.get_monitoring_data_specs()
68
69 # Create Theano variables for each of the individual components
70 # of that data. Usually, it will be X for inputs and Y for targets.
71 # First, we need to find these components, and put them in a tuple
72 mapping = DataSpecsMapping((space, source))
73 space_tuple = mapping.flatten(space, return_tuple=True)
74 source_tuple = mapping.flatten(source, return_tuple=True)
75 # Then, build a flat tuple of these Theano variables
76 ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
77 for (sp, src) in safe_zip(space_tuple, source_tuple))
78 # Finally, organize them back into a structure expected by the
79 # monitoring channels of the model
80 nested_ipt = mapping.nest(ipt)
81
82 self.monitor.add_dataset(dataset=self.monitoring_dataset,
83 mode="sequential",
84 batch_size=self.batch_size,
85 num_batches=self.monitoring_batches)
86
87 channels = model.get_monitoring_channels(nested_ipt)
88 if not isinstance(channels, dict):
89 raise TypeError("model.get_monitoring_channels must return a "
90 "dictionary, but it returned " + str(channels))
91 for name in channels:
92 J = channels[name]
93 if isinstance(J, tuple):
94 assert len(J) == 2
95 J, prereqs = J
96 else:
97 prereqs = None
98
99 self.monitor.add_channel(name=name,
100 ipt=nested_ipt,
101 val=J,
102 prereqs=prereqs,
103 data_specs=(space, source))
104 self.first = True
105 self.bSetup = True
106
107 def train(self, dataset):
108 """
109 .. todo::
110
111 WRITEME
112 """
113 assert self.bSetup
114 model = self.model
115 if self.batch_size is None:
116 batch_size = model.force_batch_size
117 else:
118 batch_size = self.batch_size
119 if hasattr(model, 'force_batch_size'):
120 assert (model.force_batch_size <= 0 or batch_size ==
121 model.force_batch_size)
122
123 for i in xrange(self.batches_per_iter):
124 # model.train_batch and self.train both return False when training
125 # should terminate.
126 learn_more = model.train_batch(dataset, batch_size)
127 model.monitor.report_batch(batch_size)
128 if not learn_more:
129 break
130
131 # Make sure we didn't exit training loop because Model.learn
132 # hasn't been updated to new interface yet.
133 if learn_more not in [True,False]:
134 msg = ('The learn method of model %s did not return a boolean ' +
135 'value. Please update your model accordingly.')
136 raise ValueError(msg % str(model))
137 self.learn_more = learn_more
138
139 def continue_learning(self, model):
140 """
141 .. todo::
142
143 WRITEME
144 """
145 if self.learn_more:
146 if self.termination_criterion is not None:
147 return self.termination_criterion.continue_learning(model)
148 return True
149 return False
150
[end of pylearn2/training_algorithms/default.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py
--- a/pylearn2/training_algorithms/default.py
+++ b/pylearn2/training_algorithms/default.py
@@ -29,7 +29,8 @@
WRITEME
monitoring_batches : int
WRITEME
- monitoring_dataset : WRITEME
+ monitoring_dataset: Dataset or dict
+ A Dataset or a dictionary mapping string dataset names to Datasets
termination_criterion : WRITEME
If specified, can cause the algorithm to terminate before \
`model.learn_batch` says to
@@ -37,7 +38,8 @@
self.batch_size, self.batches_per_iter = batch_size, batches_per_iter
if monitoring_dataset is None:
assert monitoring_batches == -1
- self.monitoring_dataset = monitoring_dataset
+
+ self._set_monitoring_dataset(monitoring_dataset)
self.monitoring_batches = monitoring_batches
self.bSetup = False
self.termination_criterion = termination_criterion
@@ -74,33 +76,42 @@
source_tuple = mapping.flatten(source, return_tuple=True)
# Then, build a flat tuple of these Theano variables
ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
- for (sp, src) in safe_zip(space_tuple, source_tuple))
+ for (sp, src) in safe_zip(space_tuple, source_tuple))
# Finally, organize them back into a structure expected by the
# monitoring channels of the model
nested_ipt = mapping.nest(ipt)
- self.monitor.add_dataset(dataset=self.monitoring_dataset,
- mode="sequential",
- batch_size=self.batch_size,
- num_batches=self.monitoring_batches)
-
channels = model.get_monitoring_channels(nested_ipt)
if not isinstance(channels, dict):
raise TypeError("model.get_monitoring_channels must return a "
"dictionary, but it returned " + str(channels))
- for name in channels:
- J = channels[name]
- if isinstance(J, tuple):
- assert len(J) == 2
- J, prereqs = J
+
+ for dataset_name in self.monitoring_dataset:
+ if dataset_name == '':
+ prefix = ''
else:
- prereqs = None
+ prefix = dataset_name + '_'
+ monitoring_dataset = self.monitoring_dataset[dataset_name]
+
+ self.monitor.add_dataset(dataset=monitoring_dataset,
+ mode="sequential",
+ batch_size=self.batch_size)
+
+ for name in channels:
+ J = channels[name]
+ if isinstance(J, tuple):
+ assert len(J) == 2
+ J, prereqs = J
+ else:
+ prereqs = None
+
+ self.monitor.add_channel(name=prefix + name,
+ ipt=nested_ipt,
+ val=J,
+ dataset=monitoring_dataset,
+ prereqs=prereqs,
+ data_specs=(space, source))
- self.monitor.add_channel(name=name,
- ipt=nested_ipt,
- val=J,
- prereqs=prereqs,
- data_specs=(space, source))
self.first = True
self.bSetup = True
@@ -130,7 +141,7 @@
# Make sure we didn't exit training loop because Model.learn
# hasn't been updated to new interface yet.
- if learn_more not in [True,False]:
+ if learn_more not in [True, False]:
msg = ('The learn method of model %s did not return a boolean ' +
'value. Please update your model accordingly.')
raise ValueError(msg % str(model))
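The practical effect of the change above is that monitoring channels are registered once per named dataset, with the empty-string key meaning "no prefix". A small illustration of the prefixing rule introduced by the patch (the channel and dataset names are made up):

```python
# Illustrative only: how channel names come out under the new prefixing rule.
for dataset_name in ['', 'valid']:
    prefix = '' if dataset_name == '' else dataset_name + '_'
    print(prefix + 'objective')   # -> 'objective', then 'valid_objective'
```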
|
{"golden_diff": "diff --git a/pylearn2/training_algorithms/default.py b/pylearn2/training_algorithms/default.py\n--- a/pylearn2/training_algorithms/default.py\n+++ b/pylearn2/training_algorithms/default.py\n@@ -29,7 +29,8 @@\n WRITEME\n monitoring_batches : int\n WRITEME\n- monitoring_dataset : WRITEME\n+ monitoring_dataset: Dataset or dict\n+ A Dataset or a dictionary mapping string dataset names to Datasets\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n@@ -37,7 +38,8 @@\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n- self.monitoring_dataset = monitoring_dataset\n+\n+ self._set_monitoring_dataset(monitoring_dataset)\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n@@ -74,33 +76,42 @@\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n- for (sp, src) in safe_zip(space_tuple, source_tuple))\n+ for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n \n- self.monitor.add_dataset(dataset=self.monitoring_dataset,\n- mode=\"sequential\",\n- batch_size=self.batch_size,\n- num_batches=self.monitoring_batches)\n-\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n- for name in channels:\n- J = channels[name]\n- if isinstance(J, tuple):\n- assert len(J) == 2\n- J, prereqs = J\n+\n+ for dataset_name in self.monitoring_dataset:\n+ if dataset_name == '':\n+ prefix = ''\n else:\n- prereqs = None\n+ prefix = dataset_name + '_'\n+ monitoring_dataset = self.monitoring_dataset[dataset_name]\n+\n+ self.monitor.add_dataset(dataset=monitoring_dataset,\n+ mode=\"sequential\",\n+ batch_size=self.batch_size)\n+\n+ for name in channels:\n+ J = channels[name]\n+ if isinstance(J, tuple):\n+ assert len(J) == 2\n+ J, prereqs = J\n+ else:\n+ prereqs = None\n+\n+ self.monitor.add_channel(name=prefix + name,\n+ ipt=nested_ipt,\n+ val=J,\n+ dataset=monitoring_dataset,\n+ prereqs=prereqs,\n+ data_specs=(space, source))\n \n- self.monitor.add_channel(name=name,\n- ipt=nested_ipt,\n- val=J,\n- prereqs=prereqs,\n- data_specs=(space, source))\n self.first = True\n self.bSetup = True\n \n@@ -130,7 +141,7 @@\n \n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n- if learn_more not in [True,False]:\n+ if learn_more not in [True, False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n", "issue": "Update default training algorithm to support multiple monitoring datasets\nJust follow SGD or BGD for an example\n\n", "before_files": [{"content": "\"\"\"\n.. todo::\n\n WRITEME\n\"\"\"\nfrom pylearn2.monitor import Monitor\nfrom pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm\nfrom pylearn2.utils import safe_zip\nfrom pylearn2.utils.data_specs import DataSpecsMapping\nimport theano.tensor as T\n\n\nclass DefaultTrainingAlgorithm(TrainingAlgorithm):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n def __init__(self, batch_size=None, batches_per_iter=1000,\n monitoring_batches=-1, monitoring_dataset=None,\n termination_criterion=None):\n \"\"\"\n Parameters\n ----------\n batch_size : int\n If batch_size is None, reverts to the `force_batch_size` field of \\\n the model\n batches_per_iter : int\n WRITEME\n monitoring_batches : int\n WRITEME\n monitoring_dataset : WRITEME\n termination_criterion : WRITEME\n If specified, can cause the algorithm to terminate before \\\n `model.learn_batch` says to\n \"\"\"\n self.batch_size, self.batches_per_iter = batch_size, batches_per_iter\n if monitoring_dataset is None:\n assert monitoring_batches == -1\n self.monitoring_dataset = monitoring_dataset\n self.monitoring_batches = monitoring_batches\n self.bSetup = False\n self.termination_criterion = termination_criterion\n\n def setup(self, model, dataset):\n \"\"\"\n Allows the training algorithm to do some preliminary configuration\n *before* we actually start training the model. The dataset is provided\n in case other derived training algorithms need to modify model based on\n the dataset.\n\n Parameters\n ----------\n model : object\n Python object representing the model to train loosely \\\n implementing the interface of models.model.Model.\n\n dataset : pylearn2.datasets.dataset.Dataset\n Dataset object used to draw training data\n \"\"\"\n self.model = model\n\n self.monitor = Monitor.get_monitor(model)\n\n if self.monitoring_dataset is not None:\n # Get the data specifications needed by the model\n space, source = model.get_monitoring_data_specs()\n\n # Create Theano variables for each of the individual components\n # of that data. Usually, it will be X for inputs and Y for targets.\n # First, we need to find these components, and put them in a tuple\n mapping = DataSpecsMapping((space, source))\n space_tuple = mapping.flatten(space, return_tuple=True)\n source_tuple = mapping.flatten(source, return_tuple=True)\n # Then, build a flat tuple of these Theano variables\n ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)\n for (sp, src) in safe_zip(space_tuple, source_tuple))\n # Finally, organize them back into a structure expected by the\n # monitoring channels of the model\n nested_ipt = mapping.nest(ipt)\n\n self.monitor.add_dataset(dataset=self.monitoring_dataset,\n mode=\"sequential\",\n batch_size=self.batch_size,\n num_batches=self.monitoring_batches)\n\n channels = model.get_monitoring_channels(nested_ipt)\n if not isinstance(channels, dict):\n raise TypeError(\"model.get_monitoring_channels must return a \"\n \"dictionary, but it returned \" + str(channels))\n for name in channels:\n J = channels[name]\n if isinstance(J, tuple):\n assert len(J) == 2\n J, prereqs = J\n else:\n prereqs = None\n\n self.monitor.add_channel(name=name,\n ipt=nested_ipt,\n val=J,\n prereqs=prereqs,\n data_specs=(space, source))\n self.first = True\n self.bSetup = True\n\n def train(self, dataset):\n \"\"\"\n .. 
todo::\n\n WRITEME\n \"\"\"\n assert self.bSetup\n model = self.model\n if self.batch_size is None:\n batch_size = model.force_batch_size\n else:\n batch_size = self.batch_size\n if hasattr(model, 'force_batch_size'):\n assert (model.force_batch_size <= 0 or batch_size ==\n model.force_batch_size)\n\n for i in xrange(self.batches_per_iter):\n # model.train_batch and self.train both return False when training\n # should terminate.\n learn_more = model.train_batch(dataset, batch_size)\n model.monitor.report_batch(batch_size)\n if not learn_more:\n break\n\n # Make sure we didn't exit training loop because Model.learn\n # hasn't been updated to new interface yet.\n if learn_more not in [True,False]:\n msg = ('The learn method of model %s did not return a boolean ' +\n 'value. Please update your model accordingly.')\n raise ValueError(msg % str(model))\n self.learn_more = learn_more\n\n def continue_learning(self, model):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n if self.learn_more:\n if self.termination_criterion is not None:\n return self.termination_criterion.continue_learning(model)\n return True\n return False\n", "path": "pylearn2/training_algorithms/default.py"}]}
| 1,995 | 833 |
gh_patches_debug_34098 | rasdani/github-patches | git_diff | airctic__icevision-646 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow passing a Config object to the MMDetection models
## 🚀 Feature
**Is your feature request related to a problem? Please describe.**
In the current version, to update the loss_weight for example, we have to duplicate an existing config .py file, make the changes in the new file, and pass it to the model method. The latter expects a config file_path
**Describe the solution you'd like**
I think it would be better to pass the MMDetection config object instead to the model method. By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this:
```
cfg = Config.fromfile(cfg_filepath)
cfg.model.bbox_head.loss_cls.loss_weight = 0.8
cfg.model.bbox_head.loss_bbox.loss_weight = 2
```
**Additional context**
There is a discussion in our Discord forum around this issue:
https://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644
</issue>
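In short, the `model()` builders (listed below) should accept an already-populated `Config` in addition to a file path. A hedged sketch of the desired call, extending the snippet above — the config path, class count and exact builder call shape are placeholders, not the final API:

```python
from mmcv import Config

cfg_filepath = "path/to/retinanet_config.py"   # placeholder path
cfg = Config.fromfile(cfg_filepath)
cfg.model.bbox_head.loss_cls.loss_weight = 0.8
cfg.model.bbox_head.loss_bbox.loss_weight = 2

# Desired: hand the customised Config straight to the IceVision builder
# instead of writing a modified .py config file to disk first.
retinanet = model(cfg, num_classes=3)          # model() as defined in the files below
```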
<code>
[start of icevision/models/mmdet/common/bbox/two_stage/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
16 if weights_path is not None:
17 cfg.model.pretrained = None
18
19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
20
21 if weights_path is not None:
22 load_checkpoint(_model, str(weights_path))
23
24 return _model
25
[end of icevision/models/mmdet/common/bbox/two_stage/model.py]
[start of icevision/models/mmdet/common/bbox/single_stage/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.bbox_head.num_classes = num_classes - 1
16 if weights_path is not None:
17 cfg.model.pretrained = None
18
19 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
20
21 if weights_path is not None:
22 load_checkpoint(_model, str(weights_path))
23
24 return _model
25
[end of icevision/models/mmdet/common/bbox/single_stage/model.py]
[start of icevision/models/mmdet/common/mask/two_stage/model.py]
1 __all__ = ["model"]
2
3 from icevision.imports import *
4 from mmcv import Config
5 from mmdet.models import build_detector
6 from mmcv.runner import load_checkpoint
7
8
9 def model(
10 cfg_path: Union[str, Path],
11 num_classes: int,
12 weights_path: Optional[Union[str, Path]] = None,
13 ) -> nn.Module:
14 cfg = Config.fromfile(str(cfg_path))
15 cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
16 cfg.model.roi_head.mask_head.num_classes = num_classes - 1
17 if weights_path is not None:
18 cfg.model.pretrained = None
19
20 _model = build_detector(cfg.model, cfg.get("train_cfg"), cfg.get("test_cfg"))
21
22 if weights_path is not None:
23 load_checkpoint(_model, str(weights_path))
24
25 return _model
26
[end of icevision/models/mmdet/common/mask/two_stage/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py
--- a/icevision/models/mmdet/common/bbox/single_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py
--- a/icevision/models/mmdet/common/bbox/two_stage/model.py
+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
if weights_path is not None:
cfg.model.pretrained = None
diff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py
--- a/icevision/models/mmdet/common/mask/two_stage/model.py
+++ b/icevision/models/mmdet/common/mask/two_stage/model.py
@@ -7,11 +7,16 @@
def model(
- cfg_path: Union[str, Path],
+ cfg: Union[str, Path, Config],
num_classes: int,
weights_path: Optional[Union[str, Path]] = None,
) -> nn.Module:
- cfg = Config.fromfile(str(cfg_path))
+
+ # if `cfg` argument is a path (str, Path) create an Config object from the file
+ # otherwise cfg should be already an Config object
+ if isinstance(cfg, (str, Path)):
+ cfg = Config.fromfile(str(cfg))
+
cfg.model.roi_head.bbox_head.num_classes = num_classes - 1
cfg.model.roi_head.mask_head.num_classes = num_classes - 1
if weights_path is not None:
|
{"golden_diff": "diff --git a/icevision/models/mmdet/common/bbox/single_stage/model.py b/icevision/models/mmdet/common/bbox/single_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/single_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/single_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/bbox/two_stage/model.py b/icevision/models/mmdet/common/bbox/two_stage/model.py\n--- a/icevision/models/mmdet/common/bbox/two_stage/model.py\n+++ b/icevision/models/mmdet/common/bbox/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\ndiff --git a/icevision/models/mmdet/common/mask/two_stage/model.py b/icevision/models/mmdet/common/mask/two_stage/model.py\n--- a/icevision/models/mmdet/common/mask/two_stage/model.py\n+++ b/icevision/models/mmdet/common/mask/two_stage/model.py\n@@ -7,11 +7,16 @@\n \n \n def model(\n- cfg_path: Union[str, Path],\n+ cfg: Union[str, Path, Config],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n ) -> nn.Module:\n- cfg = Config.fromfile(str(cfg_path))\n+\n+ # if `cfg` argument is a path (str, Path) create an Config object from the file\n+ # otherwise cfg should be already an Config object\n+ if isinstance(cfg, (str, Path)):\n+ cfg = Config.fromfile(str(cfg))\n+\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n", "issue": "Allow passing a Config object to the MMDetection models\n## \ud83d\ude80 Feature\r\n**Is your feature request related to a problem? Please describe.**\r\nIn the current version, to update the loss_weight for example, We have to duplicate an existing config .py file and make the changes in the new file, and pass it the model method. The latter expect a config file_path\r\n\r\n\r\n**Describe the solution you'd like**\r\n I think it would be better to pass the MMDetection config object instead to the model method. 
By doing so, we could let the users populate the config object and update any field they wish to change, without creating and updating external files, like this: \r\n\r\n```\r\ncfg = Config.fromfile(cfg_filepath)\r\ncfg.model.bbox_head.loss_cls.loss_weight = 0.8\r\ncfg.model.bbox_head.loss_bbox.loss_weight = 2\r\n```\r\n\r\n\r\n**Additional context**\r\nThere is a discussion in our Discord forum around this issue:\r\nhttps://discordapp.com/channels/735877944085446747/780951884683935744/811650062706540644\r\n\n", "before_files": [{"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/two_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.bbox_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/bbox/single_stage/model.py"}, {"content": "__all__ = [\"model\"]\n\nfrom icevision.imports import *\nfrom mmcv import Config\nfrom mmdet.models import build_detector\nfrom mmcv.runner import load_checkpoint\n\n\ndef model(\n cfg_path: Union[str, Path],\n num_classes: int,\n weights_path: Optional[Union[str, Path]] = None,\n) -> nn.Module:\n cfg = Config.fromfile(str(cfg_path))\n cfg.model.roi_head.bbox_head.num_classes = num_classes - 1\n cfg.model.roi_head.mask_head.num_classes = num_classes - 1\n if weights_path is not None:\n cfg.model.pretrained = None\n\n _model = build_detector(cfg.model, cfg.get(\"train_cfg\"), cfg.get(\"test_cfg\"))\n\n if weights_path is not None:\n load_checkpoint(_model, str(weights_path))\n\n return _model\n", "path": "icevision/models/mmdet/common/mask/two_stage/model.py"}]}
| 1,526 | 716 |
gh_patches_debug_43066 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1053 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide detailed error info for POST to records API when in violation of a not-null constraint
## Current behavior
1. Set up a table which contains a `NOT NULL` column.
1. POST an empty request (`{}`) to the `records` API (e.g. by clicking the `+` icon to add a new row to the table).
1. Observe the following response:
```json
[
{
"code": 4999,
"message": "(psycopg2.errors.NotNullViolation) null value in column \"common_name\" of relation \"trees\" violates not-null constraint\nDETAIL: Failing row contains (11, null, null, null, null, null, null).\n\n[SQL: INSERT INTO public.trees DEFAULT VALUES RETURNING public.trees.id]\n(Background on this error at: http://sqlalche.me/e/14/gkpj)",
"field": null,
"detail": null
}
]
```
1. Within the response, observe no easy way to identify _which column_ (or columns) was responsible for the error.
## Expected behavior
- For #775, we need enough detail for the front end to be able to display error messages specific to certain cells. For example, if a POST request has two columns with invalid data (perhaps even invalid in _different ways_), we need to be able to match specific error messages to each of those columns.
- @pavish and I are flexible on the exact response schema from the API here, as long as it satisfies the above goal.
- We should also keep #896 in mind with this change.
## Status
- blocked by #896
</issue>
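One possible shape for a column-aware error entry, sketched as a Python dict. The schema is deliberately left open by the issue; the error code and column id below are placeholders, and the `record_detail`/`column_id` keys follow the detail fields the eventual fix attaches to the exception:

```python
# Illustrative only: an error entry that lets the frontend map the failure
# to a specific column instead of parsing the psycopg2 message string.
error_entry = {
    "code": 2002,  # placeholder for a dedicated NotNullViolation error code
    "message": 'null value in column "common_name" violates not-null constraint',
    "field": None,
    "detail": {
        "record_detail": "Failing row contains (11, null, null, null, null, null, null).",
        "column_id": 42,  # placeholder id of the "common_name" column
    },
}
```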
<code>
[start of mathesar/api/exceptions/database_exceptions/exceptions.py]
1 from rest_framework import status
2
3 from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException
4 from mathesar.api.exceptions.error_codes import ErrorCodes
5 from mathesar.api.exceptions.generic_exceptions.base_exceptions import (
6 MathesarAPIException,
7 get_default_exception_detail,
8 )
9
10
11 class UniqueViolationAPIException(MathesarAPIException):
12 error_code = ErrorCodes.UniqueViolation.value
13
14 def __init__(
15 self,
16 exception,
17 message="This column has non-unique values so a unique constraint cannot be set",
18 field=None,
19 details=None,
20 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
21 ):
22 exception_detail = get_default_exception_detail(exception, self.error_code, message, field, details)._asdict()
23 self.detail = [exception_detail]
24 self.status_code = status_code
25
26
27 class DuplicateTableAPIException(ProgrammingAPIException):
28 # Default message is not needed as the exception string provides enough details
29 error_code = ErrorCodes.DuplicateTableError.value
30
31 def __init__(
32 self,
33 exception,
34 message=None,
35 field=None,
36 details=None,
37 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
38 ):
39 super().__init__(exception, self.error_code, message, field, details, status_code)
40
41
42 class DuplicateColumnAPIException(ProgrammingAPIException):
43 # Default message is not needed as the exception string provides enough details
44 error_code = ErrorCodes.DuplicateColumnError.value
45
46 def __init__(
47 self,
48 exception,
49 message=None,
50 field=None,
51 details=None,
52 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
53 ):
54 super().__init__(exception, self.error_code, message, field, details, status_code)
55
56
57 class InvalidDefaultAPIException(MathesarAPIException):
58 # Default message is not needed as the exception string provides enough details
59 error_code = ErrorCodes.InvalidDefault.value
60
61 def __init__(
62 self,
63 exception,
64 message=None,
65 field=None,
66 details=None,
67 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
68 ):
69 super().__init__(exception, self.error_code, message, field, details, status_code)
70
71
72 class InvalidTypeOptionAPIException(MathesarAPIException):
73 # Default message is not needed as the exception string provides enough details
74 error_code = ErrorCodes.InvalidTypeOption.value
75
76 def __init__(
77 self,
78 exception,
79 message=None,
80 field=None,
81 details=None,
82 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
83 ):
84 super().__init__(exception, self.error_code, message, field, details, status_code)
85
86
87 class InvalidTypeCastAPIException(MathesarAPIException):
88 # Default message is not needed as the exception string provides enough details
89 error_code = ErrorCodes.InvalidTypeCast.value
90
91 def __init__(
92 self,
93 exception,
94 message="Invalid type cast requested.",
95 field=None,
96 details=None,
97 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
98 ):
99 super().__init__(exception, self.error_code, message, field, details, status_code)
100
101
102 class DynamicDefaultAPIException(MathesarAPIException):
103 # Default message is not needed as the exception string provides enough details
104 error_code = ErrorCodes.UndefinedFunction.value
105
106 def __init__(
107 self,
108 exception,
109 message=None,
110 field=None,
111 details=None,
112 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
113 ):
114 super().__init__(exception, self.error_code, message, field, details, status_code)
115
116
117 class UnsupportedTypeAPIException(MathesarAPIException):
118 # Default message is not needed as the exception string provides enough details
119 error_code = ErrorCodes.UnsupportedType.value
120
121 def __init__(
122 self,
123 exception,
124 message=None,
125 field=None,
126 details=None,
127 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
128 ):
129 super().__init__(exception, self.error_code, message, field, details, status_code)
130
131
132 class BadFilterAPIException(MathesarAPIException):
133 error_code = ErrorCodes.UnsupportedType.value
134
135 def __init__(
136 self,
137 exception,
138 message="Filter arguments are not correct",
139 field=None,
140 details=None,
141 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
142 ):
143 super().__init__(exception, self.error_code, message, field, details, status_code)
144
145
146 class BadSortAPIException(MathesarAPIException):
147 # Default message is not needed as the exception string provides enough details
148 error_code = ErrorCodes.UnsupportedType.value
149
150 def __init__(
151 self,
152 exception,
153 message=None,
154 field=None,
155 details=None,
156 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
157 ):
158 super().__init__(exception, self.error_code, message, field, details, status_code)
159
160
161 class BadGroupAPIException(MathesarAPIException):
162 # Default message is not needed as the exception string provides enough details
163 error_code = ErrorCodes.UnsupportedType.value
164
165 def __init__(
166 self,
167 exception,
168 message=None,
169 field=None,
170 details=None,
171 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
172 ):
173 super().__init__(exception, self.error_code, message, field, details, status_code)
174
175
176 class RaiseExceptionAPIException(MathesarAPIException):
177 """
178 Exception raised inside a postgres function
179 """
180 error_code = ErrorCodes.RaiseException.value
181
182 def __init__(
183 self,
184 exception,
185 message=None,
186 field=None,
187 details=None,
188 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
189 ):
190 super().__init__(exception, self.error_code, message, field, details, status_code)
191
192
193 class UndefinedFunctionAPIException(MathesarAPIException):
194 # Default message is not needed as the exception string provides enough details
195 error_code = ErrorCodes.UndefinedFunction.value
196
197 def __init__(
198 self,
199 exception,
200 message=None,
201 field=None,
202 details=None,
203 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
204 ):
205 super().__init__(exception, self.error_code, message, field, details, status_code)
206
207
208 class NotNullViolationAPIException(MathesarAPIException):
209 """
210 Exception raised when trying to add not null constraint to column with null value
211 or when trying to add non-null value to a column with not null constraint
212 """
213 error_code = ErrorCodes.NotNullViolation.value
214
215 def __init__(
216 self, exception,
217 message=None,
218 field=None,
219 status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
220 ):
221 message_str, row_detail = exception.orig.args[0].split("DETAIL")
222 message_str = message if message is not None else message_str
223 details = {'row_parameters': exception.params, 'row_detail': row_detail}
224 super().__init__(exception, self.error_code, message_str, field, details, status_code)
225
[end of mathesar/api/exceptions/database_exceptions/exceptions.py]
[start of mathesar/api/db/viewsets/records.py]
1 from psycopg2.errors import NotNullViolation
2
3 from rest_framework import status, viewsets
4 from rest_framework.exceptions import NotFound
5 from rest_framework.response import Response
6 from rest_framework.renderers import BrowsableAPIRenderer
7 from sqlalchemy.exc import IntegrityError
8 from sqlalchemy_filters.exceptions import BadFilterFormat, BadSortFormat, FilterFieldNotFound, SortFieldNotFound
9
10 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
11 from db.records.exceptions import BadGroupFormat, GroupFieldNotFound, InvalidGroupType
12 from mathesar.api.pagination import TableLimitOffsetGroupPagination
13 from mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer
14 from mathesar.api.utils import get_table_or_404
15 from mathesar.models import Table
16 from mathesar.utils.json import MathesarJSONRenderer
17
18
19 class RecordViewSet(viewsets.ViewSet):
20 # There is no 'update' method.
21 # We're not supporting PUT requests because there aren't a lot of use cases
22 # where the entire record needs to be replaced, PATCH suffices for updates.
23 def get_queryset(self):
24 return Table.objects.all().order_by('-created_at')
25
26 renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]
27
28 # For filter parameter formatting, see:
29 # https://github.com/centerofci/sqlalchemy-filters#filters-format
30 # For sorting parameter formatting, see:
31 # https://github.com/centerofci/sqlalchemy-filters#sort-format
32 def list(self, request, table_pk=None):
33 paginator = TableLimitOffsetGroupPagination()
34
35 serializer = RecordListParameterSerializer(data=request.GET)
36 serializer.is_valid(raise_exception=True)
37
38 try:
39 records = paginator.paginate_queryset(
40 self.get_queryset(), request, table_pk,
41 filters=serializer.validated_data['filters'],
42 order_by=serializer.validated_data['order_by'],
43 grouping=serializer.validated_data['grouping'],
44 )
45 except (BadFilterFormat, FilterFieldNotFound) as e:
46 raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST)
47 except (BadSortFormat, SortFieldNotFound) as e:
48 raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST)
49 except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:
50 raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST)
51
52 serializer = RecordSerializer(records, many=True)
53 return paginator.get_paginated_response(serializer.data)
54
55 def retrieve(self, request, pk=None, table_pk=None):
56 table = get_table_or_404(table_pk)
57 record = table.get_record(pk)
58 if not record:
59 raise NotFound
60 serializer = RecordSerializer(record)
61 return Response(serializer.data)
62
63 def create(self, request, table_pk=None):
64 table = get_table_or_404(table_pk)
65 # We only support adding a single record through the API.
66 assert isinstance((request.data), dict)
67 try:
68 record = table.create_record_or_records(request.data)
69 except IntegrityError as e:
70 if e.orig == NotNullViolation:
71 raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
72 else:
73 raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
74 serializer = RecordSerializer(record)
75 return Response(serializer.data, status=status.HTTP_201_CREATED)
76
77 def partial_update(self, request, pk=None, table_pk=None):
78 table = get_table_or_404(table_pk)
79 record = table.update_record(pk, request.data)
80 serializer = RecordSerializer(record)
81 return Response(serializer.data)
82
83 def destroy(self, request, pk=None, table_pk=None):
84 table = get_table_or_404(table_pk)
85 table.delete_record(pk)
86 return Response(status=status.HTTP_204_NO_CONTENT)
87
[end of mathesar/api/db/viewsets/records.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py
--- a/mathesar/api/db/viewsets/records.py
+++ b/mathesar/api/db/viewsets/records.py
@@ -43,11 +43,23 @@
grouping=serializer.validated_data['grouping'],
)
except (BadFilterFormat, FilterFieldNotFound) as e:
- raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST)
+ raise database_api_exceptions.BadFilterAPIException(
+ e,
+ field='filters',
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
except (BadSortFormat, SortFieldNotFound) as e:
- raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST)
+ raise database_api_exceptions.BadSortAPIException(
+ e,
+ field='order_by',
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:
- raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST)
+ raise database_api_exceptions.BadGroupAPIException(
+ e,
+ field='grouping',
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
serializer = RecordSerializer(records, many=True)
return paginator.get_paginated_response(serializer.data)
@@ -67,8 +79,12 @@
try:
record = table.create_record_or_records(request.data)
except IntegrityError as e:
- if e.orig == NotNullViolation:
- raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
+ if type(e.orig) == NotNullViolation:
+ raise database_api_exceptions.NotNullViolationAPIException(
+ e,
+ status_code=status.HTTP_400_BAD_REQUEST,
+ table=table
+ )
else:
raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
serializer = RecordSerializer(record)
diff --git a/mathesar/api/exceptions/database_exceptions/exceptions.py b/mathesar/api/exceptions/database_exceptions/exceptions.py
--- a/mathesar/api/exceptions/database_exceptions/exceptions.py
+++ b/mathesar/api/exceptions/database_exceptions/exceptions.py
@@ -1,11 +1,13 @@
from rest_framework import status
+from db.columns.operations.select import get_columns_attnum_from_names
from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException
from mathesar.api.exceptions.error_codes import ErrorCodes
from mathesar.api.exceptions.generic_exceptions.base_exceptions import (
MathesarAPIException,
get_default_exception_detail,
)
+from mathesar.models import Column
class UniqueViolationAPIException(MathesarAPIException):
@@ -216,9 +218,19 @@
self, exception,
message=None,
field=None,
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR
+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+ table=None
):
- message_str, row_detail = exception.orig.args[0].split("DETAIL")
- message_str = message if message is not None else message_str
- details = {'row_parameters': exception.params, 'row_detail': row_detail}
+ exception_diagnostics = exception.orig.diag
+ message_str = message if message is not None else exception_diagnostics.message_primary
+ column_attnum = get_columns_attnum_from_names(
+ table.oid,
+ [exception.orig.diag.column_name],
+ table.schema._sa_engine
+ )[0][0]
+ column = Column.objects.get(attnum=column_attnum)
+ details = {
+ 'record_detail': exception_diagnostics.message_detail,
+ 'column_id': column.id
+ }
super().__init__(exception, self.error_code, message_str, field, details, status_code)
|
{"golden_diff": "diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py\n--- a/mathesar/api/db/viewsets/records.py\n+++ b/mathesar/api/db/viewsets/records.py\n@@ -43,11 +43,23 @@\n grouping=serializer.validated_data['grouping'],\n )\n except (BadFilterFormat, FilterFieldNotFound) as e:\n- raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST)\n+ raise database_api_exceptions.BadFilterAPIException(\n+ e,\n+ field='filters',\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n except (BadSortFormat, SortFieldNotFound) as e:\n- raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST)\n+ raise database_api_exceptions.BadSortAPIException(\n+ e,\n+ field='order_by',\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n- raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST)\n+ raise database_api_exceptions.BadGroupAPIException(\n+ e,\n+ field='grouping',\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n \n serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n@@ -67,8 +79,12 @@\n try:\n record = table.create_record_or_records(request.data)\n except IntegrityError as e:\n- if e.orig == NotNullViolation:\n- raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n+ if type(e.orig) == NotNullViolation:\n+ raise database_api_exceptions.NotNullViolationAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ table=table\n+ )\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n serializer = RecordSerializer(record)\ndiff --git a/mathesar/api/exceptions/database_exceptions/exceptions.py b/mathesar/api/exceptions/database_exceptions/exceptions.py\n--- a/mathesar/api/exceptions/database_exceptions/exceptions.py\n+++ b/mathesar/api/exceptions/database_exceptions/exceptions.py\n@@ -1,11 +1,13 @@\n from rest_framework import status\n \n+from db.columns.operations.select import get_columns_attnum_from_names\n from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\n from mathesar.api.exceptions.error_codes import ErrorCodes\n from mathesar.api.exceptions.generic_exceptions.base_exceptions import (\n MathesarAPIException,\n get_default_exception_detail,\n )\n+from mathesar.models import Column\n \n \n class UniqueViolationAPIException(MathesarAPIException):\n@@ -216,9 +218,19 @@\n self, exception,\n message=None,\n field=None,\n- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n+ status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n+ table=None\n ):\n- message_str, row_detail = exception.orig.args[0].split(\"DETAIL\")\n- message_str = message if message is not None else message_str\n- details = {'row_parameters': exception.params, 'row_detail': row_detail}\n+ exception_diagnostics = exception.orig.diag\n+ message_str = message if message is not None else exception_diagnostics.message_primary\n+ column_attnum = get_columns_attnum_from_names(\n+ table.oid,\n+ [exception.orig.diag.column_name],\n+ table.schema._sa_engine\n+ )[0][0]\n+ column = Column.objects.get(attnum=column_attnum)\n+ details = {\n+ 'record_detail': exception_diagnostics.message_detail,\n+ 'column_id': column.id\n+ }\n super().__init__(exception, self.error_code, message_str, 
field, details, status_code)\n", "issue": "Provide detailed error info for POST to records API when in violation of a not-null constraint\n## Current behavior\r\n\r\n1. Set up a table which contains a `NOT NULL` column.\r\n1. POST an empty request (`{}`) to the `records` API (e.g. by clicking the `+` icon to add a new row to the table).\r\n1. Observe the following response:\r\n\r\n ```json\r\n [\r\n {\r\n \"code\": 4999,\r\n \"message\": \"(psycopg2.errors.NotNullViolation) null value in column \\\"common_name\\\" of relation \\\"trees\\\" violates not-null constraint\\nDETAIL: Failing row contains (11, null, null, null, null, null, null).\\n\\n[SQL: INSERT INTO public.trees DEFAULT VALUES RETURNING public.trees.id]\\n(Background on this error at: http://sqlalche.me/e/14/gkpj)\",\r\n \"field\": null,\r\n \"detail\": null\r\n }\r\n ]\r\n ```\r\n\r\n1. Within the response, observe no easy way to identify _which column_ (or columns) was responsible for the error.\r\n\r\n## Expected behavior\r\n\r\n- For #775, we need enough detail for the front end to be able to display error messages specific to certain cells. For example, if a POST request has two columns with invalid data (perhaps even invalid in _different ways_), we need to be able to match specific error messages to each of those columns.\r\n\r\n- @pavish and I are flexible on the exact response schema from the API here, as long as it satisfies the above goal.\r\n\r\n- We should also keep #896 in-mind with this change.\r\n\r\n## Status\r\n\r\n- blocked by #896\r\n\n", "before_files": [{"content": "from rest_framework import status\n\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nfrom mathesar.api.exceptions.generic_exceptions.base_exceptions import (\n MathesarAPIException,\n get_default_exception_detail,\n)\n\n\nclass UniqueViolationAPIException(MathesarAPIException):\n error_code = ErrorCodes.UniqueViolation.value\n\n def __init__(\n self,\n exception,\n message=\"This column has non-unique values so a unique constraint cannot be set\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n exception_detail = get_default_exception_detail(exception, self.error_code, message, field, details)._asdict()\n self.detail = [exception_detail]\n self.status_code = status_code\n\n\nclass DuplicateTableAPIException(ProgrammingAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.DuplicateTableError.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass DuplicateColumnAPIException(ProgrammingAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.DuplicateColumnError.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidDefaultAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidDefault.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n 
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidTypeOptionAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidTypeOption.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass InvalidTypeCastAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.InvalidTypeCast.value\n\n def __init__(\n self,\n exception,\n message=\"Invalid type cast requested.\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass DynamicDefaultAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UndefinedFunction.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass UnsupportedTypeAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadFilterAPIException(MathesarAPIException):\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=\"Filter arguments are not correct\",\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadSortAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass BadGroupAPIException(MathesarAPIException):\n # Default message is not needed as the exception string provides enough details\n error_code = ErrorCodes.UnsupportedType.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass RaiseExceptionAPIException(MathesarAPIException):\n \"\"\"\n Exception raised inside a postgres function\n \"\"\"\n error_code = ErrorCodes.RaiseException.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass UndefinedFunctionAPIException(MathesarAPIException):\n # Default message is not needed as the exception 
string provides enough details\n error_code = ErrorCodes.UndefinedFunction.value\n\n def __init__(\n self,\n exception,\n message=None,\n field=None,\n details=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n super().__init__(exception, self.error_code, message, field, details, status_code)\n\n\nclass NotNullViolationAPIException(MathesarAPIException):\n \"\"\"\n Exception raised when trying to add not null constraint to column with null value\n or when trying to add non-null value to a column with not null constraint\n \"\"\"\n error_code = ErrorCodes.NotNullViolation.value\n\n def __init__(\n self, exception,\n message=None,\n field=None,\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR\n ):\n message_str, row_detail = exception.orig.args[0].split(\"DETAIL\")\n message_str = message if message is not None else message_str\n details = {'row_parameters': exception.params, 'row_detail': row_detail}\n super().__init__(exception, self.error_code, message_str, field, details, status_code)\n", "path": "mathesar/api/exceptions/database_exceptions/exceptions.py"}, {"content": "from psycopg2.errors import NotNullViolation\n\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy_filters.exceptions import BadFilterFormat, BadSortFormat, FilterFieldNotFound, SortFieldNotFound\n\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nfrom db.records.exceptions import BadGroupFormat, GroupFieldNotFound, InvalidGroupType\nfrom mathesar.api.pagination import TableLimitOffsetGroupPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.models import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(viewsets.ViewSet):\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#filters-format\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetGroupPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n\n try:\n records = paginator.paginate_queryset(\n self.get_queryset(), request, table_pk,\n filters=serializer.validated_data['filters'],\n order_by=serializer.validated_data['order_by'],\n grouping=serializer.validated_data['grouping'],\n )\n except (BadFilterFormat, FilterFieldNotFound) as e:\n raise database_api_exceptions.BadFilterAPIException(e, field='filters', status_code=status.HTTP_400_BAD_REQUEST)\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(e, field='order_by', status_code=status.HTTP_400_BAD_REQUEST)\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(e, field='grouping', status_code=status.HTTP_400_BAD_REQUEST)\n\n 
serializer = RecordSerializer(records, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n record = table.get_record(pk)\n if not record:\n raise NotFound\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n # We only support adding a single record through the API.\n assert isinstance((request.data), dict)\n try:\n record = table.create_record_or_records(request.data)\n except IntegrityError as e:\n if e.orig == NotNullViolation:\n raise database_api_exceptions.NotNullViolationAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n else:\n raise database_api_exceptions.MathesarAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n serializer = RecordSerializer(record)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n record = table.update_record(pk, request.data)\n serializer = RecordSerializer(record)\n return Response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n table.delete_record(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "path": "mathesar/api/db/viewsets/records.py"}]}
| 4,009 | 892 |
gh_patches_debug_17331
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-914
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow `super()` as overused expression
We need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114
It is obviously allowed.
Then we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py
That's it. It is beginner friendly.
Allow `super()` as overused expression
We need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114
It is obviously allowed.
Then we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py
That's it. It is beginner friendly.
</issue>
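For orientation, such a predicate only needs to recognise a call whose function is the bare name `super`. A minimal sketch using only the standard `ast` module (the name and placement below are illustrative assumptions, not code taken from the repository):

```python
import ast


def _is_super_call(node: ast.AST) -> bool:
    # A ``super()`` call parses as an ``ast.Call`` whose ``func`` is the
    # plain name ``super``; attribute calls such as ``obj.super()`` do not match.
    return (
        isinstance(node, ast.Call)
        and isinstance(node.func, ast.Name)
        and node.func.id == 'super'
    )
```

The predicate would then be appended to the existing list of ignore predicates used when collecting overused expressions.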
<code>
[start of wemake_python_styleguide/visitors/ast/complexity/overuses.py]
1 # -*- coding: utf-8 -*-
2
3 import ast
4 from collections import defaultdict
5 from typing import ClassVar, DefaultDict, List, Union
6
7 from typing_extensions import final
8
9 from wemake_python_styleguide.compat.aliases import FunctionNodes
10 from wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST
11 from wemake_python_styleguide.logic import nodes, source, walk
12 from wemake_python_styleguide.types import AnyNodes
13 from wemake_python_styleguide.violations import complexity
14 from wemake_python_styleguide.visitors import base
15
16 _Expressions = DefaultDict[str, List[ast.AST]]
17 _FunctionExpressions = DefaultDict[ast.AST, _Expressions]
18 _Annotated = Union[ast.arg, ast.AnnAssign]
19
20 _AnnNodes = (ast.AnnAssign, ast.arg)
21
22
23 @final
24 class StringOveruseVisitor(base.BaseNodeVisitor):
25 """Restricts several string usages."""
26
27 def __init__(self, *args, **kwargs) -> None:
28 """Inits the counter for constants."""
29 super().__init__(*args, **kwargs)
30 self._string_constants: DefaultDict[str, int] = defaultdict(int)
31
32 def visit_Str(self, node: ast.Str) -> None:
33 """
34 Restricts to over-use string constants.
35
36 Raises:
37 OverusedStringViolation
38
39 """
40 self._check_string_constant(node)
41 self.generic_visit(node)
42
43 def _check_string_constant(self, node: ast.Str) -> None:
44 parent = nodes.get_parent(node)
45 if isinstance(parent, _AnnNodes) and parent.annotation == node:
46 return # it is argument or variable annotation
47
48 if isinstance(parent, FunctionNodes) and parent.returns == node:
49 return # it is return annotation
50
51 self._string_constants[node.s] += 1
52
53 def _post_visit(self) -> None:
54 for string, usage_count in self._string_constants.items():
55 if usage_count > self.options.max_string_usages:
56 self.add_violation(
57 complexity.OverusedStringViolation(text=string or "''"),
58 )
59
60
61 @final
62 class ExpressionOveruseVisitor(base.BaseNodeVisitor):
63 """Finds overused expressions."""
64
65 _expressions: ClassVar[AnyNodes] = (
66 # We do not treat `ast.Attribute`s as expressions
67 # because they are too widely used. That's a compromise.
68 ast.Assert,
69 ast.BoolOp,
70 ast.BinOp,
71 ast.Call,
72 ast.Compare,
73 ast.Starred,
74 ast.Subscript,
75 ast.UnaryOp,
76 ast.Lambda,
77
78 ast.DictComp,
79 ast.Dict,
80 ast.List,
81 ast.ListComp,
82 ast.Tuple,
83 ast.GeneratorExp,
84 ast.Set,
85 ast.SetComp,
86 )
87
88 _msg: ClassVar[str] = '{0}; used {1} times'
89
90 def __init__(self, *args, **kwargs) -> None:
91 """We need to track expression usage in functions and modules."""
92 super().__init__(*args, **kwargs)
93 self._module_expressions: _Expressions = defaultdict(list)
94 self._function_expressions: _FunctionExpressions = defaultdict(
95 lambda: defaultdict(list),
96 )
97
98 def visit(self, node: ast.AST) -> None:
99 """
100 Visits all nodes in a module to find overused values.
101
102 Raises:
103 OverusedExpressionViolation
104
105 """
106 if isinstance(node, self._expressions):
107 self._add_expression(node)
108 self.generic_visit(node)
109
110 def _add_expression(self, node: ast.AST) -> None:
111 ignore_predicates = [
112 self._is_decorator,
113 self._is_self_method,
114 self._is_annotation,
115
116 # We use this predicate because classes have quite complex
117 # DSL to be created: like django-orm, attrs, and dataclasses.
118 # And these DSLs are built using attributes and calls.
119 _is_class_context,
120 ]
121 if any(ignore(node) for ignore in ignore_predicates):
122 return
123
124 source_code = source.node_to_string(node)
125 self._module_expressions[source_code].append(node)
126
127 maybe_function = walk.get_closest_parent(node, FunctionNodes)
128 if maybe_function is not None:
129 self._function_expressions[maybe_function][source_code].append(
130 node,
131 )
132
133 def _is_decorator(
134 self,
135 node: ast.AST,
136 ) -> bool:
137 parent = walk.get_closest_parent(node, FunctionNodes)
138 if isinstance(parent, FunctionNodes) and parent.decorator_list:
139 return any(
140 node == decorator or walk.is_contained_by(node, decorator)
141 for decorator in parent.decorator_list
142 )
143 return False
144
145 def _is_self_method(self, node: ast.AST) -> bool:
146 if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
147 if isinstance(node.func.value, ast.Name):
148 if node.func.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST:
149 return True
150 return False
151
152 def _is_annotation(self, node: ast.AST) -> bool:
153 typed_assign = walk.get_closest_parent(
154 node,
155 (ast.AnnAssign, ast.arg),
156 )
157
158 if isinstance(typed_assign, _AnnNodes) and typed_assign.annotation:
159 is_same_node = node == typed_assign.annotation
160 is_child_annotation = walk.is_contained_by(
161 node, typed_assign.annotation,
162 )
163 return is_same_node or is_child_annotation
164 return False
165
166 def _post_visit(self) -> None:
167 for mod_source, module_nodes in self._module_expressions.items():
168 if len(module_nodes) > self.options.max_module_expressions:
169 self.add_violation(
170 complexity.OverusedExpressionViolation(
171 module_nodes[0],
172 text=self._msg.format(mod_source, len(module_nodes)),
173 ),
174 )
175
176 for function_contexts in self._function_expressions.values():
177 for src, function_nodes in function_contexts.items():
178 if len(function_nodes) > self.options.max_function_expressions:
179 self.add_violation(
180 complexity.OverusedExpressionViolation(
181 function_nodes[0],
182 text=self._msg.format(src, len(function_nodes)),
183 ),
184 )
185
186
187 def _is_class_context(node: ast.AST) -> bool:
188 return isinstance(nodes.get_context(node), ast.ClassDef)
189
[end of wemake_python_styleguide/visitors/ast/complexity/overuses.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py
--- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py
+++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py
@@ -117,6 +117,7 @@
# DSL to be created: like django-orm, attrs, and dataclasses.
# And these DSLs are built using attributes and calls.
_is_class_context,
+ _is_super_call,
]
if any(ignore(node) for ignore in ignore_predicates):
return
@@ -186,3 +187,9 @@
def _is_class_context(node: ast.AST) -> bool:
return isinstance(nodes.get_context(node), ast.ClassDef)
+
+
+def _is_super_call(node: ast.AST) -> bool:
+ if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
+ return node.func.id == 'super'
+ return False
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n@@ -117,6 +117,7 @@\n # DSL to be created: like django-orm, attrs, and dataclasses.\n # And these DSLs are built using attributes and calls.\n _is_class_context,\n+ _is_super_call,\n ]\n if any(ignore(node) for ignore in ignore_predicates):\n return\n@@ -186,3 +187,9 @@\n \n def _is_class_context(node: ast.AST) -> bool:\n return isinstance(nodes.get_context(node), ast.ClassDef)\n+\n+\n+def _is_super_call(node: ast.AST) -> bool:\n+ if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):\n+ return node.func.id == 'super'\n+ return False\n", "issue": "Allow `super()` as overused expression\nWe need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114\r\n\r\nIt is obviously allowed.\r\n\r\nThen we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py\r\n\r\nThat's it. It is beginner friendly.\nAllow `super()` as overused expression\nWe need to add a new predicate to allow `super()` calls here: https://github.com/wemake-services/wemake-python-styleguide/blob/39a3caeb9883c3112d878a88038e8b7331389265/wemake_python_styleguide/visitors/ast/complexity/overuses.py#L114\r\n\r\nIt is obviously allowed.\r\n\r\nThen we need to add a unit test here: https://github.com/wemake-services/wemake-python-styleguide/blob/ed1afeb3366525f0b3f1adc346a189a3c0f3b923/tests/test_visitors/test_ast/test_complexity/test_overuses/test_overused_expressions.py\r\n\r\nThat's it. 
It is beginner friendly.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport ast\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List, Union\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST\nfrom wemake_python_styleguide.logic import nodes, source, walk\nfrom wemake_python_styleguide.types import AnyNodes\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base\n\n_Expressions = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions = DefaultDict[ast.AST, _Expressions]\n_Annotated = Union[ast.arg, ast.AnnAssign]\n\n_AnnNodes = (ast.AnnAssign, ast.arg)\n\n\n@final\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Restricts several string usages.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[str, int] = defaultdict(int)\n\n def visit_Str(self, node: ast.Str) -> None:\n \"\"\"\n Restricts to over-use string constants.\n\n Raises:\n OverusedStringViolation\n\n \"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: ast.Str) -> None:\n parent = nodes.get_parent(node)\n if isinstance(parent, _AnnNodes) and parent.annotation == node:\n return # it is argument or variable annotation\n\n if isinstance(parent, FunctionNodes) and parent.returns == node:\n return # it is return annotation\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(text=string or \"''\"),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. 
That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Starred,\n ast.Subscript,\n ast.UnaryOp,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _msg: ClassVar[str] = '{0}; used {1} times'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes in a module to find overused values.\n\n Raises:\n OverusedExpressionViolation\n\n \"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n ignore_predicates = [\n self._is_decorator,\n self._is_self_method,\n self._is_annotation,\n\n # We use this predicate because classes have quite complex\n # DSL to be created: like django-orm, attrs, and dataclasses.\n # And these DSLs are built using attributes and calls.\n _is_class_context,\n ]\n if any(ignore(node) for ignore in ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _is_decorator(\n self,\n node: ast.AST,\n ) -> bool:\n parent = walk.get_closest_parent(node, FunctionNodes)\n if isinstance(parent, FunctionNodes) and parent.decorator_list:\n return any(\n node == decorator or walk.is_contained_by(node, decorator)\n for decorator in parent.decorator_list\n )\n return False\n\n def _is_self_method(self, node: ast.AST) -> bool:\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):\n if isinstance(node.func.value, ast.Name):\n if node.func.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST:\n return True\n return False\n\n def _is_annotation(self, node: ast.AST) -> bool:\n typed_assign = walk.get_closest_parent(\n node,\n (ast.AnnAssign, ast.arg),\n )\n\n if isinstance(typed_assign, _AnnNodes) and typed_assign.annotation:\n is_same_node = node == typed_assign.annotation\n is_child_annotation = walk.is_contained_by(\n node, typed_assign.annotation,\n )\n return is_same_node or is_child_annotation\n return False\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n ),\n )\n\n\ndef _is_class_context(node: ast.AST) -> bool:\n return isinstance(nodes.get_context(node), ast.ClassDef)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py"}]}
| 2,787 | 250 |
gh_patches_debug_31083
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-6855
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Suggest the use of `assume_spherical_screen()` in the autoalign/reproject examples
These two examples:
* Autoaligning: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/autoalign_aia_hmi.html
* Reprojecting: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/reprojection_align_aia_hmi.html
are silent about the fact that off-disk HMI data is being discarded. The examples should note that, and additionally suggest that the user can use `Helioprojective.assume_spherical_screen()` to retain the off-disk HMI data.
</issue>
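For reference, the suggested helper is normally used as a context manager wrapped around the reprojection (or autoaligned plotting) call. A minimal sketch, assuming the same sample AIA/HMI maps that the examples already load:

```python
import sunpy.map
import sunpy.data.sample
from sunpy.coordinates import Helioprojective

map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)

# Treat off-disk HMI emission as lying on a spherical screen so the
# reprojection keeps it instead of blanking it out.
with Helioprojective.assume_spherical_screen(map_aia.observer_coordinate):
    out_hmi = map_hmi.reproject_to(map_aia.wcs)
```

How prominently this is surfaced (a short note versus an extra plot) is left to the examples themselves.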
<code>
[start of examples/map_transformations/reprojection_align_aia_hmi.py]
1 """
2 ========================================
3 Aligning AIA and HMI Data with Reproject
4 ========================================
5
6 This example shows how to reproject one image to be aligned with another image.
7
8 Here we use the `reproject <https://reproject.readthedocs.io/en/stable/>`__
9 package to transform one image to the reference frame of another image. The
10 reference frames are defined by the respective World Coordinate System (WCS)
11 information. This is a very generic way of aligning data, and can be very
12 accurate.
13
14 You will need ``reproject`` v0.6 or higher installed.
15
16 See :ref:`sphx_glr_generated_gallery_map_transformations_autoalign_aia_hmi.py`
17 for an alternate approach to image alignment, where the images are aligned
18 as they are plotted, with no modified map created.
19 """
20 import matplotlib.pyplot as plt
21
22 import astropy.units as u
23
24 import sunpy.data.sample
25 import sunpy.map
26
27 ######################################################################
28 # We use the AIA image and HMI image from the sample data. For the
29 # HMI map, we use the special HMI color map, which expects the plotted
30 # range to be -1500 to 1500.
31
32 map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
33 map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)
34 map_hmi.plot_settings['cmap'] = "hmimag"
35 map_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)
36
37 ######################################################################
38 # Plot both images side by side.
39
40 fig = plt.figure(figsize=(12, 5))
41 ax1 = fig.add_subplot(121, projection=map_aia)
42 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)
43 ax2 = fig.add_subplot(122, projection=map_hmi)
44 map_hmi.plot(axes=ax2)
45
46 ######################################################################
47 # We can now reproject the HMI image to the WCS of the AIA image. We are using
48 # the fast `~reproject.reproject_interp`, however the slower but most accurate
49 # `~reproject.reproject_exact` would also work well here. The
50 # `~reproject.reproject_exact` function only works when reprojecting between
51 # two WCSes with the same observer, which makes it well suited to aligning
52 # data.
53
54 out_hmi = map_hmi.reproject_to(map_aia.wcs)
55
56 ######################################################################
57 # Plot the images side by side.
58
59 fig = plt.figure(figsize=(12, 5))
60 ax1 = fig.add_subplot(121, projection=map_aia)
61 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)
62 ax2 = fig.add_subplot(122, projection=out_hmi)
63 out_hmi.plot(axes=ax2, title='Reprojected HMI image')
64
65 ######################################################################
66 # As both of these images are now on the same pixel grid we can directly plot
67 # them over one another, by setting the transparency of the HMI plot.
68
69 fig = plt.figure()
70 ax1 = fig.add_subplot(projection=map_aia)
71 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)
72 out_hmi.plot(axes=ax1, alpha=0.5)
73 plt.title('HMI overlaid on AIA')
74
75 plt.show()
76
77 # sphinx_gallery_thumbnail_number = 2
78
[end of examples/map_transformations/reprojection_align_aia_hmi.py]
[start of examples/map_transformations/autoalign_aia_hmi.py]
1 """
2 ==============================================
3 Auto-Aligning AIA and HMI Data During Plotting
4 ==============================================
5
6 This example shows how to auto-align two images with different reference frames
7 during plotting.
8
9 Here we use the optional keyword ``autoalign`` when calling Map's
10 :meth:`~sunpy.map.GenericMap.plot` method. The reference frames are defined by
11 the respective World Coordinate System (WCS) information.
12
13 See :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_align_aia_hmi.py`
14 for an alternate approach to image alignment, where one of the maps is modified
15 prior to plotting, and thus is available for purposes other than plotting.
16 """
17 import matplotlib.pyplot as plt
18
19 import astropy.units as u
20
21 import sunpy.data.sample
22 import sunpy.map
23
24 ######################################################################
25 # We use the AIA image and HMI image from the sample data. For the
26 # HMI map, we use the special HMI color map, which expects the plotted
27 # range to be -1500 to 1500.
28
29 map_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)
30 map_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)
31 map_hmi.plot_settings['cmap'] = "hmimag"
32 map_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)
33
34 ######################################################################
35 # Plot both images side by side. Note that the HMI image is oriented
36 # "upside down" relative to the AIA image.
37
38 fig = plt.figure(figsize=(12, 5))
39 ax1 = fig.add_subplot(121, projection=map_aia)
40 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)
41 ax2 = fig.add_subplot(122, projection=map_hmi)
42 map_hmi.plot(axes=ax2)
43
44 ######################################################################
45 # Setting ``autoalign=True`` allows plotting the HMI image onto axes
46 # defined by the AIA reference frame. In contrast to the above code
47 # block, we intentionally set the ``projection`` for the axes to be
48 # the AIA map # instead of the HMI map. We also need to manually set
49 # the plot limits because Matplotlib gets confused by the off-disk
50 # parts of the image. Note that the HMI image now has the same
51 # orientation as the AIA image.
52
53 fig = plt.figure(figsize=(12, 5))
54 ax1 = fig.add_subplot(121, projection=map_aia)
55 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)
56 ax2 = fig.add_subplot(122, projection=map_aia)
57 map_hmi.plot(axes=ax2, autoalign=True, title='HMI image in AIA reference frame')
58 ax2.axis(ax1.axis())
59
60 ######################################################################
61 # We can directly plot them over one another, by setting the
62 # transparency of the HMI plot.
63
64 fig = plt.figure()
65 ax1 = fig.add_subplot(projection=map_aia)
66 map_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)
67 map_hmi.plot(axes=ax1, autoalign=True, alpha=0.5)
68 ax1.set_title('HMI overlaid on AIA')
69
70 plt.show()
71
72 # sphinx_gallery_thumbnail_number = 2
73
[end of examples/map_transformations/autoalign_aia_hmi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/map_transformations/autoalign_aia_hmi.py b/examples/map_transformations/autoalign_aia_hmi.py
--- a/examples/map_transformations/autoalign_aia_hmi.py
+++ b/examples/map_transformations/autoalign_aia_hmi.py
@@ -45,10 +45,18 @@
# Setting ``autoalign=True`` allows plotting the HMI image onto axes
# defined by the AIA reference frame. In contrast to the above code
# block, we intentionally set the ``projection`` for the axes to be
-# the AIA map # instead of the HMI map. We also need to manually set
+# the AIA map instead of the HMI map. We also need to manually set
# the plot limits because Matplotlib gets confused by the off-disk
-# parts of the image. Note that the HMI image now has the same
+# parts of the image. The HMI image now has the same
# orientation as the AIA image.
+#
+# Note that off-disk HMI data are not retained by default because an
+# additional assumption is required to define the location of the HMI
+# emission in 3D space. We can use
+# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to
+# retain the off-disk HMI data. See
+# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`
+# for more reference.
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_subplot(121, projection=map_aia)
diff --git a/examples/map_transformations/reprojection_align_aia_hmi.py b/examples/map_transformations/reprojection_align_aia_hmi.py
--- a/examples/map_transformations/reprojection_align_aia_hmi.py
+++ b/examples/map_transformations/reprojection_align_aia_hmi.py
@@ -55,6 +55,14 @@
######################################################################
# Plot the images side by side.
+#
+# Note that off-disk HMI data are not retained by default because an
+# additional assumption is required to define the location of the HMI
+# emission in 3D space. We can use
+# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to
+# retain the off-disk HMI data. See
+# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`
+# for more reference.
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_subplot(121, projection=map_aia)
|
{"golden_diff": "diff --git a/examples/map_transformations/autoalign_aia_hmi.py b/examples/map_transformations/autoalign_aia_hmi.py\n--- a/examples/map_transformations/autoalign_aia_hmi.py\n+++ b/examples/map_transformations/autoalign_aia_hmi.py\n@@ -45,10 +45,18 @@\n # Setting ``autoalign=True`` allows plotting the HMI image onto axes\n # defined by the AIA reference frame. In contrast to the above code\n # block, we intentionally set the ``projection`` for the axes to be\n-# the AIA map # instead of the HMI map. We also need to manually set\n+# the AIA map instead of the HMI map. We also need to manually set\n # the plot limits because Matplotlib gets confused by the off-disk\n-# parts of the image. Note that the HMI image now has the same\n+# parts of the image. The HMI image now has the same\n # orientation as the AIA image.\n+#\n+# Note that off-disk HMI data are not retained by default because an\n+# additional assumption is required to define the location of the HMI\n+# emission in 3D space. We can use\n+# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to\n+# retain the off-disk HMI data. See\n+# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`\n+# for more reference.\n \n fig = plt.figure(figsize=(12, 5))\n ax1 = fig.add_subplot(121, projection=map_aia)\ndiff --git a/examples/map_transformations/reprojection_align_aia_hmi.py b/examples/map_transformations/reprojection_align_aia_hmi.py\n--- a/examples/map_transformations/reprojection_align_aia_hmi.py\n+++ b/examples/map_transformations/reprojection_align_aia_hmi.py\n@@ -55,6 +55,14 @@\n \n ######################################################################\n # Plot the images side by side.\n+#\n+# Note that off-disk HMI data are not retained by default because an\n+# additional assumption is required to define the location of the HMI\n+# emission in 3D space. We can use\n+# :meth:`~sunpy.coordinates.Helioprojective.assume_spherical_screen` to\n+# retain the off-disk HMI data. See\n+# :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_spherical_screen.py`\n+# for more reference.\n \n fig = plt.figure(figsize=(12, 5))\n ax1 = fig.add_subplot(121, projection=map_aia)\n", "issue": "Suggest the use of `assume_spherical_screen()` in the autoalign/reproject examples\nThese two examples:\r\n* Autoaligning: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/autoalign_aia_hmi.html\r\n* Reprojecting: https://docs.sunpy.org/en/v3.1.5/generated/gallery/map_transformations/reprojection_align_aia_hmi.html\r\n\r\nare silent about the fact that off-disk HMI data is being discarded. The examples should note that, and additionally suggest that the user can use `Helioprojective.assume_spherical_screen()` to retain the off-disk HMI data.\n", "before_files": [{"content": "\"\"\"\n========================================\nAligning AIA and HMI Data with Reproject\n========================================\n\nThis example shows how to reproject one image to be aligned with another image.\n\nHere we use the `reproject <https://reproject.readthedocs.io/en/stable/>`__\npackage to transform one image to the reference frame of another image. The\nreference frames are defined by the respective World Coordinate System (WCS)\ninformation. 
This is a very generic way of aligning data, and can be very\naccurate.\n\nYou will need ``reproject`` v0.6 or higher installed.\n\nSee :ref:`sphx_glr_generated_gallery_map_transformations_autoalign_aia_hmi.py`\nfor an alternate approach to image alignment, where the images are aligned\nas they are plotted, with no modified map created.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n######################################################################\n# We use the AIA image and HMI image from the sample data. For the\n# HMI map, we use the special HMI color map, which expects the plotted\n# range to be -1500 to 1500.\n\nmap_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\nmap_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nmap_hmi.plot_settings['cmap'] = \"hmimag\"\nmap_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n######################################################################\n# Plot both images side by side.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_hmi)\nmap_hmi.plot(axes=ax2)\n\n######################################################################\n# We can now reproject the HMI image to the WCS of the AIA image. We are using\n# the fast `~reproject.reproject_interp`, however the slower but most accurate\n# `~reproject.reproject_exact` would also work well here. The\n# `~reproject.reproject_exact` function only works when reprojecting between\n# two WCSes with the same observer, which makes it well suited to aligning\n# data.\n\nout_hmi = map_hmi.reproject_to(map_aia.wcs)\n\n######################################################################\n# Plot the images side by side.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=out_hmi)\nout_hmi.plot(axes=ax2, title='Reprojected HMI image')\n\n######################################################################\n# As both of these images are now on the same pixel grid we can directly plot\n# them over one another, by setting the transparency of the HMI plot.\n\nfig = plt.figure()\nax1 = fig.add_subplot(projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nout_hmi.plot(axes=ax1, alpha=0.5)\nplt.title('HMI overlaid on AIA')\n\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 2\n", "path": "examples/map_transformations/reprojection_align_aia_hmi.py"}, {"content": "\"\"\"\n==============================================\nAuto-Aligning AIA and HMI Data During Plotting\n==============================================\n\nThis example shows how to auto-align two images with different reference frames\nduring plotting.\n\nHere we use the optional keyword ``autoalign`` when calling Map's\n:meth:`~sunpy.map.GenericMap.plot` method. 
The reference frames are defined by\nthe respective World Coordinate System (WCS) information.\n\nSee :ref:`sphx_glr_generated_gallery_map_transformations_reprojection_align_aia_hmi.py`\nfor an alternate approach to image alignment, where one of the maps is modified\nprior to plotting, and thus is available for purposes other than plotting.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport astropy.units as u\n\nimport sunpy.data.sample\nimport sunpy.map\n\n######################################################################\n# We use the AIA image and HMI image from the sample data. For the\n# HMI map, we use the special HMI color map, which expects the plotted\n# range to be -1500 to 1500.\n\nmap_aia = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE)\nmap_hmi = sunpy.map.Map(sunpy.data.sample.HMI_LOS_IMAGE)\nmap_hmi.plot_settings['cmap'] = \"hmimag\"\nmap_hmi.plot_settings['norm'] = plt.Normalize(-1500, 1500)\n\n######################################################################\n# Plot both images side by side. Note that the HMI image is oriented\n# \"upside down\" relative to the AIA image.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_hmi)\nmap_hmi.plot(axes=ax2)\n\n######################################################################\n# Setting ``autoalign=True`` allows plotting the HMI image onto axes\n# defined by the AIA reference frame. In contrast to the above code\n# block, we intentionally set the ``projection`` for the axes to be\n# the AIA map # instead of the HMI map. We also need to manually set\n# the plot limits because Matplotlib gets confused by the off-disk\n# parts of the image. Note that the HMI image now has the same\n# orientation as the AIA image.\n\nfig = plt.figure(figsize=(12, 5))\nax1 = fig.add_subplot(121, projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nax2 = fig.add_subplot(122, projection=map_aia)\nmap_hmi.plot(axes=ax2, autoalign=True, title='HMI image in AIA reference frame')\nax2.axis(ax1.axis())\n\n######################################################################\n# We can directly plot them over one another, by setting the\n# transparency of the HMI plot.\n\nfig = plt.figure()\nax1 = fig.add_subplot(projection=map_aia)\nmap_aia.plot(axes=ax1, clip_interval=(1, 99.9)*u.percent)\nmap_hmi.plot(axes=ax1, autoalign=True, alpha=0.5)\nax1.set_title('HMI overlaid on AIA')\n\nplt.show()\n\n# sphinx_gallery_thumbnail_number = 2\n", "path": "examples/map_transformations/autoalign_aia_hmi.py"}]}
| 2,505 | 565 |
gh_patches_debug_21978
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-4366
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[APP SUBMITTED]: error: [Errno 99] Address not available
### INFO
**Python Version**: `2.7.14 (default, Dec 14 2017, 15:51:29) [GCC 6.4.0]`
**Operating System**: `Linux-4.2.8-x86_64-with`
**Locale**: `None`
**Branch**: [master](../tree/master)
**Database**: `44.11`
**Commit**: pymedusa/Medusa@c65ca2607fae74405dcceaaf8c02f3d29d0313ea
**Link to Log**: https://gist.github.com/0d062813555bacfdadcc7bbfcf7c7e28
### ERROR
<pre>
2018-06-11 14:32:43 ERROR SEARCHQUEUE-DAILY-SEARCH :: [c65ca26] DailySearchQueueItem Exception, error: [Errno 99] Address not available
Traceback (most recent call last):
File "/app/medusa/medusa/search/queue.py", line 289, in run
self.success = snatch_episode(result)
File "/app/medusa/medusa/search/core.py", line 147, in snatch_episode
result_downloaded = **********.sendNZB(result, is_proper)
File "/app/medusa/medusa/clients/nzb/**********.py", line 92, in sendNZB
if not NZBConnection(url):
File "/app/medusa/medusa/clients/nzb/**********.py", line 29, in NZBConnection
if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):
File "/usr/lib/python2.7/xmlrpclib.py", line 1243, in __call__
return self.__send(self.__name, args)
File "/usr/lib/python2.7/xmlrpclib.py", line 1602, in __request
verbose=self.__verbose
File "/usr/lib/python2.7/xmlrpclib.py", line 1283, in request
return self.single_request(host, handler, request_body, verbose)
File "/usr/lib/python2.7/xmlrpclib.py", line 1311, in single_request
self.send_content(h, request_body)
File "/usr/lib/python2.7/xmlrpclib.py", line 1459, in send_content
connection.endheaders(request_body)
File "/usr/lib/python2.7/httplib.py", line 1038, in endheaders
self._send_output(message_body)
File "/usr/lib/python2.7/httplib.py", line 882, in _send_output
self.send(msg)
File "/usr/lib/python2.7/httplib.py", line 844, in send
self.connect()
File "/usr/lib/python2.7/httplib.py", line 821, in connect
self.timeout, self.source_address)
File "/usr/lib/python2.7/socket.py", line 575, in create_connection
raise err
error: [Errno 99] Address not available
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
</issue>
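The traceback shows a raw `socket.error` escaping from the XML-RPC connection attempt, which the connection helper (`NZBConnection` in the code below) does not currently catch. A rough sketch of the kind of handling involved (illustrative only; the real helper also logs and distinguishes the xmlrpc error types):

```python
import socket
from xmlrpc.client import Error, ProtocolError, ServerProxy


def nzb_connection(url):
    # Sketch: return True if NZBget answers on ``url``, False otherwise.
    rpc = ServerProxy(url)
    try:
        rpc.writelog('INFO', 'Medusa connected to test connection.')
        return True
    except ProtocolError:
        return False   # e.g. wrong username/password
    except Error:
        return False   # XML-RPC fault reported by NZBget
    except socket.error:
        return False   # host/port unreachable, e.g. [Errno 99]
```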
<code>
[start of medusa/clients/nzb/nzbget.py]
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4
5 import datetime
6 import logging
7 from base64 import standard_b64encode
8 from xmlrpc.client import Error, ProtocolError, ServerProxy
9
10 from medusa import app
11 from medusa.common import Quality
12 from medusa.helper.common import try_int
13 from medusa.logger.adapters.style import BraceAdapter
14
15 from six import text_type
16
17 log = BraceAdapter(logging.getLogger(__name__))
18 log.logger.addHandler(logging.NullHandler())
19
20
21 def NZBConnection(url):
22 """Method to connect to NZBget client.
23
24 :param url: nzb url to connect
25 :return: True if connected, else False
26 """
27 nzbGetRPC = ServerProxy(url)
28 try:
29 if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):
30 log.debug('Successfully connected to NZBget')
31 else:
32 log.warning('Successfully connected to NZBget but unable to'
33 ' send a message')
34 return True
35
36 except ProtocolError as error:
37 if error.errmsg == 'Unauthorized':
38 log.warning('NZBget username or password is incorrect.')
39 else:
40 log.error('Protocol Error: {msg}', {'msg': error.errmsg})
41 return False
42
43 except Error as error:
44 log.warning('Please check your NZBget host and port (if it is'
45 ' running). NZBget is not responding to this combination'
46 ' Error: {msg}', {'msg': error.errmsg})
47 return False
48
49
50 def testNZB(host, username, password, use_https):
51 """Test NZBget client connection.
52
53 :param host: nzb host to connect
54 :param username: nzb username
55 :param password: nzb password
56 :param use_https: If we should use https or not
57
58 :return True if connected. Else False
59 """
60 url = 'http{}://{}:{}@{}/xmlrpc'.format(
61 's' if use_https else '',
62 username,
63 password,
64 host)
65 return NZBConnection(url)
66
67
68 def sendNZB(nzb, proper=False):
69 """
70 Sends NZB to NZBGet client
71
72 :param nzb: nzb object
73 :param proper: True if a Proper download, False if not.
74 """
75 if app.NZBGET_HOST is None:
76 log.warning('No NZBget host found in configuration.'
77 ' Please configure it.')
78 return False
79
80 addToTop = False
81 nzbgetprio = 0
82 category = app.NZBGET_CATEGORY
83 if nzb.series.is_anime:
84 category = app.NZBGET_CATEGORY_ANIME
85
86 url = 'http{}://{}:{}@{}/xmlrpc'.format(
87 's' if app.NZBGET_USE_HTTPS else '',
88 app.NZBGET_USERNAME,
89 app.NZBGET_PASSWORD,
90 app.NZBGET_HOST)
91
92 if not NZBConnection(url):
93 return False
94
95 nzbGetRPC = ServerProxy(url)
96
97 dupekey = ''
98 dupescore = 0
99 # if it aired recently make it high priority and generate DupeKey/Score
100 for cur_ep in nzb.episodes:
101 if dupekey == '':
102 if cur_ep.series.indexer == 1:
103 dupekey = 'Medusa-' + text_type(cur_ep.series.indexerid)
104 elif cur_ep.series.indexer == 2:
105 dupekey = 'Medusa-tvr' + text_type(cur_ep.series.indexerid)
106 dupekey += '-' + text_type(cur_ep.season) + '.' + text_type(cur_ep.episode)
107 if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7):
108 addToTop = True
109 nzbgetprio = app.NZBGET_PRIORITY
110 else:
111 category = app.NZBGET_CATEGORY_BACKLOG
112 if nzb.series.is_anime:
113 category = app.NZBGET_CATEGORY_ANIME_BACKLOG
114
115 if nzb.quality != Quality.UNKNOWN:
116 dupescore = nzb.quality * 100
117 if proper:
118 dupescore += 10
119
120 nzbcontent64 = None
121 if nzb.result_type == 'nzbdata':
122 data = nzb.extra_info[0]
123 nzbcontent64 = standard_b64encode(data)
124
125 log.info('Sending NZB to NZBget')
126 log.debug('URL: {}', url)
127
128 try:
129 # Find out if nzbget supports priority (Version 9.0+),
130 # old versions beginning with a 0.x will use the old command
131 nzbget_version_str = nzbGetRPC.version()
132 nzbget_version = try_int(
133 nzbget_version_str[:nzbget_version_str.find('.')]
134 )
135 if nzbget_version == 0:
136 if nzbcontent64:
137 nzbget_result = nzbGetRPC.append(
138 nzb.name + '.nzb',
139 category,
140 addToTop,
141 nzbcontent64
142 )
143 else:
144 if nzb.result_type == 'nzb':
145 if not nzb.provider.login():
146 return False
147
148 # TODO: Check if this needs exception handling
149 data = nzb.provider.session(nzb.url).content
150 if data is None:
151 return False
152
153 nzbcontent64 = standard_b64encode(data)
154
155 nzbget_result = nzbGetRPC.append(
156 nzb.name + '.nzb',
157 category,
158 addToTop,
159 nzbcontent64
160 )
161 elif nzbget_version == 12:
162 if nzbcontent64 is not None:
163 nzbget_result = nzbGetRPC.append(
164 nzb.name + '.nzb', category, nzbgetprio, False,
165 nzbcontent64, False, dupekey, dupescore, 'score'
166 )
167 else:
168 nzbget_result = nzbGetRPC.appendurl(
169 nzb.name + '.nzb', category, nzbgetprio, False, nzb.url,
170 False, dupekey, dupescore, 'score'
171 )
172 # v13+ has a new combined append method that accepts both (url and
173 # content) also the return value has changed from boolean to integer
174 # (Positive number representing NZBID of the queue item. 0 and negative
175 # numbers represent error codes.)
176 elif nzbget_version >= 13:
177 nzbget_result = nzbGetRPC.append(
178 nzb.name + '.nzb',
179 nzbcontent64 if nzbcontent64 is not None else nzb.url,
180 category, nzbgetprio, False, False, dupekey, dupescore,
181 'score'
182 ) > 0
183 else:
184 if nzbcontent64 is not None:
185 nzbget_result = nzbGetRPC.append(
186 nzb.name + '.nzb', category, nzbgetprio, False,
187 nzbcontent64
188 )
189 else:
190 nzbget_result = nzbGetRPC.appendurl(
191 nzb.name + '.nzb', category, nzbgetprio, False, nzb.url
192 )
193
194 if nzbget_result:
195 log.debug('NZB sent to NZBget successfully')
196 return True
197 else:
198 log.warning('NZBget could not add {name}.nzb to the queue',
199 {'name': nzb.name})
200 return False
201 except Exception:
202 log.warning('Connect Error to NZBget: could not add {file}.nzb to the'
203 ' queue', {'name': nzb.name})
204 return False
205
[end of medusa/clients/nzb/nzbget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/clients/nzb/nzbget.py b/medusa/clients/nzb/nzbget.py
--- a/medusa/clients/nzb/nzbget.py
+++ b/medusa/clients/nzb/nzbget.py
@@ -4,6 +4,7 @@
import datetime
import logging
+import socket
from base64 import standard_b64encode
from xmlrpc.client import Error, ProtocolError, ServerProxy
@@ -41,11 +42,17 @@
return False
except Error as error:
- log.warning('Please check your NZBget host and port (if it is'
- ' running). NZBget is not responding to this combination'
+ log.warning('Please check your NZBget host and port (if it is running).'
+ ' NZBget is not responding to this combination.'
' Error: {msg}', {'msg': error.errmsg})
return False
+ except socket.error as error:
+ log.warning('Please check your NZBget host and port (if it is running).'
+ ' NZBget is not responding to this combination.'
+ ' Socket Error: {msg}', {'msg': error})
+ return False
+
def testNZB(host, username, password, use_https):
"""Test NZBget client connection.
|
{"golden_diff": "diff --git a/medusa/clients/nzb/nzbget.py b/medusa/clients/nzb/nzbget.py\n--- a/medusa/clients/nzb/nzbget.py\n+++ b/medusa/clients/nzb/nzbget.py\n@@ -4,6 +4,7 @@\n \n import datetime\n import logging\n+import socket\n from base64 import standard_b64encode\n from xmlrpc.client import Error, ProtocolError, ServerProxy\n \n@@ -41,11 +42,17 @@\n return False\n \n except Error as error:\n- log.warning('Please check your NZBget host and port (if it is'\n- ' running). NZBget is not responding to this combination'\n+ log.warning('Please check your NZBget host and port (if it is running).'\n+ ' NZBget is not responding to this combination.'\n ' Error: {msg}', {'msg': error.errmsg})\n return False\n \n+ except socket.error as error:\n+ log.warning('Please check your NZBget host and port (if it is running).'\n+ ' NZBget is not responding to this combination.'\n+ ' Socket Error: {msg}', {'msg': error})\n+ return False\n+\n \n def testNZB(host, username, password, use_https):\n \"\"\"Test NZBget client connection.\n", "issue": "[APP SUBMITTED]: error: [Errno 99] Address not available\n\n### INFO\n**Python Version**: `2.7.14 (default, Dec 14 2017, 15:51:29) [GCC 6.4.0]`\n**Operating System**: `Linux-4.2.8-x86_64-with`\n**Locale**: `None`\n**Branch**: [master](../tree/master)\n**Database**: `44.11`\n**Commit**: pymedusa/Medusa@c65ca2607fae74405dcceaaf8c02f3d29d0313ea\n**Link to Log**: https://gist.github.com/0d062813555bacfdadcc7bbfcf7c7e28\n### ERROR\n<pre>\n2018-06-11 14:32:43 ERROR SEARCHQUEUE-DAILY-SEARCH :: [c65ca26] DailySearchQueueItem Exception, error: [Errno 99] Address not available\nTraceback (most recent call last):\n File \"/app/medusa/medusa/search/queue.py\", line 289, in run\n self.success = snatch_episode(result)\n File \"/app/medusa/medusa/search/core.py\", line 147, in snatch_episode\n result_downloaded = **********.sendNZB(result, is_proper)\n File \"/app/medusa/medusa/clients/nzb/**********.py\", line 92, in sendNZB\n if not NZBConnection(url):\n File \"/app/medusa/medusa/clients/nzb/**********.py\", line 29, in NZBConnection\n if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1243, in __call__\n return self.__send(self.__name, args)\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1602, in __request\n verbose=self.__verbose\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1283, in request\n return self.single_request(host, handler, request_body, verbose)\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1311, in single_request\n self.send_content(h, request_body)\n File \"/usr/lib/python2.7/xmlrpclib.py\", line 1459, in send_content\n connection.endheaders(request_body)\n File \"/usr/lib/python2.7/httplib.py\", line 1038, in endheaders\n self._send_output(message_body)\n File \"/usr/lib/python2.7/httplib.py\", line 882, in _send_output\n self.send(msg)\n File \"/usr/lib/python2.7/httplib.py\", line 844, in send\n self.connect()\n File \"/usr/lib/python2.7/httplib.py\", line 821, in connect\n self.timeout, self.source_address)\n File \"/usr/lib/python2.7/socket.py\", line 575, in create_connection\n raise err\nerror: [Errno 99] Address not available\n</pre>\n---\n_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators\n\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\n\nimport datetime\nimport logging\nfrom base64 import standard_b64encode\nfrom xmlrpc.client import Error, ProtocolError, ServerProxy\n\nfrom medusa import app\nfrom medusa.common import 
Quality\nfrom medusa.helper.common import try_int\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom six import text_type\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\ndef NZBConnection(url):\n \"\"\"Method to connect to NZBget client.\n\n :param url: nzb url to connect\n :return: True if connected, else False\n \"\"\"\n nzbGetRPC = ServerProxy(url)\n try:\n if nzbGetRPC.writelog('INFO', 'Medusa connected to test connection.'):\n log.debug('Successfully connected to NZBget')\n else:\n log.warning('Successfully connected to NZBget but unable to'\n ' send a message')\n return True\n\n except ProtocolError as error:\n if error.errmsg == 'Unauthorized':\n log.warning('NZBget username or password is incorrect.')\n else:\n log.error('Protocol Error: {msg}', {'msg': error.errmsg})\n return False\n\n except Error as error:\n log.warning('Please check your NZBget host and port (if it is'\n ' running). NZBget is not responding to this combination'\n ' Error: {msg}', {'msg': error.errmsg})\n return False\n\n\ndef testNZB(host, username, password, use_https):\n \"\"\"Test NZBget client connection.\n\n :param host: nzb host to connect\n :param username: nzb username\n :param password: nzb password\n :param use_https: If we should use https or not\n\n :return True if connected. Else False\n \"\"\"\n url = 'http{}://{}:{}@{}/xmlrpc'.format(\n 's' if use_https else '',\n username,\n password,\n host)\n return NZBConnection(url)\n\n\ndef sendNZB(nzb, proper=False):\n \"\"\"\n Sends NZB to NZBGet client\n\n :param nzb: nzb object\n :param proper: True if a Proper download, False if not.\n \"\"\"\n if app.NZBGET_HOST is None:\n log.warning('No NZBget host found in configuration.'\n ' Please configure it.')\n return False\n\n addToTop = False\n nzbgetprio = 0\n category = app.NZBGET_CATEGORY\n if nzb.series.is_anime:\n category = app.NZBGET_CATEGORY_ANIME\n\n url = 'http{}://{}:{}@{}/xmlrpc'.format(\n 's' if app.NZBGET_USE_HTTPS else '',\n app.NZBGET_USERNAME,\n app.NZBGET_PASSWORD,\n app.NZBGET_HOST)\n\n if not NZBConnection(url):\n return False\n\n nzbGetRPC = ServerProxy(url)\n\n dupekey = ''\n dupescore = 0\n # if it aired recently make it high priority and generate DupeKey/Score\n for cur_ep in nzb.episodes:\n if dupekey == '':\n if cur_ep.series.indexer == 1:\n dupekey = 'Medusa-' + text_type(cur_ep.series.indexerid)\n elif cur_ep.series.indexer == 2:\n dupekey = 'Medusa-tvr' + text_type(cur_ep.series.indexerid)\n dupekey += '-' + text_type(cur_ep.season) + '.' 
+ text_type(cur_ep.episode)\n if datetime.date.today() - cur_ep.airdate <= datetime.timedelta(days=7):\n addToTop = True\n nzbgetprio = app.NZBGET_PRIORITY\n else:\n category = app.NZBGET_CATEGORY_BACKLOG\n if nzb.series.is_anime:\n category = app.NZBGET_CATEGORY_ANIME_BACKLOG\n\n if nzb.quality != Quality.UNKNOWN:\n dupescore = nzb.quality * 100\n if proper:\n dupescore += 10\n\n nzbcontent64 = None\n if nzb.result_type == 'nzbdata':\n data = nzb.extra_info[0]\n nzbcontent64 = standard_b64encode(data)\n\n log.info('Sending NZB to NZBget')\n log.debug('URL: {}', url)\n\n try:\n # Find out if nzbget supports priority (Version 9.0+),\n # old versions beginning with a 0.x will use the old command\n nzbget_version_str = nzbGetRPC.version()\n nzbget_version = try_int(\n nzbget_version_str[:nzbget_version_str.find('.')]\n )\n if nzbget_version == 0:\n if nzbcontent64:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n category,\n addToTop,\n nzbcontent64\n )\n else:\n if nzb.result_type == 'nzb':\n if not nzb.provider.login():\n return False\n\n # TODO: Check if this needs exception handling\n data = nzb.provider.session(nzb.url).content\n if data is None:\n return False\n\n nzbcontent64 = standard_b64encode(data)\n\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n category,\n addToTop,\n nzbcontent64\n )\n elif nzbget_version == 12:\n if nzbcontent64 is not None:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb', category, nzbgetprio, False,\n nzbcontent64, False, dupekey, dupescore, 'score'\n )\n else:\n nzbget_result = nzbGetRPC.appendurl(\n nzb.name + '.nzb', category, nzbgetprio, False, nzb.url,\n False, dupekey, dupescore, 'score'\n )\n # v13+ has a new combined append method that accepts both (url and\n # content) also the return value has changed from boolean to integer\n # (Positive number representing NZBID of the queue item. 0 and negative\n # numbers represent error codes.)\n elif nzbget_version >= 13:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb',\n nzbcontent64 if nzbcontent64 is not None else nzb.url,\n category, nzbgetprio, False, False, dupekey, dupescore,\n 'score'\n ) > 0\n else:\n if nzbcontent64 is not None:\n nzbget_result = nzbGetRPC.append(\n nzb.name + '.nzb', category, nzbgetprio, False,\n nzbcontent64\n )\n else:\n nzbget_result = nzbGetRPC.appendurl(\n nzb.name + '.nzb', category, nzbgetprio, False, nzb.url\n )\n\n if nzbget_result:\n log.debug('NZB sent to NZBget successfully')\n return True\n else:\n log.warning('NZBget could not add {name}.nzb to the queue',\n {'name': nzb.name})\n return False\n except Exception:\n log.warning('Connect Error to NZBget: could not add {file}.nzb to the'\n ' queue', {'name': nzb.name})\n return False\n", "path": "medusa/clients/nzb/nzbget.py"}]}
| 3,530 | 296 |
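The medusa record above resolves its connection failure by catching low-level socket errors in addition to the XML-RPC errors already handled. A minimal sketch of that pattern, assuming an NZBGet server reachable over XML-RPC and a standard module logger (the helper name `nzb_connection` is illustrative, not the project's actual function):

```python
import logging
import socket
from xmlrpc.client import Error, ProtocolError, ServerProxy

log = logging.getLogger(__name__)


def nzb_connection(url):
    """Return True if an NZBGet server answers at ``url``, else False."""
    client = ServerProxy(url)
    try:
        # NZBGet exposes writelog over XML-RPC; a truthy reply means we are connected.
        return bool(client.writelog("INFO", "Medusa connected to test connection."))
    except ProtocolError as error:
        log.error("Protocol error while contacting NZBGet: %s", error.errmsg)
    except Error as error:
        log.warning("NZBGet is not responding to this host/port combination: %s", error)
    except socket.error as error:  # the branch the golden diff adds
        log.warning("NZBGet is not responding, socket error: %s", error)
    return False
```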
gh_patches_debug_4636
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-885
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImportError: cannot import name 'marker_trait' from 'chaco.api'
**Problem Description**
ImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run one of the SciPy 2008 tutorial demos
**Reproduction Steps:**
```python
python chaco/examples/tutorials/scipy2008/traits_example.py
```
**Expected behavior:**
test passes
**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]
</issue>
<code>
[start of examples/tutorials/scipy2008/traits_example.py]
1 from numpy import linspace, sin
2
3 from enable.api import ColorTrait
4 from chaco.api import ArrayPlotData, Plot, marker_trait
5 from enable.api import ComponentEditor
6 from traits.api import HasTraits, Instance, Int
7 from traitsui.api import Group, Item, View
8
9
10 class ScatterPlotTraits(HasTraits):
11
12 plot = Instance(Plot)
13 color = ColorTrait("blue")
14 marker = marker_trait
15 marker_size = Int(4)
16
17 traits_view = View(
18 Group(
19 Item("color", label="Color", style="custom"),
20 Item("marker", label="Marker"),
21 Item("marker_size", label="Size"),
22 Item("plot", editor=ComponentEditor(), show_label=False),
23 orientation="vertical",
24 ),
25 width=800,
26 height=600,
27 resizable=True,
28 title="Chaco Plot",
29 )
30
31 def __init__(self):
32 # Create the data and the PlotData object
33 x = linspace(-14, 14, 100)
34 y = sin(x) * x ** 3
35 plotdata = ArrayPlotData(x=x, y=y)
36 # Create a Plot and associate it with the PlotData
37 plot = Plot(plotdata)
38 # Create a line plot in the Plot
39 self.renderer = plot.plot(("x", "y"), type="scatter", color="blue")[0]
40 self.plot = plot
41
42 def _color_changed(self):
43 self.renderer.color = self.color
44
45 def _marker_changed(self):
46 self.renderer.marker = self.marker
47
48 def _marker_size_changed(self):
49 self.renderer.marker_size = self.marker_size
50
51
52 # ===============================================================================
53 # demo object that is used by the demo.py application.
54 # ===============================================================================
55 demo = ScatterPlotTraits()
56
57 if __name__ == "__main__":
58 demo.configure_traits()
59
[end of examples/tutorials/scipy2008/traits_example.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py
--- a/examples/tutorials/scipy2008/traits_example.py
+++ b/examples/tutorials/scipy2008/traits_example.py
@@ -1,7 +1,7 @@
from numpy import linspace, sin
-from enable.api import ColorTrait
-from chaco.api import ArrayPlotData, Plot, marker_trait
+from enable.api import ColorTrait, marker_trait
+from chaco.api import ArrayPlotData, Plot
from enable.api import ComponentEditor
from traits.api import HasTraits, Instance, Int
from traitsui.api import Group, Item, View
|
{"golden_diff": "diff --git a/examples/tutorials/scipy2008/traits_example.py b/examples/tutorials/scipy2008/traits_example.py\n--- a/examples/tutorials/scipy2008/traits_example.py\n+++ b/examples/tutorials/scipy2008/traits_example.py\n@@ -1,7 +1,7 @@\n from numpy import linspace, sin\n \n-from enable.api import ColorTrait\n-from chaco.api import ArrayPlotData, Plot, marker_trait\n+from enable.api import ColorTrait, marker_trait\n+from chaco.api import ArrayPlotData, Plot\n from enable.api import ComponentEditor\n from traits.api import HasTraits, Instance, Int\n from traitsui.api import Group, Item, View\n", "issue": "ImportError: cannot import name 'marker_trait' from 'chaco.api'\n**Problem Description**\nImportError: cannot import name 'marker_trait' from 'chaco.api' when trying to run a 2008 demo\n\n**Reproduction Steps:**\n\n```python\npython chaco/examples/tutorials/scipy2008/traits_example.py\n```\n\n**Expected behavior:**\ntest past\n\n**OS, Python version:** [MacOS, python3.8 (with or without edm) or 3.11]\n\n", "before_files": [{"content": "from numpy import linspace, sin\n\nfrom enable.api import ColorTrait\nfrom chaco.api import ArrayPlotData, Plot, marker_trait\nfrom enable.api import ComponentEditor\nfrom traits.api import HasTraits, Instance, Int\nfrom traitsui.api import Group, Item, View\n\n\nclass ScatterPlotTraits(HasTraits):\n\n plot = Instance(Plot)\n color = ColorTrait(\"blue\")\n marker = marker_trait\n marker_size = Int(4)\n\n traits_view = View(\n Group(\n Item(\"color\", label=\"Color\", style=\"custom\"),\n Item(\"marker\", label=\"Marker\"),\n Item(\"marker_size\", label=\"Size\"),\n Item(\"plot\", editor=ComponentEditor(), show_label=False),\n orientation=\"vertical\",\n ),\n width=800,\n height=600,\n resizable=True,\n title=\"Chaco Plot\",\n )\n\n def __init__(self):\n # Create the data and the PlotData object\n x = linspace(-14, 14, 100)\n y = sin(x) * x ** 3\n plotdata = ArrayPlotData(x=x, y=y)\n # Create a Plot and associate it with the PlotData\n plot = Plot(plotdata)\n # Create a line plot in the Plot\n self.renderer = plot.plot((\"x\", \"y\"), type=\"scatter\", color=\"blue\")[0]\n self.plot = plot\n\n def _color_changed(self):\n self.renderer.color = self.color\n\n def _marker_changed(self):\n self.renderer.marker = self.marker\n\n def _marker_size_changed(self):\n self.renderer.marker_size = self.marker_size\n\n\n# ===============================================================================\n# demo object that is used by the demo.py application.\n# ===============================================================================\ndemo = ScatterPlotTraits()\n\nif __name__ == \"__main__\":\n demo.configure_traits()\n", "path": "examples/tutorials/scipy2008/traits_example.py"}]}
| 1,169 | 154 |
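The chaco record's fix is a one-line import move: `marker_trait` lives in `enable.api`, not `chaco.api`. A hedged sketch of the corrected imports in use (assumes the enable, chaco and traits packages are installed):

```python
# After the patch, marker_trait comes from enable.api; chaco.api keeps the plot classes.
from enable.api import ColorTrait, marker_trait
from chaco.api import ArrayPlotData, Plot
from traits.api import HasTraits


class ScatterStyle(HasTraits):
    color = ColorTrait("blue")
    marker = marker_trait


style = ScatterStyle()
print(style.marker)           # default marker name supplied by enable
plot = Plot(ArrayPlotData())  # chaco still provides ArrayPlotData and Plot
```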
gh_patches_debug_4639
|
rasdani/github-patches
|
git_diff
|
ManimCommunity__manim-2962
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation Website Has Broken Links: I Can't Submit Pull Requests!
On each page of docs.manim.community there is a pencil icon for editing that page.
But now, if I click the pencil on any page, it gives a 404!
For example:
If I go to this link:
https://docs.manim.community/en/stable/reference/manim.mobject.geometry.arc.Circle.html
And click the Pencil on the top-right I get a 404!
Fix it!
</issue>
<code>
[start of docs/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # https://www.sphinx-doc.org/en/master/usage/configuration.html
6
7 from __future__ import annotations
8
9 import os
10 import sys
11 from pathlib import Path
12
13 import manim
14
15 # -- Path setup --------------------------------------------------------------
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19
20
21 sys.path.insert(0, os.path.abspath("."))
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "Manim"
27 copyright = "2020-2022, The Manim Community Dev Team"
28 author = "The Manim Community Dev Team"
29
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx_copybutton",
39 "sphinx.ext.napoleon",
40 "sphinx.ext.autosummary",
41 "sphinx.ext.doctest",
42 "sphinx.ext.extlinks",
43 "sphinx.ext.viewcode",
44 "sphinxext.opengraph",
45 "manim.utils.docbuild.manim_directive",
46 "sphinx.ext.graphviz",
47 "sphinx.ext.inheritance_diagram",
48 "sphinxcontrib.programoutput",
49 "myst_parser",
50 ]
51
52 # Automatically generate stub pages when using the .. autosummary directive
53 autosummary_generate = True
54
55 # generate documentation from type hints
56 autodoc_typehints = "description"
57 autoclass_content = "both"
58
59 # controls whether functions documented by the autofunction directive
60 # appear with their full module names
61 add_module_names = False
62
63 # Add any paths that contain templates here, relative to this directory.
64 templates_path = ["_templates"]
65
66 # Custom section headings in our documentation
67 napoleon_custom_sections = ["Tests", ("Test", "Tests")]
68
69 # List of patterns, relative to source directory, that match files and
70 # directories to ignore when looking for source files.
71 # This pattern also affects html_static_path and html_extra_path.
72 html_extra_path = ["robots.txt"]
73
74 exclude_patterns: list[str] = []
75
76 # -- Options for internationalization ----------------------------------------
77 # Set the destination directory of the localized po files
78 locale_dirs = ["../i18n/"]
79
80 # Splits the text in more pot files.
81 gettext_compact = False
82
83 # Remove useless metadata from po files.
84 gettext_last_translator = ""
85 gettext_language_team = ""
86
87 # -- Options for HTML output -------------------------------------------------
88
89 # The theme to use for HTML and HTML Help pages. See the documentation for
90 # a list of builtin themes.
91 #
92
93 html_theme = "furo"
94 html_favicon = str(Path("_static/favicon.ico"))
95
96 # Add any paths that contain custom static files (such as style sheets) here,
97 # relative to this directory. They are copied after the builtin static files,
98 # so a file named "default.css" will overwrite the builtin "default.css".
99 html_static_path = ["_static"]
100
101 html_theme_options = {
102 "light_logo": "manim-logo-sidebar.svg",
103 "dark_logo": "manim-logo-sidebar-dark.svg",
104 "light_css_variables": {
105 "color-content-foreground": "#000000",
106 "color-background-primary": "#ffffff",
107 "color-background-border": "#ffffff",
108 "color-sidebar-background": "#f8f9fb",
109 "color-brand-content": "#1c00e3",
110 "color-brand-primary": "#192bd0",
111 "color-link": "#c93434",
112 "color-link--hover": "#5b0000",
113 "color-inline-code-background": "#f6f6f6;",
114 "color-foreground-secondary": "#000",
115 },
116 "dark_css_variables": {
117 "color-content-foreground": "#ffffffd9",
118 "color-background-primary": "#131416",
119 "color-background-border": "#303335",
120 "color-sidebar-background": "#1a1c1e",
121 "color-brand-content": "#2196f3",
122 "color-brand-primary": "#007fff",
123 "color-link": "#51ba86",
124 "color-link--hover": "#9cefc6",
125 "color-inline-code-background": "#262626",
126 "color-foreground-secondary": "#ffffffd9",
127 },
128 }
129 html_title = f"Manim Community v{manim.__version__}"
130
131 # This specifies any additional css files that will override the theme's
132 html_css_files = ["custom.css"]
133
134
135 # external links
136 extlinks = {
137 "issue": ("https://github.com/ManimCommunity/manim/issues/%s", "#"),
138 "pr": ("https://github.com/ManimCommunity/manim/pull/%s", "#"),
139 }
140
141 # opengraph settings
142 ogp_image = "https://www.manim.community/logo.png"
143 ogp_site_name = "Manim Community | Documentation"
144 ogp_site_url = "https://docs.manim.community/"
145
146
147 # inheritance_graph settings
148 inheritance_graph_attrs = {
149 "concentrate": True,
150 "size": '""',
151 "splines": "ortho",
152 "nodesep": 0.1,
153 "ranksep": 0.2,
154 }
155
156 inheritance_node_attrs = {
157 "penwidth": 0,
158 "shape": "box",
159 "width": 0.05,
160 "height": 0.05,
161 "margin": 0.05,
162 }
163
164 inheritance_edge_attrs = {
165 "penwidth": 1,
166 }
167
168 html_js_files = [
169 "responsiveSvg.js",
170 ]
171
172 graphviz_output_format = "svg"
173
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -99,6 +99,10 @@
html_static_path = ["_static"]
html_theme_options = {
+ "source_repository": "https://github.com/ManimCommunity/manim/",
+ "source_branch": "main",
+ "source_directory": "docs/source/",
+ "top_of_page_button": None,
"light_logo": "manim-logo-sidebar.svg",
"dark_logo": "manim-logo-sidebar-dark.svg",
"light_css_variables": {
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -99,6 +99,10 @@\n html_static_path = [\"_static\"]\n \n html_theme_options = {\n+ \"source_repository\": \"https://github.com/ManimCommunity/manim/\",\n+ \"source_branch\": \"main\",\n+ \"source_directory\": \"docs/source/\",\n+ \"top_of_page_button\": None,\n \"light_logo\": \"manim-logo-sidebar.svg\",\n \"dark_logo\": \"manim-logo-sidebar-dark.svg\",\n \"light_css_variables\": {\n", "issue": "Documentation Website Has Broken Links: I Can't Submit Pull Requests!\nOn each page of Docs.Manim.Community there is a Pencil icon if editing needs be for that page.\r\nBut now if I click the Pencil for any page it gives a 404!\r\n\r\nFor example:\r\nIf I go on this link:\r\nhttps://docs.manim.community/en/stable/reference/manim.mobject.geometry.arc.Circle.html\r\n\r\nAnd click the Pencil on the top-right I get a 404!\r\n\r\nFix it!\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom pathlib import Path\n\nimport manim\n\n# -- Path setup --------------------------------------------------------------\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n\nsys.path.insert(0, os.path.abspath(\".\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"Manim\"\ncopyright = \"2020-2022, The Manim Community Dev Team\"\nauthor = \"The Manim Community Dev Team\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx_copybutton\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.viewcode\",\n \"sphinxext.opengraph\",\n \"manim.utils.docbuild.manim_directive\",\n \"sphinx.ext.graphviz\",\n \"sphinx.ext.inheritance_diagram\",\n \"sphinxcontrib.programoutput\",\n \"myst_parser\",\n]\n\n# Automatically generate stub pages when using the .. 
autosummary directive\nautosummary_generate = True\n\n# generate documentation from type hints\nautodoc_typehints = \"description\"\nautoclass_content = \"both\"\n\n# controls whether functions documented by the autofunction directive\n# appear with their full module names\nadd_module_names = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# Custom section headings in our documentation\nnapoleon_custom_sections = [\"Tests\", (\"Test\", \"Tests\")]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nhtml_extra_path = [\"robots.txt\"]\n\nexclude_patterns: list[str] = []\n\n# -- Options for internationalization ----------------------------------------\n# Set the destination directory of the localized po files\nlocale_dirs = [\"../i18n/\"]\n\n# Splits the text in more pot files.\ngettext_compact = False\n\n# Remove useless metadata from po files.\ngettext_last_translator = \"\"\ngettext_language_team = \"\"\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n\nhtml_theme = \"furo\"\nhtml_favicon = str(Path(\"_static/favicon.ico\"))\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nhtml_theme_options = {\n \"light_logo\": \"manim-logo-sidebar.svg\",\n \"dark_logo\": \"manim-logo-sidebar-dark.svg\",\n \"light_css_variables\": {\n \"color-content-foreground\": \"#000000\",\n \"color-background-primary\": \"#ffffff\",\n \"color-background-border\": \"#ffffff\",\n \"color-sidebar-background\": \"#f8f9fb\",\n \"color-brand-content\": \"#1c00e3\",\n \"color-brand-primary\": \"#192bd0\",\n \"color-link\": \"#c93434\",\n \"color-link--hover\": \"#5b0000\",\n \"color-inline-code-background\": \"#f6f6f6;\",\n \"color-foreground-secondary\": \"#000\",\n },\n \"dark_css_variables\": {\n \"color-content-foreground\": \"#ffffffd9\",\n \"color-background-primary\": \"#131416\",\n \"color-background-border\": \"#303335\",\n \"color-sidebar-background\": \"#1a1c1e\",\n \"color-brand-content\": \"#2196f3\",\n \"color-brand-primary\": \"#007fff\",\n \"color-link\": \"#51ba86\",\n \"color-link--hover\": \"#9cefc6\",\n \"color-inline-code-background\": \"#262626\",\n \"color-foreground-secondary\": \"#ffffffd9\",\n },\n}\nhtml_title = f\"Manim Community v{manim.__version__}\"\n\n# This specifies any additional css files that will override the theme's\nhtml_css_files = [\"custom.css\"]\n\n\n# external links\nextlinks = {\n \"issue\": (\"https://github.com/ManimCommunity/manim/issues/%s\", \"#\"),\n \"pr\": (\"https://github.com/ManimCommunity/manim/pull/%s\", \"#\"),\n}\n\n# opengraph settings\nogp_image = \"https://www.manim.community/logo.png\"\nogp_site_name = \"Manim Community | Documentation\"\nogp_site_url = \"https://docs.manim.community/\"\n\n\n# inheritance_graph settings\ninheritance_graph_attrs = {\n \"concentrate\": True,\n \"size\": '\"\"',\n \"splines\": \"ortho\",\n \"nodesep\": 0.1,\n \"ranksep\": 0.2,\n}\n\ninheritance_node_attrs = {\n \"penwidth\": 0,\n \"shape\": \"box\",\n \"width\": 0.05,\n \"height\": 0.05,\n \"margin\": 
0.05,\n}\n\ninheritance_edge_attrs = {\n \"penwidth\": 1,\n}\n\nhtml_js_files = [\n \"responsiveSvg.js\",\n]\n\ngraphviz_output_format = \"svg\"\n", "path": "docs/source/conf.py"}]}
| 2,348 | 134 |
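The manim record repairs the broken "edit this page" pencil links by telling the furo theme where the documentation sources live. A minimal `conf.py` fragment mirroring the golden diff (other theme options elided):

```python
# docs/source/conf.py (fragment): furo builds the edit links from these settings.
html_theme = "furo"
html_theme_options = {
    "source_repository": "https://github.com/ManimCommunity/manim/",
    "source_branch": "main",
    "source_directory": "docs/source/",
    "top_of_page_button": None,
    # ... existing logo and colour settings stay unchanged ...
}
```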
gh_patches_debug_21115
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-1233
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed in Slack Connector
<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->
# Description
I am trying to build a Slack bot using Opsdroid (master branch). When running `opsdroid start`, I get an error where the Opsdroid bot fails to connect with the Slack Workspace.
## Steps to Reproduce
1. Install opsdroid
```
pip install git+https://github.com/opsdroid/opsdroid.git
```
2. Create `configuration.yaml` with the following content
```
welcome-message: true
connectors:
- name: slack
api-token: "<Bot OAuth Token>"
skills:
- name: hello
```
3. Start opsdroid
```
opsdroid start
```
## Expected Functionality
The expected functionality is for the Opsdroid bot to connect to the Slack workspace and interact with the Slack user as per the configured skill.
## Experienced Functionality
The Opsdroid Bot failed to connect with the Slack Workspace with the following error:
```
INFO opsdroid.connector.slack: Connecting to Slack
Traceback (most recent call last):
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 936, in _wrap_create_connection
return await self._loop.create_connection(*args, **kwargs) # type: ignore # noqa
File "/usr/lib/python3.6/asyncio/base_events.py", line 820, in create_connection
sock, protocol_factory, ssl, server_hostname)
File "/usr/lib/python3.6/asyncio/base_events.py", line 846, in _create_connection_transport
yield from waiter
File "/usr/lib/python3.6/asyncio/sslproto.py", line 505, in data_received
ssldata, appdata = self._sslpipe.feed_ssldata(data)
File "/usr/lib/python3.6/asyncio/sslproto.py", line 201, in feed_ssldata
self._sslobj.do_handshake()
File "/usr/lib/python3.6/ssl.py", line 689, in do_handshake
self._sslobj.do_handshake()
ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/opsdroid/connector/slack/__init__.py", line 55, in connect
self.auth_info = (await self.slack.api_call("auth.test")).data
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py", line 229, in _send
http_verb=http_verb, api_url=api_url, req_args=req_args
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py", line 259, in _request
async with session.request(http_verb, api_url, **req_args) as res:
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py", line 1012, in __aenter__
self._resp = await self._coro
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py", line 483, in _request
timeout=real_timeout
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 523, in connect
proto = await self._create_connection(req, traces, timeout)
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 859, in _create_connection
req, traces, timeout)
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 1004, in _create_direct_connection
raise last_exc
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 986, in _create_direct_connection
req=req, client_error=client_error)
File "/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py", line 941, in _wrap_create_connection
raise ClientConnectorSSLError(req.connection_key, exc) from exc
aiohttp.client_exceptions.ClientConnectorSSLError: Cannot connect to host www.slack.com:443 ssl:default [[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)]
```
## Versions
- **Opsdroid version:** master branch in git
- **Python version:** 3.6.8
- **OS/Docker version:** Ubuntu 18.04 LTS
## Configuration File
Please include your version of the configuration file below.
```yaml
welcome-message: true
connectors:
- name: slack
api-token: "<Bot OAuth Token>"
skills:
- name: hello
```
## Additional Details
Any other details you wish to include such as screenshots, console messages, etc.
<!-- Love opsdroid? Please consider supporting our collective:
+👉 https://opencollective.com/opsdroid/donate -->
</issue>
<code>
[start of opsdroid/connector/slack/__init__.py]
1 """A connector for Slack."""
2 import logging
3 import re
4
5 import slack
6 from emoji import demojize
7
8 from opsdroid.connector import Connector, register_event
9 from opsdroid.events import Message, Reaction
10 from opsdroid.connector.slack.events import Blocks
11
12
13 _LOGGER = logging.getLogger(__name__)
14
15
16 class ConnectorSlack(Connector):
17 """A connector for Slack."""
18
19 def __init__(self, config, opsdroid=None):
20 """Create the connector."""
21 super().__init__(config, opsdroid=opsdroid)
22 _LOGGER.debug(_("Starting Slack connector"))
23 self.name = "slack"
24 self.default_target = config.get("default-room", "#general")
25 self.icon_emoji = config.get("icon-emoji", ":robot_face:")
26 self.token = config["api-token"]
27 self.timeout = config.get("connect-timeout", 10)
28 self.slack = slack.WebClient(token=self.token, run_async=True)
29 self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)
30 self.websocket = None
31 self.bot_name = config.get("bot-name", "opsdroid")
32 self.auth_info = None
33 self.user_info = None
34 self.bot_id = None
35 self.known_users = {}
36 self.keepalive = None
37 self.reconnecting = False
38 self.listening = True
39 self._message_id = 0
40
41 # Register callbacks
42 slack.RTMClient.on(event="message", callback=self.process_message)
43
44 async def connect(self):
45 """Connect to the chat service."""
46 _LOGGER.info(_("Connecting to Slack"))
47
48 try:
49 # The slack library recommends you call `self.slack_rtm.start()`` here but it
50 # seems to mess with the event loop's signal handlers which breaks opsdroid.
51 # Therefore we need to directly call the private `_connect_and_read` method
52 # instead. This method also blocks so we need to dispatch it to the loop as a task.
53 self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())
54
55 self.auth_info = (await self.slack.api_call("auth.test")).data
56 self.user_info = (
57 await self.slack.api_call(
58 "users.info",
59 http_verb="GET",
60 params={"user": self.auth_info["user_id"]},
61 )
62 ).data
63 self.bot_id = self.user_info["user"]["profile"]["bot_id"]
64
65 _LOGGER.debug(_("Connected as %s"), self.bot_name)
66 _LOGGER.debug(_("Using icon %s"), self.icon_emoji)
67 _LOGGER.debug(_("Default room is %s"), self.default_target)
68 _LOGGER.info(_("Connected successfully"))
69 except slack.errors.SlackApiError as error:
70 _LOGGER.error(
71 _(
72 "Unable to connect to Slack due to %s - "
73 "The Slack Connector will not be available."
74 ),
75 error,
76 )
77 except Exception:
78 await self.disconnect()
79 raise
80
81 async def disconnect(self):
82 """Disconnect from Slack."""
83 await self.slack_rtm.stop()
84 self.listening = False
85
86 async def listen(self):
87 """Listen for and parse new messages."""
88
89 async def process_message(self, **payload):
90 """Process a raw message and pass it to the parser."""
91 message = payload["data"]
92
93 # Ignore message edits
94 if "subtype" in message and message["subtype"] == "message_changed":
95 return
96
97 # Ignore own messages
98 if (
99 "subtype" in message
100 and message["subtype"] == "bot_message"
101 and message["bot_id"] == self.bot_id
102 ):
103 return
104
105 # Lookup username
106 _LOGGER.debug(_("Looking up sender username"))
107 try:
108 user_info = await self.lookup_username(message["user"])
109 except ValueError:
110 return
111
112 # Replace usernames in the message
113 _LOGGER.debug(_("Replacing userids in message with usernames"))
114 message["text"] = await self.replace_usernames(message["text"])
115
116 await self.opsdroid.parse(
117 Message(
118 message["text"],
119 user_info["name"],
120 message["channel"],
121 self,
122 raw_event=message,
123 )
124 )
125
126 @register_event(Message)
127 async def send_message(self, message):
128 """Respond with a message."""
129 _LOGGER.debug(
130 _("Responding with: '%s' in room %s"), message.text, message.target
131 )
132 await self.slack.api_call(
133 "chat.postMessage",
134 data={
135 "channel": message.target,
136 "text": message.text,
137 "as_user": False,
138 "username": self.bot_name,
139 "icon_emoji": self.icon_emoji,
140 },
141 )
142
143 @register_event(Blocks)
144 async def send_blocks(self, blocks):
145 """Respond with structured blocks."""
146 _LOGGER.debug(
147 _("Responding with interactive blocks in room %s"), blocks.target
148 )
149 await self.slack.api_call(
150 "chat.postMessage",
151 data={
152 "channel": blocks.target,
153 "username": self.bot_name,
154 "blocks": blocks.blocks,
155 "icon_emoji": self.icon_emoji,
156 },
157 )
158
159 @register_event(Reaction)
160 async def send_reaction(self, reaction):
161 """React to a message."""
162 emoji = demojize(reaction.emoji).replace(":", "")
163 _LOGGER.debug(_("Reacting with: %s"), emoji)
164 try:
165 await self.slack.api_call(
166 "reactions.add",
167 data={
168 "name": emoji,
169 "channel": reaction.target,
170 "timestamp": reaction.linked_event.raw_event["ts"],
171 },
172 )
173 except slack.errors.SlackApiError as error:
174 if "invalid_name" in str(error):
175 _LOGGER.warning(_("Slack does not support the emoji %s"), emoji)
176 else:
177 raise
178
179 async def lookup_username(self, userid):
180 """Lookup a username and cache it."""
181 if userid in self.known_users:
182 user_info = self.known_users[userid]
183 else:
184 response = await self.slack.users_info(user=userid)
185 user_info = response.data["user"]
186 if isinstance(user_info, dict):
187 self.known_users[userid] = user_info
188 else:
189 raise ValueError("Returned user is not a dict.")
190 return user_info
191
192 async def replace_usernames(self, message):
193 """Replace User ID with username in message text."""
194 userids = re.findall(r"\<\@([A-Z0-9]+)(?:\|.+)?\>", message)
195 for userid in userids:
196 user_info = await self.lookup_username(userid)
197 message = message.replace(
198 "<@{userid}>".format(userid=userid), user_info["name"]
199 )
200 return message
201
[end of opsdroid/connector/slack/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py
--- a/opsdroid/connector/slack/__init__.py
+++ b/opsdroid/connector/slack/__init__.py
@@ -1,6 +1,8 @@
"""A connector for Slack."""
import logging
import re
+import ssl
+import certifi
import slack
from emoji import demojize
@@ -25,8 +27,13 @@
self.icon_emoji = config.get("icon-emoji", ":robot_face:")
self.token = config["api-token"]
self.timeout = config.get("connect-timeout", 10)
- self.slack = slack.WebClient(token=self.token, run_async=True)
- self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)
+ self.ssl_context = ssl.create_default_context(cafile=certifi.where())
+ self.slack = slack.WebClient(
+ token=self.token, run_async=True, ssl=self.ssl_context
+ )
+ self.slack_rtm = slack.RTMClient(
+ token=self.token, run_async=True, ssl=self.ssl_context
+ )
self.websocket = None
self.bot_name = config.get("bot-name", "opsdroid")
self.auth_info = None
|
{"golden_diff": "diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py\n--- a/opsdroid/connector/slack/__init__.py\n+++ b/opsdroid/connector/slack/__init__.py\n@@ -1,6 +1,8 @@\n \"\"\"A connector for Slack.\"\"\"\n import logging\n import re\n+import ssl\n+import certifi\n \n import slack\n from emoji import demojize\n@@ -25,8 +27,13 @@\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n- self.slack = slack.WebClient(token=self.token, run_async=True)\n- self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)\n+ self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n+ self.slack = slack.WebClient(\n+ token=self.token, run_async=True, ssl=self.ssl_context\n+ )\n+ self.slack_rtm = slack.RTMClient(\n+ token=self.token, run_async=True, ssl=self.ssl_context\n+ )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n", "issue": "ssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed in Slack Connector\n<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->\r\n# Description\r\nI am trying to build a Slack bot using Opsdroid (master branch). When running `opsdroid start`, I get an error where the Opsdroid bot fails to connect with the Slack Workspace.\r\n\r\n## Steps to Reproduce\r\n1. Install opsdroid\r\n```\r\npip install git+https://github.com/opsdroid/opsdroid.git\r\n```\r\n\r\n2. Create `configuration.yaml` with following content\r\n```\r\nwelcome-message: true\r\n\r\nconnectors:\r\n - name: slack\r\n api-token: \"<Bot OAuth Token>\"\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\r\n3. 
Start opsdroid\r\n\r\n```\r\nopsdroid start\r\n```\r\n\r\n## Expected Functionality\r\nThe expected functionality is for the Opsdroid Bot to get connected with Slack Workspace and interact with the Slack user as per the configured skill.\r\n\r\n\r\n## Experienced Functionality\r\nThe Opsdroid Bot failed to connect with the Slack Workspace with the following error:\r\n\r\n```\r\nINFO opsdroid.connector.slack: Connecting to Slack\r\nTraceback (most recent call last):\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 936, in _wrap_create_connection\r\n return await self._loop.create_connection(*args, **kwargs) # type: ignore # noqa\r\n File \"/usr/lib/python3.6/asyncio/base_events.py\", line 820, in create_connection\r\n sock, protocol_factory, ssl, server_hostname)\r\n File \"/usr/lib/python3.6/asyncio/base_events.py\", line 846, in _create_connection_transport\r\n yield from waiter\r\n File \"/usr/lib/python3.6/asyncio/sslproto.py\", line 505, in data_received\r\n ssldata, appdata = self._sslpipe.feed_ssldata(data)\r\n File \"/usr/lib/python3.6/asyncio/sslproto.py\", line 201, in feed_ssldata\r\n self._sslobj.do_handshake()\r\n File \"/usr/lib/python3.6/ssl.py\", line 689, in do_handshake\r\n self._sslobj.do_handshake()\r\nssl.SSLError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/opsdroid/connector/slack/__init__.py\", line 55, in connect\r\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py\", line 229, in _send\r\n http_verb=http_verb, api_url=api_url, req_args=req_args\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/slack/web/base_client.py\", line 259, in _request\r\n async with session.request(http_verb, api_url, **req_args) as res:\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py\", line 1012, in __aenter__\r\n self._resp = await self._coro\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/client.py\", line 483, in _request\r\n timeout=real_timeout\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 523, in connect\r\n proto = await self._create_connection(req, traces, timeout)\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 859, in _create_connection\r\n req, traces, timeout)\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 1004, in _create_direct_connection\r\n raise last_exc\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 986, in _create_direct_connection\r\n req=req, client_error=client_error)\r\n File \"/home/daniccan/c8/OpsDroid/c8-alertbot/env/lib/python3.6/site-packages/aiohttp/connector.py\", line 941, in _wrap_create_connection\r\n raise ClientConnectorSSLError(req.connection_key, exc) from exc\r\naiohttp.client_exceptions.ClientConnectorSSLError: Cannot connect to host www.slack.com:443 ssl:default [[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:852)]\r\n```\r\n\r\n## Versions\r\n- 
**Opsdroid version:** master branch in git\r\n- **Python version:** 3.6.8\r\n- **OS/Docker version:** Ubuntu 18.04 LTS\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file below.\r\n\r\n```yaml\r\nwelcome-message: true\r\n\r\nconnectors:\r\n - name: slack\r\n api-token: \"<Bot OAuth Token>\"\r\n\r\nskills:\r\n - name: hello\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\r\n\r\n<!-- Love opsdroid? Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\r\n\n", "before_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\n\nimport slack\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import Blocks\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"api-token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.slack = slack.WebClient(token=self.token, run_async=True)\n self.slack_rtm = slack.RTMClient(token=self.token, run_async=True)\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n _LOGGER.debug(_(\"Connected as %s\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n message[\"text\"],\n user_info[\"name\"],\n message[\"channel\"],\n self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": False,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.raw_event[\"ts\"],\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n 
response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}]}
| 3,839 | 297 |
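The opsdroid record works around the `CERTIFICATE_VERIFY_FAILED` error by handing both Slack clients an SSL context built from certifi's CA bundle. A hedged, stand-alone sketch of that pattern (the token value is a placeholder; `slack` here is the slackclient v2 package used by the connector):

```python
import ssl

import certifi
import slack

token = "xoxb-your-bot-token"  # placeholder

# Build a default SSL context that trusts certifi's CA bundle and pass it to
# both the Web API and RTM clients, as the golden diff does.
ssl_context = ssl.create_default_context(cafile=certifi.where())
web_client = slack.WebClient(token=token, run_async=True, ssl=ssl_context)
rtm_client = slack.RTMClient(token=token, run_async=True, ssl=ssl_context)
```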
gh_patches_debug_22193
|
rasdani/github-patches
|
git_diff
|
DataBiosphere__toil-3590
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type hints to conversions.py
Add type hints to src/toil/lib/conversions.py so it can be checked under mypy during linting.
Refers to #3568.
┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-888)
┆Issue Number: TOIL-888
</issue>
<code>
[start of src/toil/lib/conversions.py]
1 """Conversion utilities for mapping memory, disk, core declarations from strings to numbers and vice versa."""
2 import math
3 from typing import Optional, SupportsInt, Tuple
4
5 # See https://en.wikipedia.org/wiki/Binary_prefix
6 BINARY_PREFIXES = ['ki', 'mi', 'gi', 'ti', 'pi', 'ei', 'kib', 'mib', 'gib', 'tib', 'pib', 'eib']
7 DECIMAL_PREFIXES = ['b', 'k', 'm', 'g', 't', 'p', 'e', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb']
8 VALID_PREFIXES = BINARY_PREFIXES + DECIMAL_PREFIXES
9
10
11 def bytes_in_unit(unit: str = 'B') -> int:
12 num_bytes = 1
13 if unit.lower() in ['ki', 'kib']:
14 num_bytes = 1 << 10
15 if unit.lower() in ['mi', 'mib']:
16 num_bytes = 1 << 20
17 if unit.lower() in ['gi', 'gib']:
18 num_bytes = 1 << 30
19 if unit.lower() in ['ti', 'tib']:
20 num_bytes = 1 << 40
21 if unit.lower() in ['pi', 'pib']:
22 num_bytes = 1 << 50
23 if unit.lower() in ['ei', 'eib']:
24 num_bytes = 1 << 60
25
26 if unit.lower() in ['k', 'kb']:
27 num_bytes = 1000
28 if unit.lower() in ['m', 'mb']:
29 num_bytes = 1000 ** 2
30 if unit.lower() in ['g', 'gb']:
31 num_bytes = 1000 ** 3
32 if unit.lower() in ['t', 'tb']:
33 num_bytes = 1000 ** 4
34 if unit.lower() in ['p', 'pb']:
35 num_bytes = 1000 ** 5
36 if unit.lower() in ['e', 'eb']:
37 num_bytes = 1000 ** 6
38 return num_bytes
39
40
41 def convert_units(num: float,
42 src_unit: str,
43 dst_unit: Optional[str] = 'B') -> float:
44 """Returns a float representing the converted input in dst_units."""
45 assert src_unit.lower() in VALID_PREFIXES, f"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}."
46 assert dst_unit.lower() in VALID_PREFIXES, f"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}."
47 return (num * bytes_in_unit(src_unit)) / bytes_in_unit(dst_unit)
48
49
50 def parse_memory_string(string: str) -> Tuple[float, str]:
51 """
52 Given a string representation of some memory (i.e. '1024 Mib'), return the
53 number and unit.
54 """
55 for i, character in enumerate(string):
56 # find the first character of the unit
57 if character not in '0123456789.-_ ':
58 units = string[i:].strip()
59 assert units.lower() in VALID_PREFIXES, f"{units} not a valid unit, valid units are {VALID_PREFIXES}."
60 return float(string[:i]), units
61 return float(string), 'b'
62
63
64 def human2bytes(string: str) -> int:
65 """
66 Given a string representation of some memory (i.e. '1024 Mib'), return the
67 integer number of bytes.
68 """
69 value, unit = parse_memory_string(string)
70 return int(convert_units(value, src_unit=unit, dst_unit='b'))
71
72
73 def bytes2human(n: SupportsInt) -> str:
74 """Return a binary value as a human readable string with units."""
75 n = int(n)
76 if n < 0:
77 raise ValueError("n < 0")
78 elif n < 1:
79 return '0 b'
80
81 power_level = math.floor(math.log(n, 1024))
82 units = ('b', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei')
83
84 unit = units[power_level if power_level < len(units) else -1]
85 value = convert_units(n, "b", unit)
86 return f'{value:.1f} {unit}'
87
[end of src/toil/lib/conversions.py]
[start of contrib/admin/mypy-with-ignore.py]
1 #!/usr/bin/env python3
2 """
3 Runs mypy and ignores files that do not yet have passing type hints.
4
5 Does not type check test files (any path including "src/toil/test").
6 """
7 import os
8 import subprocess
9 import sys
10
11 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa
12 sys.path.insert(0, pkg_root) # noqa
13
14 from src.toil.lib.resources import glob # type: ignore
15
16
17 def main():
18 all_files_to_check = []
19 for d in ['dashboard', 'docker', 'docs', 'src']:
20 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))
21
22 # TODO: Remove these paths as typing is added and mypy conflicts are addressed
23 ignore_paths = [os.path.abspath(f) for f in [
24 'docker/Dockerfile.py',
25 'docs/conf.py',
26 'docs/vendor/sphinxcontrib/fulltoc.py',
27 'docs/vendor/sphinxcontrib/__init__.py',
28 'src/toil/job.py',
29 'src/toil/leader.py',
30 'src/toil/statsAndLogging.py',
31 'src/toil/common.py',
32 'src/toil/realtimeLogger.py',
33 'src/toil/worker.py',
34 'src/toil/serviceManager.py',
35 'src/toil/toilState.py',
36 'src/toil/__init__.py',
37 'src/toil/resource.py',
38 'src/toil/deferred.py',
39 'src/toil/version.py',
40 'src/toil/wdl/utils.py',
41 'src/toil/wdl/wdl_types.py',
42 'src/toil/wdl/wdl_synthesis.py',
43 'src/toil/wdl/wdl_analysis.py',
44 'src/toil/wdl/wdl_functions.py',
45 'src/toil/wdl/toilwdl.py',
46 'src/toil/wdl/versions/draft2.py',
47 'src/toil/wdl/versions/v1.py',
48 'src/toil/wdl/versions/dev.py',
49 'src/toil/provisioners/clusterScaler.py',
50 'src/toil/provisioners/abstractProvisioner.py',
51 'src/toil/provisioners/gceProvisioner.py',
52 'src/toil/provisioners/__init__.py',
53 'src/toil/provisioners/node.py',
54 'src/toil/provisioners/aws/boto2Context.py',
55 'src/toil/provisioners/aws/awsProvisioner.py',
56 'src/toil/provisioners/aws/__init__.py',
57 'src/toil/batchSystems/slurm.py',
58 'src/toil/batchSystems/gridengine.py',
59 'src/toil/batchSystems/singleMachine.py',
60 'src/toil/batchSystems/abstractBatchSystem.py',
61 'src/toil/batchSystems/parasol.py',
62 'src/toil/batchSystems/kubernetes.py',
63 'src/toil/batchSystems/torque.py',
64 'src/toil/batchSystems/options.py',
65 'src/toil/batchSystems/registry.py',
66 'src/toil/batchSystems/lsf.py',
67 'src/toil/batchSystems/__init__.py',
68 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',
69 'src/toil/batchSystems/lsfHelper.py',
70 'src/toil/batchSystems/htcondor.py',
71 'src/toil/batchSystems/mesos/batchSystem.py',
72 'src/toil/batchSystems/mesos/executor.py',
73 'src/toil/batchSystems/mesos/conftest.py',
74 'src/toil/batchSystems/mesos/__init__.py',
75 'src/toil/batchSystems/mesos/test/__init__.py',
76 'src/toil/cwl/conftest.py',
77 'src/toil/cwl/__init__.py',
78 'src/toil/cwl/cwltoil.py',
79 'src/toil/fileStores/cachingFileStore.py',
80 'src/toil/fileStores/abstractFileStore.py',
81 'src/toil/fileStores/nonCachingFileStore.py',
82 'src/toil/fileStores/__init__.py',
83 'src/toil/jobStores/utils.py',
84 'src/toil/jobStores/abstractJobStore.py',
85 'src/toil/jobStores/conftest.py',
86 'src/toil/jobStores/fileJobStore.py',
87 'src/toil/jobStores/__init__.py',
88 'src/toil/jobStores/googleJobStore.py',
89 'src/toil/jobStores/aws/utils.py',
90 'src/toil/jobStores/aws/jobStore.py',
91 'src/toil/jobStores/aws/__init__.py',
92 'src/toil/utils/toilDebugFile.py',
93 'src/toil/utils/toilUpdateEC2Instances.py',
94 'src/toil/utils/toilStatus.py',
95 'src/toil/utils/toilStats.py',
96 'src/toil/utils/toilSshCluster.py',
97 'src/toil/utils/toilMain.py',
98 'src/toil/utils/toilKill.py',
99 'src/toil/utils/__init__.py',
100 'src/toil/utils/toilDestroyCluster.py',
101 'src/toil/utils/toilDebugJob.py',
102 'src/toil/utils/toilRsyncCluster.py',
103 'src/toil/utils/toilClean.py',
104 'src/toil/utils/toilLaunchCluster.py',
105 'src/toil/lib/memoize.py',
106 'src/toil/lib/throttle.py',
107 'src/toil/lib/humanize.py',
108 'src/toil/lib/compatibility.py',
109 'src/toil/lib/iterables.py',
110 'src/toil/lib/bioio.py',
111 'src/toil/lib/ec2.py',
112 'src/toil/lib/conversions.py',
113 'src/toil/lib/ec2nodes.py',
114 'src/toil/lib/misc.py',
115 'src/toil/lib/expando.py',
116 'src/toil/lib/threading.py',
117 'src/toil/lib/exceptions.py',
118 'src/toil/lib/__init__.py',
119 'src/toil/lib/generatedEC2Lists.py',
120 'src/toil/lib/retry.py',
121 'src/toil/lib/objects.py',
122 'src/toil/lib/io.py',
123 'src/toil/lib/docker.py',
124 'src/toil/lib/encryption/_nacl.py',
125 'src/toil/lib/encryption/_dummy.py',
126 'src/toil/lib/encryption/conftest.py',
127 'src/toil/lib/encryption/__init__.py',
128 'src/toil/lib/aws/utils.py',
129 'src/toil/lib/aws/__init__.py'
130 ]]
131
132 filtered_files_to_check = []
133 for file_path in all_files_to_check:
134 if file_path not in ignore_paths and 'src/toil/test' not in file_path:
135 filtered_files_to_check.append(file_path)
136 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?
137 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check
138 p = subprocess.run(args=args, stdout=subprocess.PIPE)
139 result = p.stdout.decode()
140 print(result)
141 if 'Success: no issues found' not in result:
142 exit(1)
143
144
145 if __name__ == '__main__':
146 main()
147
[end of contrib/admin/mypy-with-ignore.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py
--- a/contrib/admin/mypy-with-ignore.py
+++ b/contrib/admin/mypy-with-ignore.py
@@ -109,7 +109,7 @@
'src/toil/lib/iterables.py',
'src/toil/lib/bioio.py',
'src/toil/lib/ec2.py',
- 'src/toil/lib/conversions.py',
+ # 'src/toil/lib/conversions.py',
'src/toil/lib/ec2nodes.py',
'src/toil/lib/misc.py',
'src/toil/lib/expando.py',
diff --git a/src/toil/lib/conversions.py b/src/toil/lib/conversions.py
--- a/src/toil/lib/conversions.py
+++ b/src/toil/lib/conversions.py
@@ -40,7 +40,7 @@
def convert_units(num: float,
src_unit: str,
- dst_unit: Optional[str] = 'B') -> float:
+ dst_unit: str = 'B') -> float:
"""Returns a float representing the converted input in dst_units."""
assert src_unit.lower() in VALID_PREFIXES, f"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}."
assert dst_unit.lower() in VALID_PREFIXES, f"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}."
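
For context, a short sketch of how the tightened signature behaves once `dst_unit` is annotated as plain `str` rather than `Optional[str]` (illustrative only; it assumes the `toil.lib.conversions` module targeted by this patch is importable as shown):

```python
# Illustrative sketch, not part of the patch: with dst_unit typed as str,
# mypy accepts literal unit strings and flags dst_unit=None as an error.
from toil.lib.conversions import convert_units, human2bytes

mib: float = convert_units(2048, 'Ki', 'Mi')   # -> 2.0
total: int = human2bytes('1024 Mib')           # -> 1073741824
```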
|
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -109,7 +109,7 @@\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n- 'src/toil/lib/conversions.py',\n+ # 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\ndiff --git a/src/toil/lib/conversions.py b/src/toil/lib/conversions.py\n--- a/src/toil/lib/conversions.py\n+++ b/src/toil/lib/conversions.py\n@@ -40,7 +40,7 @@\n \n def convert_units(num: float,\n src_unit: str,\n- dst_unit: Optional[str] = 'B') -> float:\n+ dst_unit: str = 'B') -> float:\n \"\"\"Returns a float representing the converted input in dst_units.\"\"\"\n assert src_unit.lower() in VALID_PREFIXES, f\"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n assert dst_unit.lower() in VALID_PREFIXES, f\"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n", "issue": "Add type hints to conversions.py\nAdd type hints to src/toil/lib/encryption/conversions so it can be checked under mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-888)\n\u2506Issue Number: TOIL-888\n\n", "before_files": [{"content": "\"\"\"Conversion utilities for mapping memory, disk, core declarations from strings to numbers and vice versa.\"\"\"\nimport math\nfrom typing import Optional, SupportsInt, Tuple\n\n# See https://en.wikipedia.org/wiki/Binary_prefix\nBINARY_PREFIXES = ['ki', 'mi', 'gi', 'ti', 'pi', 'ei', 'kib', 'mib', 'gib', 'tib', 'pib', 'eib']\nDECIMAL_PREFIXES = ['b', 'k', 'm', 'g', 't', 'p', 'e', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb']\nVALID_PREFIXES = BINARY_PREFIXES + DECIMAL_PREFIXES\n\n\ndef bytes_in_unit(unit: str = 'B') -> int:\n num_bytes = 1\n if unit.lower() in ['ki', 'kib']:\n num_bytes = 1 << 10\n if unit.lower() in ['mi', 'mib']:\n num_bytes = 1 << 20\n if unit.lower() in ['gi', 'gib']:\n num_bytes = 1 << 30\n if unit.lower() in ['ti', 'tib']:\n num_bytes = 1 << 40\n if unit.lower() in ['pi', 'pib']:\n num_bytes = 1 << 50\n if unit.lower() in ['ei', 'eib']:\n num_bytes = 1 << 60\n\n if unit.lower() in ['k', 'kb']:\n num_bytes = 1000\n if unit.lower() in ['m', 'mb']:\n num_bytes = 1000 ** 2\n if unit.lower() in ['g', 'gb']:\n num_bytes = 1000 ** 3\n if unit.lower() in ['t', 'tb']:\n num_bytes = 1000 ** 4\n if unit.lower() in ['p', 'pb']:\n num_bytes = 1000 ** 5\n if unit.lower() in ['e', 'eb']:\n num_bytes = 1000 ** 6\n return num_bytes\n\n\ndef convert_units(num: float,\n src_unit: str,\n dst_unit: Optional[str] = 'B') -> float:\n \"\"\"Returns a float representing the converted input in dst_units.\"\"\"\n assert src_unit.lower() in VALID_PREFIXES, f\"{src_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n assert dst_unit.lower() in VALID_PREFIXES, f\"{dst_unit} not a valid unit, valid units are {VALID_PREFIXES}.\"\n return (num * bytes_in_unit(src_unit)) / bytes_in_unit(dst_unit)\n\n\ndef parse_memory_string(string: str) -> Tuple[float, str]:\n \"\"\"\n Given a string representation of some memory (i.e. 
'1024 Mib'), return the\n number and unit.\n \"\"\"\n for i, character in enumerate(string):\n # find the first character of the unit\n if character not in '0123456789.-_ ':\n units = string[i:].strip()\n assert units.lower() in VALID_PREFIXES, f\"{units} not a valid unit, valid units are {VALID_PREFIXES}.\"\n return float(string[:i]), units\n return float(string), 'b'\n\n\ndef human2bytes(string: str) -> int:\n \"\"\"\n Given a string representation of some memory (i.e. '1024 Mib'), return the\n integer number of bytes.\n \"\"\"\n value, unit = parse_memory_string(string)\n return int(convert_units(value, src_unit=unit, dst_unit='b'))\n\n\ndef bytes2human(n: SupportsInt) -> str:\n \"\"\"Return a binary value as a human readable string with units.\"\"\"\n n = int(n)\n if n < 0:\n raise ValueError(\"n < 0\")\n elif n < 1:\n return '0 b'\n\n power_level = math.floor(math.log(n, 1024))\n units = ('b', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei')\n\n unit = units[power_level if power_level < len(units) else -1]\n value = convert_units(n, \"b\", unit)\n return f'{value:.1f} {unit}'\n", "path": "src/toil/lib/conversions.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 
'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilUpdateEC2Instances.py',\n 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/toilSshCluster.py',\n 'src/toil/utils/toilMain.py',\n 'src/toil/utils/toilKill.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilDestroyCluster.py',\n 'src/toil/utils/toilDebugJob.py',\n 'src/toil/utils/toilRsyncCluster.py',\n 'src/toil/utils/toilClean.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/conversions.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/misc.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
| 3,675 | 310 |
gh_patches_debug_20262
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-2984
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'
### setuptools version
setuptools==60.0.5
### Python version
python 3.10
### OS
macOS intel 12.1
### Additional environment information
_No response_
### Description
python3 -m pip install -U setuptools
### Expected behavior
python3 -m pip install -U setuptools
### How to Reproduce
brew install [email protected]
python3 -m pip install -U setuptools
### Output
```
Collecting setuptools
Downloading setuptools-60.1.1-py3-none-any.whl (952 kB)
|████████████████████████████████| 952 kB 351 kB/s
Installing collected packages: setuptools
Attempting uninstall: setuptools
Found existing installation: setuptools 60.0.5
Uninstalling setuptools-60.0.5:
Successfully uninstalled setuptools-60.0.5
Successfully installed setuptools-60.1.1
```
and when you exit and relogin
```
Error processing line 1 of /usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/distutils-precedence.pth:
Traceback (most recent call last):
File "/usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site.py", line 186, in addpackage
exec(line)
File "<string>", line 1, in <module>
AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'
Remainder of file ignored
```
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
</issue>
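
For readers unfamiliar with the mechanism: the error comes from the one-line hook that setuptools writes into `distutils-precedence.pth`, which `site.py` executes at every interpreter start-up. A sketch of what that line does, reconstructed from the `_pth_contents` template in `setup.py` below (the installed file joins these statements with semicolons on a single line):

```python
# Reconstruction of the distutils-precedence.pth hook (illustrative):
import os
var = 'SETUPTOOLS_USE_DISTUTILS'
enabled = os.environ.get(var, 'local') == 'local'
enabled and __import__('_distutils_hack').ensure_shim()
# The AttributeError is raised when the _distutils_hack module found on
# sys.path comes from a setuptools version whose hook function has a
# different name, so the .pth file and the module must agree on that name.
```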
<code>
[start of _distutils_hack/__init__.py]
1 import sys
2 import os
3 import re
4 import importlib
5 import warnings
6 import contextlib
7
8
9 is_pypy = '__pypy__' in sys.builtin_module_names
10
11
12 warnings.filterwarnings('ignore',
13 r'.+ distutils\b.+ deprecated',
14 DeprecationWarning)
15
16
17 def warn_distutils_present():
18 if 'distutils' not in sys.modules:
19 return
20 if is_pypy and sys.version_info < (3, 7):
21 # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
22 # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
23 return
24 warnings.warn(
25 "Distutils was imported before Setuptools, but importing Setuptools "
26 "also replaces the `distutils` module in `sys.modules`. This may lead "
27 "to undesirable behaviors or errors. To avoid these issues, avoid "
28 "using distutils directly, ensure that setuptools is installed in the "
29 "traditional way (e.g. not an editable install), and/or make sure "
30 "that setuptools is always imported before distutils.")
31
32
33 def clear_distutils():
34 if 'distutils' not in sys.modules:
35 return
36 warnings.warn("Setuptools is replacing distutils.")
37 mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
38 for name in mods:
39 del sys.modules[name]
40
41
42 def enabled():
43 """
44 Allow selection of distutils by environment variable.
45 """
46 which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
47 return which == 'local'
48
49
50 def ensure_local_distutils():
51 clear_distutils()
52
53 # With the DistutilsMetaFinder in place,
54 # perform an import to cause distutils to be
55 # loaded from setuptools._distutils. Ref #2906.
56 with shim():
57 importlib.import_module('distutils')
58
59 # check that submodules load as expected
60 core = importlib.import_module('distutils.core')
61 assert '_distutils' in core.__file__, core.__file__
62
63
64 def do_override():
65 """
66 Ensure that the local copy of distutils is preferred over stdlib.
67
68 See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
69 for more motivation.
70 """
71 if enabled():
72 warn_distutils_present()
73 ensure_local_distutils()
74
75
76 class DistutilsMetaFinder:
77 def find_spec(self, fullname, path, target=None):
78 if path is not None:
79 return
80
81 method_name = 'spec_for_{fullname}'.format(**locals())
82 method = getattr(self, method_name, lambda: None)
83 return method()
84
85 def spec_for_distutils(self):
86 import importlib.abc
87 import importlib.util
88
89 # In cases of path manipulation during sitecustomize,
90 # Setuptools might actually not be present even though
91 # the hook has been loaded. Allow the caller to fall
92 # back to stdlib behavior. See #2980.
93 if not importlib.util.find_spec('setuptools'):
94 return
95
96 class DistutilsLoader(importlib.abc.Loader):
97
98 def create_module(self, spec):
99 return importlib.import_module('setuptools._distutils')
100
101 def exec_module(self, module):
102 pass
103
104 return importlib.util.spec_from_loader('distutils', DistutilsLoader())
105
106 def spec_for_pip(self):
107 """
108 Ensure stdlib distutils when running under pip.
109 See pypa/pip#8761 for rationale.
110 """
111 if self.pip_imported_during_build():
112 return
113 clear_distutils()
114 self.spec_for_distutils = lambda: None
115
116 @classmethod
117 def pip_imported_during_build(cls):
118 """
119 Detect if pip is being imported in a build script. Ref #2355.
120 """
121 import traceback
122 return any(
123 cls.frame_file_is_setup(frame)
124 for frame, line in traceback.walk_stack(None)
125 )
126
127 @staticmethod
128 def frame_file_is_setup(frame):
129 """
130 Return True if the indicated frame suggests a setup.py file.
131 """
132 # some frames may not have __file__ (#2940)
133 return frame.f_globals.get('__file__', '').endswith('setup.py')
134
135
136 DISTUTILS_FINDER = DistutilsMetaFinder()
137
138
139 def ensure_shim():
140 DISTUTILS_FINDER in sys.meta_path or add_shim()
141
142
143 @contextlib.contextmanager
144 def shim():
145 add_shim()
146 try:
147 yield
148 finally:
149 remove_shim()
150
151
152 def add_shim():
153 sys.meta_path.insert(0, DISTUTILS_FINDER)
154
155
156 def remove_shim():
157 try:
158 sys.meta_path.remove(DISTUTILS_FINDER)
159 except ValueError:
160 pass
161
[end of _distutils_hack/__init__.py]
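
A minimal sketch of how the finder above can be exercised to confirm which `distutils` is active (illustrative; it assumes `distutils` has not been imported yet and uses the function names as they appear in this file):

```python
# Illustrative check, mirroring the assertion in ensure_local_distutils():
import _distutils_hack

_distutils_hack.ensure_shim()       # installs DISTUTILS_FINDER on sys.meta_path
import distutils.core

# With the shim active, distutils resolves to setuptools' bundled copy.
print(distutils.core.__file__)      # expected to contain '_distutils'
```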
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import textwrap
6
7 import setuptools
8 from setuptools.command.install import install
9
10 here = os.path.dirname(__file__)
11
12
13 package_data = dict(
14 setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],
15 )
16
17 force_windows_specific_files = (
18 os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES", "1").lower()
19 not in ("", "0", "false", "no")
20 )
21
22 include_windows_files = sys.platform == 'win32' or force_windows_specific_files
23
24 if include_windows_files:
25 package_data.setdefault('setuptools', []).extend(['*.exe'])
26 package_data.setdefault('setuptools.command', []).extend(['*.xml'])
27
28
29 def pypi_link(pkg_filename):
30 """
31 Given the filename, including md5 fragment, construct the
32 dependency link for PyPI.
33 """
34 root = 'https://files.pythonhosted.org/packages/source'
35 name, sep, rest = pkg_filename.partition('-')
36 parts = root, name[0], name, pkg_filename
37 return '/'.join(parts)
38
39
40 class install_with_pth(install):
41 """
42 Custom install command to install a .pth file for distutils patching.
43
44 This hack is necessary because there's no standard way to install behavior
45 on startup (and it's debatable if there should be one). This hack (ab)uses
46 the `extra_path` behavior in Setuptools to install a `.pth` file with
47 implicit behavior on startup to give higher precedence to the local version
48 of `distutils` over the version from the standard library.
49
50 Please do not replicate this behavior.
51 """
52
53 _pth_name = 'distutils-precedence'
54 _pth_contents = textwrap.dedent("""
55 import os
56 var = 'SETUPTOOLS_USE_DISTUTILS'
57 enabled = os.environ.get(var, 'local') == 'local'
58 enabled and __import__('_distutils_hack').ensure_shim()
59 """).lstrip().replace('\n', '; ')
60
61 def initialize_options(self):
62 install.initialize_options(self)
63 self.extra_path = self._pth_name, self._pth_contents
64
65 def finalize_options(self):
66 install.finalize_options(self)
67 self._restore_install_lib()
68
69 def _restore_install_lib(self):
70 """
71 Undo secondary effect of `extra_path` adding to `install_lib`
72 """
73 suffix = os.path.relpath(self.install_lib, self.install_libbase)
74
75 if suffix.strip() == self._pth_contents.strip():
76 self.install_lib = self.install_libbase
77
78
79 setup_params = dict(
80 cmdclass={'install': install_with_pth},
81 package_data=package_data,
82 )
83
84 if __name__ == '__main__':
85 # allow setup.py to run from another directory
86 here and os.chdir(here)
87 dist = setuptools.setup(**setup_params)
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py
--- a/_distutils_hack/__init__.py
+++ b/_distutils_hack/__init__.py
@@ -136,20 +136,20 @@
DISTUTILS_FINDER = DistutilsMetaFinder()
-def ensure_shim():
- DISTUTILS_FINDER in sys.meta_path or add_shim()
+def add_shim():
+ DISTUTILS_FINDER in sys.meta_path or insert_shim()
@contextlib.contextmanager
def shim():
- add_shim()
+ insert_shim()
try:
yield
finally:
remove_shim()
-def add_shim():
+def insert_shim():
sys.meta_path.insert(0, DISTUTILS_FINDER)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,7 +55,7 @@
import os
var = 'SETUPTOOLS_USE_DISTUTILS'
enabled = os.environ.get(var, 'local') == 'local'
- enabled and __import__('_distutils_hack').ensure_shim()
+ enabled and __import__('_distutils_hack').add_shim()
""").lstrip().replace('\n', '; ')
def initialize_options(self):
|
{"golden_diff": "diff --git a/_distutils_hack/__init__.py b/_distutils_hack/__init__.py\n--- a/_distutils_hack/__init__.py\n+++ b/_distutils_hack/__init__.py\n@@ -136,20 +136,20 @@\n DISTUTILS_FINDER = DistutilsMetaFinder()\n \n \n-def ensure_shim():\n- DISTUTILS_FINDER in sys.meta_path or add_shim()\n+def add_shim():\n+ DISTUTILS_FINDER in sys.meta_path or insert_shim()\n \n \n @contextlib.contextmanager\n def shim():\n- add_shim()\n+ insert_shim()\n try:\n yield\n finally:\n remove_shim()\n \n \n-def add_shim():\n+def insert_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,7 +55,7 @@\n import os\n var = 'SETUPTOOLS_USE_DISTUTILS'\n enabled = os.environ.get(var, 'local') == 'local'\n- enabled and __import__('_distutils_hack').ensure_shim()\n+ enabled and __import__('_distutils_hack').add_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n \n def initialize_options(self):\n", "issue": "AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\n### setuptools version\r\n\r\nsetuptools==60.0.5\r\n\r\n### Python version\r\n\r\npython 3.10\r\n\r\n### OS\r\n\r\nmacOS intel 12.1\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### Expected behavior\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### How to Reproduce\r\n\r\nbrew install [email protected]\r\npython3 -m pip install -U setuptools\r\n\r\n### Output\r\n\r\n```\r\nCollecting setuptools\r\n Downloading setuptools-60.1.1-py3-none-any.whl (952 kB)\r\n |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 952 kB 351 kB/s \r\nInstalling collected packages: setuptools\r\n Attempting uninstall: setuptools\r\n Found existing installation: setuptools 60.0.5\r\n Uninstalling setuptools-60.0.5:\r\n Successfully uninstalled setuptools-60.0.5\r\nSuccessfully installed setuptools-60.1.1\r\n```\r\n\r\nand when you exit and relogin\r\n\r\n```\r\nError processing line 1 of /usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/distutils-precedence.pth:\r\n\r\n Traceback (most recent call last):\r\n File \"/usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site.py\", line 186, in addpackage\r\n exec(line)\r\n File \"<string>\", line 1, in <module>\r\n AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\r\n\r\nRemainder of file ignored\r\n```\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the PSF Code of Conduct\nAttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\n### setuptools version\r\n\r\nsetuptools==60.0.5\r\n\r\n### Python version\r\n\r\npython 3.10\r\n\r\n### OS\r\n\r\nmacOS intel 12.1\r\n\r\n### Additional environment information\r\n\r\n_No response_\r\n\r\n### Description\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### Expected behavior\r\n\r\npython3 -m pip install -U setuptools\r\n\r\n### How to Reproduce\r\n\r\nbrew install [email protected]\r\npython3 -m pip install -U setuptools\r\n\r\n### Output\r\n\r\n```\r\nCollecting setuptools\r\n Downloading setuptools-60.1.1-py3-none-any.whl (952 kB)\r\n 
|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 952 kB 351 kB/s \r\nInstalling collected packages: setuptools\r\n Attempting uninstall: setuptools\r\n Found existing installation: setuptools 60.0.5\r\n Uninstalling setuptools-60.0.5:\r\n Successfully uninstalled setuptools-60.0.5\r\nSuccessfully installed setuptools-60.1.1\r\n```\r\n\r\nand when you exit and relogin\r\n\r\n```\r\nError processing line 1 of /usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/distutils-precedence.pth:\r\n\r\n Traceback (most recent call last):\r\n File \"/usr/local/Cellar/[email protected]/3.10.1/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site.py\", line 186, in addpackage\r\n exec(line)\r\n File \"<string>\", line 1, in <module>\r\n AttributeError: module '_distutils_hack' has no attribute 'ensure_shim'\r\n\r\nRemainder of file ignored\r\n```\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import sys\nimport os\nimport re\nimport importlib\nimport warnings\nimport contextlib\n\n\nis_pypy = '__pypy__' in sys.builtin_module_names\n\n\nwarnings.filterwarnings('ignore',\n r'.+ distutils\\b.+ deprecated',\n DeprecationWarning)\n\n\ndef warn_distutils_present():\n if 'distutils' not in sys.modules:\n return\n if is_pypy and sys.version_info < (3, 7):\n # PyPy for 3.6 unconditionally imports distutils, so bypass the warning\n # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250\n return\n warnings.warn(\n \"Distutils was imported before Setuptools, but importing Setuptools \"\n \"also replaces the `distutils` module in `sys.modules`. This may lead \"\n \"to undesirable behaviors or errors. To avoid these issues, avoid \"\n \"using distutils directly, ensure that setuptools is installed in the \"\n \"traditional way (e.g. not an editable install), and/or make sure \"\n \"that setuptools is always imported before distutils.\")\n\n\ndef clear_distutils():\n if 'distutils' not in sys.modules:\n return\n warnings.warn(\"Setuptools is replacing distutils.\")\n mods = [name for name in sys.modules if re.match(r'distutils\\b', name)]\n for name in mods:\n del sys.modules[name]\n\n\ndef enabled():\n \"\"\"\n Allow selection of distutils by environment variable.\n \"\"\"\n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')\n return which == 'local'\n\n\ndef ensure_local_distutils():\n clear_distutils()\n\n # With the DistutilsMetaFinder in place,\n # perform an import to cause distutils to be\n # loaded from setuptools._distutils. 
Ref #2906.\n with shim():\n importlib.import_module('distutils')\n\n # check that submodules load as expected\n core = importlib.import_module('distutils.core')\n assert '_distutils' in core.__file__, core.__file__\n\n\ndef do_override():\n \"\"\"\n Ensure that the local copy of distutils is preferred over stdlib.\n\n See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401\n for more motivation.\n \"\"\"\n if enabled():\n warn_distutils_present()\n ensure_local_distutils()\n\n\nclass DistutilsMetaFinder:\n def find_spec(self, fullname, path, target=None):\n if path is not None:\n return\n\n method_name = 'spec_for_{fullname}'.format(**locals())\n method = getattr(self, method_name, lambda: None)\n return method()\n\n def spec_for_distutils(self):\n import importlib.abc\n import importlib.util\n\n # In cases of path manipulation during sitecustomize,\n # Setuptools might actually not be present even though\n # the hook has been loaded. Allow the caller to fall\n # back to stdlib behavior. See #2980.\n if not importlib.util.find_spec('setuptools'):\n return\n\n class DistutilsLoader(importlib.abc.Loader):\n\n def create_module(self, spec):\n return importlib.import_module('setuptools._distutils')\n\n def exec_module(self, module):\n pass\n\n return importlib.util.spec_from_loader('distutils', DistutilsLoader())\n\n def spec_for_pip(self):\n \"\"\"\n Ensure stdlib distutils when running under pip.\n See pypa/pip#8761 for rationale.\n \"\"\"\n if self.pip_imported_during_build():\n return\n clear_distutils()\n self.spec_for_distutils = lambda: None\n\n @classmethod\n def pip_imported_during_build(cls):\n \"\"\"\n Detect if pip is being imported in a build script. Ref #2355.\n \"\"\"\n import traceback\n return any(\n cls.frame_file_is_setup(frame)\n for frame, line in traceback.walk_stack(None)\n )\n\n @staticmethod\n def frame_file_is_setup(frame):\n \"\"\"\n Return True if the indicated frame suggests a setup.py file.\n \"\"\"\n # some frames may not have __file__ (#2940)\n return frame.f_globals.get('__file__', '').endswith('setup.py')\n\n\nDISTUTILS_FINDER = DistutilsMetaFinder()\n\n\ndef ensure_shim():\n DISTUTILS_FINDER in sys.meta_path or add_shim()\n\n\[email protected]\ndef shim():\n add_shim()\n try:\n yield\n finally:\n remove_shim()\n\n\ndef add_shim():\n sys.meta_path.insert(0, DISTUTILS_FINDER)\n\n\ndef remove_shim():\n try:\n sys.meta_path.remove(DISTUTILS_FINDER)\n except ValueError:\n pass\n", "path": "_distutils_hack/__init__.py"}, {"content": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\nfrom setuptools.command.install import install\n\nhere = os.path.dirname(__file__)\n\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = sys.platform == 'win32' or force_windows_specific_files\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nclass install_with_pth(install):\n \"\"\"\n 
Custom install command to install a .pth file for distutils patching.\n\n This hack is necessary because there's no standard way to install behavior\n on startup (and it's debatable if there should be one). This hack (ab)uses\n the `extra_path` behavior in Setuptools to install a `.pth` file with\n implicit behavior on startup to give higher precedence to the local version\n of `distutils` over the version from the standard library.\n\n Please do not replicate this behavior.\n \"\"\"\n\n _pth_name = 'distutils-precedence'\n _pth_contents = textwrap.dedent(\"\"\"\n import os\n var = 'SETUPTOOLS_USE_DISTUTILS'\n enabled = os.environ.get(var, 'local') == 'local'\n enabled and __import__('_distutils_hack').ensure_shim()\n \"\"\").lstrip().replace('\\n', '; ')\n\n def initialize_options(self):\n install.initialize_options(self)\n self.extra_path = self._pth_name, self._pth_contents\n\n def finalize_options(self):\n install.finalize_options(self)\n self._restore_install_lib()\n\n def _restore_install_lib(self):\n \"\"\"\n Undo secondary effect of `extra_path` adding to `install_lib`\n \"\"\"\n suffix = os.path.relpath(self.install_lib, self.install_libbase)\n\n if suffix.strip() == self._pth_contents.strip():\n self.install_lib = self.install_libbase\n\n\nsetup_params = dict(\n cmdclass={'install': install_with_pth},\n package_data=package_data,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}]}
| 3,689 | 300 |
gh_patches_debug_20785
|
rasdani/github-patches
|
git_diff
|
pyinstaller__pyinstaller-8035
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prevent Matplotlib's backend from being collected more than once
**Is your feature request related to a problem? Please describe.**
Hi, I've noticed that some of `Matplotlib`'s backends are collected more than once during the build. While this does not cause the build to fail, it would be cleaner to remove duplicate items from the list shown in the build log.
**Describe the solution you'd like**
Prevent duplicate `Matplotlib` backends from being collected into the list in the first place.
**Describe alternatives you've considered**
Remove duplicate items from the list after collection.
**Additional context**
`PyInstaller` 6.0
```
136360 INFO: Loading module hook 'hook-matplotlib.backends.py' from 'D:\\Python\\lib\\site-packages\\PyInstaller\\hooks'...
136361 INFO: Matplotlib backend selection method: automatic discovery of used backends
136374 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'E:\\Wordless\\wordless\\wl_main.py': ['Qt5Agg']
136380 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'sympy.testing.runtests': ['Agg', 'Agg']
136391 INFO: The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: ['Qt5Agg', 'Agg', 'Agg']. If your backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure the backend collection via hook options (see: https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks).
136391 INFO: Selected matplotlib backends: ['Qt5Agg', 'Agg', 'Agg']
```
</issue>
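
The change being requested is small; a minimal sketch of the de-duplication (illustrative only, not the final hook code):

```python
# Illustrative: collapse repeated backend names while keeping the output stable.
used_backends = ['Qt5Agg', 'Agg', 'Agg']
used_backends = sorted(set(used_backends))
print(used_backends)   # ['Agg', 'Qt5Agg']
```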
<code>
[start of PyInstaller/hooks/hook-matplotlib.backends.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2013-2023, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 from PyInstaller.compat import is_darwin
13 from PyInstaller.utils.hooks import logger, get_hook_config
14 from PyInstaller import isolated
15
16
17 @isolated.decorate
18 def _get_configured_default_backend():
19 """
20 Return the configured default matplotlib backend name, if available as matplotlib.rcParams['backend'] (or overridden
21 by MPLBACKEND environment variable. If the value of matplotlib.rcParams['backend'] corresponds to the auto-sentinel
22 object, returns None
23 """
24 import matplotlib
25 # matplotlib.rcParams overrides the __getitem__ implementation and attempts to determine and load the default
26 # backend using pyplot.switch_backend(). Therefore, use dict.__getitem__().
27 val = dict.__getitem__(matplotlib.rcParams, 'backend')
28 if isinstance(val, str):
29 return val
30 return None
31
32
33 @isolated.decorate
34 def _list_available_mpl_backends():
35 """
36 Returns the names of all available matplotlib backends.
37 """
38 import matplotlib
39 return matplotlib.rcsetup.all_backends
40
41
42 @isolated.decorate
43 def _check_mpl_backend_importable(module_name):
44 """
45 Attempts to import the given module name (matplotlib backend module).
46
47 Exceptions are propagated to caller.
48 """
49 __import__(module_name)
50
51
52 # Bytecode scanning
53 def _recursive_scan_code_objects_for_mpl_use(co):
54 """
55 Recursively scan the bytecode for occurrences of matplotlib.use() or mpl.use() calls with const arguments, and
56 collect those arguments into list of used matplotlib backend names.
57 """
58
59 from PyInstaller.depend.bytecode import any_alias, recursive_function_calls
60
61 mpl_use_names = {
62 *any_alias("matplotlib.use"),
63 *any_alias("mpl.use"), # matplotlib is commonly aliased as mpl
64 }
65
66 backends = []
67 for calls in recursive_function_calls(co).values():
68 for name, args in calls:
69 # matplotlib.use(backend) or matplotlib.use(backend, force)
70 # We support only literal arguments. Similarly, kwargs are
71 # not supported.
72 if len(args) not in {1, 2} or not isinstance(args[0], str):
73 continue
74 if name in mpl_use_names:
75 backends.append(args[0])
76
77 return backends
78
79
80 def _backend_module_name(name):
81 """
82 Converts matplotlib backend name to its corresponding module name.
83
84 Equivalent to matplotlib.cbook._backend_module_name().
85 """
86 if name.startswith("module://"):
87 return name[9:]
88 return f"matplotlib.backends.backend_{name.lower()}"
89
90
91 def _autodetect_used_backends(hook_api):
92 """
93 Returns a list of automatically-discovered matplotlib backends in use, or the name of the default matplotlib
94 backend. Implements the 'auto' backend selection method.
95 """
96 # Scan the code for matplotlib.use()
97 modulegraph = hook_api.analysis.graph
98 mpl_code_objs = modulegraph.get_code_using("matplotlib")
99 used_backends = []
100 for name, co in mpl_code_objs.items():
101 co_backends = _recursive_scan_code_objects_for_mpl_use(co)
102 if co_backends:
103 logger.info(
104 "Discovered Matplotlib backend(s) via `matplotlib.use()` call in module %r: %r", name, co_backends
105 )
106 used_backends += co_backends
107
108 if used_backends:
109 HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'
110 logger.info(
111 "The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: %r. If your "
112 "backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure "
113 "the backend collection via hook options (see: %s).", used_backends, HOOK_CONFIG_DOCS
114 )
115 return used_backends
116
117 # Determine the default matplotlib backend.
118 #
119 # Ideally, this would be done by calling ``matplotlib.get_backend()``. However, that function tries to switch to the
120 # default backend (calling ``matplotlib.pyplot.switch_backend()``), which seems to occasionally fail on our linux CI
121 # with an error and, on other occasions, returns the headless Agg backend instead of the GUI one (even with display
122 # server running). Furthermore, using ``matplotlib.get_backend()`` returns headless 'Agg' when display server is
123 # unavailable, which is not ideal for automated builds.
124 #
125 # Therefore, we try to emulate ``matplotlib.get_backend()`` ourselves. First, we try to obtain the configured
126 # default backend from settings (rcparams and/or MPLBACKEND environment variable). If that is unavailable, we try to
127 # find the first importable GUI-based backend, using the same list as matplotlib.pyplot.switch_backend() uses for
128 # automatic backend selection. The difference is that we only test whether the backend module is importable, without
129 # trying to switch to it.
130 default_backend = _get_configured_default_backend() # isolated sub-process
131 if default_backend:
132 logger.info("Found configured default matplotlib backend: %s", default_backend)
133 return [default_backend]
134
135 candidates = ["Qt5Agg", "Gtk3Agg", "TkAgg", "WxAgg"]
136 if is_darwin:
137 candidates = ["MacOSX"] + candidates
138 logger.info("Trying determine the default backend as first importable candidate from the list: %r", candidates)
139
140 for candidate in candidates:
141 try:
142 module_name = _backend_module_name(candidate)
143 _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.
144 except Exception:
145 continue
146 return [candidate]
147
148 # Fall back to headless Agg backend
149 logger.info("None of the backend candidates could be imported; falling back to headless Agg!")
150 return ['Agg']
151
152
153 def _collect_all_importable_backends(hook_api):
154 """
155 Returns a list of all importable matplotlib backends. Implements the 'all' backend selection method.
156 """
157 # List of the human-readable names of all available backends.
158 backend_names = _list_available_mpl_backends() # NOTE: retrieved in an isolated sub-process.
159 logger.info("All available matplotlib backends: %r", backend_names)
160
161 # Try to import the module(s).
162 importable_backends = []
163
164 # List of backends to exclude; Qt4 is not supported by PyInstaller anymore.
165 exclude_backends = {'Qt4Agg', 'Qt4Cairo'}
166
167 # Ignore "CocoaAgg" on OSes other than Mac OS; attempting to import it on other OSes halts the current
168 # (sub)process without printing output or raising exceptions, preventing reliable detection. Apply the
169 # same logic for the (newer) "MacOSX" backend.
170 if not is_darwin:
171 exclude_backends |= {'CocoaAgg', 'MacOSX'}
172
173 # For safety, attempt to import each backend in an isolated sub-process.
174 for backend_name in backend_names:
175 if backend_name in exclude_backends:
176 logger.info(' Matplotlib backend %r: excluded', backend_name)
177 continue
178
179 try:
180 module_name = _backend_module_name(backend_name)
181 _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.
182 except Exception:
183 # Backend is not importable, for whatever reason.
184 logger.info(' Matplotlib backend %r: ignored due to import error', backend_name)
185 continue
186
187 logger.info(' Matplotlib backend %r: added', backend_name)
188 importable_backends.append(backend_name)
189
190 return importable_backends
191
192
193 def hook(hook_api):
194 # Backend collection setting
195 backends_method = get_hook_config(hook_api, 'matplotlib', 'backends')
196 if backends_method is None:
197 backends_method = 'auto' # default method
198
199 # Select backend(s)
200 if backends_method == 'auto':
201 logger.info("Matplotlib backend selection method: automatic discovery of used backends")
202 backend_names = _autodetect_used_backends(hook_api)
203 elif backends_method == 'all':
204 logger.info("Matplotlib backend selection method: collection of all importable backends")
205 backend_names = _collect_all_importable_backends(hook_api)
206 else:
207 logger.info("Matplotlib backend selection method: user-provided name(s)")
208 if isinstance(backends_method, str):
209 backend_names = [backends_method]
210 else:
211 assert isinstance(backends_method, list), "User-provided backend name(s) must be either a string or a list!"
212 backend_names = backends_method
213
214 logger.info("Selected matplotlib backends: %r", backend_names)
215
216 # Set module names as hiddenimports
217 module_names = [_backend_module_name(backend) for backend in backend_names] # backend name -> module name
218 hook_api.add_imports(*module_names)
219
[end of PyInstaller/hooks/hook-matplotlib.backends.py]
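
For reference, a sketch of the kind of user code the bytecode scan in `_recursive_scan_code_objects_for_mpl_use()` reacts to. Every literal `matplotlib.use()` argument found in any scanned module is appended to the list, which is why the same backend can appear several times (illustrative module names):

```python
# main.py (illustrative): detected via any_alias("matplotlib.use")
import matplotlib
matplotlib.use('Qt5Agg')

# some_dependency.py (illustrative): detected via any_alias("mpl.use")
import matplotlib as mpl
mpl.use('Agg')
```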
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PyInstaller/hooks/hook-matplotlib.backends.py b/PyInstaller/hooks/hook-matplotlib.backends.py
--- a/PyInstaller/hooks/hook-matplotlib.backends.py
+++ b/PyInstaller/hooks/hook-matplotlib.backends.py
@@ -105,6 +105,9 @@
)
used_backends += co_backends
+ # Deduplicate and sort the list of used backends before displaying it.
+ used_backends = sorted(set(used_backends))
+
if used_backends:
HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'
logger.info(
@@ -211,6 +214,9 @@
assert isinstance(backends_method, list), "User-provided backend name(s) must be either a string or a list!"
backend_names = backends_method
+ # Deduplicate and sort the list of selected backends before displaying it.
+ backend_names = sorted(set(backend_names))
+
logger.info("Selected matplotlib backends: %r", backend_names)
# Set module names as hiddenimports
|
{"golden_diff": "diff --git a/PyInstaller/hooks/hook-matplotlib.backends.py b/PyInstaller/hooks/hook-matplotlib.backends.py\n--- a/PyInstaller/hooks/hook-matplotlib.backends.py\n+++ b/PyInstaller/hooks/hook-matplotlib.backends.py\n@@ -105,6 +105,9 @@\n )\n used_backends += co_backends\n \n+ # Deduplicate and sort the list of used backends before displaying it.\n+ used_backends = sorted(set(used_backends))\n+\n if used_backends:\n HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'\n logger.info(\n@@ -211,6 +214,9 @@\n assert isinstance(backends_method, list), \"User-provided backend name(s) must be either a string or a list!\"\n backend_names = backends_method\n \n+ # Deduplicate and sort the list of selected backends before displaying it.\n+ backend_names = sorted(set(backend_names))\n+\n logger.info(\"Selected matplotlib backends: %r\", backend_names)\n \n # Set module names as hiddenimports\n", "issue": "Prevent Matplotlib's backend from being collected more than once\n**Is your feature request related to a problem? Please describe.**\r\nHi, I've noticed that some of `Matplotlib`'s backend are collected more than once during building. While this does not cause building to fail, it would be more readable to remove duplicate items from the list shown in the building log.\r\n\r\n**Describe the solution you'd like**\r\nPrevent `Matplotlib`'s backend from being collected into the list.\r\n\r\n**Describe alternatives you've considered**\r\nRemove duplicate items from the list after collection.\r\n\r\n**Additional context**\r\n`PyInstaller` 6.0\r\n```\r\n136360 INFO: Loading module hook 'hook-matplotlib.backends.py' from 'D:\\\\Python\\\\lib\\\\site-packages\\\\PyInstaller\\\\hooks'...\r\n136361 INFO: Matplotlib backend selection method: automatic discovery of used backends\r\n136374 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'E:\\\\Wordless\\\\wordless\\\\wl_main.py': ['Qt5Agg']\r\n136380 INFO: Discovered Matplotlib backend(s) via `matplotlib.use()` call in module 'sympy.testing.runtests': ['Agg', 'Agg']\r\n136391 INFO: The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: ['Qt5Agg', 'Agg', 'Agg']. If your backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure the backend collection via hook options (see: https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks).\r\n136391 INFO: Selected matplotlib backends: ['Qt5Agg', 'Agg', 'Agg']\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nfrom PyInstaller.compat import is_darwin\nfrom PyInstaller.utils.hooks import logger, get_hook_config\nfrom PyInstaller import isolated\n\n\[email protected]\ndef _get_configured_default_backend():\n \"\"\"\n Return the configured default matplotlib backend name, if available as matplotlib.rcParams['backend'] (or overridden\n by MPLBACKEND environment variable. 
If the value of matplotlib.rcParams['backend'] corresponds to the auto-sentinel\n object, returns None\n \"\"\"\n import matplotlib\n # matplotlib.rcParams overrides the __getitem__ implementation and attempts to determine and load the default\n # backend using pyplot.switch_backend(). Therefore, use dict.__getitem__().\n val = dict.__getitem__(matplotlib.rcParams, 'backend')\n if isinstance(val, str):\n return val\n return None\n\n\[email protected]\ndef _list_available_mpl_backends():\n \"\"\"\n Returns the names of all available matplotlib backends.\n \"\"\"\n import matplotlib\n return matplotlib.rcsetup.all_backends\n\n\[email protected]\ndef _check_mpl_backend_importable(module_name):\n \"\"\"\n Attempts to import the given module name (matplotlib backend module).\n\n Exceptions are propagated to caller.\n \"\"\"\n __import__(module_name)\n\n\n# Bytecode scanning\ndef _recursive_scan_code_objects_for_mpl_use(co):\n \"\"\"\n Recursively scan the bytecode for occurrences of matplotlib.use() or mpl.use() calls with const arguments, and\n collect those arguments into list of used matplotlib backend names.\n \"\"\"\n\n from PyInstaller.depend.bytecode import any_alias, recursive_function_calls\n\n mpl_use_names = {\n *any_alias(\"matplotlib.use\"),\n *any_alias(\"mpl.use\"), # matplotlib is commonly aliased as mpl\n }\n\n backends = []\n for calls in recursive_function_calls(co).values():\n for name, args in calls:\n # matplotlib.use(backend) or matplotlib.use(backend, force)\n # We support only literal arguments. Similarly, kwargs are\n # not supported.\n if len(args) not in {1, 2} or not isinstance(args[0], str):\n continue\n if name in mpl_use_names:\n backends.append(args[0])\n\n return backends\n\n\ndef _backend_module_name(name):\n \"\"\"\n Converts matplotlib backend name to its corresponding module name.\n\n Equivalent to matplotlib.cbook._backend_module_name().\n \"\"\"\n if name.startswith(\"module://\"):\n return name[9:]\n return f\"matplotlib.backends.backend_{name.lower()}\"\n\n\ndef _autodetect_used_backends(hook_api):\n \"\"\"\n Returns a list of automatically-discovered matplotlib backends in use, or the name of the default matplotlib\n backend. Implements the 'auto' backend selection method.\n \"\"\"\n # Scan the code for matplotlib.use()\n modulegraph = hook_api.analysis.graph\n mpl_code_objs = modulegraph.get_code_using(\"matplotlib\")\n used_backends = []\n for name, co in mpl_code_objs.items():\n co_backends = _recursive_scan_code_objects_for_mpl_use(co)\n if co_backends:\n logger.info(\n \"Discovered Matplotlib backend(s) via `matplotlib.use()` call in module %r: %r\", name, co_backends\n )\n used_backends += co_backends\n\n if used_backends:\n HOOK_CONFIG_DOCS = 'https://pyinstaller.org/en/stable/hooks-config.html#matplotlib-hooks'\n logger.info(\n \"The following Matplotlib backends were discovered by scanning for `matplotlib.use()` calls: %r. If your \"\n \"backend of choice is not in this list, either add a `matplotlib.use()` call to your code, or configure \"\n \"the backend collection via hook options (see: %s).\", used_backends, HOOK_CONFIG_DOCS\n )\n return used_backends\n\n # Determine the default matplotlib backend.\n #\n # Ideally, this would be done by calling ``matplotlib.get_backend()``. 
However, that function tries to switch to the\n # default backend (calling ``matplotlib.pyplot.switch_backend()``), which seems to occasionally fail on our linux CI\n # with an error and, on other occasions, returns the headless Agg backend instead of the GUI one (even with display\n # server running). Furthermore, using ``matplotlib.get_backend()`` returns headless 'Agg' when display server is\n # unavailable, which is not ideal for automated builds.\n #\n # Therefore, we try to emulate ``matplotlib.get_backend()`` ourselves. First, we try to obtain the configured\n # default backend from settings (rcparams and/or MPLBACKEND environment variable). If that is unavailable, we try to\n # find the first importable GUI-based backend, using the same list as matplotlib.pyplot.switch_backend() uses for\n # automatic backend selection. The difference is that we only test whether the backend module is importable, without\n # trying to switch to it.\n default_backend = _get_configured_default_backend() # isolated sub-process\n if default_backend:\n logger.info(\"Found configured default matplotlib backend: %s\", default_backend)\n return [default_backend]\n\n candidates = [\"Qt5Agg\", \"Gtk3Agg\", \"TkAgg\", \"WxAgg\"]\n if is_darwin:\n candidates = [\"MacOSX\"] + candidates\n logger.info(\"Trying determine the default backend as first importable candidate from the list: %r\", candidates)\n\n for candidate in candidates:\n try:\n module_name = _backend_module_name(candidate)\n _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.\n except Exception:\n continue\n return [candidate]\n\n # Fall back to headless Agg backend\n logger.info(\"None of the backend candidates could be imported; falling back to headless Agg!\")\n return ['Agg']\n\n\ndef _collect_all_importable_backends(hook_api):\n \"\"\"\n Returns a list of all importable matplotlib backends. Implements the 'all' backend selection method.\n \"\"\"\n # List of the human-readable names of all available backends.\n backend_names = _list_available_mpl_backends() # NOTE: retrieved in an isolated sub-process.\n logger.info(\"All available matplotlib backends: %r\", backend_names)\n\n # Try to import the module(s).\n importable_backends = []\n\n # List of backends to exclude; Qt4 is not supported by PyInstaller anymore.\n exclude_backends = {'Qt4Agg', 'Qt4Cairo'}\n\n # Ignore \"CocoaAgg\" on OSes other than Mac OS; attempting to import it on other OSes halts the current\n # (sub)process without printing output or raising exceptions, preventing reliable detection. 
Apply the\n # same logic for the (newer) \"MacOSX\" backend.\n if not is_darwin:\n exclude_backends |= {'CocoaAgg', 'MacOSX'}\n\n # For safety, attempt to import each backend in an isolated sub-process.\n for backend_name in backend_names:\n if backend_name in exclude_backends:\n logger.info(' Matplotlib backend %r: excluded', backend_name)\n continue\n\n try:\n module_name = _backend_module_name(backend_name)\n _check_mpl_backend_importable(module_name) # NOTE: uses an isolated sub-process.\n except Exception:\n # Backend is not importable, for whatever reason.\n logger.info(' Matplotlib backend %r: ignored due to import error', backend_name)\n continue\n\n logger.info(' Matplotlib backend %r: added', backend_name)\n importable_backends.append(backend_name)\n\n return importable_backends\n\n\ndef hook(hook_api):\n # Backend collection setting\n backends_method = get_hook_config(hook_api, 'matplotlib', 'backends')\n if backends_method is None:\n backends_method = 'auto' # default method\n\n # Select backend(s)\n if backends_method == 'auto':\n logger.info(\"Matplotlib backend selection method: automatic discovery of used backends\")\n backend_names = _autodetect_used_backends(hook_api)\n elif backends_method == 'all':\n logger.info(\"Matplotlib backend selection method: collection of all importable backends\")\n backend_names = _collect_all_importable_backends(hook_api)\n else:\n logger.info(\"Matplotlib backend selection method: user-provided name(s)\")\n if isinstance(backends_method, str):\n backend_names = [backends_method]\n else:\n assert isinstance(backends_method, list), \"User-provided backend name(s) must be either a string or a list!\"\n backend_names = backends_method\n\n logger.info(\"Selected matplotlib backends: %r\", backend_names)\n\n # Set module names as hiddenimports\n module_names = [_backend_module_name(backend) for backend in backend_names] # backend name -> module name\n hook_api.add_imports(*module_names)\n", "path": "PyInstaller/hooks/hook-matplotlib.backends.py"}]}
| 3,518 | 247 |
gh_patches_debug_16497
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-2528
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Vision raises TypeError: b'...' is not JSON serializable on Python 3
Simple reproducible case:
``` python
import io
import os
from google.cloud import vision
vision_client = vision.Client()
file_name = os.path.join(
os.path.dirname(__file__),
'resources/wakeupcat.jpg')
with io.open(file_name, 'rb') as image_file:
image = vision_client.image(
content=image_file.read())
labels = image.detect_labels()
print('Labels:')
for label in labels:
print(label.description)
```
works fine on Python 2, fails on Python 3.
</issue>
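The root cause appears in `Image.__init__` below: `base64.b64encode()` returns `bytes` on Python 3, and the standard JSON encoder refuses to serialize `bytes`. A minimal, self-contained sketch of that behaviour (the image payload here is a placeholder, not taken from the repository):

```python
import json
from base64 import b64encode

content = b"\x89PNG\r\n\x1a\n"        # placeholder image bytes
encoded = b64encode(content)           # str on Python 2, bytes on Python 3

try:
    json.dumps({"content": encoded})   # Python 3: TypeError, bytes are not JSON serializable
except TypeError as exc:
    print(exc)

json.dumps({"content": encoded.decode("ascii")})  # decoding to text serializes fine
```

Decoding the base64 output back to text is the same shape of fix that the patch further down applies via `google.cloud._helpers._bytes_to_unicode`.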
<code>
[start of vision/google/cloud/vision/image.py]
1 # Copyright 2016 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Image represented by either a URI or byte stream."""
16
17
18 from base64 import b64encode
19
20 from google.cloud._helpers import _to_bytes
21 from google.cloud.vision.entity import EntityAnnotation
22 from google.cloud.vision.face import Face
23 from google.cloud.vision.feature import Feature
24 from google.cloud.vision.feature import FeatureTypes
25 from google.cloud.vision.color import ImagePropertiesAnnotation
26 from google.cloud.vision.safe import SafeSearchAnnotation
27
28
29 class Image(object):
30 """Image representation containing information to be annotate.
31
32 :type content: bytes
33 :param content: Byte stream of an image.
34
35 :type source_uri: str
36 :param source_uri: Google Cloud Storage URI of image.
37
38 :type client: :class:`~google.cloud.vision.client.Client`
39 :param client: Instance of Vision client.
40 """
41
42 def __init__(self, client, content=None, source_uri=None):
43 self.client = client
44 self._content = None
45 self._source = None
46
47 if source_uri:
48 self._source = source_uri
49 else:
50 self._content = b64encode(_to_bytes(content))
51
52 def as_dict(self):
53 """Generate dictionary structure for request.
54
55 :rtype: dict
56 :returns: Dictionary with source information for image.
57 """
58 if self.content:
59 return {
60 'content': self.content
61 }
62 else:
63 return {
64 'source': {
65 'gcs_image_uri': self.source
66 }
67 }
68
69 @property
70 def content(self):
71 """Base64 encoded image content.
72
73 :rtype: str
74 :returns: Base64 encoded image bytes.
75 """
76 return self._content
77
78 @property
79 def source(self):
80 """Google Cloud Storage URI.
81
82 :rtype: str
83 :returns: String of Google Cloud Storage URI.
84 """
85 return self._source
86
87 def _detect_annotation(self, feature):
88 """Generic method for detecting a single annotation.
89
90 :type feature: :class:`~google.cloud.vision.feature.Feature`
91 :param feature: The ``Feature`` indication the type of annotation to
92 perform.
93
94 :rtype: list
95 :returns: List of
96 :class:`~google.cloud.vision.entity.EntityAnnotation`.
97 """
98 reverse_types = {
99 'LABEL_DETECTION': 'labelAnnotations',
100 'LANDMARK_DETECTION': 'landmarkAnnotations',
101 'LOGO_DETECTION': 'logoAnnotations',
102 'TEXT_DETECTION': 'textAnnotations',
103 }
104 detected_objects = []
105 result = self.client.annotate(self, [feature])
106 for response in result[reverse_types[feature.feature_type]]:
107 detected_object = EntityAnnotation.from_api_repr(response)
108 detected_objects.append(detected_object)
109 return detected_objects
110
111 def detect_faces(self, limit=10):
112 """Detect faces in image.
113
114 :type limit: int
115 :param limit: The number of faces to try and detect.
116
117 :rtype: list
118 :returns: List of :class:`~google.cloud.vision.face.Face`.
119 """
120 faces = []
121 face_detection_feature = Feature(FeatureTypes.FACE_DETECTION, limit)
122 result = self.client.annotate(self, [face_detection_feature])
123 for face_response in result['faceAnnotations']:
124 face = Face.from_api_repr(face_response)
125 faces.append(face)
126
127 return faces
128
129 def detect_labels(self, limit=10):
130 """Detect labels that describe objects in an image.
131
132 :type limit: int
133 :param limit: The maximum number of labels to try and detect.
134
135 :rtype: list
136 :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation`
137 """
138 feature = Feature(FeatureTypes.LABEL_DETECTION, limit)
139 return self._detect_annotation(feature)
140
141 def detect_landmarks(self, limit=10):
142 """Detect landmarks in an image.
143
144 :type limit: int
145 :param limit: The maximum number of landmarks to find.
146
147 :rtype: list
148 :returns: List of
149 :class:`~google.cloud.vision.entity.EntityAnnotation`.
150 """
151 feature = Feature(FeatureTypes.LANDMARK_DETECTION, limit)
152 return self._detect_annotation(feature)
153
154 def detect_logos(self, limit=10):
155 """Detect logos in an image.
156
157 :type limit: int
158 :param limit: The maximum number of logos to find.
159
160 :rtype: list
161 :returns: List of
162 :class:`~google.cloud.vision.entity.EntityAnnotation`.
163 """
164 feature = Feature(FeatureTypes.LOGO_DETECTION, limit)
165 return self._detect_annotation(feature)
166
167 def detect_properties(self, limit=10):
168 """Detect the color properties of an image.
169
170 :type limit: int
171 :param limit: The maximum number of image properties to find.
172
173 :rtype: list
174 :returns: List of
175 :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`.
176 """
177 feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)
178 result = self.client.annotate(self, [feature])
179 response = result['imagePropertiesAnnotation']
180 return ImagePropertiesAnnotation.from_api_repr(response)
181
182 def detect_safe_search(self, limit=10):
183 """Retreive safe search properties from an image.
184
185 :type limit: int
186 :param limit: The number of faces to try and detect.
187
188 :rtype: list
189 :returns: List of
190 :class:`~google.cloud.vision.sage.SafeSearchAnnotation`.
191 """
192 safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION,
193 limit)
194 result = self.client.annotate(self, [safe_detection_feature])
195 safe_search_response = result['safeSearchAnnotation']
196 return SafeSearchAnnotation.from_api_repr(safe_search_response)
197
198 def detect_text(self, limit=10):
199 """Detect text in an image.
200
201 :type limit: int
202 :param limit: The maximum instances of text to find.
203
204 :rtype: list
205 :returns: List of
206 :class:`~google.cloud.vision.entity.EntityAnnotation`.
207 """
208 feature = Feature(FeatureTypes.TEXT_DETECTION, limit)
209 return self._detect_annotation(feature)
210
[end of vision/google/cloud/vision/image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py
--- a/vision/google/cloud/vision/image.py
+++ b/vision/google/cloud/vision/image.py
@@ -18,6 +18,7 @@
from base64 import b64encode
from google.cloud._helpers import _to_bytes
+from google.cloud._helpers import _bytes_to_unicode
from google.cloud.vision.entity import EntityAnnotation
from google.cloud.vision.face import Face
from google.cloud.vision.feature import Feature
@@ -47,7 +48,7 @@
if source_uri:
self._source = source_uri
else:
- self._content = b64encode(_to_bytes(content))
+ self._content = _bytes_to_unicode(b64encode(_to_bytes(content)))
def as_dict(self):
"""Generate dictionary structure for request.
|
{"golden_diff": "diff --git a/vision/google/cloud/vision/image.py b/vision/google/cloud/vision/image.py\n--- a/vision/google/cloud/vision/image.py\n+++ b/vision/google/cloud/vision/image.py\n@@ -18,6 +18,7 @@\n from base64 import b64encode\n \n from google.cloud._helpers import _to_bytes\n+from google.cloud._helpers import _bytes_to_unicode\n from google.cloud.vision.entity import EntityAnnotation\n from google.cloud.vision.face import Face\n from google.cloud.vision.feature import Feature\n@@ -47,7 +48,7 @@\n if source_uri:\n self._source = source_uri\n else:\n- self._content = b64encode(_to_bytes(content))\n+ self._content = _bytes_to_unicode(b64encode(_to_bytes(content)))\n \n def as_dict(self):\n \"\"\"Generate dictionary structure for request.\n", "issue": "Vision raises TypeError: b'...' is not JSON serializable on Python 3\nSimple reproducible case:\n\n``` python\nimport io\nimport os\n\nfrom google.cloud import vision\n\nvision_client = vision.Client()\n\nfile_name = os.path.join(\n os.path.dirname(__file__),\n 'resources/wakeupcat.jpg')\n\nwith io.open(file_name, 'rb') as image_file:\n image = vision_client.image(\n content=image_file.read())\n\nlabels = image.detect_labels()\n\nprint('Labels:')\nfor label in labels:\n print(label.description)\n```\n\nworks fine on Python 2, fails on Python 3.\n\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Image represented by either a URI or byte stream.\"\"\"\n\n\nfrom base64 import b64encode\n\nfrom google.cloud._helpers import _to_bytes\nfrom google.cloud.vision.entity import EntityAnnotation\nfrom google.cloud.vision.face import Face\nfrom google.cloud.vision.feature import Feature\nfrom google.cloud.vision.feature import FeatureTypes\nfrom google.cloud.vision.color import ImagePropertiesAnnotation\nfrom google.cloud.vision.safe import SafeSearchAnnotation\n\n\nclass Image(object):\n \"\"\"Image representation containing information to be annotate.\n\n :type content: bytes\n :param content: Byte stream of an image.\n\n :type source_uri: str\n :param source_uri: Google Cloud Storage URI of image.\n\n :type client: :class:`~google.cloud.vision.client.Client`\n :param client: Instance of Vision client.\n \"\"\"\n\n def __init__(self, client, content=None, source_uri=None):\n self.client = client\n self._content = None\n self._source = None\n\n if source_uri:\n self._source = source_uri\n else:\n self._content = b64encode(_to_bytes(content))\n\n def as_dict(self):\n \"\"\"Generate dictionary structure for request.\n\n :rtype: dict\n :returns: Dictionary with source information for image.\n \"\"\"\n if self.content:\n return {\n 'content': self.content\n }\n else:\n return {\n 'source': {\n 'gcs_image_uri': self.source\n }\n }\n\n @property\n def content(self):\n \"\"\"Base64 encoded image content.\n\n :rtype: str\n :returns: Base64 encoded image bytes.\n \"\"\"\n return self._content\n\n @property\n def source(self):\n \"\"\"Google Cloud Storage URI.\n\n :rtype: str\n 
:returns: String of Google Cloud Storage URI.\n \"\"\"\n return self._source\n\n def _detect_annotation(self, feature):\n \"\"\"Generic method for detecting a single annotation.\n\n :type feature: :class:`~google.cloud.vision.feature.Feature`\n :param feature: The ``Feature`` indication the type of annotation to\n perform.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n reverse_types = {\n 'LABEL_DETECTION': 'labelAnnotations',\n 'LANDMARK_DETECTION': 'landmarkAnnotations',\n 'LOGO_DETECTION': 'logoAnnotations',\n 'TEXT_DETECTION': 'textAnnotations',\n }\n detected_objects = []\n result = self.client.annotate(self, [feature])\n for response in result[reverse_types[feature.feature_type]]:\n detected_object = EntityAnnotation.from_api_repr(response)\n detected_objects.append(detected_object)\n return detected_objects\n\n def detect_faces(self, limit=10):\n \"\"\"Detect faces in image.\n\n :type limit: int\n :param limit: The number of faces to try and detect.\n\n :rtype: list\n :returns: List of :class:`~google.cloud.vision.face.Face`.\n \"\"\"\n faces = []\n face_detection_feature = Feature(FeatureTypes.FACE_DETECTION, limit)\n result = self.client.annotate(self, [face_detection_feature])\n for face_response in result['faceAnnotations']:\n face = Face.from_api_repr(face_response)\n faces.append(face)\n\n return faces\n\n def detect_labels(self, limit=10):\n \"\"\"Detect labels that describe objects in an image.\n\n :type limit: int\n :param limit: The maximum number of labels to try and detect.\n\n :rtype: list\n :returns: List of :class:`~google.cloud.vision.entity.EntityAnnotation`\n \"\"\"\n feature = Feature(FeatureTypes.LABEL_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_landmarks(self, limit=10):\n \"\"\"Detect landmarks in an image.\n\n :type limit: int\n :param limit: The maximum number of landmarks to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.LANDMARK_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_logos(self, limit=10):\n \"\"\"Detect logos in an image.\n\n :type limit: int\n :param limit: The maximum number of logos to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.LOGO_DETECTION, limit)\n return self._detect_annotation(feature)\n\n def detect_properties(self, limit=10):\n \"\"\"Detect the color properties of an image.\n\n :type limit: int\n :param limit: The maximum number of image properties to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.color.ImagePropertiesAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.IMAGE_PROPERTIES, limit)\n result = self.client.annotate(self, [feature])\n response = result['imagePropertiesAnnotation']\n return ImagePropertiesAnnotation.from_api_repr(response)\n\n def detect_safe_search(self, limit=10):\n \"\"\"Retreive safe search properties from an image.\n\n :type limit: int\n :param limit: The number of faces to try and detect.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.sage.SafeSearchAnnotation`.\n \"\"\"\n safe_detection_feature = Feature(FeatureTypes.SAFE_SEARCH_DETECTION,\n limit)\n result = self.client.annotate(self, [safe_detection_feature])\n safe_search_response = result['safeSearchAnnotation']\n return SafeSearchAnnotation.from_api_repr(safe_search_response)\n\n def 
detect_text(self, limit=10):\n \"\"\"Detect text in an image.\n\n :type limit: int\n :param limit: The maximum instances of text to find.\n\n :rtype: list\n :returns: List of\n :class:`~google.cloud.vision.entity.EntityAnnotation`.\n \"\"\"\n feature = Feature(FeatureTypes.TEXT_DETECTION, limit)\n return self._detect_annotation(feature)\n", "path": "vision/google/cloud/vision/image.py"}]}
| 2,662 | 193 |
gh_patches_debug_19957
|
rasdani/github-patches
|
git_diff
|
huggingface__accelerate-43
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`
Currently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`. Is this an intentional exclusion?
</issue>
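For reference, the golden diff below notes that `set_to_none` was introduced on `Optimizer.zero_grad()` in PyTorch 1.7.0, so any wrapper that forwards it has to account for older releases. A rough, illustrative sketch of one way a thin wrapper could forward the argument (not the accelerate implementation; the actual patch takes a version-check approach instead):

```python
import inspect

class OptimizerWrapper:
    """Illustrative wrapper that forwards ``set_to_none`` when supported."""

    def __init__(self, optimizer):
        self.optimizer = optimizer

    def zero_grad(self, set_to_none=None):
        if set_to_none is None:
            self.optimizer.zero_grad()
            return
        # PyTorch < 1.7 has no `set_to_none` parameter on zero_grad().
        params = inspect.signature(self.optimizer.zero_grad).parameters
        if "set_to_none" not in params:
            raise ValueError("`set_to_none` requires PyTorch >= 1.7.0")
        self.optimizer.zero_grad(set_to_none=set_to_none)
```

Usage would then be `wrapper.zero_grad(set_to_none=True)`, which on recent PyTorch sets each `param.grad` to `None` rather than filling it with zeros.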
<code>
[start of src/accelerate/optimizer.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import torch
16
17 from .state import AcceleratorState, DistributedType, is_tpu_available
18
19
20 if is_tpu_available():
21 import torch_xla.core.xla_model as xm
22
23
24 def move_to_device(state, device):
25 if isinstance(state, (list, tuple)):
26 return type(state)(move_to_device(t, device) for t in state)
27 elif isinstance(state, dict):
28 return type(state)({k: move_to_device(v, device) for k, v in state.items()})
29 elif isinstance(state, torch.Tensor):
30 return state.to(device)
31 return state
32
33
34 class AcceleratedOptimizer(torch.optim.Optimizer):
35 """
36 Internal wrapper around a torch optimizer.
37
38 Args:
39 optimizer (:obj:`torch.optim.optimizer.Optimizer`):
40 The optimizer to wrap.
41 device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):
42 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
43 :obj:`optimizer` on the right device.
44 scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):
45 The scaler to use in the step function if training with mixed precision.
46 """
47
48 def __init__(self, optimizer, device_placement=True, scaler=None):
49 self.optimizer = optimizer
50 self.scaler = scaler
51 self.state = AcceleratorState()
52
53 # Handle device placement
54 if device_placement:
55 state_dict = self.optimizer.state_dict()
56 if self.state.distributed_type == DistributedType.TPU:
57 xm.send_cpu_data_to_device(state_dict, self.state.device)
58 else:
59 state_dict = move_to_device(state_dict, self.state.device)
60 self.optimizer.load_state_dict(state_dict)
61
62 @property
63 def param_groups(self):
64 return self.optimizer.param_groups
65
66 @param_groups.setter
67 def param_groups(self, param_groups):
68 self.optimizer.param_groups = param_groups
69
70 @property
71 def defaults(self):
72 return self.optimizer.defaults
73
74 @defaults.setter
75 def defaults(self, defaults):
76 self.optimizer.defaults = defaults
77
78 def add_param_group(self, param_group):
79 self.optimizer.add_param_group(param_group)
80
81 def load_state_dict(self, state_dict):
82 if self.state.distributed_type == DistributedType.TPU and self.device_placement:
83 xm.send_cpu_data_to_device(state_dict, self.state.device)
84 self.optimizer.load_state_dict(state_dict)
85
86 def state_dict(self):
87 return self.optimizer.state_dict()
88
89 def zero_grad(self):
90 self.optimizer.zero_grad()
91
92 def step(self):
93 if self.state.distributed_type == DistributedType.TPU:
94 xm.optimizer_step(self.optimizer)
95 elif self.scaler is not None:
96 self.scaler.step(self.optimizer)
97 self.scaler.update()
98 else:
99 self.optimizer.step()
100
101 def _switch_parameters(self, parameters_map):
102 for param_group in self.optimizer.param_groups:
103 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
104
[end of src/accelerate/optimizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -14,6 +14,8 @@
import torch
+from packaging import version
+
from .state import AcceleratorState, DistributedType, is_tpu_available
@@ -86,8 +88,18 @@
def state_dict(self):
return self.optimizer.state_dict()
- def zero_grad(self):
- self.optimizer.zero_grad()
+ def zero_grad(self, set_to_none=None):
+ if version.parse(torch.__version__) < version.parse("1.7.0"):
+ if set_to_none is not None:
+ raise ValueError(
+ "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for "
+ f"earlier versions (found version {torch.__version__})."
+ )
+ self.optimizer.zero_grad()
+ else:
+ if set_to_none is not None:
+ set_to_none = False
+ self.optimizer.zero_grad(set_to_none=set_to_none)
def step(self):
if self.state.distributed_type == DistributedType.TPU:
|
{"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -14,6 +14,8 @@\n \n import torch\n \n+from packaging import version\n+\n from .state import AcceleratorState, DistributedType, is_tpu_available\n \n \n@@ -86,8 +88,18 @@\n def state_dict(self):\n return self.optimizer.state_dict()\n \n- def zero_grad(self):\n- self.optimizer.zero_grad()\n+ def zero_grad(self, set_to_none=None):\n+ if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n+ if set_to_none is not None:\n+ raise ValueError(\n+ \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n+ f\"earlier versions (found version {torch.__version__}).\"\n+ )\n+ self.optimizer.zero_grad()\n+ else:\n+ if set_to_none is not None:\n+ set_to_none = False\n+ self.optimizer.zero_grad(set_to_none=set_to_none)\n \n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n", "issue": "AcceleratedOptimizer `zero_grad` argument not supported: `set_to_none`\nCurrently the `AcceleratedOptimizer` class doesn't support the argument `set_to_none`, is this an intentional exclusion?\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return type(state)(move_to_device(t, device) for t in state)\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def step(self):\n if self.state.distributed_type == DistributedType.TPU:\n xm.optimizer_step(self.optimizer)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n", "path": "src/accelerate/optimizer.py"}]}
| 1,570 | 280 |
gh_patches_debug_35610
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-712
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Credit card numbers' validity is not enforced
Credit card numbers generated by `credit_card_number()` are not always valid.
### Steps to reproduce
```python
fake = Faker()
fake.credit_card_number()
```
### Expected behavior
The credit card numbers generated should always be valid ones.
### Actual behavior
Some returned numbers are invalid. For example: "3337083863794024", "3337957986872777", "3088184514627042". You can verify it with http://www.validcreditcardnumber.com/
The problem is that the issuer identification is not always correct. You can check this with a regular expression; see, for example, https://www.regular-expressions.info/creditcard.html
Could the generator be modified to ensure that credit card numbers are always valid?
Credit card numbers' validity is not enforced
Credit card numbers generated by `credit_card_number()` are not always valid.
### Steps to reproduce
```python
fake = Faker()
fake.credit_card_number()
```
### Expected behavior
The credit card numbers generated should always be valid ones.
### Actual behavior
Some returned numbers are invalid. For example: "3337083863794024", "3337957986872777", "3088184514627042". You can verify it with http://www.validcreditcardnumber.com/
The problem is that the issuer identification is not always correct. You can check this with a regular expression; see, for example, https://www.regular-expressions.info/creditcard.html
Could the generator be modified to ensure that credit card numbers are always valid?
</issue>
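Two separate properties are in play here: the Luhn checksum, which the generator already gets right, and the issuer identification number implied by the prefix, which it sometimes gets wrong (for instance `3337...` is not a real JCB prefix). A small, hypothetical validation sketch that separates the two checks:

```python
import re

def luhn_valid(number: str) -> bool:
    """Return True if the digit string passes the Luhn checksum."""
    total = 0
    for i, char in enumerate(reversed(number)):
        digit = int(char)
        if i % 2 == 1:          # double every second digit from the right
            digit *= 2
            if digit > 9:
                digit -= 9
        total += digit
    return total % 10 == 0

# Commonly cited JCB pattern (see the regular-expressions.info page linked above).
JCB_RE = re.compile(r"^(?:2131|1800|35\d{3})\d{11}$")

print(luhn_valid("3337083863794024"))          # True  -> the checksum itself is fine
print(bool(JCB_RE.match("3337083863794024")))  # False -> 3337 is not a valid JCB prefix
```

So the defect is not in the check-digit logic but in the prefix tables, which is what the patch below changes.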
<code>
[start of faker/providers/credit_card/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from collections import OrderedDict
4
5 from .. import BaseProvider
6
7
8 class CreditCard(object):
9
10 def __init__(
11 self,
12 name,
13 prefixes,
14 length=16,
15 security_code='CVC',
16 security_code_length=3):
17 self.name = name
18 self.prefixes = prefixes
19 self.length = length
20 self.security_code = security_code
21 self.security_code_length = security_code_length
22
23
24 class Provider(BaseProvider):
25
26 prefix_maestro = ['5018', '5020', '5038', '5612', '5893',
27 '6304', '6759', '6761', '6762', '6763', '0604', '6390']
28 prefix_mastercard = ['51', '52', '53', '54', '55']
29 prefix_visa = ['4']
30 prefix_amex = ['34', '37']
31 prefix_discover = ['6011']
32 prefix_diners = ['300', '301', '302', '303', '304', '305']
33 prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']
34 prefix_jcb15 = ['2100', '1800']
35 prefix_voyager = ['8699']
36
37 credit_card_types = OrderedDict((
38 ('maestro', CreditCard('Maestro',
39 prefix_maestro, 12, security_code='CVV')),
40 ('mastercard', CreditCard('Mastercard',
41 prefix_mastercard, 16, security_code='CVV')),
42 ('visa16', CreditCard('VISA 16 digit', prefix_visa)),
43 ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),
44 ('amex', CreditCard('American Express', prefix_amex,
45 15, security_code='CID', security_code_length=4)),
46 ('discover', CreditCard('Discover', prefix_discover)),
47 ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),
48 ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),
49 ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),
50 ('voyager', CreditCard('Voyager', prefix_voyager, 15)),
51 ))
52 credit_card_types['visa'] = credit_card_types['visa16']
53 credit_card_types['jcb'] = credit_card_types['jcb16']
54
55 luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8,
56 '5': 1, '6': 3, '7': 5, '8': 7, '9': 9}
57
58 def credit_card_provider(self, card_type=None):
59 """ Returns the provider's name of the credit card. """
60 if card_type is None:
61 card_type = self.random_element(self.credit_card_types.keys())
62 return self._credit_card_type(card_type).name
63
64 def credit_card_number(self, card_type=None):
65 """ Returns a valid credit card number. """
66 card = self._credit_card_type(card_type)
67 prefix = self.random_element(card.prefixes)
68 number = self._generate_number(prefix, card.length)
69 return number
70
71 def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):
72 expire_date = self.generator.date_time_between(start, end)
73 return expire_date.strftime(date_format)
74
75 def credit_card_full(self, card_type=None):
76 card = self._credit_card_type(card_type)
77
78 tpl = ('{provider}\n'
79 '{owner}\n'
80 '{number} {expire_date}\n'
81 '{security}: {security_nb}\n')
82
83 tpl = tpl.format(provider=card.name,
84 owner=self.generator.parse(
85 "{{first_name}} {{last_name}}"),
86 number=self.credit_card_number(card),
87 expire_date=self.credit_card_expire(),
88 security=card.security_code,
89 security_nb=self.credit_card_security_code(card))
90
91 return self.generator.parse(tpl)
92
93 def credit_card_security_code(self, card_type=None):
94 """ Returns a security code string. """
95 sec_len = self._credit_card_type(card_type).security_code_length
96 return self.numerify('#' * sec_len)
97
98 def _credit_card_type(self, card_type=None):
99 """ Returns a random credit card type instance. """
100 if card_type is None:
101 card_type = self.random_element(self.credit_card_types.keys())
102 elif isinstance(card_type, CreditCard):
103 return card_type
104 return self.credit_card_types[card_type]
105
106 def _generate_number(self, prefix, length):
107 """
108 'prefix' is the start of the CC number as a string, any number of digits.
109 'length' is the length of the CC number to generate. Typically 13 or 16
110 """
111 number = prefix
112 # Generate random char digits
113 number += '#' * (length - len(prefix) - 1)
114 number = self.numerify(number)
115 reverse = number[::-1]
116 # Calculate sum
117 tot = 0
118 pos = 0
119 while pos < length - 1:
120 tot += Provider.luhn_lookup[reverse[pos]]
121 if pos != (length - 2):
122 tot += int(reverse[pos + 1])
123 pos += 2
124 # Calculate check digit
125 check_digit = (10 - (tot % 10)) % 10
126 number += str(check_digit)
127 return number
128
[end of faker/providers/credit_card/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/providers/credit_card/__init__.py b/faker/providers/credit_card/__init__.py
--- a/faker/providers/credit_card/__init__.py
+++ b/faker/providers/credit_card/__init__.py
@@ -23,16 +23,19 @@
class Provider(BaseProvider):
- prefix_maestro = ['5018', '5020', '5038', '5612', '5893',
+ # Prefixes from:
+ # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29
+ # * https://www.regular-expressions.info/creditcard.html
+ # * https://creditcardjs.com/credit-card-type-detection
+ prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',
'6304', '6759', '6761', '6762', '6763', '0604', '6390']
- prefix_mastercard = ['51', '52', '53', '54', '55']
+ prefix_mastercard = ['51', '52', '53', '54', '55', '222%']
prefix_visa = ['4']
prefix_amex = ['34', '37']
- prefix_discover = ['6011']
- prefix_diners = ['300', '301', '302', '303', '304', '305']
- prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']
- prefix_jcb15 = ['2100', '1800']
- prefix_voyager = ['8699']
+ prefix_discover = ['6011', '65']
+ prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']
+ prefix_jcb16 = ['35']
+ prefix_jcb15 = ['2131', '1800']
credit_card_types = OrderedDict((
('maestro', CreditCard('Maestro',
@@ -47,7 +50,6 @@
('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),
('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),
('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),
- ('voyager', CreditCard('Voyager', prefix_voyager, 15)),
))
credit_card_types['visa'] = credit_card_types['visa16']
credit_card_types['jcb'] = credit_card_types['jcb16']
@@ -65,7 +67,7 @@
""" Returns a valid credit card number. """
card = self._credit_card_type(card_type)
prefix = self.random_element(card.prefixes)
- number = self._generate_number(prefix, card.length)
+ number = self._generate_number(self.numerify(prefix), card.length)
return number
def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):
|
{"golden_diff": "diff --git a/faker/providers/credit_card/__init__.py b/faker/providers/credit_card/__init__.py\n--- a/faker/providers/credit_card/__init__.py\n+++ b/faker/providers/credit_card/__init__.py\n@@ -23,16 +23,19 @@\n \n class Provider(BaseProvider):\n \n- prefix_maestro = ['5018', '5020', '5038', '5612', '5893',\n+ # Prefixes from:\n+ # * https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_.28IIN.29\n+ # * https://www.regular-expressions.info/creditcard.html\n+ # * https://creditcardjs.com/credit-card-type-detection\n+ prefix_maestro = ['5018', '5020', '5038', '56##', '57##', '58##',\n '6304', '6759', '6761', '6762', '6763', '0604', '6390']\n- prefix_mastercard = ['51', '52', '53', '54', '55']\n+ prefix_mastercard = ['51', '52', '53', '54', '55', '222%']\n prefix_visa = ['4']\n prefix_amex = ['34', '37']\n- prefix_discover = ['6011']\n- prefix_diners = ['300', '301', '302', '303', '304', '305']\n- prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']\n- prefix_jcb15 = ['2100', '1800']\n- prefix_voyager = ['8699']\n+ prefix_discover = ['6011', '65']\n+ prefix_diners = ['300', '301', '302', '303', '304', '305', '36', '38']\n+ prefix_jcb16 = ['35']\n+ prefix_jcb15 = ['2131', '1800']\n \n credit_card_types = OrderedDict((\n ('maestro', CreditCard('Maestro',\n@@ -47,7 +50,6 @@\n ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),\n ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),\n ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),\n- ('voyager', CreditCard('Voyager', prefix_voyager, 15)),\n ))\n credit_card_types['visa'] = credit_card_types['visa16']\n credit_card_types['jcb'] = credit_card_types['jcb16']\n@@ -65,7 +67,7 @@\n \"\"\" Returns a valid credit card number. \"\"\"\n card = self._credit_card_type(card_type)\n prefix = self.random_element(card.prefixes)\n- number = self._generate_number(prefix, card.length)\n+ number = self._generate_number(self.numerify(prefix), card.length)\n return number\n \n def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):\n", "issue": "Credit card numbers' validity is not enforced\nCredit card numbers generated by `credit_card_number()` are not always valid.\r\n\r\n### Steps to reproduce\r\n\r\n```python\r\nfake = Faker()\r\nfake.credit_card_number()\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe credit card numbers generated should always be valid ones.\r\n\r\n### Actual behavior\r\n\r\nSome returned numbers are invalid. For example: \"3337083863794024\", \"3337957986872777\", \"3088184514627042\". You can verify it with http://www.validcreditcardnumber.com/\r\n\r\nThe problem is that the issuer identification is not always correct. You can check that by performing a RegEx, see as an example https://www.regular-expressions.info/creditcard.html\r\n\r\nCould the generator be modified to ensure that credit card numbers are always valid?\nCredit card numbers' validity is not enforced\nCredit card numbers generated by `credit_card_number()` are not always valid.\r\n\r\n### Steps to reproduce\r\n\r\n```python\r\nfake = Faker()\r\nfake.credit_card_number()\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe credit card numbers generated should always be valid ones.\r\n\r\n### Actual behavior\r\n\r\nSome returned numbers are invalid. For example: \"3337083863794024\", \"3337957986872777\", \"3088184514627042\". You can verify it with http://www.validcreditcardnumber.com/\r\n\r\nThe problem is that the issuer identification is not always correct. 
You can check that by performing a RegEx, see as an example https://www.regular-expressions.info/creditcard.html\r\n\r\nCould the generator be modified to ensure that credit card numbers are always valid?\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom collections import OrderedDict\n\nfrom .. import BaseProvider\n\n\nclass CreditCard(object):\n\n def __init__(\n self,\n name,\n prefixes,\n length=16,\n security_code='CVC',\n security_code_length=3):\n self.name = name\n self.prefixes = prefixes\n self.length = length\n self.security_code = security_code\n self.security_code_length = security_code_length\n\n\nclass Provider(BaseProvider):\n\n prefix_maestro = ['5018', '5020', '5038', '5612', '5893',\n '6304', '6759', '6761', '6762', '6763', '0604', '6390']\n prefix_mastercard = ['51', '52', '53', '54', '55']\n prefix_visa = ['4']\n prefix_amex = ['34', '37']\n prefix_discover = ['6011']\n prefix_diners = ['300', '301', '302', '303', '304', '305']\n prefix_jcb16 = ['3088', '3096', '3112', '3158', '3337', '3528']\n prefix_jcb15 = ['2100', '1800']\n prefix_voyager = ['8699']\n\n credit_card_types = OrderedDict((\n ('maestro', CreditCard('Maestro',\n prefix_maestro, 12, security_code='CVV')),\n ('mastercard', CreditCard('Mastercard',\n prefix_mastercard, 16, security_code='CVV')),\n ('visa16', CreditCard('VISA 16 digit', prefix_visa)),\n ('visa13', CreditCard('VISA 13 digit', prefix_visa, 13)),\n ('amex', CreditCard('American Express', prefix_amex,\n 15, security_code='CID', security_code_length=4)),\n ('discover', CreditCard('Discover', prefix_discover)),\n ('diners', CreditCard('Diners Club / Carte Blanche', prefix_diners, 14)),\n ('jcb15', CreditCard('JCB 15 digit', prefix_jcb15, 15)),\n ('jcb16', CreditCard('JCB 16 digit', prefix_jcb16)),\n ('voyager', CreditCard('Voyager', prefix_voyager, 15)),\n ))\n credit_card_types['visa'] = credit_card_types['visa16']\n credit_card_types['jcb'] = credit_card_types['jcb16']\n\n luhn_lookup = {'0': 0, '1': 2, '2': 4, '3': 6, '4': 8,\n '5': 1, '6': 3, '7': 5, '8': 7, '9': 9}\n\n def credit_card_provider(self, card_type=None):\n \"\"\" Returns the provider's name of the credit card. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n return self._credit_card_type(card_type).name\n\n def credit_card_number(self, card_type=None):\n \"\"\" Returns a valid credit card number. \"\"\"\n card = self._credit_card_type(card_type)\n prefix = self.random_element(card.prefixes)\n number = self._generate_number(prefix, card.length)\n return number\n\n def credit_card_expire(self, start='now', end='+10y', date_format='%m/%y'):\n expire_date = self.generator.date_time_between(start, end)\n return expire_date.strftime(date_format)\n\n def credit_card_full(self, card_type=None):\n card = self._credit_card_type(card_type)\n\n tpl = ('{provider}\\n'\n '{owner}\\n'\n '{number} {expire_date}\\n'\n '{security}: {security_nb}\\n')\n\n tpl = tpl.format(provider=card.name,\n owner=self.generator.parse(\n \"{{first_name}} {{last_name}}\"),\n number=self.credit_card_number(card),\n expire_date=self.credit_card_expire(),\n security=card.security_code,\n security_nb=self.credit_card_security_code(card))\n\n return self.generator.parse(tpl)\n\n def credit_card_security_code(self, card_type=None):\n \"\"\" Returns a security code string. 
\"\"\"\n sec_len = self._credit_card_type(card_type).security_code_length\n return self.numerify('#' * sec_len)\n\n def _credit_card_type(self, card_type=None):\n \"\"\" Returns a random credit card type instance. \"\"\"\n if card_type is None:\n card_type = self.random_element(self.credit_card_types.keys())\n elif isinstance(card_type, CreditCard):\n return card_type\n return self.credit_card_types[card_type]\n\n def _generate_number(self, prefix, length):\n \"\"\"\n 'prefix' is the start of the CC number as a string, any number of digits.\n 'length' is the length of the CC number to generate. Typically 13 or 16\n \"\"\"\n number = prefix\n # Generate random char digits\n number += '#' * (length - len(prefix) - 1)\n number = self.numerify(number)\n reverse = number[::-1]\n # Calculate sum\n tot = 0\n pos = 0\n while pos < length - 1:\n tot += Provider.luhn_lookup[reverse[pos]]\n if pos != (length - 2):\n tot += int(reverse[pos + 1])\n pos += 2\n # Calculate check digit\n check_digit = (10 - (tot % 10)) % 10\n number += str(check_digit)\n return number\n", "path": "faker/providers/credit_card/__init__.py"}]}
| 2,519 | 821 |
gh_patches_debug_19468
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-58
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Default connector and default room
When writing a skill which originates from something other than a message (e.g. cron #26), the response may need to know which room to post into.
Most chat clients have a default room, like `#general` in Slack. This could be available as a property in the connector so that skills can easily access it.
e.g
``` python
@non_message_decorator()
def myskill(opsdroid):
for connector in opsdroid.connectors:
message = Message("Message text", connector.default_room, None, connector)
connector.respond(message)
```
It should also be possible to override the default room in the connector config.
``` yaml
connectors:
slack:
default-room: "#random"
```
</issue>
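A rough sketch of how a cron-style skill could consume such a property once it exists. The helper function and the `default-room` lookup are illustrative assumptions, `default_room` is the attribute the patch below adds to the base connector, and the `Message` argument order simply mirrors the example above:

```python
from opsdroid.message import Message

def pick_default_connector(opsdroid):
    # Prefer a connector explicitly flagged as default in configuration,
    # otherwise fall back to the first one that loaded.
    for connector in opsdroid.connectors:
        if connector.config.get("default"):
            return connector
    return opsdroid.connectors[0]

async def nightly_report(opsdroid):
    connector = pick_default_connector(opsdroid)
    # A "default-room" key in the connector config would override the
    # connector's built-in default room.
    room = connector.config.get("default-room", connector.default_room)
    await connector.respond(Message("Nightly report ready", room, None, connector))
```

Because `Connector.respond()` is a coroutine in the code below, the skill awaits it rather than calling it directly.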
<code>
[start of opsdroid/core.py]
1 """Core components of OpsDroid."""
2
3 import logging
4 import sys
5 import weakref
6 import asyncio
7
8 from opsdroid.memory import Memory
9 from opsdroid.connector import Connector
10 from opsdroid.database import Database
11 from opsdroid.loader import Loader
12 from opsdroid.parsers.regex import parse_regex
13 from opsdroid.parsers.apiai import parse_apiai
14
15
16 class OpsDroid():
17 """Root object for opsdroid."""
18
19 # pylint: disable=too-many-instance-attributes
20 # All are reasonable in this case.
21
22 instances = []
23
24 def __init__(self):
25 """Start opsdroid."""
26 self.bot_name = 'opsdroid'
27 self.sys_status = 0
28 self.connectors = []
29 self.connector_tasks = []
30 self.eventloop = asyncio.get_event_loop()
31 self.skills = []
32 self.memory = Memory()
33 self.loader = Loader(self)
34 self.config = {}
35 logging.info("Created main opsdroid object")
36
37 def __enter__(self):
38 """Add self to existing instances."""
39 if len(self.__class__.instances) == 0:
40 self.__class__.instances.append(weakref.proxy(self))
41 else:
42 self.critical("opsdroid has already been started", 1)
43 return self
44
45 def __exit__(self, exc_type, exc_value, traceback):
46 """Remove self from existing instances."""
47 self.__class__.instances = []
48
49 def exit(self):
50 """Exit application."""
51 logging.info("Exiting application with return code " +
52 str(self.sys_status))
53 if self.eventloop.is_running():
54 self.eventloop.stop()
55 sys.exit(self.sys_status)
56
57 def critical(self, error, code):
58 """Exit due to unrecoverable error."""
59 self.sys_status = code
60 logging.critical(error)
61 print("Error: " + error)
62 self.exit()
63
64 def load(self):
65 """Load configuration."""
66 self.config = self.loader.load_config_file([
67 "./configuration.yaml",
68 "~/.opsdroid/configuration.yaml",
69 "/etc/opsdroid/configuration.yaml"
70 ])
71
72 def start_loop(self):
73 """Start the event loop."""
74 connectors, databases, skills = self.loader.load_config(self.config)
75 if databases is not None:
76 self.start_databases(databases)
77 self.setup_skills(skills)
78 self.start_connector_tasks(connectors)
79 try:
80 self.eventloop.run_forever()
81 except (KeyboardInterrupt, EOFError):
82 print('') # Prints a character return for return to shell
83 logging.info("Keyboard interrupt, exiting.")
84 self.exit()
85
86 def setup_skills(self, skills):
87 """Call the setup function on the passed in skills."""
88 for skill in skills:
89 try:
90 skill["module"].setup(self)
91 except AttributeError:
92 pass
93
94 def start_connector_tasks(self, connectors):
95 """Start the connectors."""
96 for connector_module in connectors:
97 for _, cls in connector_module["module"].__dict__.items():
98 if isinstance(cls, type) and \
99 issubclass(cls, Connector) and\
100 cls is not Connector:
101 connector_module["config"]["bot-name"] = self.bot_name
102 connector = cls(connector_module["config"])
103 self.connectors.append(connector)
104
105 if len(connectors) > 0:
106 for connector in self.connectors:
107 self.eventloop.run_until_complete(connector.connect(self))
108 for connector in self.connectors:
109 task = self.eventloop.create_task(connector.listen(self))
110 self.connector_tasks.append(task)
111 else:
112 self.critical("All connectors failed to load", 1)
113
114 def start_databases(self, databases):
115 """Start the databases."""
116 if len(databases) == 0:
117 logging.debug(databases)
118 logging.warning("All databases failed to load")
119 for database_module in databases:
120 for name, cls in database_module["module"].__dict__.items():
121 if isinstance(cls, type) and \
122 issubclass(cls, Database) and \
123 cls is not Database:
124 logging.debug("Adding database: " + name)
125 database = cls(database_module["config"])
126 self.memory.databases.append(database)
127 self.eventloop.run_until_complete(database.connect(self))
128
129 async def parse(self, message):
130 """Parse a string against all skills."""
131 if message.text.strip() != "":
132 logging.debug("Parsing input: " + message.text)
133
134 await parse_regex(self, message)
135
136 if "parsers" in self.config and "apiai" in self.config["parsers"]:
137 await parse_apiai(self, message)
138
[end of opsdroid/core.py]
[start of opsdroid/connector.py]
1 """A base class for connectors to inherit from."""
2
3 from opsdroid.message import Message # NOQA # pylint: disable=unused-import
4
5
6 class Connector():
7 """A base connector.
8
9 Connectors are used to interact with a given chat service.
10
11 """
12
13 def __init__(self, config):
14 """Setup the connector.
15
16 Set some basic properties from the connector config such as the name
17 of this connector and the name the bot should appear with in chat
18 service.
19
20 Args:
21 config (dict): The config for this connector specified in the
22 `configuration.yaml` file.
23
24 """
25 self.name = ""
26 self.config = config
27
28 async def connect(self, opsdroid):
29 """Connect to chat service.
30
31 This method should create a connection to the desired chat service.
32 It should also be possible to call it multiple times in the event of
33 being disconnected.
34
35 Args:
36 opsdroid (OpsDroid): An instance of the opsdroid core.
37
38 """
39 raise NotImplementedError
40
41 async def listen(self, opsdroid):
42 """Listen to chat service and parse all messages.
43
44 This method should block the thread with an infinite loop and create
45 Message objects for chat messages coming from the service. It should
46 then call `await opsdroid.parse(message)` on those messages.
47
48 As the method should include some kind of `while True` all messages
49 from the chat service should be "awaited" asyncronously to avoid
50 blocking the thread.
51
52 Args:
53 opsdroid (OpsDroid): An instance of the opsdroid core.
54
55 """
56 raise NotImplementedError
57
58 async def respond(self, message):
59 """Send a message back to the chat service.
60
61 The message object will have a `text` property which should be sent
62 back to the chat service. It may also have a `room` and `user` property
63 which gives information on where the message should be directed.
64
65 Args:
66 message (Message): A message received by the connector.
67
68 Returns:
69 bool: True for message successfully sent. False otherwise.
70
71 """
72 raise NotImplementedError
73
[end of opsdroid/connector.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/connector.py b/opsdroid/connector.py
--- a/opsdroid/connector.py
+++ b/opsdroid/connector.py
@@ -24,6 +24,7 @@
"""
self.name = ""
self.config = config
+ self.default_room = None
async def connect(self, opsdroid):
"""Connect to chat service.
diff --git a/opsdroid/core.py b/opsdroid/core.py
--- a/opsdroid/core.py
+++ b/opsdroid/core.py
@@ -46,6 +46,18 @@
"""Remove self from existing instances."""
self.__class__.instances = []
+ @property
+ def default_connector(self):
+ """Return the default connector."""
+ default_connector = None
+ for connector in self.connectors:
+ if "default" in connector.config and connector.config["default"]:
+ default_connector = connector
+ break
+ if default_connector is None:
+ default_connector = self.connectors[0]
+ return default_connector
+
def exit(self):
"""Exit application."""
logging.info("Exiting application with return code " +
|
{"golden_diff": "diff --git a/opsdroid/connector.py b/opsdroid/connector.py\n--- a/opsdroid/connector.py\n+++ b/opsdroid/connector.py\n@@ -24,6 +24,7 @@\n \"\"\"\n self.name = \"\"\n self.config = config\n+ self.default_room = None\n \n async def connect(self, opsdroid):\n \"\"\"Connect to chat service.\ndiff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -46,6 +46,18 @@\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n \n+ @property\n+ def default_connector(self):\n+ \"\"\"Return the default connector.\"\"\"\n+ default_connector = None\n+ for connector in self.connectors:\n+ if \"default\" in connector.config and connector.config[\"default\"]:\n+ default_connector = connector\n+ break\n+ if default_connector is None:\n+ default_connector = self.connectors[0]\n+ return default_connector\n+\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n", "issue": "Default connector and default room\nWhen writing a skill which originates from something other than a message (e.g cron #26) the response may need to know which room to post into. \n\nMost chat clients have a default room, like `#general` in Slack. This could be available as a property in the connector so that skills can easily access it. \n\ne.g\n\n``` python\n@non_message_decorator()\ndef myskill(opsdroid):\n for connector in opsdroid.connectors:\n message = Message(\"Message text\", connector.default_room, None, connector)\n connector.respond(message)\n```\n\nIt should also be possible to override the default room in the connector config.\n\n``` yaml\nconnectors:\n slack:\n default-room: \"#random\"\n```\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nimport asyncio\n\nfrom opsdroid.memory import Memory\nfrom opsdroid.connector import Connector\nfrom opsdroid.database import Database\nfrom opsdroid.loader import Loader\nfrom opsdroid.parsers.regex import parse_regex\nfrom opsdroid.parsers.apiai import parse_apiai\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n # pylint: disable=too-many-instance-attributes\n # All are reasonable in this case.\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_tasks = []\n self.eventloop = asyncio.get_event_loop()\n self.skills = []\n self.memory = Memory()\n self.loader = Loader(self)\n self.config = {}\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n if self.eventloop.is_running():\n self.eventloop.stop()\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def load(self):\n \"\"\"Load configuration.\"\"\"\n self.config = self.loader.load_config_file([\n \"./configuration.yaml\",\n 
\"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n\n def start_loop(self):\n \"\"\"Start the event loop.\"\"\"\n connectors, databases, skills = self.loader.load_config(self.config)\n if databases is not None:\n self.start_databases(databases)\n self.setup_skills(skills)\n self.start_connector_tasks(connectors)\n try:\n self.eventloop.run_forever()\n except (KeyboardInterrupt, EOFError):\n print('') # Prints a character return for return to shell\n logging.info(\"Keyboard interrupt, exiting.\")\n self.exit()\n\n def setup_skills(self, skills):\n \"\"\"Call the setup function on the passed in skills.\"\"\"\n for skill in skills:\n try:\n skill[\"module\"].setup(self)\n except AttributeError:\n pass\n\n def start_connector_tasks(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n for connector_module in connectors:\n for _, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Connector) and\\\n cls is not Connector:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n\n if len(connectors) > 0:\n for connector in self.connectors:\n self.eventloop.run_until_complete(connector.connect(self))\n for connector in self.connectors:\n task = self.eventloop.create_task(connector.listen(self))\n self.connector_tasks.append(task)\n else:\n self.critical(\"All connectors failed to load\", 1)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.debug(databases)\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \\\n issubclass(cls, Database) and \\\n cls is not Database:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n self.eventloop.run_until_complete(database.connect(self))\n\n async def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n\n await parse_regex(self, message)\n\n if \"parsers\" in self.config and \"apiai\" in self.config[\"parsers\"]:\n await parse_apiai(self, message)\n", "path": "opsdroid/core.py"}, {"content": "\"\"\"A base class for connectors to inherit from.\"\"\"\n\nfrom opsdroid.message import Message # NOQA # pylint: disable=unused-import\n\n\nclass Connector():\n \"\"\"A base connector.\n\n Connectors are used to interact with a given chat service.\n\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Setup the connector.\n\n Set some basic properties from the connector config such as the name\n of this connector and the name the bot should appear with in chat\n service.\n\n Args:\n config (dict): The config for this connector specified in the\n `configuration.yaml` file.\n\n \"\"\"\n self.name = \"\"\n self.config = config\n\n async def connect(self, opsdroid):\n \"\"\"Connect to chat service.\n\n This method should create a connection to the desired chat service.\n It should also be possible to call it multiple times in the event of\n being disconnected.\n\n Args:\n opsdroid (OpsDroid): An instance of the opsdroid core.\n\n \"\"\"\n raise NotImplementedError\n\n async def listen(self, opsdroid):\n \"\"\"Listen to chat service and parse all messages.\n\n This method should block the thread with an infinite loop and 
create\n Message objects for chat messages coming from the service. It should\n then call `await opsdroid.parse(message)` on those messages.\n\n As the method should include some kind of `while True` all messages\n from the chat service should be \"awaited\" asyncronously to avoid\n blocking the thread.\n\n Args:\n opsdroid (OpsDroid): An instance of the opsdroid core.\n\n \"\"\"\n raise NotImplementedError\n\n async def respond(self, message):\n \"\"\"Send a message back to the chat service.\n\n The message object will have a `text` property which should be sent\n back to the chat service. It may also have a `room` and `user` property\n which gives information on where the message should be directed.\n\n Args:\n message (Message): A message received by the connector.\n\n Returns:\n bool: True for message successfully sent. False otherwise.\n\n \"\"\"\n raise NotImplementedError\n", "path": "opsdroid/connector.py"}]}
| 2,602 | 265 |
gh_patches_debug_26756
|
rasdani/github-patches
|
git_diff
|
boto__boto-709
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CloudFormation Bug in Parsing Describe Stacks Response
Currently the boto.cloudformation.CloudFormationConnection.describe_stacks() call does _not_ correctly parse the Capabilities section of the XML response.
Because there is no dedicated handler, the parser assumes the Capabilities section is part of the main Stack element. This means that, for example, if a Stack contains both a Capabilities section and an Outputs section, the Outputs section will not parse correctly either; instead, the Outputs member elements get parsed as top-level Stack elements.
This is an issue with the 2.3.0 release.
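For concreteness, a DescribeStacks response has roughly the following shape (reconstructed here for illustration; this fragment is not taken from the original report). Without a dedicated handler for `<Capabilities>`, its `<member>` values and the subsequent `<Outputs>` members end up attributed to the Stack element itself:

```xml
<Stack>
  <StackName>example-stack</StackName>
  <Capabilities>
    <member>CAPABILITY_IAM</member>
  </Capabilities>
  <Outputs>
    <member>
      <OutputKey>Endpoint</OutputKey>
      <OutputValue>http://example.invalid</OutputValue>
    </member>
  </Outputs>
</Stack>
```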
</issue>
<code>
[start of boto/cloudformation/stack.py]
1 from datetime import datetime
2
3 from boto.resultset import ResultSet
4
5 class Stack:
6 def __init__(self, connection=None):
7 self.connection = connection
8 self.creation_time = None
9 self.description = None
10 self.disable_rollback = None
11 self.notification_arns = []
12 self.outputs = []
13 self.parameters = []
14 self.stack_id = None
15 self.stack_status = None
16 self.stack_name = None
17 self.stack_name_reason = None
18 self.timeout_in_minutes = None
19
20 def startElement(self, name, attrs, connection):
21 if name == "Parameters":
22 self.parameters = ResultSet([('member', Parameter)])
23 return self.parameters
24 elif name == "Outputs":
25 self.outputs = ResultSet([('member', Output)])
26 return self.outputs
27 else:
28 return None
29
30 def endElement(self, name, value, connection):
31 if name == 'CreationTime':
32 self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
33 elif name == "Description":
34 self.description = value
35 elif name == "DisableRollback":
36 self.disable_rollback = bool(value)
37 elif name == "NotificationARNs":
38 self.notification_arns = value
39 elif name == 'StackId':
40 self.stack_id = value
41 elif name == 'StackName':
42 self.stack_name = value
43 elif name == 'StackStatus':
44 self.stack_status = value
45 elif name == "StackStatusReason":
46 self.stack_status_reason = value
47 elif name == "TimeoutInMinutes":
48 self.timeout_in_minutes = int(value)
49 elif name == "member":
50 pass
51 else:
52 setattr(self, name, value)
53
54 def delete(self):
55 return self.connection.delete_stack(stack_name_or_id=self.stack_id)
56
57 def describe_events(self, next_token=None):
58 return self.connection.describe_stack_events(
59 stack_name_or_id=self.stack_id,
60 next_token=next_token
61 )
62
63 def describe_resource(self, logical_resource_id):
64 return self.connection.describe_stack_resource(
65 stack_name_or_id=self.stack_id,
66 logical_resource_id=logical_resource_id
67 )
68
69 def describe_resources(self, logical_resource_id=None,
70 physical_resource_id=None):
71 return self.connection.describe_stack_resources(
72 stack_name_or_id=self.stack_id,
73 logical_resource_id=logical_resource_id,
74 physical_resource_id=physical_resource_id
75 )
76
77 def list_resources(self, next_token=None):
78 return self.connection.list_stack_resources(
79 stack_name_or_id=self.stack_id,
80 next_token=next_token
81 )
82
83 def update(self):
84 rs = self.connection.describe_stacks(self.stack_id)
85 if len(rs) == 1 and rs[0].stack_id == self.stack_id:
86 self.__dict__.update(rs[0].__dict__)
87 else:
88 raise ValueError("%s is not a valid Stack ID or Name" %
89 self.stack_id)
90
91 def get_template(self):
92 return self.connection.get_template(stack_name_or_id=self.stack_id)
93
94 class StackSummary:
95 def __init__(self, connection=None):
96 self.connection = connection
97 self.stack_id = None
98 self.stack_status = None
99 self.stack_name = None
100 self.creation_time = None
101 self.deletion_time = None
102 self.template_description = None
103
104 def startElement(self, name, attrs, connection):
105 return None
106
107 def endElement(self, name, value, connection):
108 if name == 'StackId':
109 self.stack_id = value
110 elif name == 'StackStatus':
111 self.stack_status = value
112 elif name == 'StackName':
113 self.stack_name = value
114 elif name == 'CreationTime':
115 self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
116 elif name == "DeletionTime":
117 self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
118 elif name == 'TemplateDescription':
119 self.template_description = value
120 elif name == "member":
121 pass
122 else:
123 setattr(self, name, value)
124
125 class Parameter:
126 def __init__(self, connection=None):
127 self.connection = None
128 self.key = None
129 self.value = None
130
131 def startElement(self, name, attrs, connection):
132 return None
133
134 def endElement(self, name, value, connection):
135 if name == "ParameterKey":
136 self.key = value
137 elif name == "ParameterValue":
138 self.value = value
139 else:
140 setattr(self, name, value)
141
142 def __repr__(self):
143 return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
144
145 class Output:
146 def __init__(self, connection=None):
147 self.connection = connection
148 self.description = None
149 self.key = None
150 self.value = None
151
152 def startElement(self, name, attrs, connection):
153 return None
154
155 def endElement(self, name, value, connection):
156 if name == "Description":
157 self.description = value
158 elif name == "OutputKey":
159 self.key = value
160 elif name == "OutputValue":
161 self.value = value
162 else:
163 setattr(self, name, value)
164
165 def __repr__(self):
166 return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
167
168 class StackResource:
169 def __init__(self, connection=None):
170 self.connection = connection
171 self.description = None
172 self.logical_resource_id = None
173 self.physical_resource_id = None
174 self.resource_status = None
175 self.resource_status_reason = None
176 self.resource_type = None
177 self.stack_id = None
178 self.stack_name = None
179 self.timestamp = None
180
181 def startElement(self, name, attrs, connection):
182 return None
183
184 def endElement(self, name, value, connection):
185 if name == "Description":
186 self.description = value
187 elif name == "LogicalResourceId":
188 self.logical_resource_id = value
189 elif name == "PhysicalResourceId":
190 self.physical_resource_id = value
191 elif name == "ResourceStatus":
192 self.resource_status = value
193 elif name == "ResourceStatusReason":
194 self.resource_status_reason = value
195 elif name == "ResourceType":
196 self.resource_type = value
197 elif name == "StackId":
198 self.stack_id = value
199 elif name == "StackName":
200 self.stack_name = value
201 elif name == "Timestamp":
202 self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
203 else:
204 setattr(self, name, value)
205
206 def __repr__(self):
207 return "StackResource:%s (%s)" % (self.logical_resource_id,
208 self.resource_type)
209
210 class StackResourceSummary:
211 def __init__(self, connection=None):
212 self.connection = connection
213 self.last_updated_timestamp = None
214 self.logical_resource_id = None
215 self.physical_resource_id = None
216 self.resource_status = None
217 self.resource_status_reason = None
218 self.resource_type = None
219
220 def startElement(self, name, attrs, connection):
221 return None
222
223 def endElement(self, name, value, connection):
224 if name == "LastUpdatedTimestamp":
225 self.last_updated_timestampe = datetime.strptime(value,
226 '%Y-%m-%dT%H:%M:%SZ')
227 elif name == "LogicalResourceId":
228 self.logical_resource_id = value
229 elif name == "PhysicalResourceId":
230 self.physical_resource_id = value
231 elif name == "ResourceStatus":
232 self.resource_status = value
233 elif name == "ResourceStatusReason":
234 self.resource_status_reason = value
235 elif name == "ResourceType":
236 self.resource_type = value
237 else:
238 setattr(self, name, value)
239
240 def __repr__(self):
241 return "StackResourceSummary:%s (%s)" % (self.logical_resource_id,
242 self.resource_type)
243
244 class StackEvent:
245 valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
246 "DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
247 def __init__(self, connection=None):
248 self.connection = connection
249 self.event_id = None
250 self.logical_resource_id = None
251 self.physical_resource_id = None
252 self.resource_properties = None
253 self.resource_status = None
254 self.resource_status_reason = None
255 self.resource_type = None
256 self.stack_id = None
257 self.stack_name = None
258 self.timestamp = None
259
260 def startElement(self, name, attrs, connection):
261 return None
262
263 def endElement(self, name, value, connection):
264 if name == "EventId":
265 self.event_id = value
266 elif name == "LogicalResourceId":
267 self.logical_resource_id = value
268 elif name == "PhysicalResourceId":
269 self.physical_resource_id = value
270 elif name == "ResourceProperties":
271 self.resource_properties = value
272 elif name == "ResourceStatus":
273 self.resource_status = value
274 elif name == "ResourceStatusReason":
275 self.resource_status_reason = value
276 elif name == "ResourceType":
277 self.resource_type = value
278 elif name == "StackId":
279 self.stack_id = value
280 elif name == "StackName":
281 self.stack_name = value
282 elif name == "Timestamp":
283 self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
284 else:
285 setattr(self, name, value)
286
287 def __repr__(self):
288 return "StackEvent %s %s %s" % (self.resource_type,
289 self.logical_resource_id, self.resource_status)
290
[end of boto/cloudformation/stack.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py
--- a/boto/cloudformation/stack.py
+++ b/boto/cloudformation/stack.py
@@ -11,6 +11,7 @@
self.notification_arns = []
self.outputs = []
self.parameters = []
+ self.capabilities = []
self.stack_id = None
self.stack_status = None
self.stack_name = None
@@ -24,6 +25,9 @@
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
+ elif name == "Capabilities":
+ self.capabilities = ResultSet([('member', Capability)])
+ return self.capabilities
else:
return None
@@ -165,6 +169,20 @@
def __repr__(self):
return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
+class Capability:
+ def __init__(self, connection=None):
+ self.connection = None
+ self.value = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ self.value = value
+
+ def __repr__(self):
+ return "Capability:\"%s\"" % (self.value)
+
class StackResource:
def __init__(self, connection=None):
self.connection = connection
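As a quick sanity check of the fix, a minimal sketch that drives the SAX-style callbacks by hand; it assumes a boto checkout with this patch applied and is not part of the original change:

```python
# Drive the parser callbacks directly to confirm that <Capabilities> is now
# delegated to its own ResultSet rather than leaking into the Stack object.
from boto.cloudformation.stack import Stack

stack = Stack()
caps = stack.startElement("Capabilities", {}, None)
assert caps is stack.capabilities          # handled by a dedicated ResultSet

outputs = stack.startElement("Outputs", {}, None)
assert outputs is stack.outputs            # Outputs still parse independently
```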
|
{"golden_diff": "diff --git a/boto/cloudformation/stack.py b/boto/cloudformation/stack.py\n--- a/boto/cloudformation/stack.py\n+++ b/boto/cloudformation/stack.py\n@@ -11,6 +11,7 @@\n self.notification_arns = []\n self.outputs = []\n self.parameters = []\n+ self.capabilities = []\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n@@ -24,6 +25,9 @@\n elif name == \"Outputs\":\n self.outputs = ResultSet([('member', Output)])\n return self.outputs\n+ elif name == \"Capabilities\":\n+ self.capabilities = ResultSet([('member', Capability)])\n+ return self.capabilities\n else:\n return None\n \n@@ -165,6 +169,20 @@\n def __repr__(self):\n return \"Output:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n \n+class Capability:\n+ def __init__(self, connection=None):\n+ self.connection = None\n+ self.value = None\n+\n+ def startElement(self, name, attrs, connection):\n+ return None\n+\n+ def endElement(self, name, value, connection):\n+ self.value = value\n+\n+ def __repr__(self):\n+ return \"Capability:\\\"%s\\\"\" % (self.value)\n+\n class StackResource:\n def __init__(self, connection=None):\n self.connection = connection\n", "issue": "CloudFormation Bug in Parsing Describe Stacks Response\nCurrently the boto.cloudformation.CloudFormationConnection.describe__stacks() call does _not_ correctly parse the Capabilities section of the XML Response. \n\nThis makes the parser assume the Capabilities section is part of the main Stack element. This means that, for example, if a Stack does contain a Capabilities section and an Outputs section, the Outputs section will not correctly parse either, and instead, the Outputs member element will get parsed as top level Stack elements.\n\nThis is an issue with the 2.3.0 release.\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom boto.resultset import ResultSet\n\nclass Stack:\n def __init__(self, connection=None):\n self.connection = connection\n self.creation_time = None\n self.description = None\n self.disable_rollback = None\n self.notification_arns = []\n self.outputs = []\n self.parameters = []\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n self.stack_name_reason = None\n self.timeout_in_minutes = None\n\n def startElement(self, name, attrs, connection):\n if name == \"Parameters\":\n self.parameters = ResultSet([('member', Parameter)])\n return self.parameters\n elif name == \"Outputs\":\n self.outputs = ResultSet([('member', Output)])\n return self.outputs\n else:\n return None\n\n def endElement(self, name, value, connection):\n if name == 'CreationTime':\n self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"Description\":\n self.description = value\n elif name == \"DisableRollback\":\n self.disable_rollback = bool(value)\n elif name == \"NotificationARNs\":\n self.notification_arns = value\n elif name == 'StackId':\n self.stack_id = value\n elif name == 'StackName':\n self.stack_name = value\n elif name == 'StackStatus':\n self.stack_status = value\n elif name == \"StackStatusReason\":\n self.stack_status_reason = value\n elif name == \"TimeoutInMinutes\":\n self.timeout_in_minutes = int(value)\n elif name == \"member\":\n pass\n else:\n setattr(self, name, value)\n\n def delete(self):\n return self.connection.delete_stack(stack_name_or_id=self.stack_id)\n\n def describe_events(self, next_token=None):\n return self.connection.describe_stack_events(\n stack_name_or_id=self.stack_id,\n next_token=next_token\n )\n\n def describe_resource(self, 
logical_resource_id):\n return self.connection.describe_stack_resource(\n stack_name_or_id=self.stack_id,\n logical_resource_id=logical_resource_id\n )\n\n def describe_resources(self, logical_resource_id=None,\n physical_resource_id=None):\n return self.connection.describe_stack_resources(\n stack_name_or_id=self.stack_id,\n logical_resource_id=logical_resource_id,\n physical_resource_id=physical_resource_id\n )\n\n def list_resources(self, next_token=None):\n return self.connection.list_stack_resources(\n stack_name_or_id=self.stack_id,\n next_token=next_token\n )\n\n def update(self):\n rs = self.connection.describe_stacks(self.stack_id)\n if len(rs) == 1 and rs[0].stack_id == self.stack_id:\n self.__dict__.update(rs[0].__dict__)\n else:\n raise ValueError(\"%s is not a valid Stack ID or Name\" %\n self.stack_id)\n\n def get_template(self):\n return self.connection.get_template(stack_name_or_id=self.stack_id)\n\nclass StackSummary:\n def __init__(self, connection=None):\n self.connection = connection\n self.stack_id = None\n self.stack_status = None\n self.stack_name = None\n self.creation_time = None\n self.deletion_time = None\n self.template_description = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == 'StackId':\n self.stack_id = value\n elif name == 'StackStatus':\n self.stack_status = value\n elif name == 'StackName':\n self.stack_name = value\n elif name == 'CreationTime':\n self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"DeletionTime\":\n self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n elif name == 'TemplateDescription':\n self.template_description = value\n elif name == \"member\":\n pass\n else:\n setattr(self, name, value)\n\nclass Parameter:\n def __init__(self, connection=None):\n self.connection = None\n self.key = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"ParameterKey\":\n self.key = value\n elif name == \"ParameterValue\":\n self.value = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"Parameter:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n\nclass Output:\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.key = None\n self.value = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n elif name == \"OutputKey\":\n self.key = value\n elif name == \"OutputValue\":\n self.value = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"Output:\\\"%s\\\"=\\\"%s\\\"\" % (self.key, self.value)\n\nclass StackResource:\n def __init__(self, connection=None):\n self.connection = connection\n self.description = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n self.stack_id = None\n self.stack_name = None\n self.timestamp = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"Description\":\n self.description = value\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == 
\"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n elif name == \"StackId\":\n self.stack_id = value\n elif name == \"StackName\":\n self.stack_name = value\n elif name == \"Timestamp\":\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackResource:%s (%s)\" % (self.logical_resource_id,\n self.resource_type)\n\nclass StackResourceSummary:\n def __init__(self, connection=None):\n self.connection = connection\n self.last_updated_timestamp = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"LastUpdatedTimestamp\":\n self.last_updated_timestampe = datetime.strptime(value,\n '%Y-%m-%dT%H:%M:%SZ')\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackResourceSummary:%s (%s)\" % (self.logical_resource_id,\n self.resource_type)\n\nclass StackEvent:\n valid_states = (\"CREATE_IN_PROGRESS\", \"CREATE_FAILED\", \"CREATE_COMPLETE\",\n \"DELETE_IN_PROGRESS\", \"DELETE_FAILED\", \"DELETE_COMPLETE\")\n def __init__(self, connection=None):\n self.connection = connection\n self.event_id = None\n self.logical_resource_id = None\n self.physical_resource_id = None\n self.resource_properties = None\n self.resource_status = None\n self.resource_status_reason = None\n self.resource_type = None\n self.stack_id = None\n self.stack_name = None\n self.timestamp = None\n\n def startElement(self, name, attrs, connection):\n return None\n\n def endElement(self, name, value, connection):\n if name == \"EventId\":\n self.event_id = value\n elif name == \"LogicalResourceId\":\n self.logical_resource_id = value\n elif name == \"PhysicalResourceId\":\n self.physical_resource_id = value\n elif name == \"ResourceProperties\":\n self.resource_properties = value\n elif name == \"ResourceStatus\":\n self.resource_status = value\n elif name == \"ResourceStatusReason\":\n self.resource_status_reason = value\n elif name == \"ResourceType\":\n self.resource_type = value\n elif name == \"StackId\":\n self.stack_id = value\n elif name == \"StackName\":\n self.stack_name = value\n elif name == \"Timestamp\":\n self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')\n else:\n setattr(self, name, value)\n\n def __repr__(self):\n return \"StackEvent %s %s %s\" % (self.resource_type,\n self.logical_resource_id, self.resource_status)\n", "path": "boto/cloudformation/stack.py"}]}
| 3,505 | 321 |
gh_patches_debug_12535
|
rasdani/github-patches
|
git_diff
|
conda__conda-6918
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deprecate 'conda help' in favor of --help CLI flag
With the install on Windows 10:
platform : win-64 conda version : 4.3.30
conda is private : False conda-env version : 4.3.30
conda-build version : 3.0.22 python version : 3.6.3.final.0
requests version : 2.18.4 root environment : C:\Anaconda3 (writable)
the command `conda help install` (or other similar commands, say `conda help create`, etc.) produces the message
`C:\Anaconda3\python.exe: can't open file 'C:\Anaconda3\Scripts\conda': [Errno 2] No such file or directory`
This syntax works under Linux (Ubuntu), but of course there is no plain "conda" file in the Windows Anaconda Scripts directory.
</issue>
<code>
[start of conda/cli/main_help.py]
1 # (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io
2 # All Rights Reserved
3 #
4 # conda is distributed under the terms of the BSD 3-clause license.
5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
6 from __future__ import absolute_import, division, print_function, unicode_literals
7
8 import subprocess
9 import sys
10
11
12 def execute(args, parser):
13 if not args.command:
14 parser.print_help()
15 return
16 subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover
17
[end of conda/cli/main_help.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py
--- a/conda/cli/main_help.py
+++ b/conda/cli/main_help.py
@@ -5,7 +5,6 @@
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function, unicode_literals
-import subprocess
import sys
@@ -13,4 +12,7 @@
if not args.command:
parser.print_help()
return
- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover
+ print("ERROR: The 'conda help' command is deprecated.\n"
+ "Instead use 'conda %s --help'." % args.command,
+ file=sys.stderr)
+ return 1
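With the patch above applied, the expected behaviour would be roughly the following (output reconstructed from the patched code, not captured from a real session):

```
$ conda help install
ERROR: The 'conda help' command is deprecated.
Instead use 'conda install --help'.
```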
|
{"golden_diff": "diff --git a/conda/cli/main_help.py b/conda/cli/main_help.py\n--- a/conda/cli/main_help.py\n+++ b/conda/cli/main_help.py\n@@ -5,7 +5,6 @@\n # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import subprocess\n import sys\n \n \n@@ -13,4 +12,7 @@\n if not args.command:\n parser.print_help()\n return\n- subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n+ print(\"ERROR: The 'conda help' command is deprecated.\\n\"\n+ \"Instead use 'conda %s --help'.\" % args.command,\n+ file=sys.stderr)\n+ return 1\n", "issue": "deprecate 'conda help' in favor of --help CLI flag\nWith the install on Windows 10:\r\n\r\n _platform : win-64 conda version : 4.3.30\r\n conda is private : False conda-env version : 4.3.30\r\n conda-build version : 3.0.22 python version : 3.6.3.final.0\r\n requests version : 2.18.4 root environment : C:\\Anaconda3 (writable)_\r\n\r\nthe command: `conda help install` (or other similar, say, conda help create, etc.) produces the message\r\n\r\n`C:\\Anaconda3\\python.exe: can't open file 'C:\\Anaconda3\\Scripts\\conda': [Errno 2] No such file or directory`\r\n\r\nThis syntax works under Linux (Ubuntu), but of course, there is no just \"conda\" file in Windows Anaconda Scripts.\n", "before_files": [{"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport subprocess\nimport sys\n\n\ndef execute(args, parser):\n if not args.command:\n parser.print_help()\n return\n subprocess.call([sys.executable, sys.argv[0], args.command, '-h']) # pragma: no cover\n", "path": "conda/cli/main_help.py"}]}
| 910 | 184 |
gh_patches_debug_2373
|
rasdani/github-patches
|
git_diff
|
netbox-community__netbox-14828
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only one event rule triggers for a content type
### Deployment Type
Self-hosted
### NetBox Version
v3.7.0
### Python Version
3.11
### Steps to Reproduce
Setup:
1. Create a webhook: Name = Test, URL = http://127.0.0.1:9000
2. Create event rule 1:
- Name = Rule 1
- Content types = Prefix
- select Updates
- Condition = `{ "and": [{"attr": "status.value", "value": "deprecated"}]}`
- Action type = Webhook
- Webhook = Test
3. Create event rule 2:
- Name = Rule 2
- Content types = Prefix
- select Updates
- Condition = `{ "and": [{"attr": "status.value", "value": "active"}]}`
- Action type = Webhook
- Webhook = Test
4. Start webhook receiver (`python manage.py webhook_receiver`), or observe the webhooks happen in some other way
(Sorry, couldn't figure out the correct condition syntax without using the "and" operator)
Demo:
5. Create a prefix, like 10.1.2.0/24, status = Active (the defaults)
6. Edit the prefix: change its status to **Deprecated**
7. Edit the prefix again: change its status to **Active**
### Expected Behavior
Webhook is run **twice**: first when prefix status was changed to **Deprecated** (step 6), second when changed to **Active** again (step 7).
### Observed Behavior
Webhook is run **only once**: in step 6, but not in step 7.

Additionally: if Rule 1 is disabled and steps 6 and 7 are executed again, the webhook is now run in step 7.

It looks like only the first enabled event rule is run for a given object type.
</issue>
<code>
[start of netbox/extras/events.py]
1 import logging
2
3 from django.conf import settings
4 from django.contrib.auth import get_user_model
5 from django.contrib.contenttypes.models import ContentType
6 from django.core.exceptions import ObjectDoesNotExist
7 from django.utils import timezone
8 from django.utils.module_loading import import_string
9 from django_rq import get_queue
10
11 from core.models import Job
12 from netbox.config import get_config
13 from netbox.constants import RQ_QUEUE_DEFAULT
14 from netbox.registry import registry
15 from utilities.api import get_serializer_for_model
16 from utilities.rqworker import get_rq_retry
17 from utilities.utils import serialize_object
18 from .choices import *
19 from .models import EventRule, ScriptModule
20
21 logger = logging.getLogger('netbox.events_processor')
22
23
24 def serialize_for_event(instance):
25 """
26 Return a serialized representation of the given instance suitable for use in a queued event.
27 """
28 serializer_class = get_serializer_for_model(instance.__class__)
29 serializer_context = {
30 'request': None,
31 }
32 serializer = serializer_class(instance, context=serializer_context)
33
34 return serializer.data
35
36
37 def get_snapshots(instance, action):
38 snapshots = {
39 'prechange': getattr(instance, '_prechange_snapshot', None),
40 'postchange': None,
41 }
42 if action != ObjectChangeActionChoices.ACTION_DELETE:
43 # Use model's serialize_object() method if defined; fall back to serialize_object() utility function
44 if hasattr(instance, 'serialize_object'):
45 snapshots['postchange'] = instance.serialize_object()
46 else:
47 snapshots['postchange'] = serialize_object(instance)
48
49 return snapshots
50
51
52 def enqueue_object(queue, instance, user, request_id, action):
53 """
54 Enqueue a serialized representation of a created/updated/deleted object for the processing of
55 events once the request has completed.
56 """
57 # Determine whether this type of object supports event rules
58 app_label = instance._meta.app_label
59 model_name = instance._meta.model_name
60 if model_name not in registry['model_features']['event_rules'].get(app_label, []):
61 return
62
63 queue.append({
64 'content_type': ContentType.objects.get_for_model(instance),
65 'object_id': instance.pk,
66 'event': action,
67 'data': serialize_for_event(instance),
68 'snapshots': get_snapshots(instance, action),
69 'username': user.username,
70 'request_id': request_id
71 })
72
73
74 def process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None):
75 try:
76 user = get_user_model().objects.get(username=username)
77 except ObjectDoesNotExist:
78 user = None
79
80 for event_rule in event_rules:
81
82 # Evaluate event rule conditions (if any)
83 if not event_rule.eval_conditions(data):
84 return
85
86 # Webhooks
87 if event_rule.action_type == EventRuleActionChoices.WEBHOOK:
88
89 # Select the appropriate RQ queue
90 queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT)
91 rq_queue = get_queue(queue_name)
92
93 # Compile the task parameters
94 params = {
95 "event_rule": event_rule,
96 "model_name": model_name,
97 "event": event,
98 "data": data,
99 "snapshots": snapshots,
100 "timestamp": timezone.now().isoformat(),
101 "username": username,
102 "retry": get_rq_retry()
103 }
104 if snapshots:
105 params["snapshots"] = snapshots
106 if request_id:
107 params["request_id"] = request_id
108
109 # Enqueue the task
110 rq_queue.enqueue(
111 "extras.webhooks.send_webhook",
112 **params
113 )
114
115 # Scripts
116 elif event_rule.action_type == EventRuleActionChoices.SCRIPT:
117 # Resolve the script from action parameters
118 script_module = event_rule.action_object
119 script_name = event_rule.action_parameters['script_name']
120 script = script_module.scripts[script_name]()
121
122 # Enqueue a Job to record the script's execution
123 Job.enqueue(
124 "extras.scripts.run_script",
125 instance=script_module,
126 name=script.class_name,
127 user=user,
128 data=data
129 )
130
131 else:
132 raise ValueError(f"Unknown action type for an event rule: {event_rule.action_type}")
133
134
135 def process_event_queue(events):
136 """
137 Flush a list of object representation to RQ for EventRule processing.
138 """
139 events_cache = {
140 'type_create': {},
141 'type_update': {},
142 'type_delete': {},
143 }
144
145 for data in events:
146 action_flag = {
147 ObjectChangeActionChoices.ACTION_CREATE: 'type_create',
148 ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',
149 ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',
150 }[data['event']]
151 content_type = data['content_type']
152
153 # Cache applicable Event Rules
154 if content_type not in events_cache[action_flag]:
155 events_cache[action_flag][content_type] = EventRule.objects.filter(
156 **{action_flag: True},
157 content_types=content_type,
158 enabled=True
159 )
160 event_rules = events_cache[action_flag][content_type]
161
162 process_event_rules(
163 event_rules, content_type.model, data['event'], data['data'], data['username'],
164 snapshots=data['snapshots'], request_id=data['request_id']
165 )
166
167
168 def flush_events(queue):
169 """
170 Flush a list of object representation to RQ for webhook processing.
171 """
172 if queue:
173 for name in settings.EVENTS_PIPELINE:
174 try:
175 func = import_string(name)
176 func(queue)
177 except Exception as e:
178 logger.error(f"Cannot import events pipeline {name} error: {e}")
179
[end of netbox/extras/events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netbox/extras/events.py b/netbox/extras/events.py
--- a/netbox/extras/events.py
+++ b/netbox/extras/events.py
@@ -81,7 +81,7 @@
# Evaluate event rule conditions (if any)
if not event_rule.eval_conditions(data):
- return
+ continue
# Webhooks
if event_rule.action_type == EventRuleActionChoices.WEBHOOK:
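To see the control-flow problem in isolation, here is a minimal standalone sketch (invented names, not NetBox code): with `return`, the first rule whose condition fails aborts the whole loop, so later rules never get a chance; with `continue`, only that rule is skipped.

```python
def process_rules(rules, data):
    fired = []
    for rule in rules:
        if not rule["condition"](data):
            continue  # the buggy version had `return` here, ending the loop early
        fired.append(rule["name"])
    return fired

rules = [
    {"name": "Rule 1", "condition": lambda d: d["status"] == "deprecated"},
    {"name": "Rule 2", "condition": lambda d: d["status"] == "active"},
]

# With `continue`, Rule 2 still fires for an "active" prefix even though Rule 1's
# condition fails first; with `return`, this call would have produced [].
assert process_rules(rules, {"status": "active"}) == ["Rule 2"]
```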
|
{"golden_diff": "diff --git a/netbox/extras/events.py b/netbox/extras/events.py\n--- a/netbox/extras/events.py\n+++ b/netbox/extras/events.py\n@@ -81,7 +81,7 @@\n \n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n- return\n+ continue\n \n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n", "issue": "Only one event rule triggers for a content type\n### Deployment Type\r\n\r\nSelf-hosted\r\n\r\n### NetBox Version\r\n\r\nv3.7.0\r\n\r\n### Python Version\r\n\r\n3.11\r\n\r\n### Steps to Reproduce\r\n\r\nSetup:\r\n\r\n1. Create a webhook: Name = Test, URL = http://127.0.0.1:9000\r\n2. Create event rule 1:\r\n - Name = Rule 1\r\n - Content types = Prefix\r\n - select Updates\r\n - Condition = `{ \"and\": [{\"attr\": \"status.value\", \"value\": \"deprecated\"}]}`\r\n - Action type = Webhook\r\n - Webhook = Test\r\n3. Create event rule 2:\r\n - Name = Rule 2\r\n - Content types = Prefix\r\n - select Updates\r\n - Condition = `{ \"and\": [{\"attr\": \"status.value\", \"value\": \"active\"}]}`\r\n - Action type = Webhook\r\n - Webhook = Test\r\n4. Start webhook receiver (`python manage.py webhook_receiver`), or observe the webhooks happen in some other way\r\n\r\n(Sorry, couldn't figure out the correct condition syntax without using the \"and\" operator)\r\n\r\nDemo:\r\n\r\n5. Create a prefix, like 10.1.2.0/24, status = Active (the defaults)\r\n6. Edit the prefix: change its status to **Deprecated**\r\n7. Edit the prefix again: change its status to **Active**\r\n\r\n\r\n### Expected Behavior\r\n\r\nWebhook is run **twice**: first when prefix status was changed to **Deprecated** (step 6), second when changed to **Active** again (step 7).\r\n\r\n### Observed Behavior\r\n\r\nWebhook is run **only once**, that's in step 6, but not in step 7.\r\n\r\nAdditionally: If Rule 1 is disabled, and steps 6 and 7 are executed again, now the webhook is run in step 7.\r\n\r\nLooks like only the first enabled event rule is run for a specific object type.\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils import timezone\nfrom django.utils.module_loading import import_string\nfrom django_rq import get_queue\n\nfrom core.models import Job\nfrom netbox.config import get_config\nfrom netbox.constants import RQ_QUEUE_DEFAULT\nfrom netbox.registry import registry\nfrom utilities.api import get_serializer_for_model\nfrom utilities.rqworker import get_rq_retry\nfrom utilities.utils import serialize_object\nfrom .choices import *\nfrom .models import EventRule, ScriptModule\n\nlogger = logging.getLogger('netbox.events_processor')\n\n\ndef serialize_for_event(instance):\n \"\"\"\n Return a serialized representation of the given instance suitable for use in a queued event.\n \"\"\"\n serializer_class = get_serializer_for_model(instance.__class__)\n serializer_context = {\n 'request': None,\n }\n serializer = serializer_class(instance, context=serializer_context)\n\n return serializer.data\n\n\ndef get_snapshots(instance, action):\n snapshots = {\n 'prechange': getattr(instance, '_prechange_snapshot', None),\n 'postchange': None,\n }\n if action != ObjectChangeActionChoices.ACTION_DELETE:\n # Use model's serialize_object() method if defined; fall back to serialize_object() utility function\n if hasattr(instance, 'serialize_object'):\n 
snapshots['postchange'] = instance.serialize_object()\n else:\n snapshots['postchange'] = serialize_object(instance)\n\n return snapshots\n\n\ndef enqueue_object(queue, instance, user, request_id, action):\n \"\"\"\n Enqueue a serialized representation of a created/updated/deleted object for the processing of\n events once the request has completed.\n \"\"\"\n # Determine whether this type of object supports event rules\n app_label = instance._meta.app_label\n model_name = instance._meta.model_name\n if model_name not in registry['model_features']['event_rules'].get(app_label, []):\n return\n\n queue.append({\n 'content_type': ContentType.objects.get_for_model(instance),\n 'object_id': instance.pk,\n 'event': action,\n 'data': serialize_for_event(instance),\n 'snapshots': get_snapshots(instance, action),\n 'username': user.username,\n 'request_id': request_id\n })\n\n\ndef process_event_rules(event_rules, model_name, event, data, username, snapshots=None, request_id=None):\n try:\n user = get_user_model().objects.get(username=username)\n except ObjectDoesNotExist:\n user = None\n\n for event_rule in event_rules:\n\n # Evaluate event rule conditions (if any)\n if not event_rule.eval_conditions(data):\n return\n\n # Webhooks\n if event_rule.action_type == EventRuleActionChoices.WEBHOOK:\n\n # Select the appropriate RQ queue\n queue_name = get_config().QUEUE_MAPPINGS.get('webhook', RQ_QUEUE_DEFAULT)\n rq_queue = get_queue(queue_name)\n\n # Compile the task parameters\n params = {\n \"event_rule\": event_rule,\n \"model_name\": model_name,\n \"event\": event,\n \"data\": data,\n \"snapshots\": snapshots,\n \"timestamp\": timezone.now().isoformat(),\n \"username\": username,\n \"retry\": get_rq_retry()\n }\n if snapshots:\n params[\"snapshots\"] = snapshots\n if request_id:\n params[\"request_id\"] = request_id\n\n # Enqueue the task\n rq_queue.enqueue(\n \"extras.webhooks.send_webhook\",\n **params\n )\n\n # Scripts\n elif event_rule.action_type == EventRuleActionChoices.SCRIPT:\n # Resolve the script from action parameters\n script_module = event_rule.action_object\n script_name = event_rule.action_parameters['script_name']\n script = script_module.scripts[script_name]()\n\n # Enqueue a Job to record the script's execution\n Job.enqueue(\n \"extras.scripts.run_script\",\n instance=script_module,\n name=script.class_name,\n user=user,\n data=data\n )\n\n else:\n raise ValueError(f\"Unknown action type for an event rule: {event_rule.action_type}\")\n\n\ndef process_event_queue(events):\n \"\"\"\n Flush a list of object representation to RQ for EventRule processing.\n \"\"\"\n events_cache = {\n 'type_create': {},\n 'type_update': {},\n 'type_delete': {},\n }\n\n for data in events:\n action_flag = {\n ObjectChangeActionChoices.ACTION_CREATE: 'type_create',\n ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',\n ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',\n }[data['event']]\n content_type = data['content_type']\n\n # Cache applicable Event Rules\n if content_type not in events_cache[action_flag]:\n events_cache[action_flag][content_type] = EventRule.objects.filter(\n **{action_flag: True},\n content_types=content_type,\n enabled=True\n )\n event_rules = events_cache[action_flag][content_type]\n\n process_event_rules(\n event_rules, content_type.model, data['event'], data['data'], data['username'],\n snapshots=data['snapshots'], request_id=data['request_id']\n )\n\n\ndef flush_events(queue):\n \"\"\"\n Flush a list of object representation to RQ for webhook processing.\n \"\"\"\n 
if queue:\n for name in settings.EVENTS_PIPELINE:\n try:\n func = import_string(name)\n func(queue)\n except Exception as e:\n logger.error(f\"Cannot import events pipeline {name} error: {e}\")\n", "path": "netbox/extras/events.py"}]}
| 2,590 | 96 |
gh_patches_debug_19502
|
rasdani/github-patches
|
git_diff
|
dask__distributed-7404
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Zstandard compression is not thread safe
**Describe the issue**:
In `compression.py` we create singleton `ZstdCompressor` and `ZstdDecompressor` objects. The docs for the Zstandard library [state](https://github.com/indygreg/python-zstandard/blob/main/zstandard/backend_cffi.py#L1764):
> Unless specified otherwise, assume that no two methods of ``ZstdCompressor`` instances can be called from multiple Python threads simultaneously. In other words, assume instances are not thread safe unless stated otherwise.
**Minimal Complete Verifiable Example**:
This can trivially be reproduced by introducing threading to the `test_maybe_compress` test in `test_protocol.py`
Some errors that I've reproduced range from compression/decompression failure, to seg faults. Examples for searchability:
```
Fatal Python error: Segmentation fault
Thread 0x000000030df25000 (most recent call first):
File "/Users/dwragge/code/distributed/distributed/protocol/compression.py", line 92 in zstd_compress
File "/Users/dwragge/code/distributed/distributed/protocol/compression.py", line 186 in maybe_compress
File "/Users/dwragge/code/distributed/distributed/protocol/tests/test_protocol.py", line 148 in test_compress_decompress
File "/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py", line 870 in run
File "/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py", line 932 in _bootstrap_inner
File "/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py", line 890 in _bootstrap
```
- `zstd.ZstdError: cannot compress: Src size is incorrect`
- `zstd.ZstdError: cannot compress: Operation not authorized at current processing stage`
- `zstd.ZstdError: decompression error: Corrupted block detected`
- `zstd.ZstdError: error determining content size from frame header`
- `zstd.ZstdError: decompression error: Unknown frame descriptor`
- `zstd.ZstdError: decompression error: Destination buffer is too small`
This manifests in production as errors such as:
- `distributed.core - ERROR - Exception while handling op get_data`
- `distributed.worker - ERROR - failed during get data with tcp://a.b.c.d:39195 -> tcp://d.e.f.g:43787`
- `distributed.protocol.core - CRITICAL - Failed to deserialize`
Amongst others, including clear lost workers, and `CommClosedError`s.
**Anything else we need to know?**:
The fix is trivial (instantiate a new Zstd(De)Compressor instance for each compression). I will raise a PR with the fix plus tests.
</issue>
<code>
[start of distributed/protocol/compression.py]
1 """
2 Record known compressors
3
4 Includes utilities for determining whether or not to compress
5 """
6 from __future__ import annotations
7
8 import logging
9 from collections.abc import Callable
10 from contextlib import suppress
11 from random import randint
12 from typing import Literal
13
14 from packaging.version import parse as parse_version
15 from tlz import identity
16
17 import dask
18
19 from distributed.utils import ensure_memoryview, nbytes, no_default
20
21 compressions: dict[
22 str | None | Literal[False],
23 dict[Literal["compress", "decompress"], Callable[[bytes], bytes]],
24 ] = {None: {"compress": identity, "decompress": identity}}
25
26 compressions[False] = compressions[None] # alias
27
28
29 default_compression = None
30
31
32 logger = logging.getLogger(__name__)
33
34
35 with suppress(ImportError):
36 import zlib
37
38 compressions["zlib"] = {"compress": zlib.compress, "decompress": zlib.decompress}
39
40 with suppress(ImportError):
41 import snappy
42
43 # In python-snappy 0.5.3, support for the Python Buffer Protocol was added.
44 # This is needed to handle other objects (like `memoryview`s) without
45 # copying to `bytes` first.
46 #
47 # Note: `snappy.__version__` doesn't exist in a release yet.
48 # So do a little test that will fail if snappy is not 0.5.3 or later.
49 try:
50 snappy.compress(memoryview(b""))
51 except TypeError:
52 raise ImportError("Need snappy >= 0.5.3")
53
54 compressions["snappy"] = {
55 "compress": snappy.compress,
56 "decompress": snappy.decompress,
57 }
58 default_compression = "snappy"
59
60 with suppress(ImportError):
61 import lz4
62
63 # Required to use `lz4.block` APIs and Python Buffer Protocol support.
64 if parse_version(lz4.__version__) < parse_version("0.23.1"):
65 raise ImportError("Need lz4 >= 0.23.1")
66
67 from lz4.block import compress as lz4_compress
68 from lz4.block import decompress as lz4_decompress
69
70 compressions["lz4"] = {
71 "compress": lz4_compress,
72 "decompress": lz4_decompress,
73 }
74 default_compression = "lz4"
75
76
77 with suppress(ImportError):
78 import zstandard
79
80 # Required for Python Buffer Protocol support.
81 if parse_version(zstandard.__version__) < parse_version("0.9.0"):
82 raise ImportError("Need zstandard >= 0.9.0")
83
84 zstd_compressor = zstandard.ZstdCompressor(
85 level=dask.config.get("distributed.comm.zstd.level"),
86 threads=dask.config.get("distributed.comm.zstd.threads"),
87 )
88
89 zstd_decompressor = zstandard.ZstdDecompressor()
90
91 def zstd_compress(data):
92 return zstd_compressor.compress(data)
93
94 def zstd_decompress(data):
95 return zstd_decompressor.decompress(data)
96
97 compressions["zstd"] = {"compress": zstd_compress, "decompress": zstd_decompress}
98
99
100 def get_default_compression():
101 default = dask.config.get("distributed.comm.compression")
102 if default == "auto":
103 return default_compression
104 if default in compressions:
105 return default
106 raise ValueError(
107 "Default compression '%s' not found.\n"
108 "Choices include auto, %s"
109 % (default, ", ".join(sorted(map(str, compressions))))
110 )
111
112
113 get_default_compression()
114
115
116 def byte_sample(b, size, n):
117 """Sample a bytestring from many locations
118
119 Parameters
120 ----------
121 b : bytes or memoryview
122 size : int
123 target size of each sample to collect
124 (may be smaller if samples collide)
125 n : int
126 number of samples to collect
127 """
128 assert size >= 0 and n >= 0
129 if size == 0 or n == 0:
130 return memoryview(b"")
131
132 b = ensure_memoryview(b)
133
134 parts = n * [None]
135 max_start = b.nbytes - size
136 start = randint(0, max_start)
137 for i in range(n - 1):
138 next_start = randint(0, max_start)
139 end = min(start + size, next_start)
140 parts[i] = b[start:end]
141 start = next_start
142 parts[-1] = b[start : start + size]
143
144 if n == 1:
145 return parts[0]
146 else:
147 return memoryview(b"".join(parts))
148
149
150 def maybe_compress(
151 payload,
152 min_size=10_000,
153 sample_size=10_000,
154 nsamples=5,
155 compression=no_default,
156 ):
157 """
158 Maybe compress payload
159
160 1. We don't compress small messages
161 2. We sample the payload in a few spots, compress that, and if it doesn't
162 do any good we return the original
163     3. We then compress the full original, if it doesn't compress well then we
164 return the original
165 4. We return the compressed result
166 """
167 if compression is no_default:
168 compression = dask.config.get("distributed.comm.compression")
169 if not compression:
170 return None, payload
171 if not (min_size <= nbytes(payload) <= 2**31):
172 # Either too small to bother
173 # or too large (compression libraries often fail)
174 return None, payload
175
176 # Normalize function arguments
177 if compression == "auto":
178 compression = default_compression
179 compress = compressions[compression]["compress"]
180
181 # Take a view of payload for efficient usage
182 mv = ensure_memoryview(payload)
183
184 # Try compressing a sample to see if it compresses well
185 sample = byte_sample(mv, sample_size, nsamples)
186 if len(compress(sample)) <= 0.9 * sample.nbytes:
187 # Try compressing the real thing and check how compressed it is
188 compressed = compress(mv)
189 if len(compressed) <= 0.9 * mv.nbytes:
190 return compression, compressed
191 # Skip compression as the sample or the data didn't compress well
192 return None, payload
193
194
195 def decompress(header, frames):
196 """Decompress frames according to information in the header"""
197 return [
198 compressions[c]["decompress"](frame)
199 for c, frame in zip(header["compression"], frames)
200 ]
201
[end of distributed/protocol/compression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/distributed/protocol/compression.py b/distributed/protocol/compression.py
--- a/distributed/protocol/compression.py
+++ b/distributed/protocol/compression.py
@@ -81,17 +81,15 @@
if parse_version(zstandard.__version__) < parse_version("0.9.0"):
raise ImportError("Need zstandard >= 0.9.0")
- zstd_compressor = zstandard.ZstdCompressor(
- level=dask.config.get("distributed.comm.zstd.level"),
- threads=dask.config.get("distributed.comm.zstd.threads"),
- )
-
- zstd_decompressor = zstandard.ZstdDecompressor()
-
def zstd_compress(data):
+ zstd_compressor = zstandard.ZstdCompressor(
+ level=dask.config.get("distributed.comm.zstd.level"),
+ threads=dask.config.get("distributed.comm.zstd.threads"),
+ )
return zstd_compressor.compress(data)
def zstd_decompress(data):
+ zstd_decompressor = zstandard.ZstdDecompressor()
return zstd_decompressor.decompress(data)
compressions["zstd"] = {"compress": zstd_compress, "decompress": zstd_decompress}
|
{"golden_diff": "diff --git a/distributed/protocol/compression.py b/distributed/protocol/compression.py\n--- a/distributed/protocol/compression.py\n+++ b/distributed/protocol/compression.py\n@@ -81,17 +81,15 @@\n if parse_version(zstandard.__version__) < parse_version(\"0.9.0\"):\n raise ImportError(\"Need zstandard >= 0.9.0\")\n \n- zstd_compressor = zstandard.ZstdCompressor(\n- level=dask.config.get(\"distributed.comm.zstd.level\"),\n- threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n- )\n-\n- zstd_decompressor = zstandard.ZstdDecompressor()\n-\n def zstd_compress(data):\n+ zstd_compressor = zstandard.ZstdCompressor(\n+ level=dask.config.get(\"distributed.comm.zstd.level\"),\n+ threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n+ )\n return zstd_compressor.compress(data)\n \n def zstd_decompress(data):\n+ zstd_decompressor = zstandard.ZstdDecompressor()\n return zstd_decompressor.decompress(data)\n \n compressions[\"zstd\"] = {\"compress\": zstd_compress, \"decompress\": zstd_decompress}\n", "issue": "Zstandard compression is not thread safe\n**Describe the issue**:\r\n\r\nIn `compression.py` we create a singleton `ZstdCompressor` and `ZstdDecompressor` object. The docs for the Zstandard library [state](https://github.com/indygreg/python-zstandard/blob/main/zstandard/backend_cffi.py#L1764):\r\n\r\n> Unless specified otherwise, assume that no two methods of ``ZstdCompressor`` instances can be called from multiple Python threads simultaneously. In other words, assume instances are not thread safe unless stated otherwise.\r\n\r\n**Minimal Complete Verifiable Example**:\r\nThis can trivially be reproduced by introducing threading to the `test_maybe_compress` test in `test_protocol.py`\r\n\r\nSome errors that I've reproduced range from compression/decompression failure, to seg faults. 
Examples for searchability:\r\n\r\n```\r\nFatal Python error: Segmentation fault\r\n\r\nThread 0x000000030df25000 (most recent call first):\r\n File \"/Users/dwragge/code/distributed/distributed/protocol/compression.py\", line 92 in zstd_compress\r\n File \"/Users/dwragge/code/distributed/distributed/protocol/compression.py\", line 186 in maybe_compress\r\n File \"/Users/dwragge/code/distributed/distributed/protocol/tests/test_protocol.py\", line 148 in test_compress_decompress\r\n File \"/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py\", line 870 in run\r\n File \"/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py\", line 932 in _bootstrap_inner\r\n File \"/Users/dwragge/miniconda/envs/dask-distributed/lib/python3.8/threading.py\", line 890 in _bootstrap\r\n```\r\n\r\n- `zstd.ZstdError: cannot compress: Src size is incorrect`\r\n- `zstd.ZstdError: cannot compress: Operation not authorized at current processing stage`\r\n- `zstd.ZstdError: decompression error: Corrupted block detected`\r\n- `zstd.ZstdError: error determining content size from frame header`\r\n- `zstd.ZstdError: decompression error: Unknown frame descriptor`\r\n- `zstd.ZstdError: decompression error: Destination buffer is too small`\r\n\r\nThis manifests in production as errors such as:\r\n\r\n- `distributed.core - ERROR - Exception while handling op get_data`\r\n- `distributed.worker - ERROR - failed during get data with tcp://a.b.c.d:39195 -> tcp://d.e.f.g:43787`\r\n- `distributed.protocol.core - CRITICAL - Failed to deserialize`\r\n\r\nAmongst others, including clear lost workers, and `CommClosedError`s.\r\n\r\n**Anything else we need to know?**:\r\n\r\nThe fix is trivial (instantiate a new Zstd(De)Compressor instance for each compression. 
I will raise a PR to fix plus tests.\n", "before_files": [{"content": "\"\"\"\nRecord known compressors\n\nIncludes utilities for determining whether or not to compress\n\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom collections.abc import Callable\nfrom contextlib import suppress\nfrom random import randint\nfrom typing import Literal\n\nfrom packaging.version import parse as parse_version\nfrom tlz import identity\n\nimport dask\n\nfrom distributed.utils import ensure_memoryview, nbytes, no_default\n\ncompressions: dict[\n str | None | Literal[False],\n dict[Literal[\"compress\", \"decompress\"], Callable[[bytes], bytes]],\n] = {None: {\"compress\": identity, \"decompress\": identity}}\n\ncompressions[False] = compressions[None] # alias\n\n\ndefault_compression = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nwith suppress(ImportError):\n import zlib\n\n compressions[\"zlib\"] = {\"compress\": zlib.compress, \"decompress\": zlib.decompress}\n\nwith suppress(ImportError):\n import snappy\n\n # In python-snappy 0.5.3, support for the Python Buffer Protocol was added.\n # This is needed to handle other objects (like `memoryview`s) without\n # copying to `bytes` first.\n #\n # Note: `snappy.__version__` doesn't exist in a release yet.\n # So do a little test that will fail if snappy is not 0.5.3 or later.\n try:\n snappy.compress(memoryview(b\"\"))\n except TypeError:\n raise ImportError(\"Need snappy >= 0.5.3\")\n\n compressions[\"snappy\"] = {\n \"compress\": snappy.compress,\n \"decompress\": snappy.decompress,\n }\n default_compression = \"snappy\"\n\nwith suppress(ImportError):\n import lz4\n\n # Required to use `lz4.block` APIs and Python Buffer Protocol support.\n if parse_version(lz4.__version__) < parse_version(\"0.23.1\"):\n raise ImportError(\"Need lz4 >= 0.23.1\")\n\n from lz4.block import compress as lz4_compress\n from lz4.block import decompress as lz4_decompress\n\n compressions[\"lz4\"] = {\n \"compress\": lz4_compress,\n \"decompress\": lz4_decompress,\n }\n default_compression = \"lz4\"\n\n\nwith suppress(ImportError):\n import zstandard\n\n # Required for Python Buffer Protocol support.\n if parse_version(zstandard.__version__) < parse_version(\"0.9.0\"):\n raise ImportError(\"Need zstandard >= 0.9.0\")\n\n zstd_compressor = zstandard.ZstdCompressor(\n level=dask.config.get(\"distributed.comm.zstd.level\"),\n threads=dask.config.get(\"distributed.comm.zstd.threads\"),\n )\n\n zstd_decompressor = zstandard.ZstdDecompressor()\n\n def zstd_compress(data):\n return zstd_compressor.compress(data)\n\n def zstd_decompress(data):\n return zstd_decompressor.decompress(data)\n\n compressions[\"zstd\"] = {\"compress\": zstd_compress, \"decompress\": zstd_decompress}\n\n\ndef get_default_compression():\n default = dask.config.get(\"distributed.comm.compression\")\n if default == \"auto\":\n return default_compression\n if default in compressions:\n return default\n raise ValueError(\n \"Default compression '%s' not found.\\n\"\n \"Choices include auto, %s\"\n % (default, \", \".join(sorted(map(str, compressions))))\n )\n\n\nget_default_compression()\n\n\ndef byte_sample(b, size, n):\n \"\"\"Sample a bytestring from many locations\n\n Parameters\n ----------\n b : bytes or memoryview\n size : int\n target size of each sample to collect\n (may be smaller if samples collide)\n n : int\n number of samples to collect\n \"\"\"\n assert size >= 0 and n >= 0\n if size == 0 or n == 0:\n return memoryview(b\"\")\n\n b = ensure_memoryview(b)\n\n parts = n * [None]\n 
max_start = b.nbytes - size\n start = randint(0, max_start)\n for i in range(n - 1):\n next_start = randint(0, max_start)\n end = min(start + size, next_start)\n parts[i] = b[start:end]\n start = next_start\n parts[-1] = b[start : start + size]\n\n if n == 1:\n return parts[0]\n else:\n return memoryview(b\"\".join(parts))\n\n\ndef maybe_compress(\n payload,\n min_size=10_000,\n sample_size=10_000,\n nsamples=5,\n compression=no_default,\n):\n \"\"\"\n Maybe compress payload\n\n 1. We don't compress small messages\n 2. We sample the payload in a few spots, compress that, and if it doesn't\n do any good we return the original\n 3. We then compress the full original, it it doesn't compress well then we\n return the original\n 4. We return the compressed result\n \"\"\"\n if compression is no_default:\n compression = dask.config.get(\"distributed.comm.compression\")\n if not compression:\n return None, payload\n if not (min_size <= nbytes(payload) <= 2**31):\n # Either too small to bother\n # or too large (compression libraries often fail)\n return None, payload\n\n # Normalize function arguments\n if compression == \"auto\":\n compression = default_compression\n compress = compressions[compression][\"compress\"]\n\n # Take a view of payload for efficient usage\n mv = ensure_memoryview(payload)\n\n # Try compressing a sample to see if it compresses well\n sample = byte_sample(mv, sample_size, nsamples)\n if len(compress(sample)) <= 0.9 * sample.nbytes:\n # Try compressing the real thing and check how compressed it is\n compressed = compress(mv)\n if len(compressed) <= 0.9 * mv.nbytes:\n return compression, compressed\n # Skip compression as the sample or the data didn't compress well\n return None, payload\n\n\ndef decompress(header, frames):\n \"\"\"Decompress frames according to information in the header\"\"\"\n return [\n compressions[c][\"decompress\"](frame)\n for c, frame in zip(header[\"compression\"], frames)\n ]\n", "path": "distributed/protocol/compression.py"}]}
| 3,101 | 276 |
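
The golden diff above removes the race by building a fresh `ZstdCompressor`/`ZstdDecompressor` inside each call. A minimal alternative sketch, not what the patch does, is to keep one pair per thread with `threading.local()`, which avoids sharing instances across threads without paying construction cost on every call; the helper name and the `default=` fallbacks below are assumptions:

```
import threading

import dask
import zstandard

# One (compressor, decompressor) pair per thread, because ZstdCompressor and
# ZstdDecompressor instances must not be shared across threads.
_local = threading.local()


def _thread_local_zstd():
    # Lazily build the pair the first time the current thread needs it.
    if not hasattr(_local, "pair"):
        _local.pair = (
            zstandard.ZstdCompressor(
                level=dask.config.get("distributed.comm.zstd.level", default=3),
                threads=dask.config.get("distributed.comm.zstd.threads", default=0),
            ),
            zstandard.ZstdDecompressor(),
        )
    return _local.pair


def zstd_compress(data):
    compressor, _ = _thread_local_zstd()
    return compressor.compress(data)


def zstd_decompress(data):
    _, decompressor = _thread_local_zstd()
    return decompressor.decompress(data)
```

Either variant satisfies the zstandard warning quoted in the issue, since no compressor or decompressor instance is ever touched by more than one thread.
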
gh_patches_debug_19446
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-370
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError in Jinja2 call
I'm fairly certain this is a Jinja2 bug (submitted [here](https://github.com/mitsuhiko/jinja2/issues/396)), but the ultimate impact is that `cookiecutter==0.9.0` is broken for me (on Python 2 and Python 3).
Steps to reproduce:
```
$ pip2 install cookiecutter==0.9.0 && cookiecutter gh:jacebrowning/template-python --no-input
```
```
Traceback (most recent call last):
File "/opt/boxen/homebrew/bin/cookiecutter", line 9, in <module>
load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()
File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py", line 169, in main
cookiecutter(args.input_dir, args.checkout, args.no_input)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py", line 100, in cookiecutter
context['cookiecutter'] = prompt_for_config(context, no_input)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/prompt.py", line 29, in prompt_for_config
val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py", line 841, in from_string
return cls.from_code(self, self.compile(source), globals, None)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py", line 542, in compile
source = optimize(source, self)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/optimizer.py", line 27, in optimize
return optimizer.visit(node)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py", line 39, in visit
return self.generic_visit(node, *args, **kwargs)
File "/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py", line 59, in generic_visit
for field, old_value in node.iter_fields():
AttributeError: 'int' object has no attribute 'iter_fields'
```
This still works with `cookiecutter==0.8.0`:
```
$ pip2 install cookiecutter==0.8.0 && cookiecutter gh:jacebrowning/template-python --no-input
```
My environment:
- OS X 10.10.1
- Python 2.7.9
- pip 1.5.6
</issue>
<code>
[start of cookiecutter/compat.py]
1 import os
2 import sys
3
4 PY3 = sys.version_info[0] == 3
5 OLD_PY2 = sys.version_info[:2] < (2, 7)
6
7 if PY3: # pragma: no cover
8 input_str = 'builtins.input'
9 iteritems = lambda d: iter(d.items())
10 from unittest.mock import patch
11 from io import StringIO
12
13 def read_response(prompt=''):
14 """
15 Prompt the user for a response.
16
17 Prints the given prompt (which should be a Unicode string),
18 and returns the text entered by the user as a Unicode string.
19
20 :param prompt: A Unicode string that is presented to the user.
21 """
22 # The Python 3 input function does exactly what we want
23 return input(prompt)
24
25 else: # pragma: no cover
26 from __builtin__ import raw_input
27 input = raw_input
28 input_str = '__builtin__.raw_input'
29 iteritems = lambda d: d.iteritems()
30 from mock import patch
31 from cStringIO import StringIO
32
33 def read_response(prompt=''):
34 """
35 Prompt the user for a response.
36
37 Prints the given prompt (which should be a Unicode string),
38 and returns the text entered by the user as a Unicode string.
39
40 :param prompt: A Unicode string that is presented to the user.
41 """
42 # For Python 2, raw_input takes a byte string argument for the prompt.
43 # This must be encoded using the encoding used by sys.stdout.
44 # The result is a byte string encoding using sys.stdin.encoding.
45 # However, if the program is not being run interactively, sys.stdout
46 # and sys.stdin may not have encoding attributes.
47 # In that case we don't print a prompt (stdin/out isn't interactive,
48 # so prompting is pointless), and we assume the returned data is
49 # encoded using sys.getdefaultencoding(). This may not be right,
50 # but it's likely the best we can do.
51 # Isn't Python 2 encoding support wonderful? :-)
52 if sys.stdout.encoding:
53 prompt = prompt.encode(sys.stdout.encoding)
54 else:
55 prompt = ''
56 enc = sys.stdin.encoding or sys.getdefaultencoding()
57 return raw_input(prompt).decode(enc)
58
59
60 if PY3: # Forced testing
61
62 from shutil import which
63
64 else: # Forced testing
65
66 def is_exe(program):
67 """
68 Returns whether or not a file is an executable.
69 """
70 return os.path.isfile(program) and os.access(program, os.X_OK)
71
72 def which(cmd, mode=os.F_OK | os.X_OK, path=None):
73 """Given a command, mode, and a PATH string, return the path which
74 conforms to the given mode on the PATH, or None if there is no such
75 file.
76 `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
77 of os.environ.get("PATH"), or can be overridden with a custom search
78 path.
79
80 Note: This function was backported from the Python 3 source code.
81 """
82 # Check that a given file can be accessed with the correct mode.
83 # Additionally check that `file` is not a directory, as on Windows
84 # directories pass the os.access check.
85 def _access_check(fn, mode):
86 return (os.path.exists(fn) and os.access(fn, mode)
87 and not os.path.isdir(fn))
88
89 # If we're given a path with a directory part, look it up directly
90 # rather than referring to PATH directories. This includes checking
91 # relative to the current directory, e.g. ./script
92 if os.path.dirname(cmd):
93 if _access_check(cmd, mode):
94 return cmd
95 return None
96
97 if path is None:
98 path = os.environ.get("PATH", os.defpath)
99 if not path:
100 return None
101 path = path.split(os.pathsep)
102
103 if sys.platform == "win32":
104 # The current directory takes precedence on Windows.
105 if os.curdir not in path:
106 path.insert(0, os.curdir)
107
108 # PATHEXT is necessary to check on Windows.
109 pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
110 # See if the given file matches any of the expected path
111 # extensions. This will allow us to short circuit when given
112 # "python.exe". If it does match, only test that one, otherwise we
113 # have to try others.
114 if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
115 files = [cmd]
116 else:
117 files = [cmd + ext for ext in pathext]
118 else:
119 # On other platforms you don't have things like PATHEXT to tell you
120 # what file suffixes are executable, so just pass on cmd as-is.
121 files = [cmd]
122
123 seen = set()
124 for dir in path:
125 normdir = os.path.normcase(dir)
126 if normdir not in seen:
127 seen.add(normdir)
128 for thefile in files:
129 name = os.path.join(dir, thefile)
130 if _access_check(name, mode):
131 return name
132 return None
133
134 _hush_pyflakes = (patch, StringIO, which)
135
[end of cookiecutter/compat.py]
[start of cookiecutter/prompt.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 """
5 cookiecutter.prompt
6 ---------------------
7
8 Functions for prompting the user for project info.
9 """
10
11 from __future__ import unicode_literals
12 import sys
13
14 from .compat import iteritems, read_response
15 from jinja2.environment import Environment
16
17
18 def prompt_for_config(context, no_input=False):
19 """
20 Prompts the user to enter new config, using context as a source for the
21 field names and sample values.
22
23 :param no_input: Prompt the user at command line for manual configuration?
24 """
25 cookiecutter_dict = {}
26 env = Environment()
27
28 for key, raw in iteritems(context['cookiecutter']):
29 val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)
30
31 if not no_input:
32 prompt = '{0} (default is "{1}")? '.format(key, val)
33
34 new_val = read_response(prompt).strip()
35
36 if new_val != '':
37 val = new_val
38
39 cookiecutter_dict[key] = val
40 return cookiecutter_dict
41
42
43 def query_yes_no(question, default='yes'):
44 """
45 Ask a yes/no question via `read_response()` and return their answer.
46
47 :param question: A string that is presented to the user.
48 :param default: The presumed answer if the user just hits <Enter>.
49 It must be "yes" (the default), "no" or None (meaning
50 an answer is required of the user).
51
52 The "answer" return value is one of "yes" or "no".
53
54 Adapted from
55 http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
56 http://code.activestate.com/recipes/577058/
57
58 """
59 valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
60 if default is None:
61 prompt = ' [y/n] '
62 elif default == 'yes':
63 prompt = ' [Y/n] '
64 elif default == 'no':
65 prompt = ' [y/N] '
66 else:
67 raise ValueError('Invalid default answer: "{0}"'.format(default))
68
69 while True:
70 sys.stdout.write(question + prompt)
71 choice = read_response().lower()
72
73 if default is not None and choice == '':
74 return valid[default]
75 elif choice in valid:
76 return valid[choice]
77 else:
78 sys.stdout.write('Please respond with "yes" or "no" '
79 '(or "y" or "n").\n')
80
[end of cookiecutter/prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cookiecutter/compat.py b/cookiecutter/compat.py
--- a/cookiecutter/compat.py
+++ b/cookiecutter/compat.py
@@ -131,4 +131,10 @@
return name
return None
+
+def is_string(obj):
+ """Determine if an object is a string."""
+ return isinstance(obj, str if PY3 else basestring)
+
+
_hush_pyflakes = (patch, StringIO, which)
diff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py
--- a/cookiecutter/prompt.py
+++ b/cookiecutter/prompt.py
@@ -11,7 +11,7 @@
from __future__ import unicode_literals
import sys
-from .compat import iteritems, read_response
+from .compat import iteritems, read_response, is_string
from jinja2.environment import Environment
@@ -26,6 +26,7 @@
env = Environment()
for key, raw in iteritems(context['cookiecutter']):
+ raw = raw if is_string(raw) else str(raw)
val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)
if not no_input:
|
{"golden_diff": "diff --git a/cookiecutter/compat.py b/cookiecutter/compat.py\n--- a/cookiecutter/compat.py\n+++ b/cookiecutter/compat.py\n@@ -131,4 +131,10 @@\n return name\n return None\n \n+\n+def is_string(obj):\n+ \"\"\"Determine if an object is a string.\"\"\"\n+ return isinstance(obj, str if PY3 else basestring)\n+\n+\n _hush_pyflakes = (patch, StringIO, which)\ndiff --git a/cookiecutter/prompt.py b/cookiecutter/prompt.py\n--- a/cookiecutter/prompt.py\n+++ b/cookiecutter/prompt.py\n@@ -11,7 +11,7 @@\n from __future__ import unicode_literals\n import sys\n \n-from .compat import iteritems, read_response\n+from .compat import iteritems, read_response, is_string\n from jinja2.environment import Environment\n \n \n@@ -26,6 +26,7 @@\n env = Environment()\n \n for key, raw in iteritems(context['cookiecutter']):\n+ raw = raw if is_string(raw) else str(raw)\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n \n if not no_input:\n", "issue": "AttributeError in Jinja2 call\nI'm fairly certain this is a Jinja2 bug (submitted [here](https://github.com/mitsuhiko/jinja2/issues/396)), but the ultimate impact is that `cookiecutter==0.9.0` is broken for me (on Python 2 and Python 3).\n\nSteps to reproduce:\n\n```\n $ pip2 install cookiecutter==0.9.0 && cookiecutter gh:jacebrowning/template-python --no-input\n```\n\n```\nTraceback (most recent call last):\n File \"/opt/boxen/homebrew/bin/cookiecutter\", line 9, in <module>\n load_entry_point('cookiecutter==0.9.0', 'console_scripts', 'cookiecutter')()\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py\", line 169, in main\n cookiecutter(args.input_dir, args.checkout, args.no_input)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/main.py\", line 100, in cookiecutter\n context['cookiecutter'] = prompt_for_config(context, no_input)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/cookiecutter/prompt.py\", line 29, in prompt_for_config\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py\", line 841, in from_string\n return cls.from_code(self, self.compile(source), globals, None)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/environment.py\", line 542, in compile\n source = optimize(source, self)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/optimizer.py\", line 27, in optimize\n return optimizer.visit(node)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py\", line 39, in visit\n return self.generic_visit(node, *args, **kwargs)\n File \"/opt/boxen/homebrew/lib/python2.7/site-packages/jinja2/visitor.py\", line 59, in generic_visit\n for field, old_value in node.iter_fields():\nAttributeError: 'int' object has no attribute 'iter_fields'\n```\n\nThis still works with `cookiecutter==0.8.0`:\n\n```\n $ pip2 install cookiecutter==0.8.0 && cookiecutter gh:jacebrowning/template-python --no-input\n```\n\nMy environment:\n- OS X 10.10.1\n- Python 2.7.9\n- pip 1.5.6\n\n", "before_files": [{"content": "import os\nimport sys\n\nPY3 = sys.version_info[0] == 3\nOLD_PY2 = sys.version_info[:2] < (2, 7)\n\nif PY3: # pragma: no cover\n input_str = 'builtins.input'\n iteritems = lambda d: iter(d.items())\n from unittest.mock import patch\n from io import StringIO\n\n def read_response(prompt=''):\n \"\"\"\n Prompt the user for a response.\n\n Prints the given prompt (which should be a Unicode string),\n and returns the text entered by the user as a 
Unicode string.\n\n :param prompt: A Unicode string that is presented to the user.\n \"\"\"\n # The Python 3 input function does exactly what we want\n return input(prompt)\n\nelse: # pragma: no cover\n from __builtin__ import raw_input\n input = raw_input\n input_str = '__builtin__.raw_input'\n iteritems = lambda d: d.iteritems()\n from mock import patch\n from cStringIO import StringIO\n\n def read_response(prompt=''):\n \"\"\"\n Prompt the user for a response.\n\n Prints the given prompt (which should be a Unicode string),\n and returns the text entered by the user as a Unicode string.\n\n :param prompt: A Unicode string that is presented to the user.\n \"\"\"\n # For Python 2, raw_input takes a byte string argument for the prompt.\n # This must be encoded using the encoding used by sys.stdout.\n # The result is a byte string encoding using sys.stdin.encoding.\n # However, if the program is not being run interactively, sys.stdout\n # and sys.stdin may not have encoding attributes.\n # In that case we don't print a prompt (stdin/out isn't interactive,\n # so prompting is pointless), and we assume the returned data is\n # encoded using sys.getdefaultencoding(). This may not be right,\n # but it's likely the best we can do.\n # Isn't Python 2 encoding support wonderful? :-)\n if sys.stdout.encoding:\n prompt = prompt.encode(sys.stdout.encoding)\n else:\n prompt = ''\n enc = sys.stdin.encoding or sys.getdefaultencoding()\n return raw_input(prompt).decode(enc)\n\n\nif PY3: # Forced testing\n\n from shutil import which\n\nelse: # Forced testing\n\n def is_exe(program):\n \"\"\"\n Returns whether or not a file is an executable.\n \"\"\"\n return os.path.isfile(program) and os.access(program, os.X_OK)\n\n def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result\n of os.environ.get(\"PATH\"), or can be overridden with a custom search\n path.\n\n Note: This function was backported from the Python 3 source code.\n \"\"\"\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path\n # extensions. This will allow us to short circuit when given\n # \"python.exe\". 
If it does match, only test that one, otherwise we\n # have to try others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None\n\n_hush_pyflakes = (patch, StringIO, which)\n", "path": "cookiecutter/compat.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.prompt\n---------------------\n\nFunctions for prompting the user for project info.\n\"\"\"\n\nfrom __future__ import unicode_literals\nimport sys\n\nfrom .compat import iteritems, read_response\nfrom jinja2.environment import Environment\n\n\ndef prompt_for_config(context, no_input=False):\n \"\"\"\n Prompts the user to enter new config, using context as a source for the\n field names and sample values.\n\n :param no_input: Prompt the user at command line for manual configuration?\n \"\"\"\n cookiecutter_dict = {}\n env = Environment()\n\n for key, raw in iteritems(context['cookiecutter']):\n val = env.from_string(raw).render(cookiecutter=cookiecutter_dict)\n\n if not no_input:\n prompt = '{0} (default is \"{1}\")? '.format(key, val)\n\n new_val = read_response(prompt).strip()\n\n if new_val != '':\n val = new_val\n\n cookiecutter_dict[key] = val\n return cookiecutter_dict\n\n\ndef query_yes_no(question, default='yes'):\n \"\"\"\n Ask a yes/no question via `read_response()` and return their answer.\n\n :param question: A string that is presented to the user.\n :param default: The presumed answer if the user just hits <Enter>.\n It must be \"yes\" (the default), \"no\" or None (meaning\n an answer is required of the user).\n\n The \"answer\" return value is one of \"yes\" or \"no\".\n\n Adapted from\n http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input\n http://code.activestate.com/recipes/577058/\n\n \"\"\"\n valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}\n if default is None:\n prompt = ' [y/n] '\n elif default == 'yes':\n prompt = ' [Y/n] '\n elif default == 'no':\n prompt = ' [y/N] '\n else:\n raise ValueError('Invalid default answer: \"{0}\"'.format(default))\n\n while True:\n sys.stdout.write(question + prompt)\n choice = read_response().lower()\n\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write('Please respond with \"yes\" or \"no\" '\n '(or \"y\" or \"n\").\\n')\n", "path": "cookiecutter/prompt.py"}]}
| 3,327 | 279 |
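
To see the failure and the fix in isolation: a non-string value from `cookiecutter.json` (an `int` such as a year) is what reaches `Environment.from_string()` and blows up in Jinja2's optimizer, and coercing it to text first, which the patch's `is_string()` check enables, avoids the crash. A small Python-3-only sketch with an invented sample value (the patch itself also covers Python 2's `basestring`):

```
from jinja2 import Environment

env = Environment()

# Hypothetical cookiecutter.json value: an int such as a year is what sends a
# non-string into env.from_string() and triggers the AttributeError.
raw = 2014

# Python-3-only stand-in for the patch's is_string()/str() coercion.
raw = raw if isinstance(raw, str) else str(raw)

val = env.from_string(raw).render(cookiecutter={})
print(val)  # prints "2014"
```
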
gh_patches_debug_16096
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-799
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve POI list and form
### Motivation
<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->
### Proposed Solution
<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->
List:
- Remove id and version columns
- Sort alphabetically after title in region's default language
Form:
- Add field to not show the location on the map (used for event locations, e.g. multiple instances of the same address but different rooms etc)
- Use the same dynamic slug/link update mechanism like for pages
### Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, and why your proposed solution is better. -->
### Additional Context
<!-- Add any other information or screenshots about the feature request here. -->
</issue>
<code>
[start of src/cms/models/pois/poi.py]
1 from django.db import models
2 from django.utils.translation import get_language, ugettext_lazy as _
3
4 from ..regions.region import Region, Language
5 from ...constants import status
6
7
8 class POI(models.Model):
9 """
10 Data model representing a point of interest (POI). It contains all relevant data about its exact position, including
11 coordinates.
12 """
13
14 region = models.ForeignKey(
15 Region,
16 on_delete=models.CASCADE,
17 related_name="pois",
18 verbose_name=_("region"),
19 )
20 address = models.CharField(
21 max_length=250, verbose_name=_("street and house number")
22 )
23 postcode = models.CharField(max_length=10, verbose_name=_("postal code"))
24 city = models.CharField(max_length=250, verbose_name=_("city"))
25 country = models.CharField(max_length=250, verbose_name=_("country"))
26 latitude = models.FloatField(
27 verbose_name=_("latitude"), help_text=_("The latitude coordinate")
28 )
29 longitude = models.FloatField(
30 verbose_name=_("longitude"), help_text=_("The longitude coordinate")
31 )
32 icon = models.ImageField(
33 null=True,
34 blank=True,
35 upload_to="pois/%Y/%m/%d",
36 verbose_name=_("icon"),
37 )
38 archived = models.BooleanField(
39 default=False,
40 verbose_name=_("archived"),
41 help_text=_("Whether or not the location is read-only and hidden in the API."),
42 )
43
44 @property
45 def languages(self):
46 """
47 This property returns a QuerySet of all :class:`~cms.models.languages.language.Language` objects, to which a POI
48 translation exists.
49
50 :return: QuerySet of all :class:`~cms.models.languages.language.Language` a POI is translated into
51 :rtype: ~django.db.models.query.QuerySet [ ~cms.models.languages.language.Language ]
52 """
53 return Language.objects.filter(poi_translations__poi=self)
54
55 def get_translation(self, language_slug):
56 """
57 This function uses the reverse foreign key ``self.translations`` to get all translations of ``self``
58 and filters them to the requested :class:`~cms.models.languages.language.Language` slug.
59
60 :param language_slug: The slug of the desired :class:`~cms.models.languages.language.Language`
61 :type language_slug: str
62
63 :return: The POI translation in the requested :class:`~cms.models.languages.language.Language` or :obj:`None`
64 if no translation exists
65 :rtype: ~cms.models.pois.poi_translation.POITranslation
66 """
67 return self.translations.filter(language__slug=language_slug).first()
68
69 def get_public_translation(self, language_slug):
70 """
71 This function retrieves the newest public translation of a POI.
72
73 :param language_slug: The slug of the requested :class:`~cms.models.languages.language.Language`
74 :type language_slug: str
75
76 :return: The public translation of a POI
77 :rtype: ~cms.models.pois.poi_translation.POITranslation
78 """
79 return self.translations.filter(
80 language__slug=language_slug,
81 status=status.PUBLIC,
82 ).first()
83
84 @property
85 def backend_translation(self):
86 """
87 This function returns the translation of this POI in the current backend language.
88
89 :return: The backend translation of a POI
90 :rtype: ~cms.models.pois.poi_translation.POITranslation
91 """
92 return self.translations.filter(language__slug=get_language()).first()
93
94 @property
95 def default_translation(self):
96 """
97 This function returns the translation of this POI in the region's default language.
98 Since a POI can only be created by creating a translation in the default language, this is guaranteed to return
99 a POI translation.
100
101 :return: The default translation of a POI
102 :rtype: ~cms.models.pois.poi_translation.POITranslation
103 """
104 return self.translations.filter(language=self.region.default_language).first()
105
106 @property
107 def best_translation(self):
108 """
109 This function returns the translation of this POI in the current backend language and if it doesn't exist, it
110 provides a fallback to the translation in the region's default language.
111
112 :return: The "best" translation of a POI for displaying in the backend
113 :rtype: ~cms.models.pois.poi_translation.POITranslation
114 """
115 return self.backend_translation or self.default_translation
116
117 def __str__(self):
118 """
119 This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``POI object (id)``.
120 It is used in the Django admin backend and as label for ModelChoiceFields.
121
122 :return: A readable string representation of the POI
123 :rtype: str
124 """
125 return self.best_translation.title
126
127 def __repr__(self):
128 """
129 This overwrites the default Django ``__repr__()`` method which would return ``<POI: POI object (id)>``.
130 It is used for logging.
131
132 :return: The canonical string representation of the POI
133 :rtype: str
134 """
135 return f"<POI (id: {self.id}, region: {self.region.slug}, slug: {self.best_translation.slug})>"
136
137 class Meta:
138 #: The verbose name of the model
139 verbose_name = _("location")
140 #: The plural verbose name of the model
141 verbose_name_plural = _("locations")
142 #: The default permissions for this model
143 default_permissions = ()
144 #: The custom permissions for this model
145 permissions = (("manage_pois", "Can manage points of interest"),)
146
[end of src/cms/models/pois/poi.py]
[start of src/cms/forms/pois/poi_form.py]
1 import logging
2
3 from ...models import POI
4 from ..custom_model_form import CustomModelForm
5 from ..icon_widget import IconWidget
6
7
8 logger = logging.getLogger(__name__)
9
10
11 class POIForm(CustomModelForm):
12 """
13 Form for creating and modifying POI objects
14 """
15
16 class Meta:
17 """
18 This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`
19 for more information.
20 """
21
22 #: The model of this :class:`django.forms.ModelForm`
23 model = POI
24 #: The fields of the model which should be handled by this form
25 fields = [
26 "address",
27 "postcode",
28 "city",
29 "country",
30 "latitude",
31 "longitude",
32 "icon",
33 ]
34 #: The widgets which are used in this form
35 widgets = {
36 "icon": IconWidget(),
37 }
38
39 def __init__(self, data=None, files=None, instance=None):
40 """
41 Initialize POI form
42
43 :param data: submitted POST data
44 :type data: dict
45
46 :param instance: This form's instance
47 :type instance: ~cms.models.pois.poi.POI
48 """
49
50 # instantiate ModelForm
51 super().__init__(data=data, files=files, instance=instance)
52
53 # If form is disabled because the user has no permissions to edit the page, disable all form fields
54 if instance and instance.archived:
55 for _, field in self.fields.items():
56 field.disabled = True
57
58 # pylint: disable=arguments-differ
59 def save(self, region=None):
60 """
61 This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes
62 which are not directly determined by input fields.
63
64 :param region: The region of this form's POI instance
65 :type region: ~cms.models.regions.region.Region
66
67 :return: The saved POI object
68 :rtype: ~cms.models.pois.poi.POI
69 """
70
71 poi = super().save(commit=False)
72
73 if not self.instance.id:
74 # only update these values when poi is created
75 poi.region = region
76
77 poi.save()
78 return poi
79
[end of src/cms/forms/pois/poi_form.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cms/forms/pois/poi_form.py b/src/cms/forms/pois/poi_form.py
--- a/src/cms/forms/pois/poi_form.py
+++ b/src/cms/forms/pois/poi_form.py
@@ -29,6 +29,7 @@
"country",
"latitude",
"longitude",
+ "location_not_on_map",
"icon",
]
#: The widgets which are used in this form
diff --git a/src/cms/models/pois/poi.py b/src/cms/models/pois/poi.py
--- a/src/cms/models/pois/poi.py
+++ b/src/cms/models/pois/poi.py
@@ -29,6 +29,11 @@
longitude = models.FloatField(
verbose_name=_("longitude"), help_text=_("The longitude coordinate")
)
+ location_not_on_map = models.BooleanField(
+ default=False,
+ verbose_name=_("Do not show this location on map"),
+ help_text=_("Tick if you do not show this location on map"),
+ )
icon = models.ImageField(
null=True,
blank=True,
|
{"golden_diff": "diff --git a/src/cms/forms/pois/poi_form.py b/src/cms/forms/pois/poi_form.py\n--- a/src/cms/forms/pois/poi_form.py\n+++ b/src/cms/forms/pois/poi_form.py\n@@ -29,6 +29,7 @@\n \"country\",\n \"latitude\",\n \"longitude\",\n+ \"location_not_on_map\",\n \"icon\",\n ]\n #: The widgets which are used in this form\ndiff --git a/src/cms/models/pois/poi.py b/src/cms/models/pois/poi.py\n--- a/src/cms/models/pois/poi.py\n+++ b/src/cms/models/pois/poi.py\n@@ -29,6 +29,11 @@\n longitude = models.FloatField(\n verbose_name=_(\"longitude\"), help_text=_(\"The longitude coordinate\")\n )\n+ location_not_on_map = models.BooleanField(\n+ default=False,\n+ verbose_name=_(\"Do not show this location on map\"),\n+ help_text=_(\"Tick if you do not show this location on map\"),\n+ )\n icon = models.ImageField(\n null=True,\n blank=True,\n", "issue": "Improve POI list and form\n### Motivation\r\n<!-- A clear and concise description of what the motivation for the new feature is, and what problem it is solving. -->\r\n\r\n\r\n### Proposed Solution\r\n<!-- A clear and concise description of the feature you would like to add, and how it solves the motivating problem. -->\r\nList:\r\n\r\n- Remove id and version columns\r\n- Sort alphabetically after title in region's default language\r\n\r\nForm:\r\n- Add field to not show the location on the map (used for event locations, e.g. multiple instances of the same address but different rooms etc)\r\n- Use the same dynamic slug/link update mechanism like for pages\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, and why you're proposed solution is better. -->\r\n\r\n\r\n### Additional Context\r\n<!-- Add any other information or screenshots about the feature request here. -->\r\n\r\n\n", "before_files": [{"content": "from django.db import models\nfrom django.utils.translation import get_language, ugettext_lazy as _\n\nfrom ..regions.region import Region, Language\nfrom ...constants import status\n\n\nclass POI(models.Model):\n \"\"\"\n Data model representing a point of interest (POI). 
It contains all relevant data about its exact position, including\n coordinates.\n \"\"\"\n\n region = models.ForeignKey(\n Region,\n on_delete=models.CASCADE,\n related_name=\"pois\",\n verbose_name=_(\"region\"),\n )\n address = models.CharField(\n max_length=250, verbose_name=_(\"street and house number\")\n )\n postcode = models.CharField(max_length=10, verbose_name=_(\"postal code\"))\n city = models.CharField(max_length=250, verbose_name=_(\"city\"))\n country = models.CharField(max_length=250, verbose_name=_(\"country\"))\n latitude = models.FloatField(\n verbose_name=_(\"latitude\"), help_text=_(\"The latitude coordinate\")\n )\n longitude = models.FloatField(\n verbose_name=_(\"longitude\"), help_text=_(\"The longitude coordinate\")\n )\n icon = models.ImageField(\n null=True,\n blank=True,\n upload_to=\"pois/%Y/%m/%d\",\n verbose_name=_(\"icon\"),\n )\n archived = models.BooleanField(\n default=False,\n verbose_name=_(\"archived\"),\n help_text=_(\"Whether or not the location is read-only and hidden in the API.\"),\n )\n\n @property\n def languages(self):\n \"\"\"\n This property returns a QuerySet of all :class:`~cms.models.languages.language.Language` objects, to which a POI\n translation exists.\n\n :return: QuerySet of all :class:`~cms.models.languages.language.Language` a POI is translated into\n :rtype: ~django.db.models.query.QuerySet [ ~cms.models.languages.language.Language ]\n \"\"\"\n return Language.objects.filter(poi_translations__poi=self)\n\n def get_translation(self, language_slug):\n \"\"\"\n This function uses the reverse foreign key ``self.translations`` to get all translations of ``self``\n and filters them to the requested :class:`~cms.models.languages.language.Language` slug.\n\n :param language_slug: The slug of the desired :class:`~cms.models.languages.language.Language`\n :type language_slug: str\n\n :return: The POI translation in the requested :class:`~cms.models.languages.language.Language` or :obj:`None`\n if no translation exists\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language__slug=language_slug).first()\n\n def get_public_translation(self, language_slug):\n \"\"\"\n This function retrieves the newest public translation of a POI.\n\n :param language_slug: The slug of the requested :class:`~cms.models.languages.language.Language`\n :type language_slug: str\n\n :return: The public translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(\n language__slug=language_slug,\n status=status.PUBLIC,\n ).first()\n\n @property\n def backend_translation(self):\n \"\"\"\n This function returns the translation of this POI in the current backend language.\n\n :return: The backend translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language__slug=get_language()).first()\n\n @property\n def default_translation(self):\n \"\"\"\n This function returns the translation of this POI in the region's default language.\n Since a POI can only be created by creating a translation in the default language, this is guaranteed to return\n a POI translation.\n\n :return: The default translation of a POI\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.translations.filter(language=self.region.default_language).first()\n\n @property\n def best_translation(self):\n \"\"\"\n This function returns the translation of this POI in the current backend language and 
if it doesn't exist, it\n provides a fallback to the translation in the region's default language.\n\n :return: The \"best\" translation of a POI for displaying in the backend\n :rtype: ~cms.models.pois.poi_translation.POITranslation\n \"\"\"\n return self.backend_translation or self.default_translation\n\n def __str__(self):\n \"\"\"\n This overwrites the default Django :meth:`~django.db.models.Model.__str__` method which would return ``POI object (id)``.\n It is used in the Django admin backend and as label for ModelChoiceFields.\n\n :return: A readable string representation of the POI\n :rtype: str\n \"\"\"\n return self.best_translation.title\n\n def __repr__(self):\n \"\"\"\n This overwrites the default Django ``__repr__()`` method which would return ``<POI: POI object (id)>``.\n It is used for logging.\n\n :return: The canonical string representation of the POI\n :rtype: str\n \"\"\"\n return f\"<POI (id: {self.id}, region: {self.region.slug}, slug: {self.best_translation.slug})>\"\n\n class Meta:\n #: The verbose name of the model\n verbose_name = _(\"location\")\n #: The plural verbose name of the model\n verbose_name_plural = _(\"locations\")\n #: The default permissions for this model\n default_permissions = ()\n #: The custom permissions for this model\n permissions = ((\"manage_pois\", \"Can manage points of interest\"),)\n", "path": "src/cms/models/pois/poi.py"}, {"content": "import logging\n\nfrom ...models import POI\nfrom ..custom_model_form import CustomModelForm\nfrom ..icon_widget import IconWidget\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass POIForm(CustomModelForm):\n \"\"\"\n Form for creating and modifying POI objects\n \"\"\"\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = POI\n #: The fields of the model which should be handled by this form\n fields = [\n \"address\",\n \"postcode\",\n \"city\",\n \"country\",\n \"latitude\",\n \"longitude\",\n \"icon\",\n ]\n #: The widgets which are used in this form\n widgets = {\n \"icon\": IconWidget(),\n }\n\n def __init__(self, data=None, files=None, instance=None):\n \"\"\"\n Initialize POI form\n\n :param data: submitted POST data\n :type data: dict\n\n :param instance: This form's instance\n :type instance: ~cms.models.pois.poi.POI\n \"\"\"\n\n # instantiate ModelForm\n super().__init__(data=data, files=files, instance=instance)\n\n # If form is disabled because the user has no permissions to edit the page, disable all form fields\n if instance and instance.archived:\n for _, field in self.fields.items():\n field.disabled = True\n\n # pylint: disable=arguments-differ\n def save(self, region=None):\n \"\"\"\n This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to set attributes\n which are not directly determined by input fields.\n\n :param region: The region of this form's POI instance\n :type region: ~cms.models.regions.region.Region\n\n :return: The saved POI object\n :rtype: ~cms.models.pois.poi.POI\n \"\"\"\n\n poi = super().save(commit=False)\n\n if not self.instance.id:\n # only update these values when poi is created\n poi.region = region\n\n poi.save()\n return poi\n", "path": "src/cms/forms/pois/poi_form.py"}]}
| 2,905 | 236 |
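
The golden diff adds `location_not_on_map` to the model and exposes it in the form, but a Django project also needs a schema migration before the new column exists in the database. Below is a sketch of roughly what `manage.py makemigrations` would generate; the app label, the dependency, and the plain strings are assumptions (the real field wraps its strings in lazy translation):

```
# Hypothetical migration for the field added above; everything here is a
# placeholder for what `makemigrations` would actually produce.
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("cms", "0001_initial"),  # placeholder for the latest cms migration
    ]

    operations = [
        migrations.AddField(
            model_name="poi",
            name="location_not_on_map",
            field=models.BooleanField(
                default=False,
                # The real field wraps these strings in ugettext_lazy.
                verbose_name="Do not show this location on map",
                help_text="Tick if you do not show this location on map",
            ),
        ),
    ]
```
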
gh_patches_debug_25586
|
rasdani/github-patches
|
git_diff
|
google-deepmind__optax-54
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prevent creating unnecessary momentum variables
Currently, `optax.sgd` and `optax.noisy_sgd` unconditionally create momentum variables for the parameters, since both rely on `trace`. For `optax.noisy_sgd`, this is unnecessary since `decay` is always `0`. For `optax.sgd`, this is unexpected since `momentum=0` by default (and can be wasteful for large models).
`optax.noisy_sgd` should only require `_scale_by_learning_rate` (with a negation). `optax.sgd` could conditionally add `trace` if `momentum > 0`.
Below are the lines of code I'm referring to:
https://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L142-L148
https://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L105-L113
And here's where trace automatically creates its state:
https://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/transform.py#L212-L213
</issue>
<code>
[start of optax/_src/alias.py]
1 # Lint as: python3
2 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 # ==============================================================================
16 """Aliases for popular optimisers."""
17
18 from typing import Union
19
20 import jax.numpy as jnp
21 from optax._src import combine
22 from optax._src import schedule
23 from optax._src import transform
24
25
26 GradientTransformation = transform.GradientTransformation
27 ScalarOrSchedule = Union[float, schedule.Schedule]
28
29
30 def _scale_by_learning_rate(learning_rate: ScalarOrSchedule):
31 if callable(learning_rate):
32 return transform.scale_by_schedule(lambda count: -learning_rate(count))
33 return transform.scale(-learning_rate)
34
35
36 def adabelief(learning_rate: ScalarOrSchedule,
37 b1: float = 0.9,
38 b2: float = 0.999,
39 eps: float = 1e-8) -> GradientTransformation:
40 return combine.chain(
41 transform.scale_by_belief(b1=b1, b2=b2, eps=eps),
42 _scale_by_learning_rate(learning_rate),
43 )
44
45
46 def adagrad(
47 learning_rate: ScalarOrSchedule,
48 initial_accumulator_value: float = 0.1,
49 eps: float = 1e-7) -> GradientTransformation:
50 return combine.chain(
51 transform.scale_by_rss(
52 initial_accumulator_value=initial_accumulator_value, eps=eps),
53 _scale_by_learning_rate(learning_rate),
54 )
55
56
57 def adam(learning_rate: ScalarOrSchedule,
58 b1: float = 0.9,
59 b2: float = 0.999,
60 eps: float = 1e-8,
61 eps_root: float = 0.0) -> GradientTransformation:
62 return combine.chain(
63 transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),
64 _scale_by_learning_rate(learning_rate),
65 )
66
67
68 def adamw(learning_rate: ScalarOrSchedule,
69 b1: float = 0.9,
70 b2: float = 0.999,
71 eps: float = 1e-8,
72 eps_root: float = 0.0,
73 weight_decay: float = 1e-4) -> GradientTransformation:
74 return combine.chain(
75 transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),
76 transform.additive_weight_decay(weight_decay),
77 _scale_by_learning_rate(learning_rate),
78 )
79
80
81 def fromage(learning_rate: float,
82 min_norm: float = 1e-6) -> GradientTransformation:
83 mult = 1 / jnp.sqrt(1 + learning_rate ** 2)
84 return combine.chain(
85 transform.scale_by_trust_ratio(min_norm),
86 _scale_by_learning_rate(learning_rate * mult),
87 transform.add_decayed_weights((mult - 1)),
88 )
89
90
91 def lamb(learning_rate: ScalarOrSchedule,
92 b1: float = 0.9,
93 b2: float = 0.999,
94 eps: float = 1e-6,
95 eps_root: float = 0.0,
96 weight_decay: float = 0.) -> GradientTransformation:
97 return combine.chain(
98 transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),
99 transform.add_decayed_weights(weight_decay),
100 transform.scale_by_trust_ratio(),
101 _scale_by_learning_rate(learning_rate),
102 )
103
104
105 def noisy_sgd(learning_rate: ScalarOrSchedule,
106 eta: float = 0.01,
107 gamma: float = 0.55,
108 seed: int = 0) -> GradientTransformation:
109 return combine.chain(
110 transform.trace(decay=0., nesterov=False),
111 _scale_by_learning_rate(learning_rate),
112 transform.add_noise(eta, gamma, seed),
113 )
114
115
116 def radam(learning_rate: ScalarOrSchedule,
117 b1: float = 0.9,
118 b2: float = 0.999,
119 eps: float = 1e-8,
120 threshold: float = 5.0) -> GradientTransformation:
121 return combine.chain(
122 transform.scale_by_radam(b1=b1, b2=b2, eps=eps, threshold=threshold),
123 _scale_by_learning_rate(learning_rate),
124 )
125
126
127 def rmsprop(learning_rate: ScalarOrSchedule,
128 decay: float = 0.9,
129 eps: float = 1e-8,
130 centered: bool = False) -> GradientTransformation:
131 if centered:
132 return combine.chain(
133 transform.scale_by_stddev(decay=decay, eps=eps),
134 _scale_by_learning_rate(learning_rate),
135 )
136 return combine.chain(
137 transform.scale_by_rms(decay=decay, eps=eps),
138 _scale_by_learning_rate(learning_rate),
139 )
140
141
142 def sgd(learning_rate: ScalarOrSchedule,
143 momentum: float = 0.,
144 nesterov: bool = False) -> GradientTransformation:
145 return combine.chain(
146 transform.trace(decay=momentum, nesterov=nesterov),
147 _scale_by_learning_rate(learning_rate),
148 )
149
150
151 def yogi(learning_rate: ScalarOrSchedule,
152 b1: float = 0.9,
153 b2: float = 0.999,
154 eps: float = 1e-3) -> GradientTransformation:
155 return combine.chain(
156 transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),
157 _scale_by_learning_rate(learning_rate),
158 )
159
[end of optax/_src/alias.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optax/_src/alias.py b/optax/_src/alias.py
--- a/optax/_src/alias.py
+++ b/optax/_src/alias.py
@@ -15,7 +15,7 @@
# ==============================================================================
"""Aliases for popular optimisers."""
-from typing import Union
+from typing import Union, Optional
import jax.numpy as jnp
from optax._src import combine
@@ -107,7 +107,6 @@
gamma: float = 0.55,
seed: int = 0) -> GradientTransformation:
return combine.chain(
- transform.trace(decay=0., nesterov=False),
_scale_by_learning_rate(learning_rate),
transform.add_noise(eta, gamma, seed),
)
@@ -140,11 +139,12 @@
def sgd(learning_rate: ScalarOrSchedule,
- momentum: float = 0.,
+ momentum: Optional[float] = None,
nesterov: bool = False) -> GradientTransformation:
return combine.chain(
- transform.trace(decay=momentum, nesterov=nesterov),
- _scale_by_learning_rate(learning_rate),
+ (transform.trace(decay=momentum, nesterov=nesterov)
+ if momentum is not None else transform.identity()),
+ _scale_by_learning_rate(learning_rate)
)
|
{"golden_diff": "diff --git a/optax/_src/alias.py b/optax/_src/alias.py\n--- a/optax/_src/alias.py\n+++ b/optax/_src/alias.py\n@@ -15,7 +15,7 @@\n # ==============================================================================\n \"\"\"Aliases for popular optimisers.\"\"\"\n \n-from typing import Union\n+from typing import Union, Optional\n \n import jax.numpy as jnp\n from optax._src import combine\n@@ -107,7 +107,6 @@\n gamma: float = 0.55,\n seed: int = 0) -> GradientTransformation:\n return combine.chain(\n- transform.trace(decay=0., nesterov=False),\n _scale_by_learning_rate(learning_rate),\n transform.add_noise(eta, gamma, seed),\n )\n@@ -140,11 +139,12 @@\n \n \n def sgd(learning_rate: ScalarOrSchedule,\n- momentum: float = 0.,\n+ momentum: Optional[float] = None,\n nesterov: bool = False) -> GradientTransformation:\n return combine.chain(\n- transform.trace(decay=momentum, nesterov=nesterov),\n- _scale_by_learning_rate(learning_rate),\n+ (transform.trace(decay=momentum, nesterov=nesterov)\n+ if momentum is not None else transform.identity()),\n+ _scale_by_learning_rate(learning_rate)\n )\n", "issue": "Prevent creating unnecessary momentum variables\nCurrently, `optax.sgd` and `optax.noisy_sgd` unconditionally create momentum variables for the parameters, since both rely on `trace`. For `optax.noisy_sgd`, this is unnecessary since `decay` is always `0`. For `optax.sgd`, this is unexpected since `momentum=0` by default (and can be wasteful for large models). \r\n\r\n`optax.noisy_sgd` should only require `_scale_by_learning_rate` (with a negation). `optax.sgd` could conditionally add `trace` if `momentum > 0`. \r\n\r\nBelow are the lines of code I'm referring to:\r\n\r\nhttps://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L142-L148\r\n\r\nhttps://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/alias.py#L105-L113\r\n\r\nAnd here's where trace automatically creates it's state:\r\n\r\nhttps://github.com/deepmind/optax/blob/ba0bc11d172054d65b4387ecae840c04e2bc7035/optax/_src/transform.py#L212-L213\n", "before_files": [{"content": "# Lint as: python3\n# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Aliases for popular optimisers.\"\"\"\n\nfrom typing import Union\n\nimport jax.numpy as jnp\nfrom optax._src import combine\nfrom optax._src import schedule\nfrom optax._src import transform\n\n\nGradientTransformation = transform.GradientTransformation\nScalarOrSchedule = Union[float, schedule.Schedule]\n\n\ndef _scale_by_learning_rate(learning_rate: ScalarOrSchedule):\n if callable(learning_rate):\n return transform.scale_by_schedule(lambda count: -learning_rate(count))\n return transform.scale(-learning_rate)\n\n\ndef adabelief(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_belief(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adagrad(\n learning_rate: ScalarOrSchedule,\n initial_accumulator_value: float = 0.1,\n eps: float = 1e-7) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_rss(\n initial_accumulator_value=initial_accumulator_value, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adam(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef adamw(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n eps_root: float = 0.0,\n weight_decay: float = 1e-4) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.additive_weight_decay(weight_decay),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef fromage(learning_rate: float,\n min_norm: float = 1e-6) -> GradientTransformation:\n mult = 1 / jnp.sqrt(1 + learning_rate ** 2)\n return combine.chain(\n transform.scale_by_trust_ratio(min_norm),\n _scale_by_learning_rate(learning_rate * mult),\n transform.add_decayed_weights((mult - 1)),\n )\n\n\ndef lamb(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-6,\n eps_root: float = 0.0,\n weight_decay: float = 0.) 
-> GradientTransformation:\n return combine.chain(\n transform.scale_by_adam(b1=b1, b2=b2, eps=eps, eps_root=eps_root),\n transform.add_decayed_weights(weight_decay),\n transform.scale_by_trust_ratio(),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef noisy_sgd(learning_rate: ScalarOrSchedule,\n eta: float = 0.01,\n gamma: float = 0.55,\n seed: int = 0) -> GradientTransformation:\n return combine.chain(\n transform.trace(decay=0., nesterov=False),\n _scale_by_learning_rate(learning_rate),\n transform.add_noise(eta, gamma, seed),\n )\n\n\ndef radam(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-8,\n threshold: float = 5.0) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_radam(b1=b1, b2=b2, eps=eps, threshold=threshold),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef rmsprop(learning_rate: ScalarOrSchedule,\n decay: float = 0.9,\n eps: float = 1e-8,\n centered: bool = False) -> GradientTransformation:\n if centered:\n return combine.chain(\n transform.scale_by_stddev(decay=decay, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n return combine.chain(\n transform.scale_by_rms(decay=decay, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef sgd(learning_rate: ScalarOrSchedule,\n momentum: float = 0.,\n nesterov: bool = False) -> GradientTransformation:\n return combine.chain(\n transform.trace(decay=momentum, nesterov=nesterov),\n _scale_by_learning_rate(learning_rate),\n )\n\n\ndef yogi(learning_rate: ScalarOrSchedule,\n b1: float = 0.9,\n b2: float = 0.999,\n eps: float = 1e-3) -> GradientTransformation:\n return combine.chain(\n transform.scale_by_yogi(b1=b1, b2=b2, eps=eps),\n _scale_by_learning_rate(learning_rate),\n )\n", "path": "optax/_src/alias.py"}]}
| 2,593 | 304 |
gh_patches_debug_24251
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-7114
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When the -c option is used when starting NVDA user configuration can not be saved
Steps to reproduce:
- Open a command prompt and cd to the nvda.exe directory.
- start nvda with `nvda.exe -c "C:\myTemp"`
- open the nvda menu and select "save configuration"
An error is shown "Could not save configuration - probably read only file system".
Regression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.
</issue>
<code>
[start of source/fileUtils.py]
1 #fileUtils.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2017 NV Access Limited, Bram Duvigneau
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6 import os
7 import ctypes
8 from contextlib import contextmanager
9 from tempfile import NamedTemporaryFile
10 from logHandler import log
11
12 #: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.
13 MOVEFILE_REPLACE_EXISTING = 1
14
15 @contextmanager
16 def FaultTolerantFile(name):
17 '''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the
18 file `name' when the context manager scope ends and the the context manager __exit__ is called. This
19 means writing out the complete file can be performed with less concern of corrupting the original file
20 if the process is interrupted by windows shutting down.
21
22 Usage:
23 with FaultTolerantFile("myFile.txt") as f:
24 f.write("This is a test")
25
26 This creates a temporary file, and the writes actually happen on this temp file. At the end of the
27 `with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
28 '''
29 dirpath, filename = os.path.split(name)
30 with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
31 log.debug(f.name)
32 yield f
33 f.flush()
34 os.fsync(f)
35 f.close()
36 moveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)
37 if moveFileResult == 0:
38 raise ctypes.WinError()
39
[end of source/fileUtils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/source/fileUtils.py b/source/fileUtils.py
--- a/source/fileUtils.py
+++ b/source/fileUtils.py
@@ -18,6 +18,7 @@
file `name' when the context manager scope ends and the the context manager __exit__ is called. This
means writing out the complete file can be performed with less concern of corrupting the original file
if the process is interrupted by windows shutting down.
+ `name` must be unicode.
Usage:
with FaultTolerantFile("myFile.txt") as f:
@@ -26,6 +27,8 @@
This creates a temporary file, and the writes actually happen on this temp file. At the end of the
`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces "myFile.txt"
'''
+ if not isinstance(name, unicode):
+ raise TypeError("name must be unicode")
dirpath, filename = os.path.split(name)
with NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:
log.debug(f.name)
|
{"golden_diff": "diff --git a/source/fileUtils.py b/source/fileUtils.py\n--- a/source/fileUtils.py\n+++ b/source/fileUtils.py\n@@ -18,6 +18,7 @@\n \tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n \tmeans writing out the complete file can be performed with less concern of corrupting the original file\n \tif the process is interrupted by windows shutting down.\n+\t`name` must be unicode.\n \n \tUsage:\n \t\twith FaultTolerantFile(\"myFile.txt\") as f:\n@@ -26,6 +27,8 @@\n \tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n \t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n \t'''\n+\tif not isinstance(name, unicode):\n+\t\traise TypeError(\"name must be unicode\")\n \tdirpath, filename = os.path.split(name)\n \twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n \t\tlog.debug(f.name)\n", "issue": "When the -c option is used when starting NVDA user configuration can not be saved\nSteps to reproduce:\r\n\r\n- Open a command prompt and cd to the nvda.exe directory.\r\n- start nvda with `nvda.exe -c \"C:\\myTemp\"\r\n- open the nvda menu and select \"save configuration\"\r\n\r\nAn error is shown \"Could not save configuration - probably read only file system\".\r\n\r\nRegression caused by atomic rename/write fix. Config path is unicode without `-c`, but when `-c` is passed, the config path is ANSI. fileUtils assumes path is always Unicode.\n", "before_files": [{"content": "#fileUtils.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2017 NV Access Limited, Bram Duvigneau\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\nimport os\nimport ctypes\nfrom contextlib import contextmanager\nfrom tempfile import NamedTemporaryFile\nfrom logHandler import log\n\n#: Constant; flag for MoveFileEx(). If a file with the destination filename already exists, it is overwritten.\nMOVEFILE_REPLACE_EXISTING = 1\n\n@contextmanager\ndef FaultTolerantFile(name):\n\t'''Used to write out files in a more fault tolerant way. A temporary file is used, and replaces the \n\tfile `name' when the context manager scope ends and the the context manager __exit__ is called. This\n\tmeans writing out the complete file can be performed with less concern of corrupting the original file\n\tif the process is interrupted by windows shutting down.\n\n\tUsage:\n\t\twith FaultTolerantFile(\"myFile.txt\") as f:\n\t\t\tf.write(\"This is a test\")\n\n\tThis creates a temporary file, and the writes actually happen on this temp file. At the end of the \n\t`with` block, when `f` goes out of context the temporary file is closed and, this temporary file replaces \"myFile.txt\"\n\t'''\n\tdirpath, filename = os.path.split(name)\n\twith NamedTemporaryFile(dir=dirpath, prefix=filename, suffix='.tmp', delete=False) as f:\n\t\tlog.debug(f.name)\n\t\tyield f\n\t\tf.flush()\n\t\tos.fsync(f)\n\t\tf.close()\n\t\tmoveFileResult = ctypes.windll.kernel32.MoveFileExW(f.name, name, MOVEFILE_REPLACE_EXISTING)\n\t\tif moveFileResult == 0:\n\t\t\traise ctypes.WinError()\n", "path": "source/fileUtils.py"}]}
| 1,120 | 253 |
gh_patches_debug_25184
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-6688
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search related events by event ID

Right now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default.
We should also add a column for event ID here.
Refs #3147
</issue>
<code>
[start of src/sentry/api/endpoints/group_events.py]
1 from __future__ import absolute_import
2
3 import six
4
5 from sentry import tagstore
6 from sentry.api.base import DocSection, EnvironmentMixin
7 from sentry.api.bases import GroupEndpoint
8 from sentry.api.serializers import serialize
9 from sentry.api.paginator import DateTimePaginator
10 from sentry.models import Environment, Event, Group
11 from sentry.search.utils import parse_query
12 from sentry.utils.apidocs import scenario, attach_scenarios
13 from rest_framework.response import Response
14 from sentry.search.utils import InvalidQuery
15
16
17 @scenario('ListAvailableSamples')
18 def list_available_samples_scenario(runner):
19 group = Group.objects.filter(project=runner.default_project).first()
20 runner.request(method='GET', path='/issues/%s/events/' % group.id)
21
22
23 class GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):
24 doc_section = DocSection.EVENTS
25
26 @attach_scenarios([list_available_samples_scenario])
27 def get(self, request, group):
28 """
29 List an Issue's Events
30 ``````````````````````
31
32 This endpoint lists an issue's events.
33
34 :pparam string issue_id: the ID of the issue to retrieve.
35 :auth: required
36 """
37
38 events = Event.objects.filter(
39 group_id=group.id,
40 )
41
42 query = request.GET.get('query')
43 if query:
44 try:
45 query_kwargs = parse_query(group.project, query, request.user)
46 except InvalidQuery as exc:
47 return Response({'detail': six.text_type(exc)}, status=400)
48
49 if query_kwargs['query']:
50 events = events.filter(
51 message__icontains=query_kwargs['query'],
52 )
53
54 if query_kwargs['tags']:
55 try:
56 environment_id = self._get_environment_id_from_request(
57 request, group.project.organization_id)
58 except Environment.DoesNotExist:
59 event_ids = []
60 else:
61 event_ids = tagstore.get_group_event_ids(
62 group.project_id, group.id, environment_id, query_kwargs['tags'])
63
64 if event_ids:
65 events = events.filter(
66 id__in=event_ids,
67 )
68 else:
69 events = events.none()
70
71 return self.paginate(
72 request=request,
73 queryset=events,
74 order_by='-datetime',
75 on_results=lambda x: serialize(x, request.user),
76 paginator_cls=DateTimePaginator,
77 )
78
[end of src/sentry/api/endpoints/group_events.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py
--- a/src/sentry/api/endpoints/group_events.py
+++ b/src/sentry/api/endpoints/group_events.py
@@ -12,6 +12,7 @@
from sentry.utils.apidocs import scenario, attach_scenarios
from rest_framework.response import Response
from sentry.search.utils import InvalidQuery
+from django.db.models import Q
@scenario('ListAvailableSamples')
@@ -40,6 +41,7 @@
)
query = request.GET.get('query')
+
if query:
try:
query_kwargs = parse_query(group.project, query, request.user)
@@ -47,9 +49,12 @@
return Response({'detail': six.text_type(exc)}, status=400)
if query_kwargs['query']:
- events = events.filter(
- message__icontains=query_kwargs['query'],
- )
+ q = Q(message__icontains=query_kwargs['query'])
+
+ if len(query) == 32:
+ q |= Q(event_id__exact=query_kwargs['query'])
+
+ events = events.filter(q)
if query_kwargs['tags']:
try:
|
{"golden_diff": "diff --git a/src/sentry/api/endpoints/group_events.py b/src/sentry/api/endpoints/group_events.py\n--- a/src/sentry/api/endpoints/group_events.py\n+++ b/src/sentry/api/endpoints/group_events.py\n@@ -12,6 +12,7 @@\n from sentry.utils.apidocs import scenario, attach_scenarios\n from rest_framework.response import Response\n from sentry.search.utils import InvalidQuery\n+from django.db.models import Q\n \n \n @scenario('ListAvailableSamples')\n@@ -40,6 +41,7 @@\n )\n \n query = request.GET.get('query')\n+\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n@@ -47,9 +49,12 @@\n return Response({'detail': six.text_type(exc)}, status=400)\n \n if query_kwargs['query']:\n- events = events.filter(\n- message__icontains=query_kwargs['query'],\n- )\n+ q = Q(message__icontains=query_kwargs['query'])\n+\n+ if len(query) == 32:\n+ q |= Q(event_id__exact=query_kwargs['query'])\n+\n+ events = events.filter(q)\n \n if query_kwargs['tags']:\n try:\n", "issue": "Search related events by event ID\n\r\n\r\n\r\nRight now you can search [by message or tags](https://github.com/getsentry/sentry/blob/master/src/sentry/api/endpoints/group_events.py#L104), but not individual event ID. This is useful when trying to locate a specific event. More useful now that sampling has been disabled by default.\r\n\r\nWe should also add a column for event ID here.\r\n\r\nRefs #3147\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport six\n\nfrom sentry import tagstore\nfrom sentry.api.base import DocSection, EnvironmentMixin\nfrom sentry.api.bases import GroupEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.api.paginator import DateTimePaginator\nfrom sentry.models import Environment, Event, Group\nfrom sentry.search.utils import parse_query\nfrom sentry.utils.apidocs import scenario, attach_scenarios\nfrom rest_framework.response import Response\nfrom sentry.search.utils import InvalidQuery\n\n\n@scenario('ListAvailableSamples')\ndef list_available_samples_scenario(runner):\n group = Group.objects.filter(project=runner.default_project).first()\n runner.request(method='GET', path='/issues/%s/events/' % group.id)\n\n\nclass GroupEventsEndpoint(GroupEndpoint, EnvironmentMixin):\n doc_section = DocSection.EVENTS\n\n @attach_scenarios([list_available_samples_scenario])\n def get(self, request, group):\n \"\"\"\n List an Issue's Events\n ``````````````````````\n\n This endpoint lists an issue's events.\n\n :pparam string issue_id: the ID of the issue to retrieve.\n :auth: required\n \"\"\"\n\n events = Event.objects.filter(\n group_id=group.id,\n )\n\n query = request.GET.get('query')\n if query:\n try:\n query_kwargs = parse_query(group.project, query, request.user)\n except InvalidQuery as exc:\n return Response({'detail': six.text_type(exc)}, status=400)\n\n if query_kwargs['query']:\n events = events.filter(\n message__icontains=query_kwargs['query'],\n )\n\n if query_kwargs['tags']:\n try:\n environment_id = self._get_environment_id_from_request(\n request, group.project.organization_id)\n except Environment.DoesNotExist:\n event_ids = []\n else:\n event_ids = tagstore.get_group_event_ids(\n group.project_id, group.id, environment_id, query_kwargs['tags'])\n\n if event_ids:\n events = events.filter(\n id__in=event_ids,\n )\n else:\n events = events.none()\n\n return self.paginate(\n request=request,\n queryset=events,\n order_by='-datetime',\n on_results=lambda x: serialize(x, request.user),\n paginator_cls=DateTimePaginator,\n )\n", 
"path": "src/sentry/api/endpoints/group_events.py"}]}
| 1,337 | 269 |
gh_patches_debug_38866
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-1480
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make sure Resources follow semantic conventions
From [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md).
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This package implements `OpenTelemetry Resources
17 <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#resource-sdk>`_:
18
19 *A Resource is an immutable representation of the entity producing
20 telemetry. For example, a process producing telemetry that is running in
21 a container on Kubernetes has a Pod name, it is in a namespace and
22 possibly is part of a Deployment which also has a name. All three of
23 these attributes can be included in the Resource.*
24
25 Resource objects are created with `Resource.create`, which accepts attributes
26 (key-values). Resource attributes can also be passed at process invocation in
27 the :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should
28 register your resource with the `opentelemetry.sdk.metrics.MeterProvider` and
29 `opentelemetry.sdk.trace.TracerProvider` by passing them into their
30 constructors. The `Resource` passed to a provider is available to the
31 exporter, which can send on this information as it sees fit.
32
33 .. code-block:: python
34
35 metrics.set_meter_provider(
36 MeterProvider(
37 resource=Resource.create({
38 "service.name": "shoppingcart",
39 "service.instance.id": "instance-12",
40 }),
41 ),
42 )
43 print(metrics.get_meter_provider().resource.attributes)
44
45 {'telemetry.sdk.language': 'python',
46 'telemetry.sdk.name': 'opentelemetry',
47 'telemetry.sdk.version': '0.13.dev0',
48 'service.name': 'shoppingcart',
49 'service.instance.id': 'instance-12'}
50
51 Note that the OpenTelemetry project documents certain `"standard attributes"
52 <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md>`_
53 that have prescribed semantic meanings, for example ``service.name`` in the
54 above example.
55
56 .. envvar:: OTEL_RESOURCE_ATTRIBUTES
57
58 The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource
59 attributes to be passed to the SDK at process invocation. The attributes from
60 :envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to
61 `Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*
62 priority. Attributes should be in the format ``key1=value1,key2=value2``.
63 Additional details are available `in the specification
64 <https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.
65
66 .. code-block:: console
67
68 $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF
69 import pprint
70 from opentelemetry.sdk.resources import Resource
71 pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes)
72 EOF
73 {'service.name': 'shoppingcard',
74 'telemetry.sdk.language': 'python',
75 'telemetry.sdk.name': 'opentelemetry',
76 'telemetry.sdk.version': '0.13.dev0',
77 'will_be_overridden': 'bar'}
78 """
79
80 import abc
81 import concurrent.futures
82 import logging
83 import os
84 import typing
85 from json import dumps
86
87 import pkg_resources
88
89 LabelValue = typing.Union[str, bool, int, float]
90 Attributes = typing.Dict[str, LabelValue]
91 logger = logging.getLogger(__name__)
92
93
94 TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language"
95 TELEMETRY_SDK_NAME = "telemetry.sdk.name"
96 TELEMETRY_SDK_VERSION = "telemetry.sdk.version"
97
98 OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(
99 "opentelemetry-sdk"
100 ).version
101 OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES"
102
103
104 class Resource:
105 def __init__(self, attributes: Attributes):
106 self._attributes = attributes.copy()
107
108 @staticmethod
109 def create(attributes: typing.Optional[Attributes] = None) -> "Resource":
110 if not attributes:
111 resource = _DEFAULT_RESOURCE
112 else:
113 resource = _DEFAULT_RESOURCE.merge(Resource(attributes))
114 return resource.merge(OTELResourceDetector().detect())
115
116 @staticmethod
117 def create_empty() -> "Resource":
118 return _EMPTY_RESOURCE
119
120 @property
121 def attributes(self) -> Attributes:
122 return self._attributes.copy()
123
124 def merge(self, other: "Resource") -> "Resource":
125 merged_attributes = self.attributes
126 # pylint: disable=protected-access
127 for key, value in other._attributes.items():
128 if key not in merged_attributes or merged_attributes[key] == "":
129 merged_attributes[key] = value
130 return Resource(merged_attributes)
131
132 def __eq__(self, other: object) -> bool:
133 if not isinstance(other, Resource):
134 return False
135 return self._attributes == other._attributes
136
137 def __hash__(self):
138 return hash(dumps(self._attributes, sort_keys=True))
139
140
141 _EMPTY_RESOURCE = Resource({})
142 _DEFAULT_RESOURCE = Resource(
143 {
144 TELEMETRY_SDK_LANGUAGE: "python",
145 TELEMETRY_SDK_NAME: "opentelemetry",
146 TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,
147 }
148 )
149
150
151 class ResourceDetector(abc.ABC):
152 def __init__(self, raise_on_error=False):
153 self.raise_on_error = raise_on_error
154
155 @abc.abstractmethod
156 def detect(self) -> "Resource":
157 raise NotImplementedError()
158
159
160 class OTELResourceDetector(ResourceDetector):
161 # pylint: disable=no-self-use
162 def detect(self) -> "Resource":
163 env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)
164 env_resource_map = {}
165 if env_resources_items:
166 env_resource_map = {
167 key.strip(): value.strip()
168 for key, value in (
169 item.split("=") for item in env_resources_items.split(",")
170 )
171 }
172 return Resource(env_resource_map)
173
174
175 def get_aggregated_resources(
176 detectors: typing.List["ResourceDetector"],
177 initial_resource: typing.Optional[Resource] = None,
178 timeout=5,
179 ) -> "Resource":
180 """ Retrieves resources from detectors in the order that they were passed
181
182 :param detectors: List of resources in order of priority
183 :param initial_resource: Static resource. This has highest priority
184 :param timeout: Number of seconds to wait for each detector to return
185 :return:
186 """
187 final_resource = initial_resource or _EMPTY_RESOURCE
188 detectors = [OTELResourceDetector()] + detectors
189
190 with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
191 futures = [executor.submit(detector.detect) for detector in detectors]
192 for detector_ind, future in enumerate(futures):
193 detector = detectors[detector_ind]
194 try:
195 detected_resources = future.result(timeout=timeout)
196 # pylint: disable=broad-except
197 except Exception as ex:
198 if detector.raise_on_error:
199 raise ex
200 logger.warning(
201 "Exception %s in detector %s, ignoring", ex, detector
202 )
203 detected_resources = _EMPTY_RESOURCE
204 finally:
205 final_resource = final_resource.merge(detected_resources)
206 return final_resource
207
[end of opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
@@ -91,9 +91,62 @@
logger = logging.getLogger(__name__)
-TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language"
+CLOUD_PROVIDER = "cloud.provider"
+CLOUD_ACCOUNT_ID = "cloud.account.id"
+CLOUD_REGION = "cloud.region"
+CLOUD_ZONE = "cloud.zone"
+CONTAINER_NAME = "container.name"
+CONTAINER_ID = "container.id"
+CONTAINER_IMAGE_NAME = "container.image.name"
+CONTAINER_IMAGE_TAG = "container.image.tag"
+DEPLOYMENT_ENVIRONMENT = "deployment.environment"
+FAAS_NAME = "faas.name"
+FAAS_ID = "faas.id"
+FAAS_VERSION = "faas.version"
+FAAS_INSTANCE = "faas.instance"
+HOST_NAME = "host.name"
+HOST_TYPE = "host.type"
+HOST_IMAGE_NAME = "host.image.name"
+HOST_IMAGE_ID = "host.image.id"
+HOST_IMAGE_VERSION = "host.image.version"
+KUBERNETES_CLUSTER_NAME = "k8s.cluster.name"
+KUBERNETES_NAMESPACE_NAME = "k8s.namespace.name"
+KUBERNETES_POD_UID = "k8s.pod.uid"
+KUBERNETES_POD_NAME = "k8s.pod.name"
+KUBERNETES_CONTAINER_NAME = "k8s.container.name"
+KUBERNETES_REPLICA_SET_UID = "k8s.replicaset.uid"
+KUBERNETES_REPLICA_SET_NAME = "k8s.replicaset.name"
+KUBERNETES_DEPLOYMENT_UID = "k8s.deployment.uid"
+KUBERNETES_DEPLOYMENT_NAME = "k8s.deployment.name"
+KUBERNETES_STATEFUL_SET_UID = "k8s.statefulset.uid"
+KUBERNETES_STATEFUL_SET_NAME = "k8s.statefulset.name"
+KUBERNETES_DAEMON_SET_UID = "k8s.daemonset.uid"
+KUBERNETES_DAEMON_SET_NAME = "k8s.daemonset.name"
+KUBERNETES_JOB_UID = "k8s.job.uid"
+KUBERNETES_JOB_NAME = "k8s.job.name"
+KUBERNETES_CRON_JOB_UID = "k8s.cronjob.uid"
+KUBERNETES_CRON_JOB_NAME = "k8s.cronjob.name"
+OS_TYPE = "os.type"
+OS_DESCRIPTION = "os.description"
+PROCESS_PID = "process.pid"
+PROCESS_EXECUTABLE_NAME = "process.executable.name"
+PROCESS_EXECUTABLE_PATH = "process.executable.path"
+PROCESS_COMMAND = "process.command"
+PROCESS_COMMAND_LINE = "process.command_line"
+PROCESS_COMMAND_ARGS = "process.command_args"
+PROCESS_OWNER = "process.owner"
+PROCESS_RUNTIME_NAME = "process.runtime.name"
+PROCESS_RUNTIME_VERSION = "process.runtime.version"
+PROCESS_RUNTIME_DESCRIPTION = "process.runtime.description"
+SERVICE_NAME = "service.name"
+SERVICE_NAMESPACE = "service.namespace"
+SERVICE_INSTANCE_ID = "service.instance.id"
+SERVICE_VERSION = "service.version"
TELEMETRY_SDK_NAME = "telemetry.sdk.name"
TELEMETRY_SDK_VERSION = "telemetry.sdk.version"
+TELEMETRY_AUTO_VERSION = "telemetry.auto.version"
+TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language"
+
OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(
"opentelemetry-sdk"
@@ -111,7 +164,18 @@
resource = _DEFAULT_RESOURCE
else:
resource = _DEFAULT_RESOURCE.merge(Resource(attributes))
- return resource.merge(OTELResourceDetector().detect())
+ resource = resource.merge(OTELResourceDetector().detect())
+ if not resource.attributes.get(SERVICE_NAME, None):
+ default_service_name = "unknown_service"
+ process_executable_name = resource.attributes.get(
+ PROCESS_EXECUTABLE_NAME, None
+ )
+ if process_executable_name:
+ default_service_name += ":" + process_executable_name
+ resource = resource.merge(
+ Resource({SERVICE_NAME: default_service_name})
+ )
+ return resource
@staticmethod
def create_empty() -> "Resource":
|
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n@@ -91,9 +91,62 @@\n logger = logging.getLogger(__name__)\n \n \n-TELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n+CLOUD_PROVIDER = \"cloud.provider\"\n+CLOUD_ACCOUNT_ID = \"cloud.account.id\"\n+CLOUD_REGION = \"cloud.region\"\n+CLOUD_ZONE = \"cloud.zone\"\n+CONTAINER_NAME = \"container.name\"\n+CONTAINER_ID = \"container.id\"\n+CONTAINER_IMAGE_NAME = \"container.image.name\"\n+CONTAINER_IMAGE_TAG = \"container.image.tag\"\n+DEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\n+FAAS_NAME = \"faas.name\"\n+FAAS_ID = \"faas.id\"\n+FAAS_VERSION = \"faas.version\"\n+FAAS_INSTANCE = \"faas.instance\"\n+HOST_NAME = \"host.name\"\n+HOST_TYPE = \"host.type\"\n+HOST_IMAGE_NAME = \"host.image.name\"\n+HOST_IMAGE_ID = \"host.image.id\"\n+HOST_IMAGE_VERSION = \"host.image.version\"\n+KUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\n+KUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\n+KUBERNETES_POD_UID = \"k8s.pod.uid\"\n+KUBERNETES_POD_NAME = \"k8s.pod.name\"\n+KUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\n+KUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\n+KUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\n+KUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\n+KUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\n+KUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\n+KUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\n+KUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\n+KUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\n+KUBERNETES_JOB_UID = \"k8s.job.uid\"\n+KUBERNETES_JOB_NAME = \"k8s.job.name\"\n+KUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\n+KUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\n+OS_TYPE = \"os.type\"\n+OS_DESCRIPTION = \"os.description\"\n+PROCESS_PID = \"process.pid\"\n+PROCESS_EXECUTABLE_NAME = \"process.executable.name\"\n+PROCESS_EXECUTABLE_PATH = \"process.executable.path\"\n+PROCESS_COMMAND = \"process.command\"\n+PROCESS_COMMAND_LINE = \"process.command_line\"\n+PROCESS_COMMAND_ARGS = \"process.command_args\"\n+PROCESS_OWNER = \"process.owner\"\n+PROCESS_RUNTIME_NAME = \"process.runtime.name\"\n+PROCESS_RUNTIME_VERSION = \"process.runtime.version\"\n+PROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\n+SERVICE_NAME = \"service.name\"\n+SERVICE_NAMESPACE = \"service.namespace\"\n+SERVICE_INSTANCE_ID = \"service.instance.id\"\n+SERVICE_VERSION = \"service.version\"\n TELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\n TELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\n+TELEMETRY_AUTO_VERSION = \"telemetry.auto.version\"\n+TELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n+\n \n OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n@@ -111,7 +164,18 @@\n resource = _DEFAULT_RESOURCE\n else:\n resource = _DEFAULT_RESOURCE.merge(Resource(attributes))\n- return resource.merge(OTELResourceDetector().detect())\n+ resource = resource.merge(OTELResourceDetector().detect())\n+ if not resource.attributes.get(SERVICE_NAME, None):\n+ default_service_name = \"unknown_service\"\n+ process_executable_name = resource.attributes.get(\n+ PROCESS_EXECUTABLE_NAME, None\n+ )\n+ if process_executable_name:\n+ default_service_name += \":\" + process_executable_name\n+ resource = resource.merge(\n+ Resource({SERVICE_NAME: default_service_name})\n+ 
)\n+ return resource\n \n @staticmethod\n def create_empty() -> \"Resource\":\n", "issue": "Make sure Resources follow semantic conventions\nFrom [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md).\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.metrics.MeterProvider` and\n`opentelemetry.sdk.trace.TracerProvider` by passing them into their\nconstructors. The `Resource` passed to a provider is available to the\nexporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n metrics.set_meter_provider(\n MeterProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(metrics.get_meter_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. 
code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\n\nOPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\nOTEL_RESOURCE_ATTRIBUTES = \"OTEL_RESOURCE_ATTRIBUTES\"\n\n\nclass Resource:\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n if not attributes:\n resource = _DEFAULT_RESOURCE\n else:\n resource = _DEFAULT_RESOURCE.merge(Resource(attributes))\n return resource.merge(OTELResourceDetector().detect())\n\n @staticmethod\n def create_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n merged_attributes = self.attributes\n # pylint: disable=protected-access\n for key, value in other._attributes.items():\n if key not in merged_attributes or merged_attributes[key] == \"\":\n merged_attributes[key] = value\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\" Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py"}]}
| 2,744 | 983 |
gh_patches_debug_43222
|
rasdani/github-patches
|
git_diff
|
hydroshare__hydroshare-5067
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add a select for "subject areas"
**Describe the feature you'd like and what it will do**
Currently, subject areas is free text.
Should be a typeahead -- noninvasive.
@martinseul suggested that we should populate a list similar to
https://has.arizona.edu/research-focus-areas
Related to https://github.com/hydroshare/hydroshare/issues/4733
</issue>
<code>
[start of theme/migrations/0023_alter_userprofile_subject_areas.py]
1 # Generated by Django 3.2.15 on 2023-04-11 18:41
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 def migrate_csv_subject_areas(apps, schema_editor):
8 SubjectArea = apps.get_model('hs_dictionary.SubjectArea')
9 UserProfile = apps.get_model('theme.UserProfile')
10 # Attempt to match existing SAs from profiles
11 profiles_with_sa = UserProfile.objects \
12 .exclude(subject_areas__isnull=True) \
13 .exclude(subject_areas='')
14
15 subject_area_objects = SubjectArea.objects.all()
16
17 for profile in profiles_with_sa:
18 old_subject_areas = profile.subject_areas.split(',')
19 old_subject_areas = [s for s in old_subject_areas]
20 print('*' * 100)
21 print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')
22 new_subj_areas = []
23 for subject in old_subject_areas:
24 print(f"Searching for a match with '{subject}'")
25 match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]
26 if match:
27 new_subj_areas.append(match[0].name)
28 if match[0].name == subject:
29 print(f'- Exact match with pre-existing subject area: {subject}')
30 else:
31 print(f'- Near match with pre-existing subject area: {subject}')
32 else:
33 if subject.strip() == subject:
34 print(f"- Unmatched subject area '{subject}' will remain unaltered")
35 new_subj_areas.append(subject)
36 else:
37 print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped")
38 new_subj_areas.append(subject.strip())
39
40 sas = ','.join(new_subj_areas)
41 print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')
42 profile.subject_areas = f'{{{sas}}}'
43 profile.save()
44
45 profiles_without_sa = UserProfile.objects \
46 .filter(subject_areas='')
47 for profile in profiles_without_sa:
48 print(f'Updating {profile} from "" to {{}}')
49 profile.subject_areas = '{}'
50 profile.save()
51
52
53 class Migration(migrations.Migration):
54
55 dependencies = [
56 ('theme', '0022_alter_userprofile_subject_areas'),
57 ]
58
59 operations = [
60 migrations.RunSQL("UPDATE theme_userprofile set subject_areas = NULL "
61 "where theme_userprofile.subject_areas like '';"),
62 migrations.RunPython(migrate_csv_subject_areas),
63 migrations.AlterField(
64 model_name='userprofile',
65 name='subject_areas',
66 field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. "Water Management." Free text entry or select from the suggestions', null=True, size=None),
67 ),
68 ]
69
[end of theme/migrations/0023_alter_userprofile_subject_areas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py
--- a/theme/migrations/0023_alter_userprofile_subject_areas.py
+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py
@@ -2,12 +2,25 @@
import django.contrib.postgres.fields
from django.db import migrations, models
+from django.db.utils import DataError
+from django.core.management import call_command
+import re
def migrate_csv_subject_areas(apps, schema_editor):
+ def strip_for_dict(string=""):
+ # Check if the string consists of only spaces and braces
+ res, _ = re.subn('{|}', '', string)
+ if res.strip() == "":
+ return ""
+ # replace invalid braces and quotes
+ string = string.replace("{", "[").replace("}", "]").replace("\"", "\'")
+ return string.strip()
+ call_command('create_subject_areas_dict')
SubjectArea = apps.get_model('hs_dictionary.SubjectArea')
UserProfile = apps.get_model('theme.UserProfile')
# Attempt to match existing SAs from profiles
+ errors = []
profiles_with_sa = UserProfile.objects \
.exclude(subject_areas__isnull=True) \
.exclude(subject_areas='')
@@ -21,8 +34,16 @@
print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')
new_subj_areas = []
for subject in old_subject_areas:
+ if subject == '':
+ # There is a trailing comma that we need to remove
+ continue
+ stripped_subject = strip_for_dict(subject)
+ if stripped_subject == '':
+ # The subject contained only invalid chars
+ print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped")
+ continue
print(f"Searching for a match with '{subject}'")
- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]
+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]
if match:
new_subj_areas.append(match[0].name)
if match[0].name == subject:
@@ -30,24 +51,33 @@
else:
print(f'- Near match with pre-existing subject area: {subject}')
else:
- if subject.strip() == subject:
+ if stripped_subject == subject:
print(f"- Unmatched subject area '{subject}' will remain unaltered")
new_subj_areas.append(subject)
else:
- print(f"- Unmatched subject area '{subject}' contains whitespace that will be stripped")
- new_subj_areas.append(subject.strip())
+ print(f"- Unmatched subject area '{subject}' contains invalid chars that will be stripped")
+ new_subj_areas.append(stripped_subject)
sas = ','.join(new_subj_areas)
- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')
+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'
+ print(message)
profile.subject_areas = f'{{{sas}}}'
+ try:
+ profile.save()
+ except DataError as e:
+ errors.append(f'Error saving profile: {e}' + message)
+
+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')
+ for profile in profiles_without_sa:
+ print(f'Updating {profile} from "" to {{}}')
+ profile.subject_areas = '{}'
profile.save()
- profiles_without_sa = UserProfile.objects \
- .filter(subject_areas='')
- for profile in profiles_without_sa:
- print(f'Updating {profile} from "" to {{}}')
- profile.subject_areas = '{}'
- profile.save()
+ print("Done updating Subject Areas.")
+ if errors:
+ print("Errors during update:")
+ for error in errors:
+ print(error)
class Migration(migrations.Migration):
|
{"golden_diff": "diff --git a/theme/migrations/0023_alter_userprofile_subject_areas.py b/theme/migrations/0023_alter_userprofile_subject_areas.py\n--- a/theme/migrations/0023_alter_userprofile_subject_areas.py\n+++ b/theme/migrations/0023_alter_userprofile_subject_areas.py\n@@ -2,12 +2,25 @@\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+from django.db.utils import DataError\n+from django.core.management import call_command\n+import re\n \n \n def migrate_csv_subject_areas(apps, schema_editor):\n+ def strip_for_dict(string=\"\"):\n+ # Check if the string consists of only spaces and braces\n+ res, _ = re.subn('{|}', '', string)\n+ if res.strip() == \"\":\n+ return \"\"\n+ # replace invalid braces and quotes\n+ string = string.replace(\"{\", \"[\").replace(\"}\", \"]\").replace(\"\\\"\", \"\\'\")\n+ return string.strip()\n+ call_command('create_subject_areas_dict')\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n+ errors = []\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n@@ -21,8 +34,16 @@\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n+ if subject == '':\n+ # There is a trailing comma that we need to remove\n+ continue\n+ stripped_subject = strip_for_dict(subject)\n+ if stripped_subject == '':\n+ # The subject contained only invalid chars\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ continue\n print(f\"Searching for a match with '{subject}'\")\n- match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n+ match = [sa for sa in subject_area_objects if sa.name.lower() == stripped_subject.lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n@@ -30,24 +51,33 @@\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n- if subject.strip() == subject:\n+ if stripped_subject == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n- print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n- new_subj_areas.append(subject.strip())\n+ print(f\"- Unmatched subject area '{subject}' contains invalid chars that will be stripped\")\n+ new_subj_areas.append(stripped_subject)\n \n sas = ','.join(new_subj_areas)\n- print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n+ message = f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}'\n+ print(message)\n profile.subject_areas = f'{{{sas}}}'\n+ try:\n+ profile.save()\n+ except DataError as e:\n+ errors.append(f'Error saving profile: {e}' + message)\n+\n+ profiles_without_sa = UserProfile.objects.filter(subject_areas='')\n+ for profile in profiles_without_sa:\n+ print(f'Updating {profile} from \"\" to {{}}')\n+ profile.subject_areas = '{}'\n profile.save()\n \n- profiles_without_sa = UserProfile.objects \\\n- .filter(subject_areas='')\n- for profile in profiles_without_sa:\n- print(f'Updating {profile} from \"\" to {{}}')\n- profile.subject_areas = '{}'\n- profile.save()\n+ print(\"Done updating Subject Areas.\")\n+ if errors:\n+ print(\"Errors during update:\")\n+ for error in errors:\n+ print(error)\n \n \n class 
Migration(migrations.Migration):\n", "issue": "add a select for \"subject areas\"\n**Describe the feature you'd like and what it will do**\r\nCurrently, subject areas is free text.\r\nShould be a typeahead -- noninvasive.\r\n@martinseul suggested that we should populate a list similar to\r\nhttps://has.arizona.edu/research-focus-areas\r\n\r\n\r\nRelated to https://github.com/hydroshare/hydroshare/issues/4733\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.15 on 2023-04-11 18:41\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\ndef migrate_csv_subject_areas(apps, schema_editor):\n SubjectArea = apps.get_model('hs_dictionary.SubjectArea')\n UserProfile = apps.get_model('theme.UserProfile')\n # Attempt to match existing SAs from profiles\n profiles_with_sa = UserProfile.objects \\\n .exclude(subject_areas__isnull=True) \\\n .exclude(subject_areas='')\n\n subject_area_objects = SubjectArea.objects.all()\n\n for profile in profiles_with_sa:\n old_subject_areas = profile.subject_areas.split(',')\n old_subject_areas = [s for s in old_subject_areas]\n print('*' * 100)\n print(f'Searching user #{profile.pk} which has subject areas: {profile.subject_areas}')\n new_subj_areas = []\n for subject in old_subject_areas:\n print(f\"Searching for a match with '{subject}'\")\n match = [sa for sa in subject_area_objects if sa.name.lower() == subject.strip().lower()]\n if match:\n new_subj_areas.append(match[0].name)\n if match[0].name == subject:\n print(f'- Exact match with pre-existing subject area: {subject}')\n else:\n print(f'- Near match with pre-existing subject area: {subject}')\n else:\n if subject.strip() == subject:\n print(f\"- Unmatched subject area '{subject}' will remain unaltered\")\n new_subj_areas.append(subject)\n else:\n print(f\"- Unmatched subject area '{subject}' contains whitespace that will be stripped\")\n new_subj_areas.append(subject.strip())\n\n sas = ','.join(new_subj_areas)\n print(f'Updating {profile} from {profile.subject_areas} subject_areas to {{{sas}}}')\n profile.subject_areas = f'{{{sas}}}'\n profile.save()\n\n profiles_without_sa = UserProfile.objects \\\n .filter(subject_areas='')\n for profile in profiles_without_sa:\n print(f'Updating {profile} from \"\" to {{}}')\n profile.subject_areas = '{}'\n profile.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('theme', '0022_alter_userprofile_subject_areas'),\n ]\n\n operations = [\n migrations.RunSQL(\"UPDATE theme_userprofile set subject_areas = NULL \"\n \"where theme_userprofile.subject_areas like '';\"),\n migrations.RunPython(migrate_csv_subject_areas),\n migrations.AlterField(\n model_name='userprofile',\n name='subject_areas',\n field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=1024), blank=True, help_text='A list of subject areas you are interested in researching. e.g. \"Water Management.\" Free text entry or select from the suggestions', null=True, size=None),\n ),\n ]\n", "path": "theme/migrations/0023_alter_userprofile_subject_areas.py"}]}
| 1,393 | 897 |
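A quick gloss on the subject-areas migration above: the heart of the golden diff is a small sanitiser that treats values made only of braces and whitespace as empty, swaps the remaining braces and double quotes for safe characters, and then matches each comma-separated entry case-insensitively against the known subject areas. Below is a framework-free sketch of that logic; `strip_for_dict` follows the diff, while the standalone `match_subject_areas` helper and the sample inputs are illustrative assumptions rather than part of the Django migration.

```python
import re


def strip_for_dict(value=""):
    # Values that are only spaces and braces are treated as empty.
    without_braces, _ = re.subn(r"{|}", "", value)
    if without_braces.strip() == "":
        return ""
    # Replace the braces and double quotes that the migration treats as invalid.
    value = value.replace("{", "[").replace("}", "]").replace('"', "'")
    return value.strip()


def match_subject_areas(raw, known):
    # Split the legacy comma-separated string and keep cleaned, matched names.
    matched = []
    for subject in raw.split(","):
        cleaned = strip_for_dict(subject)
        if cleaned == "":
            continue  # trailing comma, or nothing but invalid characters
        exact = [name for name in known if name.lower() == cleaned.lower()]
        matched.append(exact[0] if exact else cleaned)
    return matched


print(match_subject_areas("Water Management, {}, hydrology,", ["Water Management"]))
# -> ['Water Management', 'hydrology']
```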
gh_patches_debug_14475
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-2686
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ParserError exception raised for invalid configuration
### search you tried in the issue tracker
ParserError and unicode
### describe your issue
I executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:
````
$ pre-commit autoupdate
An unexpected error has occurred: ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
Check the log at /home/carsten/.cache/pre-commit/pre-commit.log
````
This is an expected error and I would expect an error message like `Your configuration file "..." is wrongly formatted at <pos>. Please review the format of the content.`.
Thank you,
Carsten
### pre-commit --version
pre-commit 2.21.0
### .pre-commit-config.yaml
```yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-executables-have-shebangs
- repo: https://github.com/shellcheck-py/shellcheck-py
rev: v0.9.0.2
hooks:
- id: shellcheck
```
### ~/.cache/pre-commit/pre-commit.log (if present)
### version information
```
pre-commit version: 2.21.0
git --version: git version 2.35.3
sys.version:
3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]
sys.executable: /home/carsten/virtualenv/bin/python3.10
os.name: posix
sys.platform: linux
```
### error information
```
An unexpected error has occurred: ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
```
```
Traceback (most recent call last):
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py", line 73, in error_handler
yield
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py", line 355, in main
return autoupdate(
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py", line 154, in autoupdate
migrate_config(config_file, quiet=True)
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 47, in migrate_config
contents = _migrate_map(contents)
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py", line 16, in _migrate_map
if isinstance(yaml_load(contents), list):
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py", line 81, in load
return loader.get_single_data()
File "/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py", line 49, in get_single_data
node = self.get_single_node()
File "yaml/_yaml.pyx", line 673, in yaml._yaml.CParser.get_single_node
File "yaml/_yaml.pyx", line 687, in yaml._yaml.CParser._compose_document
File "yaml/_yaml.pyx", line 731, in yaml._yaml.CParser._compose_node
File "yaml/_yaml.pyx", line 847, in yaml._yaml.CParser._compose_mapping_node
File "yaml/_yaml.pyx", line 860, in yaml._yaml.CParser._parse_next_event
yaml.parser.ParserError: while parsing a block mapping
in "<unicode string>", line 1, column 1
did not find expected key
in "<unicode string>", line 7, column 1
```
</issue>
<code>
[start of pre_commit/commands/migrate_config.py]
1 from __future__ import annotations
2
3 import re
4 import textwrap
5
6 import yaml
7
8 from pre_commit.yaml import yaml_load
9
10
11 def _is_header_line(line: str) -> bool:
12 return line.startswith(('#', '---')) or not line.strip()
13
14
15 def _migrate_map(contents: str) -> str:
16 if isinstance(yaml_load(contents), list):
17 # Find the first non-header line
18 lines = contents.splitlines(True)
19 i = 0
20 # Only loop on non empty configuration file
21 while i < len(lines) and _is_header_line(lines[i]):
22 i += 1
23
24 header = ''.join(lines[:i])
25 rest = ''.join(lines[i:])
26
27 # If they are using the "default" flow style of yaml, this operation
28 # will yield a valid configuration
29 try:
30 trial_contents = f'{header}repos:\n{rest}'
31 yaml_load(trial_contents)
32 contents = trial_contents
33 except yaml.YAMLError:
34 contents = f'{header}repos:\n{textwrap.indent(rest, " " * 4)}'
35
36 return contents
37
38
39 def _migrate_sha_to_rev(contents: str) -> str:
40 return re.sub(r'(\n\s+)sha:', r'\1rev:', contents)
41
42
43 def migrate_config(config_file: str, quiet: bool = False) -> int:
44 with open(config_file) as f:
45 orig_contents = contents = f.read()
46
47 contents = _migrate_map(contents)
48 contents = _migrate_sha_to_rev(contents)
49
50 if contents != orig_contents:
51 with open(config_file, 'w') as f:
52 f.write(contents)
53
54 print('Configuration has been migrated.')
55 elif not quiet:
56 print('Configuration is already migrated.')
57 return 0
58
[end of pre_commit/commands/migrate_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py
--- a/pre_commit/commands/migrate_config.py
+++ b/pre_commit/commands/migrate_config.py
@@ -3,8 +3,10 @@
import re
import textwrap
+import cfgv
import yaml
+from pre_commit.clientlib import InvalidConfigError
from pre_commit.yaml import yaml_load
@@ -44,6 +46,13 @@
with open(config_file) as f:
orig_contents = contents = f.read()
+ with cfgv.reraise_as(InvalidConfigError):
+ with cfgv.validate_context(f'File {config_file}'):
+ try:
+ yaml_load(orig_contents)
+ except Exception as e:
+ raise cfgv.ValidationError(str(e))
+
contents = _migrate_map(contents)
contents = _migrate_sha_to_rev(contents)
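A standalone illustration of the pattern this patch applies: parse the configuration once up front and turn a low-level `yaml` parser failure into a single readable error that names the file. The sketch uses plain `yaml.safe_load` and a local exception class instead of pre-commit's `cfgv` helpers, so the class name and the message format are assumptions made for the example.

```python
import yaml


class InvalidConfigError(ValueError):
    """Raised when the configuration file cannot even be parsed as YAML."""


def load_config_or_fail(config_file):
    with open(config_file) as f:
        contents = f.read()
    try:
        return yaml.safe_load(contents)
    except yaml.YAMLError as exc:
        # One friendly message instead of a raw ParserError traceback.
        raise InvalidConfigError(f"File {config_file}\n=====> {exc}") from exc
```

Run before `_migrate_map()`, this reports the mis-indented `- repo` block from the issue as one friendly `InvalidConfigError` instead of an unhandled `ParserError`.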
|
{"golden_diff": "diff --git a/pre_commit/commands/migrate_config.py b/pre_commit/commands/migrate_config.py\n--- a/pre_commit/commands/migrate_config.py\n+++ b/pre_commit/commands/migrate_config.py\n@@ -3,8 +3,10 @@\n import re\n import textwrap\n \n+import cfgv\n import yaml\n \n+from pre_commit.clientlib import InvalidConfigError\n from pre_commit.yaml import yaml_load\n \n \n@@ -44,6 +46,13 @@\n with open(config_file) as f:\n orig_contents = contents = f.read()\n \n+ with cfgv.reraise_as(InvalidConfigError):\n+ with cfgv.validate_context(f'File {config_file}'):\n+ try:\n+ yaml_load(orig_contents)\n+ except Exception as e:\n+ raise cfgv.ValidationError(str(e))\n+\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n", "issue": "ParserError exception raised for invalid configuration\n### search you tried in the issue tracker\r\n\r\nParserError and unicode\r\n\r\n### describe your issue\r\n\r\nI executed `pre-commit autoupdate` with an invalid configuration file (the second `- repo` is indented incorrectly) and got this error message:\r\n\r\n````\r\n$ pre-commit autoupdate\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\nCheck the log at /home/carsten/.cache/pre-commit/pre-commit.log\r\n````\r\n\r\nThis is an expected error and I would expect an error message like `Your configuration file \"...\" is wrongly formatted at <pos>. Please review the format of the content.'.\r\n\r\nThank you,\r\nCarsten\r\n\r\n### pre-commit --version\r\n\r\npre-commit 2.21.0\r\n\r\n### .pre-commit-config.yaml\r\n\r\n```yaml\r\nrepos:\r\n - repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v4.4.0\r\n hooks:\r\n - id: check-executables-have-shebangs\r\n\r\n- repo: https://github.com/shellcheck-py/shellcheck-py\r\n rev: v0.9.0.2\r\n hooks:\r\n - id: shellcheck\r\n```\r\n\r\n\r\n### ~/.cache/pre-commit/pre-commit.log (if present)\r\n\r\n### version information\r\n\r\n```\r\npre-commit version: 2.21.0\r\ngit --version: git version 2.35.3\r\nsys.version:\r\n 3.10.8 (main, Oct 28 2022, 17:28:32) [GCC]\r\nsys.executable: /home/carsten/virtualenv/bin/python3.10\r\nos.name: posix\r\nsys.platform: linux\r\n```\r\n\r\n### error information\r\n\r\n```\r\nAn unexpected error has occurred: ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/error_handler.py\", line 73, in error_handler\r\n yield\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/main.py\", line 355, in main\r\n return autoupdate(\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/autoupdate.py\", line 154, in autoupdate\r\n migrate_config(config_file, quiet=True)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 47, in migrate_config\r\n contents = _migrate_map(contents)\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/pre_commit/commands/migrate_config.py\", line 16, in _migrate_map\r\n if isinstance(yaml_load(contents), list):\r\n File \"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/__init__.py\", line 81, in load\r\n return loader.get_single_data()\r\n File 
\"/home/carsten/virtualenv/lib64/python3.10/site-packages/yaml/constructor.py\", line 49, in get_single_data\r\n node = self.get_single_node()\r\n File \"yaml/_yaml.pyx\", line 673, in yaml._yaml.CParser.get_single_node\r\n File \"yaml/_yaml.pyx\", line 687, in yaml._yaml.CParser._compose_document\r\n File \"yaml/_yaml.pyx\", line 731, in yaml._yaml.CParser._compose_node\r\n File \"yaml/_yaml.pyx\", line 847, in yaml._yaml.CParser._compose_mapping_node\r\n File \"yaml/_yaml.pyx\", line 860, in yaml._yaml.CParser._parse_next_event\r\nyaml.parser.ParserError: while parsing a block mapping\r\n in \"<unicode string>\", line 1, column 1\r\ndid not find expected key\r\n in \"<unicode string>\", line 7, column 1\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nimport textwrap\n\nimport yaml\n\nfrom pre_commit.yaml import yaml_load\n\n\ndef _is_header_line(line: str) -> bool:\n return line.startswith(('#', '---')) or not line.strip()\n\n\ndef _migrate_map(contents: str) -> str:\n if isinstance(yaml_load(contents), list):\n # Find the first non-header line\n lines = contents.splitlines(True)\n i = 0\n # Only loop on non empty configuration file\n while i < len(lines) and _is_header_line(lines[i]):\n i += 1\n\n header = ''.join(lines[:i])\n rest = ''.join(lines[i:])\n\n # If they are using the \"default\" flow style of yaml, this operation\n # will yield a valid configuration\n try:\n trial_contents = f'{header}repos:\\n{rest}'\n yaml_load(trial_contents)\n contents = trial_contents\n except yaml.YAMLError:\n contents = f'{header}repos:\\n{textwrap.indent(rest, \" \" * 4)}'\n\n return contents\n\n\ndef _migrate_sha_to_rev(contents: str) -> str:\n return re.sub(r'(\\n\\s+)sha:', r'\\1rev:', contents)\n\n\ndef migrate_config(config_file: str, quiet: bool = False) -> int:\n with open(config_file) as f:\n orig_contents = contents = f.read()\n\n contents = _migrate_map(contents)\n contents = _migrate_sha_to_rev(contents)\n\n if contents != orig_contents:\n with open(config_file, 'w') as f:\n f.write(contents)\n\n print('Configuration has been migrated.')\n elif not quiet:\n print('Configuration is already migrated.')\n return 0\n", "path": "pre_commit/commands/migrate_config.py"}]}
| 1,982 | 199 |
gh_patches_debug_22028
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-910
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Master pod should not be created when docker image failed to push
Docker image failed to push:
```
===== Pushing Docker Image =====
{'status': 'The push refers to repository [reg.docker.alibaba-inc.com/yuan.tang/elasticdl]'}
{'status': 'Preparing', 'progressDetail': {}, 'id': 'a29e21fbecd7'}
...
{'status': 'Waiting', 'progressDetail': {}, 'id': '2ee61facf36a'}
{'status': 'Waiting', 'progressDetail': {}, 'id': '05ea944dce26'}
{'errorDetail': {'message': 'denied: requested access to the resource is denied'}, 'error': 'denied: requested access to the resource is denied'}
```
But the master pod is created. We should not start the master pod in this case, since it cannot find the image anyway. Otherwise, users would have to delete the pod themselves before submitting a new elasticdl job.
```
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 1m default-scheduler Successfully assigned elasticdl-test-mnist-terry-master to ckas389w00k
Normal Pulling 1m kubelet, ckas389w00k pulling image "reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f"
Warning Failed 1m kubelet, ckas389w00k Failed to pull image "reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f": [rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown, rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown]
Warning Failed 1m kubelet, ckas389w00k Error: ErrImagePull
Warning MissingClusterDNS 1m (x12 over 1m) kubelet, ckas389w00k pod: "elasticdl-test-mnist-terry-master_kubemaker(c14c88d4-a4c0-11e9-bf35-02420ba6e969)". kubelet does not have ClusterDNS IP configured and cannot create Pod using "ClusterFirst" policy. Falling back to "Default" policy.
Normal BackOff 1m (x5 over 1m) kubelet, ckas389w00k Back-off pulling image "reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f"
Warning Failed 1m (x5 over 1m) kubelet, ckas389w00k Error: ImagePullBackOff
```
</issue>
<code>
[start of elasticdl/python/elasticdl/image_builder.py]
1 import os
2 import shutil
3 import tempfile
4 import uuid
5 from urllib.parse import urlparse
6
7 import docker
8
9
10 def build_and_push_docker_image(
11 model_zoo,
12 docker_image_prefix,
13 base_image="",
14 extra_pypi="",
15 cluster_spec="",
16 ):
17 """Build and push a Docker image containing ElasticDL and the model
18 zoo. The parameter model_zoo could be a local directory or a URL.
19 In the latter case, we do git clone.
20
21 The basename of the Docker image is auto-generated and is globally
22 unique. The full name is docker_image_prefix + "/" + basename.
23
24 The fullname of the Docker image is docker_image_prefix + "/" +
25 basename. Unless prefix is None or "", _push_docker_image is called
26 after _build_docker_image.
27
28 Returns the full Docker image name. So the caller can docker rmi
29 fullname later.
30
31 """
32 with tempfile.TemporaryDirectory() as ctx_dir:
33 # Copy ElasticDL Python source tree into the context directory.
34 elasticdl = _find_elasticdl_root()
35 shutil.copytree(
36 elasticdl, os.path.join(ctx_dir, os.path.basename(elasticdl))
37 )
38
39 # Copy model zoo source tree into the context directory.
40 shutil.copytree(
41 model_zoo, os.path.join(ctx_dir, os.path.basename(model_zoo))
42 )
43
44 # Copy cluster specification file into the context directory.
45 if cluster_spec:
46 shutil.copy(
47 cluster_spec,
48 os.path.join(ctx_dir, os.path.basename(cluster_spec)),
49 )
50
51 # Create the Dockerfile.
52 with tempfile.NamedTemporaryFile(mode="w+", delete=False) as df:
53 df.write(
54 _create_dockerfile(
55 os.path.basename(elasticdl),
56 os.path.basename(model_zoo),
57 os.path.basename(cluster_spec),
58 base_image,
59 extra_pypi,
60 )
61 )
62
63 image_name = _generate_unique_image_name(docker_image_prefix)
64 client = docker.APIClient(base_url="unix://var/run/docker.sock")
65 _build_docker_image(client, ctx_dir, df.name, image_name)
66
67 if docker_image_prefix:
68 _push_docker_image(client, image_name)
69
70 return image_name
71
72
73 def _find_elasticdl_root():
74 return os.path.abspath(
75 os.path.join(os.path.dirname(__file__), "../../../")
76 )
77
78
79 def _create_dockerfile(
80 elasticdl, model_zoo, cluster_spec="", base_image="", extra_pypi_index=""
81 ):
82 LOCAL_ZOO = """
83 FROM {BASE_IMAGE} as base
84 ENV PYTHONPATH=/:/model_zoo
85 COPY {ELASTIC_DL}/elasticdl /elasticdl
86 RUN pip install -r elasticdl/requirements.txt \
87 --extra-index-url="${EXTRA_PYPI_INDEX}"
88 RUN make -f elasticdl/Makefile
89 # TODO: Need to restructure examples directory to make it conform to model_zoo
90 # convention
91 COPY {MODEL_ZOO} /model_zoo/{MODEL_ZOO}
92 ARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt
93 RUN if [ -f $REQS ]; then \
94 pip install -r $REQS --extra-index-url="${EXTRA_PYPI_INDEX}"; \
95 fi
96 """
97 REMOTE_ZOO = """
98 FROM {BASE_IMAGE} as base
99 ENV PYTHONPATH=/:/model_zoo
100 COPY {ELASTIC_DL}/elasticdl /elasticdl
101 RUN pip install -r elasticdl/requirements.txt \
102 --extra-index-url="${EXTRA_PYPI_INDEX}"
103 RUN make -f elasticdl/Makefile
104 RUN apt-get update && apt-get install -y git
105 RUN git clone --recursive {MODEL_ZOO} /model_zoo
106 ARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt
107 RUN if [ -f $REQS ]; then \
108 pip install -r $REQS --extra-index-url="${EXTRA_PYPI_INDEX}"; \
109 fi
110 """
111 pr = urlparse(model_zoo)
112 if not pr.path:
113 raise RuntimeError("model_zoo {} has no path".format(model_zoo))
114 if pr.scheme in ["file", ""]:
115 tmpl = LOCAL_ZOO
116 model_zoo = pr.path # Remove the "file://" prefix if any.
117 else:
118 tmpl = REMOTE_ZOO
119
120 if cluster_spec:
121 tmpl = """
122 %s
123 COPY %s /cluster_spec/%s
124 """ % (
125 tmpl,
126 cluster_spec,
127 cluster_spec,
128 )
129
130 return tmpl.format(
131 BASE_IMAGE=base_image
132 if base_image
133 else "tensorflow/tensorflow:2.0.0b1-py3",
134 ELASTIC_DL=elasticdl,
135 MODEL_ZOO=model_zoo,
136 EXTRA_PYPI_INDEX=extra_pypi_index,
137 )
138
139
140 def _generate_unique_image_name(prefix):
141 return os.path.join(
142 prefix if prefix else "", "elasticdl:" + uuid.uuid4().hex
143 )
144
145
146 def _build_docker_image(client, ctx_dir, dockerfile, image_name):
147 print("===== Building Docker Image =====")
148 for line in client.build(
149 dockerfile=dockerfile,
150 path=ctx_dir,
151 rm=True,
152 tag=image_name,
153 decode=True,
154 ):
155 error = line.get("error", None)
156 if error:
157 raise RuntimeError("Docker image build: " + error)
158 text = line.get("stream", None)
159 if text:
160 print(text)
161
162
163 def _push_docker_image(client, image_name):
164 print("===== Pushing Docker Image =====")
165 for line in client.push(image_name, stream=True, decode=True):
166 print(line)
167
[end of elasticdl/python/elasticdl/image_builder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticdl/python/elasticdl/image_builder.py b/elasticdl/python/elasticdl/image_builder.py
--- a/elasticdl/python/elasticdl/image_builder.py
+++ b/elasticdl/python/elasticdl/image_builder.py
@@ -143,6 +143,15 @@
)
+def _print_docker_progress(line):
+ error = line.get("error", None)
+ if error:
+ raise RuntimeError("Docker image build: " + error)
+ text = line.get("stream", None)
+ if text:
+ print(text)
+
+
def _build_docker_image(client, ctx_dir, dockerfile, image_name):
print("===== Building Docker Image =====")
for line in client.build(
@@ -152,15 +161,10 @@
tag=image_name,
decode=True,
):
- error = line.get("error", None)
- if error:
- raise RuntimeError("Docker image build: " + error)
- text = line.get("stream", None)
- if text:
- print(text)
+ _print_docker_progress(line)
def _push_docker_image(client, image_name):
print("===== Pushing Docker Image =====")
for line in client.push(image_name, stream=True, decode=True):
- print(line)
+ _print_docker_progress(line)
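The core of this fix is that `docker.APIClient.push()` reports failures as `error` entries in its streamed status dictionaries rather than by raising, so a caller that merely prints each line will continue and start the master pod anyway. A trimmed-down sketch of the check, reusing the same `docker` SDK call as the file above (the helper name is illustrative):

```python
import docker


def push_or_raise(client: docker.APIClient, image_name: str) -> None:
    """Stream push progress and abort on the first reported error."""
    print("===== Pushing Docker Image =====")
    for line in client.push(image_name, stream=True, decode=True):
        if line.get("error"):
            # e.g. "denied: requested access to the resource is denied"
            raise RuntimeError("Docker image push: " + line["error"])
        if line.get("status"):
            print(line["status"])
```

With a check like this, the client fails before any Kubernetes objects are created instead of leaving behind a master pod stuck in `ErrImagePull`.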
|
{"golden_diff": "diff --git a/elasticdl/python/elasticdl/image_builder.py b/elasticdl/python/elasticdl/image_builder.py\n--- a/elasticdl/python/elasticdl/image_builder.py\n+++ b/elasticdl/python/elasticdl/image_builder.py\n@@ -143,6 +143,15 @@\n )\n \n \n+def _print_docker_progress(line):\n+ error = line.get(\"error\", None)\n+ if error:\n+ raise RuntimeError(\"Docker image build: \" + error)\n+ text = line.get(\"stream\", None)\n+ if text:\n+ print(text)\n+\n+\n def _build_docker_image(client, ctx_dir, dockerfile, image_name):\n print(\"===== Building Docker Image =====\")\n for line in client.build(\n@@ -152,15 +161,10 @@\n tag=image_name,\n decode=True,\n ):\n- error = line.get(\"error\", None)\n- if error:\n- raise RuntimeError(\"Docker image build: \" + error)\n- text = line.get(\"stream\", None)\n- if text:\n- print(text)\n+ _print_docker_progress(line)\n \n \n def _push_docker_image(client, image_name):\n print(\"===== Pushing Docker Image =====\")\n for line in client.push(image_name, stream=True, decode=True):\n- print(line)\n+ _print_docker_progress(line)\n", "issue": "Master pod should not be created when docker image failed to push\nDocker image failed to push:\r\n```\r\n===== Pushing Docker Image =====\r\n{'status': 'The push refers to repository [reg.docker.alibaba-inc.com/yuan.tang/elasticdl]'}\r\n{'status': 'Preparing', 'progressDetail': {}, 'id': 'a29e21fbecd7'}\r\n...\r\n{'status': 'Waiting', 'progressDetail': {}, 'id': '2ee61facf36a'}\r\n{'status': 'Waiting', 'progressDetail': {}, 'id': '05ea944dce26'}\r\n{'errorDetail': {'message': 'denied: requested access to the resource is denied'}, 'error': 'denied: requested access to the resource is denied'}\r\n```\r\nBut the master pod is created. We should not start the master pod in this case since it cannot find the image anyways. Otherwise users would have to delete the pod themselves before submitting a new elasticdl job.\r\n```\r\nEvents:\r\n Type Reason Age From Message\r\n ---- ------ ---- ---- -------\r\n Normal Scheduled 1m default-scheduler Successfully assigned elasticdl-test-mnist-terry-master to ckas389w00k\r\n Normal Pulling 1m kubelet, ckas389w00k pulling image \"reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f\"\r\n Warning Failed 1m kubelet, ckas389w00k Failed to pull image \"reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f\": [rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown, rpc error: code = Unknown desc = Error response from daemon: manifest unknown: manifest unknown]\r\n Warning Failed 1m kubelet, ckas389w00k Error: ErrImagePull\r\n Warning MissingClusterDNS 1m (x12 over 1m) kubelet, ckas389w00k pod: \"elasticdl-test-mnist-terry-master_kubemaker(c14c88d4-a4c0-11e9-bf35-02420ba6e969)\". kubelet does not have ClusterDNS IP configured and cannot create Pod using \"ClusterFirst\" policy. 
Falling back to \"Default\" policy.\r\n Normal BackOff 1m (x5 over 1m) kubelet, ckas389w00k Back-off pulling image \"reg.docker.alibaba-inc.com/yuan.tang/elasticdl:01baa62fcc9d4d8e89a5fc8c735fcb0f\"\r\n Warning Failed 1m (x5 over 1m) kubelet, ckas389w00k Error: ImagePullBackOff\r\n```\n", "before_files": [{"content": "import os\nimport shutil\nimport tempfile\nimport uuid\nfrom urllib.parse import urlparse\n\nimport docker\n\n\ndef build_and_push_docker_image(\n model_zoo,\n docker_image_prefix,\n base_image=\"\",\n extra_pypi=\"\",\n cluster_spec=\"\",\n):\n \"\"\"Build and push a Docker image containing ElasticDL and the model\nzoo. The parameter model_zoo could be a local directory or an URL.\nIn the later case, we do git clone.\n\n The basename of the Docker image is auto-generated and is globally\nunique. The full name is docker_image_prefix + \"/\" + basename.\n\n The fullname of the Docker image is docker_image_prefix + \"/\" +\nbasename. Unless prefix is None or \"\", _push_docker_image is called\nafter _build_docker_image.\n\n Returns the full Docker image name. So the caller can docker rmi\n fullname later.\n\n \"\"\"\n with tempfile.TemporaryDirectory() as ctx_dir:\n # Copy ElasticDL Python source tree into the context directory.\n elasticdl = _find_elasticdl_root()\n shutil.copytree(\n elasticdl, os.path.join(ctx_dir, os.path.basename(elasticdl))\n )\n\n # Copy model zoo source tree into the context directory.\n shutil.copytree(\n model_zoo, os.path.join(ctx_dir, os.path.basename(model_zoo))\n )\n\n # Copy cluster specification file into the context directory.\n if cluster_spec:\n shutil.copy(\n cluster_spec,\n os.path.join(ctx_dir, os.path.basename(cluster_spec)),\n )\n\n # Create the Dockerfile.\n with tempfile.NamedTemporaryFile(mode=\"w+\", delete=False) as df:\n df.write(\n _create_dockerfile(\n os.path.basename(elasticdl),\n os.path.basename(model_zoo),\n os.path.basename(cluster_spec),\n base_image,\n extra_pypi,\n )\n )\n\n image_name = _generate_unique_image_name(docker_image_prefix)\n client = docker.APIClient(base_url=\"unix://var/run/docker.sock\")\n _build_docker_image(client, ctx_dir, df.name, image_name)\n\n if docker_image_prefix:\n _push_docker_image(client, image_name)\n\n return image_name\n\n\ndef _find_elasticdl_root():\n return os.path.abspath(\n os.path.join(os.path.dirname(__file__), \"../../../\")\n )\n\n\ndef _create_dockerfile(\n elasticdl, model_zoo, cluster_spec=\"\", base_image=\"\", extra_pypi_index=\"\"\n):\n LOCAL_ZOO = \"\"\"\nFROM {BASE_IMAGE} as base\nENV PYTHONPATH=/:/model_zoo\nCOPY {ELASTIC_DL}/elasticdl /elasticdl\nRUN pip install -r elasticdl/requirements.txt \\\n --extra-index-url=\"${EXTRA_PYPI_INDEX}\"\nRUN make -f elasticdl/Makefile\n# TODO: Need to restructure examples directory to make it conform to model_zoo\n# convention\nCOPY {MODEL_ZOO} /model_zoo/{MODEL_ZOO}\nARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt\nRUN if [ -f $REQS ]; then \\\n pip install -r $REQS --extra-index-url=\"${EXTRA_PYPI_INDEX}\"; \\\n fi\n\"\"\"\n REMOTE_ZOO = \"\"\"\nFROM {BASE_IMAGE} as base\nENV PYTHONPATH=/:/model_zoo\nCOPY {ELASTIC_DL}/elasticdl /elasticdl\nRUN pip install -r elasticdl/requirements.txt \\\n --extra-index-url=\"${EXTRA_PYPI_INDEX}\"\nRUN make -f elasticdl/Makefile\nRUN apt-get update && apt-get install -y git\nRUN git clone --recursive {MODEL_ZOO} /model_zoo\nARG REQS=/model_zoo/{MODEL_ZOO}/requirements.txt\nRUN if [ -f $REQS ]; then \\\n pip install -r $REQS --extra-index-url=\"${EXTRA_PYPI_INDEX}\"; \\\n fi\n\"\"\"\n pr = 
urlparse(model_zoo)\n if not pr.path:\n raise RuntimeError(\"model_zoo {} has no path\".format(model_zoo))\n if pr.scheme in [\"file\", \"\"]:\n tmpl = LOCAL_ZOO\n model_zoo = pr.path # Remove the \"file://\" prefix if any.\n else:\n tmpl = REMOTE_ZOO\n\n if cluster_spec:\n tmpl = \"\"\"\n%s\nCOPY %s /cluster_spec/%s\n\"\"\" % (\n tmpl,\n cluster_spec,\n cluster_spec,\n )\n\n return tmpl.format(\n BASE_IMAGE=base_image\n if base_image\n else \"tensorflow/tensorflow:2.0.0b1-py3\",\n ELASTIC_DL=elasticdl,\n MODEL_ZOO=model_zoo,\n EXTRA_PYPI_INDEX=extra_pypi_index,\n )\n\n\ndef _generate_unique_image_name(prefix):\n return os.path.join(\n prefix if prefix else \"\", \"elasticdl:\" + uuid.uuid4().hex\n )\n\n\ndef _build_docker_image(client, ctx_dir, dockerfile, image_name):\n print(\"===== Building Docker Image =====\")\n for line in client.build(\n dockerfile=dockerfile,\n path=ctx_dir,\n rm=True,\n tag=image_name,\n decode=True,\n ):\n error = line.get(\"error\", None)\n if error:\n raise RuntimeError(\"Docker image build: \" + error)\n text = line.get(\"stream\", None)\n if text:\n print(text)\n\n\ndef _push_docker_image(client, image_name):\n print(\"===== Pushing Docker Image =====\")\n for line in client.push(image_name, stream=True, decode=True):\n print(line)\n", "path": "elasticdl/python/elasticdl/image_builder.py"}]}
| 2,844 | 303 |
gh_patches_debug_11999
|
rasdani/github-patches
|
git_diff
|
chainer__chainer-249
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add type check to LeakyReLU function
Related to #123
</issue>
<code>
[start of chainer/functions/leaky_relu.py]
1 from chainer import cuda
2 from chainer import function
3
4
5 def _kern():
6 return cuda.elementwise(
7 'float* y, const float* cond, const float* x, float slope',
8 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')
9
10
11 class LeakyReLU(function.Function):
12
13 """Leaky rectifier unit."""
14
15 def __init__(self, slope=0.2):
16 self.slope = slope
17
18 def forward_cpu(self, x):
19 y = x[0].copy()
20 y[x[0] < 0] *= self.slope
21 return y,
22
23 def forward_gpu(self, x):
24 y = cuda.empty_like(x[0])
25 _kern()(y, x[0], x[0], self.slope)
26 return y,
27
28 def backward_cpu(self, x, gy):
29 gx = gy[0].copy()
30 gx[x[0] < 0] *= self.slope
31 return gx,
32
33 def backward_gpu(self, x, gy):
34 gx = cuda.empty_like(x[0])
35 _kern()(gx, x[0], gy[0], self.slope)
36 return gx,
37
38
39 def leaky_relu(x, slope=0.2):
40 """Leaky Rectified Linear Unit function.
41
42 This function is expressed as :math:`f(x) = \max(x, ax)`, where :math:`a`
43 is a configurable slope value.
44
45 Args:
46 x (~chainer.Variable): Input variable.
47 slope (float): Slope value :math:`a`.
48
49 Returns:
50 ~chainer.Variable: Output variable.
51
52 """
53 return LeakyReLU(slope)(x)
54
[end of chainer/functions/leaky_relu.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py
--- a/chainer/functions/leaky_relu.py
+++ b/chainer/functions/leaky_relu.py
@@ -1,5 +1,8 @@
+import numpy
+
from chainer import cuda
from chainer import function
+from chainer.utils import type_check
def _kern():
@@ -15,6 +18,13 @@
def __init__(self, slope=0.2):
self.slope = slope
+ def check_type_forward(self, in_types):
+ type_check.expect(in_types.size() == 1)
+ x_type, = in_types
+ type_check.expect(
+ x_type.dtype == numpy.float32,
+ )
+
def forward_cpu(self, x):
y = x[0].copy()
y[x[0] < 0] *= self.slope
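For readers unfamiliar with Chainer's `type_check` utilities: the added `check_type_forward` asserts that exactly one input is given and that it is `float32` before the forward pass runs. The same contract written as a framework-free NumPy function looks roughly like this (the function name and error message are illustrative):

```python
import numpy as np


def leaky_relu(x, slope=0.2):
    """Elementwise max(x, slope * x), mirroring the CPU path above."""
    if x.dtype != np.float32:
        raise TypeError(f"expected a float32 array, got {x.dtype}")
    y = x.copy()
    y[x < 0] *= slope
    return y


print(leaky_relu(np.array([-1.0, 2.0], dtype=np.float32)))  # values: -0.2 and 2.0
```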
|
{"golden_diff": "diff --git a/chainer/functions/leaky_relu.py b/chainer/functions/leaky_relu.py\n--- a/chainer/functions/leaky_relu.py\n+++ b/chainer/functions/leaky_relu.py\n@@ -1,5 +1,8 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n+from chainer.utils import type_check\n \n \n def _kern():\n@@ -15,6 +18,13 @@\n def __init__(self, slope=0.2):\n self.slope = slope\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(in_types.size() == 1)\n+ x_type, = in_types\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ )\n+\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n", "issue": "Add type check to LeakyReLU function\nRelated to #123 \n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\n\n\ndef _kern():\n return cuda.elementwise(\n 'float* y, const float* cond, const float* x, float slope',\n 'y[i] = cond[i] >= 0 ? x[i] : slope * x[i]', 'lrelu')\n\n\nclass LeakyReLU(function.Function):\n\n \"\"\"Leaky rectifier unit.\"\"\"\n\n def __init__(self, slope=0.2):\n self.slope = slope\n\n def forward_cpu(self, x):\n y = x[0].copy()\n y[x[0] < 0] *= self.slope\n return y,\n\n def forward_gpu(self, x):\n y = cuda.empty_like(x[0])\n _kern()(y, x[0], x[0], self.slope)\n return y,\n\n def backward_cpu(self, x, gy):\n gx = gy[0].copy()\n gx[x[0] < 0] *= self.slope\n return gx,\n\n def backward_gpu(self, x, gy):\n gx = cuda.empty_like(x[0])\n _kern()(gx, x[0], gy[0], self.slope)\n return gx,\n\n\ndef leaky_relu(x, slope=0.2):\n \"\"\"Leaky Rectified Linear Unit function.\n\n This function is expressed as :math:`f(x) = \\max(x, ax)`, where :math:`a`\n is a configurable slope value.\n\n Args:\n x (~chainer.Variable): Input variable.\n slope (float): Slope value :math:`a`.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return LeakyReLU(slope)(x)\n", "path": "chainer/functions/leaky_relu.py"}]}
| 1,038 | 205 |
gh_patches_debug_1427
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-340
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move py.test config to tox.ini
Pytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 from setuptools import setup, find_packages
3 from setuptools.command.test import test as TestCommand
4 import os
5 import sys
6
7 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')
8
9
10 class PyTest(TestCommand):
11 user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
12
13 def initialize_options(self):
14 TestCommand.initialize_options(self)
15 self.pytest_args = []
16
17 def finalize_options(self):
18 TestCommand.finalize_options(self)
19 self.test_args = []
20 self.test_suite = True
21
22 def run_tests(self):
23 #import here, cause outside the eggs aren't loaded
24 import pytest
25 errno = pytest.main(self.pytest_args)
26 sys.exit(errno)
27
28
29 setup(
30 name='saleor',
31 author='Mirumee Software',
32 author_email='[email protected]',
33 description="A fork'n'play e-commerce in Django",
34 license='BSD',
35 version='0.1.0a0',
36 url='http://getsaleor.com/',
37 packages=find_packages(),
38 include_package_data=True,
39 install_requires=[
40 'Babel>=1.3,<1.4a0',
41 'BabelDjango>=0.2,<0.3a0',
42 'Django>=1.8',
43 'dj_database_url>=0.3.0',
44 'django-emailit>=0.2.2',
45 'django-materializecss-form==0.0.64',
46 'django-model-utils>=2.0.0,<2.1a0',
47 'django-mptt>=0.7.1',
48 'django-offsite-storage>=0.0.5',
49 'django-payments>=0.7.0,<0.8a0',
50 'django-prices>=0.4.0,<0.5a0',
51 'djangorestframework>=3.1,<3.2a0',
52 'django-selectable==0.8.0',
53 'django-versatileimagefield>=1.0.1,<1.1a0',
54 'fake-factory>=0.3.2',
55 'google-measurement-protocol>=0.1.2,<0.2a0',
56 'jsonfield>=1.0.3',
57 'Markdown>=2.4',
58 'prices>=0.5,<0.6a0',
59 'requests>=1.2.0',
60 'satchless>=1.1.2,<1.2a0',
61 'unidecode'
62 ],
63 extras_require={
64 'PaaS': [
65 'whitenoise==1.0.6',
66 'gunicorn==19.2.1',
67 'psycopg2==2.6']},
68 cmdclass={
69 'test': PyTest},
70 entry_points={
71 'console_scripts': ['saleor = saleor:manage']},
72 tests_require=[
73 'mock==1.0.1',
74 'purl>=0.4.1',
75 'pytest',
76 'pytest-django'])
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,7 @@
entry_points={
'console_scripts': ['saleor = saleor:manage']},
tests_require=[
- 'mock==1.0.1',
+ 'mock==1.3.0',
'purl>=0.4.1',
'pytest',
'pytest-django'])
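A rough sketch of the consolidation the issue asks for: pytest reads a `[pytest]` section from `tox.ini`, so the separate `pytest.ini` can be folded into it (for this project that section would carry something like `DJANGO_SETTINGS_MODULE = saleor.settings`, the value set in `setup.py` above). A one-off helper performing the merge might look as follows; the file names and the assumption that `pytest.ini` actually has a `[pytest]` section are illustrative, not taken from the repository:

```python
import configparser


def merge_pytest_ini_into_tox_ini(pytest_ini="pytest.ini", tox_ini="tox.ini"):
    """Copy every option from pytest.ini's [pytest] section into tox.ini,
    after which pytest.ini can simply be deleted."""
    src = configparser.ConfigParser()
    src.optionxform = str  # preserve option-name case, e.g. DJANGO_SETTINGS_MODULE
    src.read(pytest_ini)
    dst = configparser.ConfigParser()
    dst.optionxform = str
    dst.read(tox_ini)
    if not dst.has_section("pytest"):
        dst.add_section("pytest")
    for key, value in src.items("pytest"):
        dst.set("pytest", key, value)
    with open(tox_ini, "w") as handle:
        dst.write(handle)
```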
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -70,7 +70,7 @@\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n- 'mock==1.0.1',\n+ 'mock==1.3.0',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "issue": "Move py.test config to tox.ini\nPytest (like many tools) can read its configuration from `tox.ini`. There is no need to keep a separate `pytest.ini`.\n\n", "before_files": [{"content": "#! /usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\nimport os\nimport sys\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saleor.settings')\n\n\nclass PyTest(TestCommand):\n user_options = [('pytest-args=', 'a', \"Arguments to pass to py.test\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.pytest_args)\n sys.exit(errno)\n\n\nsetup(\n name='saleor',\n author='Mirumee Software',\n author_email='[email protected]',\n description=\"A fork'n'play e-commerce in Django\",\n license='BSD',\n version='0.1.0a0',\n url='http://getsaleor.com/',\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'Babel>=1.3,<1.4a0',\n 'BabelDjango>=0.2,<0.3a0',\n 'Django>=1.8',\n 'dj_database_url>=0.3.0',\n 'django-emailit>=0.2.2',\n 'django-materializecss-form==0.0.64',\n 'django-model-utils>=2.0.0,<2.1a0',\n 'django-mptt>=0.7.1',\n 'django-offsite-storage>=0.0.5',\n 'django-payments>=0.7.0,<0.8a0',\n 'django-prices>=0.4.0,<0.5a0',\n 'djangorestframework>=3.1,<3.2a0',\n 'django-selectable==0.8.0',\n 'django-versatileimagefield>=1.0.1,<1.1a0',\n 'fake-factory>=0.3.2',\n 'google-measurement-protocol>=0.1.2,<0.2a0',\n 'jsonfield>=1.0.3',\n 'Markdown>=2.4',\n 'prices>=0.5,<0.6a0',\n 'requests>=1.2.0',\n 'satchless>=1.1.2,<1.2a0',\n 'unidecode'\n ],\n extras_require={\n 'PaaS': [\n 'whitenoise==1.0.6',\n 'gunicorn==19.2.1',\n 'psycopg2==2.6']},\n cmdclass={\n 'test': PyTest},\n entry_points={\n 'console_scripts': ['saleor = saleor:manage']},\n tests_require=[\n 'mock==1.0.1',\n 'purl>=0.4.1',\n 'pytest',\n 'pytest-django'])\n", "path": "setup.py"}]}
| 1,377 | 97 |
gh_patches_debug_42340
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-5529
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.nicolive: plugin does not support stream metadata
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
latest
### Description
Current behaviour: Author folder is not created, nor is the title grabbed properly. All streams have titles on the site, so I'm not sure what's going wrong here.
Current output: -20230825200547.ts (right in the recordings folder, no author folder, no title)
Expected behaviour/output: Create Author folder and filename consisting of title-timestuff.ts
### Debug log
```text
streamlink --output "~/recordings/{author}/{title}-{time:%Y%m%d%H%M%S}.ts" https://live.nicovideo.jp/watch/co1992508 best
```
</issue>
<code>
[start of src/streamlink/plugins/nicolive.py]
1 """
2 $description Japanese live-streaming and video hosting social platform.
3 $url live.nicovideo.jp
4 $type live, vod
5 $account Required by some streams
6 $notes Timeshift is supported
7 """
8
9 import logging
10 import re
11 from threading import Event
12 from urllib.parse import urljoin
13
14 from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher
15 from streamlink.plugin.api import useragents, validate
16 from streamlink.plugin.api.websocket import WebsocketClient
17 from streamlink.stream.hls import HLSStream, HLSStreamReader
18 from streamlink.utils.parse import parse_json
19 from streamlink.utils.times import hours_minutes_seconds
20 from streamlink.utils.url import update_qsd
21
22
23 log = logging.getLogger(__name__)
24
25
26 class NicoLiveWsClient(WebsocketClient):
27 STREAM_OPENED_TIMEOUT = 6
28
29 ready: Event
30 opened: Event
31 hls_stream_url: str
32
33 def __init__(self, *args, **kwargs):
34 super().__init__(*args, **kwargs)
35 self.opened = Event()
36 self.ready = Event()
37
38 def on_open(self, wsapp):
39 super().on_open(wsapp)
40 self.send_playerversion()
41 self.send_getpermit()
42
43 def on_message(self, wsapp, data: str):
44 log.debug(f"Received: {data}")
45 message = parse_json(data)
46 msgtype = message.get("type")
47 msgdata = message.get("data", {})
48
49 if msgtype == "ping":
50 self.send_pong()
51
52 elif msgtype == "stream" and msgdata.get("protocol") == "hls" and msgdata.get("uri"):
53 self.hls_stream_url = msgdata.get("uri")
54 self.ready.set()
55 if self.opened.wait(self.STREAM_OPENED_TIMEOUT):
56 log.debug("Stream opened, keeping websocket connection alive")
57 else:
58 log.info("Closing websocket connection")
59 self.close()
60
61 elif msgtype == "disconnect":
62 reason = msgdata.get("reason", "Unknown reason")
63 log.info(f"Received disconnect message: {reason}")
64 self.close()
65
66 def send_playerversion(self):
67 self.send_json({
68 "type": "startWatching",
69 "data": {
70 "stream": {
71 "quality": "abr",
72 "protocol": "hls",
73 "latency": "high",
74 "chasePlay": False,
75 },
76 "room": {
77 "protocol": "webSocket",
78 "commentable": True,
79 },
80 "reconnect": False,
81 },
82 })
83
84 def send_getpermit(self):
85 self.send_json({
86 "type": "getAkashic",
87 "data": {
88 "chasePlay": False,
89 },
90 })
91
92 def send_pong(self):
93 self.send_json({"type": "pong"})
94 self.send_json({"type": "keepSeat"})
95
96
97 class NicoLiveHLSStreamReader(HLSStreamReader):
98 stream: "NicoLiveHLSStream"
99
100 def open(self):
101 self.stream.wsclient.opened.set()
102 super().open()
103
104 def close(self):
105 super().close()
106 self.stream.wsclient.close()
107
108
109 class NicoLiveHLSStream(HLSStream):
110 __reader__ = NicoLiveHLSStreamReader
111 wsclient: NicoLiveWsClient
112
113 def set_wsclient(self, wsclient: NicoLiveWsClient):
114 self.wsclient = wsclient
115
116
117 @pluginmatcher(re.compile(
118 r"https?://(?P<domain>live\d*\.nicovideo\.jp)/watch/(lv|co)\d+",
119 ))
120 @pluginargument(
121 "email",
122 sensitive=True,
123 argument_name="niconico-email",
124 metavar="EMAIL",
125 help="The email or phone number associated with your Niconico account",
126 )
127 @pluginargument(
128 "password",
129 sensitive=True,
130 argument_name="niconico-password",
131 metavar="PASSWORD",
132 help="The password of your Niconico account",
133 )
134 @pluginargument(
135 "user-session",
136 sensitive=True,
137 argument_name="niconico-user-session",
138 metavar="VALUE",
139 help="""
140 Value of the user-session token.
141
142 Can be used as an alternative to providing a password.
143 """,
144 )
145 @pluginargument(
146 "purge-credentials",
147 argument_name="niconico-purge-credentials",
148 action="store_true",
149 help="Purge cached Niconico credentials to initiate a new session and reauthenticate.",
150 )
151 @pluginargument(
152 "timeshift-offset",
153 type=hours_minutes_seconds,
154 argument_name="niconico-timeshift-offset",
155 metavar="[[XX:]XX:]XX | [XXh][XXm][XXs]",
156 help="""
157 Amount of time to skip from the beginning of a stream.
158
159 Default is 0.
160 """,
161 )
162 class NicoLive(Plugin):
163 STREAM_READY_TIMEOUT = 6
164 LOGIN_URL = "https://account.nicovideo.jp/login/redirector"
165 LOGIN_URL_PARAMS = {
166 "show_button_twitter": 1,
167 "show_button_facebook": 1,
168 "next_url": "/",
169 }
170
171 wsclient: NicoLiveWsClient
172
173 def _get_streams(self):
174 if self.get_option("purge_credentials"):
175 self.clear_cookies()
176 log.info("All credentials were successfully removed")
177
178 self.session.http.headers.update({
179 "User-Agent": useragents.CHROME,
180 })
181
182 self.niconico_web_login()
183
184 wss_api_url = self.get_wss_api_url()
185 if not wss_api_url:
186 log.error(
187 "Failed to get wss_api_url. "
188 + "Please check if the URL is correct, and make sure your account has access to the video.",
189 )
190 return
191
192 self.wsclient = NicoLiveWsClient(self.session, wss_api_url)
193 self.wsclient.start()
194
195 hls_stream_url = self._get_hls_stream_url()
196 if not hls_stream_url:
197 return
198
199 offset = self.get_option("timeshift-offset")
200 if offset and "timeshift" in wss_api_url:
201 hls_stream_url = update_qsd(hls_stream_url, {"start": offset})
202
203 for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():
204 stream.set_wsclient(self.wsclient)
205 yield quality, stream
206
207 def _get_hls_stream_url(self):
208 log.debug(f"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...")
209 if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():
210 log.error("Waiting for permit timed out.")
211 self.wsclient.close()
212 return
213
214 return self.wsclient.hls_stream_url
215
216 def get_wss_api_url(self):
217 try:
218 data = self.session.http.get(self.url, schema=validate.Schema(
219 validate.parse_html(),
220 validate.xml_find(".//script[@id='embedded-data'][@data-props]"),
221 validate.get("data-props"),
222 validate.parse_json(),
223 {"site": {
224 "relive": {
225 "webSocketUrl": validate.url(scheme="wss"),
226 },
227 validate.optional("frontendId"): int,
228 }},
229 validate.get("site"),
230 validate.union_get(("relive", "webSocketUrl"), "frontendId"),
231 ))
232 except PluginError:
233 return
234
235 wss_api_url, frontend_id = data
236 if frontend_id is not None:
237 wss_api_url = update_qsd(wss_api_url, {"frontend_id": frontend_id})
238
239 return wss_api_url
240
241 def niconico_web_login(self):
242 user_session = self.get_option("user-session")
243 email = self.get_option("email")
244 password = self.get_option("password")
245
246 if user_session is not None:
247 log.info("Logging in via provided user session cookie")
248 self.session.http.cookies.set(
249 "user_session",
250 user_session,
251 path="/",
252 domain="nicovideo.jp",
253 )
254 self.save_cookies()
255
256 elif self.session.http.cookies.get("user_session"):
257 log.info("Logging in via cached user session cookie")
258
259 elif email is not None and password is not None:
260 log.info("Logging in via provided email and password")
261 root = self.session.http.post(
262 self.LOGIN_URL,
263 data={"mail_tel": email, "password": password},
264 params=self.LOGIN_URL_PARAMS,
265 schema=validate.Schema(validate.parse_html()),
266 )
267
268 if self.session.http.cookies.get("user_session"):
269 log.info("Logged in.")
270 self.save_cookies()
271 return
272
273 input_with_value = {}
274 for elem in root.xpath(".//form[@action]//input"):
275 if elem.attrib.get("value"):
276 input_with_value[elem.attrib.get("name")] = elem.attrib.get("value")
277 elif elem.attrib.get("id") == "oneTimePw":
278 maxlength = int(elem.attrib.get("maxlength"))
279 oneTimePw = self.input_ask("Enter the 6 digit number included in email")
280 if len(oneTimePw) > maxlength:
281 log.error("invalid user input")
282 return
283 input_with_value[elem.attrib.get("name")] = oneTimePw
284 else:
285 log.debug(f"unknown input: {elem.attrib.get('name')}")
286
287 root = self.session.http.post(
288 urljoin("https://account.nicovideo.jp", root.xpath("string(.//form[@action]/@action)")),
289 data=input_with_value,
290 schema=validate.Schema(validate.parse_html()),
291 )
292 log.debug(f"Cookies: {self.session.http.cookies.get_dict()}")
293 if self.session.http.cookies.get("user_session") is None:
294 error = root.xpath("string(//div[@class='formError']/div/text())")
295 log.warning(f"Login failed: {error or 'unknown reason'}")
296 else:
297 log.info("Logged in.")
298 self.save_cookies()
299
300
301 __plugin__ = NicoLive
302
[end of src/streamlink/plugins/nicolive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/nicolive.py b/src/streamlink/plugins/nicolive.py
--- a/src/streamlink/plugins/nicolive.py
+++ b/src/streamlink/plugins/nicolive.py
@@ -2,6 +2,9 @@
$description Japanese live-streaming and video hosting social platform.
$url live.nicovideo.jp
$type live, vod
+$metadata id
+$metadata author
+$metadata title
$account Required by some streams
$notes Timeshift is supported
"""
@@ -11,7 +14,7 @@
from threading import Event
from urllib.parse import urljoin
-from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher
+from streamlink.plugin import Plugin, pluginargument, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.plugin.api.websocket import WebsocketClient
from streamlink.stream.hls import HLSStream, HLSStreamReader
@@ -181,7 +184,9 @@
self.niconico_web_login()
- wss_api_url = self.get_wss_api_url()
+ data = self.get_data()
+
+ wss_api_url = self.find_wss_api_url(data)
if not wss_api_url:
log.error(
"Failed to get wss_api_url. "
@@ -189,6 +194,8 @@
)
return
+ self.id, self.author, self.title = self.find_metadata(data)
+
self.wsclient = NicoLiveWsClient(self.session, wss_api_url)
self.wsclient.start()
@@ -213,26 +220,56 @@
return self.wsclient.hls_stream_url
- def get_wss_api_url(self):
- try:
- data = self.session.http.get(self.url, schema=validate.Schema(
- validate.parse_html(),
- validate.xml_find(".//script[@id='embedded-data'][@data-props]"),
- validate.get("data-props"),
- validate.parse_json(),
- {"site": {
+ def get_data(self):
+ return self.session.http.get(self.url, schema=validate.Schema(
+ validate.parse_html(),
+ validate.xml_find(".//script[@id='embedded-data'][@data-props]"),
+ validate.get("data-props"),
+ validate.parse_json(),
+ ))
+
+ @staticmethod
+ def find_metadata(data):
+ schema = validate.Schema(
+ {
+ "program": {
+ "nicoliveProgramId": str,
+ "supplier": {"name": str},
+ "title": str,
+ },
+ },
+ validate.get("program"),
+ validate.union_get(
+ "nicoliveProgramId",
+ ("supplier", "name"),
+ "title",
+ ),
+ )
+
+ return schema.validate(data)
+
+ @staticmethod
+ def find_wss_api_url(data):
+ schema = validate.Schema(
+ {
+ "site": {
"relive": {
- "webSocketUrl": validate.url(scheme="wss"),
+ "webSocketUrl": validate.any(
+ validate.url(scheme="wss"),
+ "",
+ ),
},
validate.optional("frontendId"): int,
- }},
- validate.get("site"),
- validate.union_get(("relive", "webSocketUrl"), "frontendId"),
- ))
- except PluginError:
+ },
+ },
+ validate.get("site"),
+ validate.union_get(("relive", "webSocketUrl"), "frontendId"),
+ )
+
+ wss_api_url, frontend_id = schema.validate(data)
+ if not wss_api_url:
return
- wss_api_url, frontend_id = data
if frontend_id is not None:
wss_api_url = update_qsd(wss_api_url, {"frontend_id": frontend_id})
|
{"golden_diff": "diff --git a/src/streamlink/plugins/nicolive.py b/src/streamlink/plugins/nicolive.py\n--- a/src/streamlink/plugins/nicolive.py\n+++ b/src/streamlink/plugins/nicolive.py\n@@ -2,6 +2,9 @@\n $description Japanese live-streaming and video hosting social platform.\n $url live.nicovideo.jp\n $type live, vod\n+$metadata id\n+$metadata author\n+$metadata title\n $account Required by some streams\n $notes Timeshift is supported\n \"\"\"\n@@ -11,7 +14,7 @@\n from threading import Event\n from urllib.parse import urljoin\n \n-from streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\n+from streamlink.plugin import Plugin, pluginargument, pluginmatcher\n from streamlink.plugin.api import useragents, validate\n from streamlink.plugin.api.websocket import WebsocketClient\n from streamlink.stream.hls import HLSStream, HLSStreamReader\n@@ -181,7 +184,9 @@\n \n self.niconico_web_login()\n \n- wss_api_url = self.get_wss_api_url()\n+ data = self.get_data()\n+\n+ wss_api_url = self.find_wss_api_url(data)\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. \"\n@@ -189,6 +194,8 @@\n )\n return\n \n+ self.id, self.author, self.title = self.find_metadata(data)\n+\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n \n@@ -213,26 +220,56 @@\n \n return self.wsclient.hls_stream_url\n \n- def get_wss_api_url(self):\n- try:\n- data = self.session.http.get(self.url, schema=validate.Schema(\n- validate.parse_html(),\n- validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n- validate.get(\"data-props\"),\n- validate.parse_json(),\n- {\"site\": {\n+ def get_data(self):\n+ return self.session.http.get(self.url, schema=validate.Schema(\n+ validate.parse_html(),\n+ validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n+ validate.get(\"data-props\"),\n+ validate.parse_json(),\n+ ))\n+\n+ @staticmethod\n+ def find_metadata(data):\n+ schema = validate.Schema(\n+ {\n+ \"program\": {\n+ \"nicoliveProgramId\": str,\n+ \"supplier\": {\"name\": str},\n+ \"title\": str,\n+ },\n+ },\n+ validate.get(\"program\"),\n+ validate.union_get(\n+ \"nicoliveProgramId\",\n+ (\"supplier\", \"name\"),\n+ \"title\",\n+ ),\n+ )\n+\n+ return schema.validate(data)\n+\n+ @staticmethod\n+ def find_wss_api_url(data):\n+ schema = validate.Schema(\n+ {\n+ \"site\": {\n \"relive\": {\n- \"webSocketUrl\": validate.url(scheme=\"wss\"),\n+ \"webSocketUrl\": validate.any(\n+ validate.url(scheme=\"wss\"),\n+ \"\",\n+ ),\n },\n validate.optional(\"frontendId\"): int,\n- }},\n- validate.get(\"site\"),\n- validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n- ))\n- except PluginError:\n+ },\n+ },\n+ validate.get(\"site\"),\n+ validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n+ )\n+\n+ wss_api_url, frontend_id = schema.validate(data)\n+ if not wss_api_url:\n return\n \n- wss_api_url, frontend_id = data\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n", "issue": "plugins.nicolive: plugin does not support stream metadata\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin 
issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nlatest\n\n### Description\n\nCurrent behaviour: Author folder is not created nor is the title grabbed properly. All streams have titles on the site so not sure whats going wrong here.\r\n\r\nCurrent output: -20230825200547.ts (right in the recordings folder, no author folder, no title)\r\n\r\nExpected behaviour/output: Create Author folder and filename consisting of title-timestuff.ts\r\n\r\n\n\n### Debug log\n\n```text\nstreamlink --output \"~/recordings/{author}/{title}-{time:%Y%m%d%H%M%S}.ts\" https://live.nicovideo.jp/watch/co1992508 best\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Japanese live-streaming and video hosting social platform.\n$url live.nicovideo.jp\n$type live, vod\n$account Required by some streams\n$notes Timeshift is supported\n\"\"\"\n\nimport logging\nimport re\nfrom threading import Event\nfrom urllib.parse import urljoin\n\nfrom streamlink.plugin import Plugin, PluginError, pluginargument, pluginmatcher\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.websocket import WebsocketClient\nfrom streamlink.stream.hls import HLSStream, HLSStreamReader\nfrom streamlink.utils.parse import parse_json\nfrom streamlink.utils.times import hours_minutes_seconds\nfrom streamlink.utils.url import update_qsd\n\n\nlog = logging.getLogger(__name__)\n\n\nclass NicoLiveWsClient(WebsocketClient):\n STREAM_OPENED_TIMEOUT = 6\n\n ready: Event\n opened: Event\n hls_stream_url: str\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.opened = Event()\n self.ready = Event()\n\n def on_open(self, wsapp):\n super().on_open(wsapp)\n self.send_playerversion()\n self.send_getpermit()\n\n def on_message(self, wsapp, data: str):\n log.debug(f\"Received: {data}\")\n message = parse_json(data)\n msgtype = message.get(\"type\")\n msgdata = message.get(\"data\", {})\n\n if msgtype == \"ping\":\n self.send_pong()\n\n elif msgtype == \"stream\" and msgdata.get(\"protocol\") == \"hls\" and msgdata.get(\"uri\"):\n self.hls_stream_url = msgdata.get(\"uri\")\n self.ready.set()\n if self.opened.wait(self.STREAM_OPENED_TIMEOUT):\n log.debug(\"Stream opened, keeping websocket connection alive\")\n else:\n log.info(\"Closing websocket connection\")\n self.close()\n\n elif msgtype == \"disconnect\":\n reason = msgdata.get(\"reason\", \"Unknown reason\")\n log.info(f\"Received disconnect message: {reason}\")\n self.close()\n\n def send_playerversion(self):\n self.send_json({\n \"type\": \"startWatching\",\n \"data\": {\n \"stream\": {\n \"quality\": \"abr\",\n \"protocol\": \"hls\",\n \"latency\": \"high\",\n \"chasePlay\": False,\n },\n \"room\": {\n \"protocol\": \"webSocket\",\n \"commentable\": True,\n },\n \"reconnect\": False,\n },\n })\n\n def send_getpermit(self):\n self.send_json({\n \"type\": \"getAkashic\",\n \"data\": {\n \"chasePlay\": False,\n },\n })\n\n def send_pong(self):\n self.send_json({\"type\": \"pong\"})\n self.send_json({\"type\": \"keepSeat\"})\n\n\nclass NicoLiveHLSStreamReader(HLSStreamReader):\n stream: \"NicoLiveHLSStream\"\n\n def open(self):\n self.stream.wsclient.opened.set()\n super().open()\n\n def close(self):\n super().close()\n self.stream.wsclient.close()\n\n\nclass NicoLiveHLSStream(HLSStream):\n __reader__ = NicoLiveHLSStreamReader\n wsclient: 
NicoLiveWsClient\n\n def set_wsclient(self, wsclient: NicoLiveWsClient):\n self.wsclient = wsclient\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?P<domain>live\\d*\\.nicovideo\\.jp)/watch/(lv|co)\\d+\",\n))\n@pluginargument(\n \"email\",\n sensitive=True,\n argument_name=\"niconico-email\",\n metavar=\"EMAIL\",\n help=\"The email or phone number associated with your Niconico account\",\n)\n@pluginargument(\n \"password\",\n sensitive=True,\n argument_name=\"niconico-password\",\n metavar=\"PASSWORD\",\n help=\"The password of your Niconico account\",\n)\n@pluginargument(\n \"user-session\",\n sensitive=True,\n argument_name=\"niconico-user-session\",\n metavar=\"VALUE\",\n help=\"\"\"\n Value of the user-session token.\n\n Can be used as an alternative to providing a password.\n \"\"\",\n)\n@pluginargument(\n \"purge-credentials\",\n argument_name=\"niconico-purge-credentials\",\n action=\"store_true\",\n help=\"Purge cached Niconico credentials to initiate a new session and reauthenticate.\",\n)\n@pluginargument(\n \"timeshift-offset\",\n type=hours_minutes_seconds,\n argument_name=\"niconico-timeshift-offset\",\n metavar=\"[[XX:]XX:]XX | [XXh][XXm][XXs]\",\n help=\"\"\"\n Amount of time to skip from the beginning of a stream.\n\n Default is 0.\n \"\"\",\n)\nclass NicoLive(Plugin):\n STREAM_READY_TIMEOUT = 6\n LOGIN_URL = \"https://account.nicovideo.jp/login/redirector\"\n LOGIN_URL_PARAMS = {\n \"show_button_twitter\": 1,\n \"show_button_facebook\": 1,\n \"next_url\": \"/\",\n }\n\n wsclient: NicoLiveWsClient\n\n def _get_streams(self):\n if self.get_option(\"purge_credentials\"):\n self.clear_cookies()\n log.info(\"All credentials were successfully removed\")\n\n self.session.http.headers.update({\n \"User-Agent\": useragents.CHROME,\n })\n\n self.niconico_web_login()\n\n wss_api_url = self.get_wss_api_url()\n if not wss_api_url:\n log.error(\n \"Failed to get wss_api_url. 
\"\n + \"Please check if the URL is correct, and make sure your account has access to the video.\",\n )\n return\n\n self.wsclient = NicoLiveWsClient(self.session, wss_api_url)\n self.wsclient.start()\n\n hls_stream_url = self._get_hls_stream_url()\n if not hls_stream_url:\n return\n\n offset = self.get_option(\"timeshift-offset\")\n if offset and \"timeshift\" in wss_api_url:\n hls_stream_url = update_qsd(hls_stream_url, {\"start\": offset})\n\n for quality, stream in NicoLiveHLSStream.parse_variant_playlist(self.session, hls_stream_url).items():\n stream.set_wsclient(self.wsclient)\n yield quality, stream\n\n def _get_hls_stream_url(self):\n log.debug(f\"Waiting for permit (for at most {self.STREAM_READY_TIMEOUT} seconds)...\")\n if not self.wsclient.ready.wait(self.STREAM_READY_TIMEOUT) or not self.wsclient.is_alive():\n log.error(\"Waiting for permit timed out.\")\n self.wsclient.close()\n return\n\n return self.wsclient.hls_stream_url\n\n def get_wss_api_url(self):\n try:\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//script[@id='embedded-data'][@data-props]\"),\n validate.get(\"data-props\"),\n validate.parse_json(),\n {\"site\": {\n \"relive\": {\n \"webSocketUrl\": validate.url(scheme=\"wss\"),\n },\n validate.optional(\"frontendId\"): int,\n }},\n validate.get(\"site\"),\n validate.union_get((\"relive\", \"webSocketUrl\"), \"frontendId\"),\n ))\n except PluginError:\n return\n\n wss_api_url, frontend_id = data\n if frontend_id is not None:\n wss_api_url = update_qsd(wss_api_url, {\"frontend_id\": frontend_id})\n\n return wss_api_url\n\n def niconico_web_login(self):\n user_session = self.get_option(\"user-session\")\n email = self.get_option(\"email\")\n password = self.get_option(\"password\")\n\n if user_session is not None:\n log.info(\"Logging in via provided user session cookie\")\n self.session.http.cookies.set(\n \"user_session\",\n user_session,\n path=\"/\",\n domain=\"nicovideo.jp\",\n )\n self.save_cookies()\n\n elif self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logging in via cached user session cookie\")\n\n elif email is not None and password is not None:\n log.info(\"Logging in via provided email and password\")\n root = self.session.http.post(\n self.LOGIN_URL,\n data={\"mail_tel\": email, \"password\": password},\n params=self.LOGIN_URL_PARAMS,\n schema=validate.Schema(validate.parse_html()),\n )\n\n if self.session.http.cookies.get(\"user_session\"):\n log.info(\"Logged in.\")\n self.save_cookies()\n return\n\n input_with_value = {}\n for elem in root.xpath(\".//form[@action]//input\"):\n if elem.attrib.get(\"value\"):\n input_with_value[elem.attrib.get(\"name\")] = elem.attrib.get(\"value\")\n elif elem.attrib.get(\"id\") == \"oneTimePw\":\n maxlength = int(elem.attrib.get(\"maxlength\"))\n oneTimePw = self.input_ask(\"Enter the 6 digit number included in email\")\n if len(oneTimePw) > maxlength:\n log.error(\"invalid user input\")\n return\n input_with_value[elem.attrib.get(\"name\")] = oneTimePw\n else:\n log.debug(f\"unknown input: {elem.attrib.get('name')}\")\n\n root = self.session.http.post(\n urljoin(\"https://account.nicovideo.jp\", root.xpath(\"string(.//form[@action]/@action)\")),\n data=input_with_value,\n schema=validate.Schema(validate.parse_html()),\n )\n log.debug(f\"Cookies: {self.session.http.cookies.get_dict()}\")\n if self.session.http.cookies.get(\"user_session\") is None:\n error = root.xpath(\"string(//div[@class='formError']/div/text())\")\n 
log.warning(f\"Login failed: {error or 'unknown reason'}\")\n else:\n log.info(\"Logged in.\")\n self.save_cookies()\n\n\n__plugin__ = NicoLive\n", "path": "src/streamlink/plugins/nicolive.py"}]}
| 3,801 | 854 |
gh_patches_debug_837
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-1088
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[RFC] CI for examples/visualization
It would be better if we could validate with CI that all of the visualization examples run without failures.
I guess https://github.com/computationalmodelling/nbval/ might be useful.
</issue>
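For context, nbval is a pytest plugin, so once it is installable as an extra the notebook check reduces to a single pytest invocation. The sketch below is illustrative only — the `examples/visualization` path and the choice of `--nbval-lax` (execute cells, fail on raised exceptions, skip output comparison) are assumptions, not taken from the repository:

```python
# Hypothetical CI entry point (not part of the repository): execute every
# notebook under the visualization examples with nbval and propagate
# pytest's exit code to the CI job.
import sys

import pytest

sys.exit(pytest.main(["--nbval-lax", "examples/visualization"]))
```

The same check can be run by hand as `pytest --nbval-lax examples/visualization` before wiring it into CI.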
<code>
[start of setup.py]
1 import os
2 import sys
3
4 import pkg_resources
5 from setuptools import find_packages
6 from setuptools import setup
7
8 from typing import Dict
9 from typing import List
10 from typing import Optional
11
12
13 def get_version() -> str:
14
15 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
16 with open(version_filepath) as f:
17 for line in f:
18 if line.startswith("__version__"):
19 return line.strip().split()[-1][1:-1]
20 assert False
21
22
23 def get_long_description() -> str:
24
25 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
26 with open(readme_filepath) as f:
27 return f.read()
28
29
30 def get_install_requires() -> List[str]:
31
32 return [
33 "alembic",
34 "cliff",
35 "cmaes>=0.3.2",
36 "colorlog",
37 "joblib",
38 "numpy",
39 "scipy!=1.4.0",
40 "sqlalchemy>=1.1.0",
41 "tqdm",
42 ]
43
44
45 def get_tests_require() -> List[str]:
46
47 return get_extras_require()["testing"]
48
49
50 def get_extras_require() -> Dict[str, List[str]]:
51
52 requirements = {
53 "checking": ["black", "hacking", "mypy",],
54 "codecov": ["codecov", "pytest-cov",],
55 "doctest": ["cma", "pandas", "plotly>=4.0.0", "scikit-learn>=0.19.0", "scikit-optimize",],
56 "document": ["sphinx", "sphinx_rtd_theme",],
57 "example": [
58 "catboost",
59 "chainer",
60 "lightgbm",
61 "mlflow",
62 "mpi4py",
63 "mxnet",
64 "pytorch-ignite",
65 "scikit-image",
66 "scikit-learn",
67 "torch",
68 "torchvision>=0.5.0",
69 "xgboost",
70 ]
71 + (["allennlp", "fastai<2"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
72 + (
73 [
74 "dask[dataframe]",
75 "dask-ml",
76 "keras",
77 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
78 # https://github.com/optuna/optuna/issues/997.
79 "pytorch-lightning<0.7.0",
80 "tensorflow>=2.0.0",
81 "tensorflow-datasets",
82 ]
83 if sys.version_info[:2] < (3, 8)
84 else []
85 ),
86 "testing": [
87 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
88 # https://github.com/optuna/optuna/issues/1000.
89 "bokeh<2.0.0",
90 "chainer>=5.0.0",
91 "cma",
92 "fanova",
93 "lightgbm",
94 "mock",
95 "mpi4py",
96 "mxnet",
97 "pandas",
98 "plotly>=4.0.0",
99 "pytest",
100 "pytorch-ignite",
101 "scikit-learn>=0.19.0",
102 "scikit-optimize",
103 "torch",
104 "torchvision>=0.5.0",
105 "xgboost",
106 ]
107 + (["fastai<2"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
108 + (
109 [
110 "keras",
111 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
112 # https://github.com/optuna/optuna/issues/997.
113 "pytorch-lightning<0.7.0",
114 "tensorflow",
115 "tensorflow-datasets",
116 ]
117 if sys.version_info[:2] < (3, 8)
118 else []
119 ),
120 }
121
122 return requirements
123
124
125 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
126
127 for pkg in pkgs:
128 try:
129 return pkg_resources.get_distribution(pkg)
130 except pkg_resources.DistributionNotFound:
131 pass
132 return None
133
134
135 pfnopt_pkg = find_any_distribution(["pfnopt"])
136 if pfnopt_pkg is not None:
137 msg = (
138 "We detected that PFNOpt is installed in your environment.\n"
139 "PFNOpt has been renamed Optuna. Please uninstall the old\n"
140 "PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`)."
141 )
142 print(msg)
143 exit(1)
144
145 setup(
146 name="optuna",
147 version=get_version(),
148 description="A hyperparameter optimization framework",
149 long_description=get_long_description(),
150 long_description_content_type="text/markdown",
151 author="Takuya Akiba",
152 author_email="[email protected]",
153 url="https://optuna.org/",
154 packages=find_packages(),
155 package_data={
156 "optuna": [
157 "storages/rdb/alembic.ini",
158 "storages/rdb/alembic/*.*",
159 "storages/rdb/alembic/versions/*.*",
160 ]
161 },
162 install_requires=get_install_requires(),
163 tests_require=get_tests_require(),
164 extras_require=get_extras_require(),
165 entry_points={"console_scripts": ["optuna = optuna.cli:main"]},
166 )
167
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,7 @@
"mlflow",
"mpi4py",
"mxnet",
+ "nbval",
"pytorch-ignite",
"scikit-image",
"scikit-learn",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,7 @@\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n+ \"nbval\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n", "issue": "[RFC] CI for examples/visualization\nIt'll be better if we can validate that all the visualization examples run without failures with CI.\r\n\r\nI guess https://github.com/computationalmodelling/nbval/ might be useful.\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.3.2\",\n \"colorlog\",\n \"joblib\",\n \"numpy\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n \"checking\": [\"black\", \"hacking\", \"mypy\",],\n \"codecov\": [\"codecov\", \"pytest-cov\",],\n \"doctest\": [\"cma\", \"pandas\", \"plotly>=4.0.0\", \"scikit-learn>=0.19.0\", \"scikit-optimize\",],\n \"document\": [\"sphinx\", \"sphinx_rtd_theme\",],\n \"example\": [\n \"catboost\",\n \"chainer\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pytorch-ignite\",\n \"scikit-image\",\n \"scikit-learn\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"allennlp\", \"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"dask[dataframe]\",\n \"dask-ml\",\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow>=2.0.0\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fanova\",\n \"lightgbm\",\n \"mock\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"pytorch-ignite\",\n \"scikit-learn>=0.19.0\",\n \"scikit-optimize\",\n \"torch\",\n \"torchvision>=0.5.0\",\n \"xgboost\",\n ]\n + ([\"fastai<2\"] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + (\n [\n \"keras\",\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n \"pytorch-lightning<0.7.0\",\n \"tensorflow\",\n \"tensorflow-datasets\",\n ]\n if sys.version_info[:2] < (3, 8)\n else []\n ),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution([\"pfnopt\"])\nif pfnopt_pkg is not None:\n msg = (\n \"We 
detected that PFNOpt is installed in your environment.\\n\"\n \"PFNOpt has been renamed Optuna. Please uninstall the old\\n\"\n \"PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).\"\n )\n print(msg)\n exit(1)\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(),\n package_data={\n \"optuna\": [\n \"storages/rdb/alembic.ini\",\n \"storages/rdb/alembic/*.*\",\n \"storages/rdb/alembic/versions/*.*\",\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\"console_scripts\": [\"optuna = optuna.cli:main\"]},\n)\n", "path": "setup.py"}]}
| 2,174 | 74 |
gh_patches_debug_44014
|
rasdani/github-patches
|
git_diff
|
pypi__warehouse-627
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable Read Only Transactions (via factory wrapper)
If a route isn't going to modify data, then we can use a read-only deferrable transaction, so that we don't need to take SIRead locks and can skip some of the overhead of a serializable transaction.
Closes #623
</issue>
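At the SQL level this comes down to one statement issued at the start of a request-scoped session. The snippet below is only a sketch of that idea — the engine URL and session wiring are placeholders, not Warehouse's code — showing the `READ ONLY DEFERRABLE` variant that lets PostgreSQL skip SIRead predicate locking:

```python
import sqlalchemy
from sqlalchemy.orm import sessionmaker

# Placeholder engine/session setup; the SET TRANSACTION statement is the
# point of this sketch.
engine = sqlalchemy.create_engine(
    "postgresql:///warehouse_example",  # placeholder URL
    isolation_level="SERIALIZABLE",
)
Session = sessionmaker(bind=engine)


def create_read_only_session():
    session = Session()
    # Downgrade this transaction before any query runs; it stays
    # serializable but becomes read only and deferrable.
    session.execute(sqlalchemy.text(
        "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE"
    ))
    return session
```

The accepted change in this record applies the same statement from the session factory, gated by a per-route `read_only=True` marker, so individual routes opt in explicitly.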
<code>
[start of warehouse/db.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import functools
14
15 import alembic.config
16 import sqlalchemy
17 import venusian
18 import zope.sqlalchemy
19
20 from sqlalchemy import event
21 from sqlalchemy.dialects.postgresql import UUID
22 from sqlalchemy.ext.declarative import declarative_base
23 from sqlalchemy.orm import sessionmaker
24
25 from warehouse.utils.attrs import make_repr
26
27
28 __all__ = ["includeme", "metadata", "ModelBase"]
29
30
31 class ModelBase:
32
33 def __repr__(self):
34 self.__repr__ = make_repr(*self.__table__.columns.keys(), _self=self)
35 return self.__repr__()
36
37
38 # The Global metadata object.
39 metadata = sqlalchemy.MetaData()
40
41
42 # Base class for models using declarative syntax
43 ModelBase = declarative_base(cls=ModelBase, metadata=metadata)
44
45
46 class Model(ModelBase):
47
48 __abstract__ = True
49
50 id = sqlalchemy.Column(
51 UUID(as_uuid=True),
52 primary_key=True,
53 server_default=sqlalchemy.text("gen_random_uuid()"),
54 )
55
56 # Create our session class here, this will stay stateless as we'll bind the
57 # engine to each new state we create instead of binding it to the session
58 # class.
59 Session = sessionmaker()
60
61
62 def listens_for(target, identifier, *args, **kwargs):
63 def deco(wrapped):
64 def callback(scanner, _name, wrapped):
65 wrapped = functools.partial(wrapped, scanner.config)
66 event.listen(target, identifier, wrapped, *args, **kwargs)
67
68 venusian.attach(wrapped, callback)
69
70 return wrapped
71 return deco
72
73
74 def _configure_alembic(config):
75 alembic_cfg = alembic.config.Config()
76 alembic_cfg.set_main_option("script_location", "warehouse:migrations")
77 alembic_cfg.set_main_option(
78 "url", config.registry.settings["database.url"],
79 )
80 return alembic_cfg
81
82
83 def _create_session(request):
84 # Create our session
85 session = Session(bind=request.registry["sqlalchemy.engine"])
86
87 # Register only this particular session with zope.sqlalchemy
88 zope.sqlalchemy.register(session, transaction_manager=request.tm)
89
90 # Return our session now that it's created and registered
91 return session
92
93
94 def includeme(config):
95 # Add a directive to get an alembic configuration.
96 config.add_directive("alembic_config", _configure_alembic)
97
98 # Create our SQLAlchemy Engine.
99 config.registry["sqlalchemy.engine"] = sqlalchemy.create_engine(
100 config.registry.settings["database.url"],
101 isolation_level="SERIALIZABLE",
102 )
103
104 # Register our request.db property
105 config.add_request_method(_create_session, name="db", reify=True)
106
[end of warehouse/db.py]
[start of warehouse/routes.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13
14 def includeme(config):
15 config.add_route("index", "/")
16
17 # Accounts
18 config.add_route(
19 "accounts.profile",
20 "/user/{username}/",
21 factory="warehouse.accounts.models:UserFactory",
22 traverse="/{username}",
23 )
24 config.add_route("accounts.login", "/account/login/")
25 config.add_route("accounts.logout", "/account/logout/")
26
27 # Packaging
28 config.add_route(
29 "packaging.project",
30 "/project/{name}/",
31 factory="warehouse.packaging.models:ProjectFactory",
32 traverse="/{name}",
33 )
34 config.add_route(
35 "packaging.release",
36 "/project/{name}/{version}/",
37 factory="warehouse.packaging.models:ProjectFactory",
38 traverse="/{name}/{version}",
39 )
40 config.add_route("packaging.file", "/packages/{path:.*}")
41
42 # Legacy URLs
43 config.add_route("legacy.api.simple.index", "/simple/")
44 config.add_route(
45 "legacy.api.simple.detail",
46 "/simple/{name}/",
47 factory="warehouse.packaging.models:ProjectFactory",
48 traverse="/{name}/",
49 )
50 config.add_route(
51 "legacy.api.json.project",
52 "/pypi/{name}/json",
53 factory="warehouse.packaging.models:ProjectFactory",
54 traverse="/{name}",
55 )
56 config.add_route(
57 "legacy.api.json.release",
58 "/pypi/{name}/{version}/json",
59 factory="warehouse.packaging.models:ProjectFactory",
60 traverse="/{name}/{version}",
61 )
62
63 # Legacy Action URLs
64 config.add_pypi_action_route("legacy.api.pypi.file_upload", "file_upload")
65 config.add_pypi_action_route("legacy.api.pypi.submit", "submit")
66 config.add_pypi_action_route(
67 "legacy.api.pypi.submit_pkg_info",
68 "submit_pkg_info",
69 )
70 config.add_pypi_action_route("legacy.api.pypi.doc_upload", "doc_upload")
71 config.add_pypi_action_route("legacy.api.pypi.doap", "doap")
72
73 # Legacy XMLRPC
74 config.add_xmlrpc_endpoint(
75 "pypi",
76 pattern="/pypi",
77 header="Content-Type:text/xml",
78 )
79
80 # Legacy Documentation
81 config.add_route("legacy.docs", config.registry.settings["docs.url"])
82
83 # Legacy Redirects
84 config.add_redirect("/pypi/{name}/", "/project/{name}/")
85 config.add_redirect(
86 "/pypi/{name}/{version}/",
87 "/project/{name}/{version}/",
88 )
89
[end of warehouse/routes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/warehouse/db.py b/warehouse/db.py
--- a/warehouse/db.py
+++ b/warehouse/db.py
@@ -28,6 +28,24 @@
__all__ = ["includeme", "metadata", "ModelBase"]
+# We'll add a basic predicate that won't do anything except allow marking a
+# route as read only (or not).
+class ReadOnlyPredicate:
+
+ def __init__(self, val, config):
+ self.val = val
+
+ def text(self):
+ return "read_only = {!r}".format(self.val)
+
+ phash = text
+
+ # This predicate doesn't actually participate in the route selection
+ # process, so we'll just always return True.
+ def __call__(self, info, request):
+ return True
+
+
class ModelBase:
def __repr__(self):
@@ -84,6 +102,16 @@
# Create our session
session = Session(bind=request.registry["sqlalchemy.engine"])
+ # Set our transaction to read only if the route has been marked as read
+ # only.
+ for predicate in request.matched_route.predicates:
+ if isinstance(predicate, ReadOnlyPredicate) and predicate.val:
+ session.execute(
+ """ SET TRANSACTION
+ ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE
+ """
+ )
+
# Register only this particular session with zope.sqlalchemy
zope.sqlalchemy.register(session, transaction_manager=request.tm)
@@ -103,3 +131,6 @@
# Register our request.db property
config.add_request_method(_create_session, name="db", reify=True)
+
+ # Add a route predicate to mark a route as read only.
+ config.add_route_predicate("read_only", ReadOnlyPredicate)
diff --git a/warehouse/routes.py b/warehouse/routes.py
--- a/warehouse/routes.py
+++ b/warehouse/routes.py
@@ -12,7 +12,7 @@
def includeme(config):
- config.add_route("index", "/")
+ config.add_route("index", "/", read_only=True)
# Accounts
config.add_route(
@@ -20,6 +20,7 @@
"/user/{username}/",
factory="warehouse.accounts.models:UserFactory",
traverse="/{username}",
+ read_only=True,
)
config.add_route("accounts.login", "/account/login/")
config.add_route("accounts.logout", "/account/logout/")
@@ -30,34 +31,39 @@
"/project/{name}/",
factory="warehouse.packaging.models:ProjectFactory",
traverse="/{name}",
+ read_only=True,
)
config.add_route(
"packaging.release",
"/project/{name}/{version}/",
factory="warehouse.packaging.models:ProjectFactory",
traverse="/{name}/{version}",
+ read_only=True,
)
- config.add_route("packaging.file", "/packages/{path:.*}")
+ config.add_route("packaging.file", "/packages/{path:.*}", read_only=True)
# Legacy URLs
- config.add_route("legacy.api.simple.index", "/simple/")
+ config.add_route("legacy.api.simple.index", "/simple/", read_only=True)
config.add_route(
"legacy.api.simple.detail",
"/simple/{name}/",
factory="warehouse.packaging.models:ProjectFactory",
traverse="/{name}/",
+ read_only=True,
)
config.add_route(
"legacy.api.json.project",
"/pypi/{name}/json",
factory="warehouse.packaging.models:ProjectFactory",
traverse="/{name}",
+ read_only=True,
)
config.add_route(
"legacy.api.json.release",
"/pypi/{name}/{version}/json",
factory="warehouse.packaging.models:ProjectFactory",
traverse="/{name}/{version}",
+ read_only=True,
)
# Legacy Action URLs
@@ -75,6 +81,7 @@
"pypi",
pattern="/pypi",
header="Content-Type:text/xml",
+ read_only=True,
)
# Legacy Documentation
|
{"golden_diff": "diff --git a/warehouse/db.py b/warehouse/db.py\n--- a/warehouse/db.py\n+++ b/warehouse/db.py\n@@ -28,6 +28,24 @@\n __all__ = [\"includeme\", \"metadata\", \"ModelBase\"]\n \n \n+# We'll add a basic predicate that won't do anything except allow marking a\n+# route as read only (or not).\n+class ReadOnlyPredicate:\n+\n+ def __init__(self, val, config):\n+ self.val = val\n+\n+ def text(self):\n+ return \"read_only = {!r}\".format(self.val)\n+\n+ phash = text\n+\n+ # This predicate doesn't actually participate in the route selection\n+ # process, so we'll just always return True.\n+ def __call__(self, info, request):\n+ return True\n+\n+\n class ModelBase:\n \n def __repr__(self):\n@@ -84,6 +102,16 @@\n # Create our session\n session = Session(bind=request.registry[\"sqlalchemy.engine\"])\n \n+ # Set our transaction to read only if the route has been marked as read\n+ # only.\n+ for predicate in request.matched_route.predicates:\n+ if isinstance(predicate, ReadOnlyPredicate) and predicate.val:\n+ session.execute(\n+ \"\"\" SET TRANSACTION\n+ ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE\n+ \"\"\"\n+ )\n+\n # Register only this particular session with zope.sqlalchemy\n zope.sqlalchemy.register(session, transaction_manager=request.tm)\n \n@@ -103,3 +131,6 @@\n \n # Register our request.db property\n config.add_request_method(_create_session, name=\"db\", reify=True)\n+\n+ # Add a route predicate to mark a route as read only.\n+ config.add_route_predicate(\"read_only\", ReadOnlyPredicate)\ndiff --git a/warehouse/routes.py b/warehouse/routes.py\n--- a/warehouse/routes.py\n+++ b/warehouse/routes.py\n@@ -12,7 +12,7 @@\n \n \n def includeme(config):\n- config.add_route(\"index\", \"/\")\n+ config.add_route(\"index\", \"/\", read_only=True)\n \n # Accounts\n config.add_route(\n@@ -20,6 +20,7 @@\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n+ read_only=True,\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n@@ -30,34 +31,39 @@\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n+ read_only=True,\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n+ read_only=True,\n )\n- config.add_route(\"packaging.file\", \"/packages/{path:.*}\")\n+ config.add_route(\"packaging.file\", \"/packages/{path:.*}\", read_only=True)\n \n # Legacy URLs\n- config.add_route(\"legacy.api.simple.index\", \"/simple/\")\n+ config.add_route(\"legacy.api.simple.index\", \"/simple/\", read_only=True)\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n+ read_only=True,\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n+ read_only=True,\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n+ read_only=True,\n )\n \n # Legacy Action URLs\n@@ -75,6 +81,7 @@\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n+ read_only=True,\n )\n \n # Legacy Documentation\n", "issue": "Enable Read Only Transactions (via factory wrapper)\nIf a route isn't going to 
modify data, then we can use a read only deferrable transaction so that we don't need to take SIRead locks and we skip some of the overhead of having a serializable transaction.\n\nCloses #623\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\n\nimport alembic.config\nimport sqlalchemy\nimport venusian\nimport zope.sqlalchemy\n\nfrom sqlalchemy import event\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nfrom warehouse.utils.attrs import make_repr\n\n\n__all__ = [\"includeme\", \"metadata\", \"ModelBase\"]\n\n\nclass ModelBase:\n\n def __repr__(self):\n self.__repr__ = make_repr(*self.__table__.columns.keys(), _self=self)\n return self.__repr__()\n\n\n# The Global metadata object.\nmetadata = sqlalchemy.MetaData()\n\n\n# Base class for models using declarative syntax\nModelBase = declarative_base(cls=ModelBase, metadata=metadata)\n\n\nclass Model(ModelBase):\n\n __abstract__ = True\n\n id = sqlalchemy.Column(\n UUID(as_uuid=True),\n primary_key=True,\n server_default=sqlalchemy.text(\"gen_random_uuid()\"),\n )\n\n# Create our session class here, this will stay stateless as we'll bind the\n# engine to each new state we create instead of binding it to the session\n# class.\nSession = sessionmaker()\n\n\ndef listens_for(target, identifier, *args, **kwargs):\n def deco(wrapped):\n def callback(scanner, _name, wrapped):\n wrapped = functools.partial(wrapped, scanner.config)\n event.listen(target, identifier, wrapped, *args, **kwargs)\n\n venusian.attach(wrapped, callback)\n\n return wrapped\n return deco\n\n\ndef _configure_alembic(config):\n alembic_cfg = alembic.config.Config()\n alembic_cfg.set_main_option(\"script_location\", \"warehouse:migrations\")\n alembic_cfg.set_main_option(\n \"url\", config.registry.settings[\"database.url\"],\n )\n return alembic_cfg\n\n\ndef _create_session(request):\n # Create our session\n session = Session(bind=request.registry[\"sqlalchemy.engine\"])\n\n # Register only this particular session with zope.sqlalchemy\n zope.sqlalchemy.register(session, transaction_manager=request.tm)\n\n # Return our session now that it's created and registered\n return session\n\n\ndef includeme(config):\n # Add a directive to get an alembic configuration.\n config.add_directive(\"alembic_config\", _configure_alembic)\n\n # Create our SQLAlchemy Engine.\n config.registry[\"sqlalchemy.engine\"] = sqlalchemy.create_engine(\n config.registry.settings[\"database.url\"],\n isolation_level=\"SERIALIZABLE\",\n )\n\n # Register our request.db property\n config.add_request_method(_create_session, name=\"db\", reify=True)\n", "path": "warehouse/db.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed 
to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef includeme(config):\n config.add_route(\"index\", \"/\")\n\n # Accounts\n config.add_route(\n \"accounts.profile\",\n \"/user/{username}/\",\n factory=\"warehouse.accounts.models:UserFactory\",\n traverse=\"/{username}\",\n )\n config.add_route(\"accounts.login\", \"/account/login/\")\n config.add_route(\"accounts.logout\", \"/account/logout/\")\n\n # Packaging\n config.add_route(\n \"packaging.project\",\n \"/project/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n )\n config.add_route(\n \"packaging.release\",\n \"/project/{name}/{version}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n )\n config.add_route(\"packaging.file\", \"/packages/{path:.*}\")\n\n # Legacy URLs\n config.add_route(\"legacy.api.simple.index\", \"/simple/\")\n config.add_route(\n \"legacy.api.simple.detail\",\n \"/simple/{name}/\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/\",\n )\n config.add_route(\n \"legacy.api.json.project\",\n \"/pypi/{name}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}\",\n )\n config.add_route(\n \"legacy.api.json.release\",\n \"/pypi/{name}/{version}/json\",\n factory=\"warehouse.packaging.models:ProjectFactory\",\n traverse=\"/{name}/{version}\",\n )\n\n # Legacy Action URLs\n config.add_pypi_action_route(\"legacy.api.pypi.file_upload\", \"file_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.submit\", \"submit\")\n config.add_pypi_action_route(\n \"legacy.api.pypi.submit_pkg_info\",\n \"submit_pkg_info\",\n )\n config.add_pypi_action_route(\"legacy.api.pypi.doc_upload\", \"doc_upload\")\n config.add_pypi_action_route(\"legacy.api.pypi.doap\", \"doap\")\n\n # Legacy XMLRPC\n config.add_xmlrpc_endpoint(\n \"pypi\",\n pattern=\"/pypi\",\n header=\"Content-Type:text/xml\",\n )\n\n # Legacy Documentation\n config.add_route(\"legacy.docs\", config.registry.settings[\"docs.url\"])\n\n # Legacy Redirects\n config.add_redirect(\"/pypi/{name}/\", \"/project/{name}/\")\n config.add_redirect(\n \"/pypi/{name}/{version}/\",\n \"/project/{name}/{version}/\",\n )\n", "path": "warehouse/routes.py"}]}
| 2,333 | 915 |
gh_patches_debug_10680
|
rasdani/github-patches
|
git_diff
|
piskvorky__gensim-902
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ZeroDivisionError: float division by zero in pagerank_weighted.py
Got ZeroDivisionError: float division by zero in **pagerank_weighted.py**
Input Text: BYAnonymousArabe
Error thrown at: `summarize(text_to_sum, word_count=50)`

I know word_count is 50 and I'm only giving a single word, but ideally it should output that single word instead of throwing an error, shouldn't it?
Please shed some light on this.
</issue>
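A minimal reproduction of the report, assuming the `gensim.summarization` import path of that era; the sample string is the one quoted above:

```python
# Reproduction sketch only: a single-token input yields a one-sentence
# document, which is where the reported ZeroDivisionError surfaced.
from gensim.summarization import summarize

summarize("BYAnonymousArabe", word_count=50)
```

The change accepted for this issue (see the golden_diff field of this record) makes this case raise `ValueError("input must have more than one sentence")` instead of failing inside pagerank_weighted.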
<code>
[start of gensim/summarization/summarizer.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
5
6 import logging
7 from gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank
8 from gensim.summarization.textcleaner import clean_text_by_sentences as _clean_text_by_sentences
9 from gensim.summarization.commons import build_graph as _build_graph
10 from gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes
11 from gensim.summarization.bm25 import get_bm25_weights as _bm25_weights
12 from gensim.corpora import Dictionary
13 from math import log10 as _log10
14 from six.moves import xrange
15
16
17 INPUT_MIN_LENGTH = 10
18
19 WEIGHT_THRESHOLD = 1.e-3
20
21 logger = logging.getLogger(__name__)
22
23
24 def _set_graph_edge_weights(graph):
25 documents = graph.nodes()
26 weights = _bm25_weights(documents)
27
28 for i in xrange(len(documents)):
29 for j in xrange(len(documents)):
30 if i == j or weights[i][j] < WEIGHT_THRESHOLD:
31 continue
32
33 sentence_1 = documents[i]
34 sentence_2 = documents[j]
35
36 edge_1 = (sentence_1, sentence_2)
37 edge_2 = (sentence_2, sentence_1)
38
39 if not graph.has_edge(edge_1):
40 graph.add_edge(edge_1, weights[i][j])
41 if not graph.has_edge(edge_2):
42 graph.add_edge(edge_2, weights[j][i])
43
44 # Handles the case in which all similarities are zero.
45 # The resultant summary will consist of random sentences.
46 if all(graph.edge_weight(edge) == 0 for edge in graph.edges()):
47 _create_valid_graph(graph)
48
49
50 def _create_valid_graph(graph):
51 nodes = graph.nodes()
52
53 for i in xrange(len(nodes)):
54 for j in xrange(len(nodes)):
55 if i == j:
56 continue
57
58 edge = (nodes[i], nodes[j])
59
60 if graph.has_edge(edge):
61 graph.del_edge(edge)
62
63 graph.add_edge(edge, 1)
64
65
66 def _get_doc_length(doc):
67 return sum([item[1] for item in doc])
68
69
70 def _get_similarity(doc1, doc2, vec1, vec2):
71 numerator = vec1.dot(vec2.transpose()).toarray()[0][0]
72 length_1 = _get_doc_length(doc1)
73 length_2 = _get_doc_length(doc2)
74
75 denominator = _log10(length_1) + _log10(length_2) if length_1 > 0 and length_2 > 0 else 0
76
77 return numerator / denominator if denominator != 0 else 0
78
79
80 def _build_corpus(sentences):
81 split_tokens = [sentence.token.split() for sentence in sentences]
82 dictionary = Dictionary(split_tokens)
83 return [dictionary.doc2bow(token) for token in split_tokens]
84
85
86 def _get_important_sentences(sentences, corpus, important_docs):
87 hashable_corpus = _build_hasheable_corpus(corpus)
88 sentences_by_corpus = dict(zip(hashable_corpus, sentences))
89 return [sentences_by_corpus[tuple(important_doc)] for important_doc in important_docs]
90
91
92 def _get_sentences_with_word_count(sentences, word_count):
93 """ Given a list of sentences, returns a list of sentences with a
94 total word count similar to the word count provided."""
95 length = 0
96 selected_sentences = []
97
98 # Loops until the word count is reached.
99 for sentence in sentences:
100 words_in_sentence = len(sentence.text.split())
101
102 # Checks if the inclusion of the sentence gives a better approximation
103 # to the word parameter.
104 if abs(word_count - length - words_in_sentence) > abs(word_count - length):
105 return selected_sentences
106
107 selected_sentences.append(sentence)
108 length += words_in_sentence
109
110 return selected_sentences
111
112
113 def _extract_important_sentences(sentences, corpus, important_docs, word_count):
114 important_sentences = _get_important_sentences(sentences, corpus, important_docs)
115
116 # If no "word_count" option is provided, the number of sentences is
117 # reduced by the provided ratio. Else, the ratio is ignored.
118 return important_sentences if word_count is None else _get_sentences_with_word_count(important_sentences, word_count)
119
120
121 def _format_results(extracted_sentences, split):
122 if split:
123 return [sentence.text for sentence in extracted_sentences]
124 return "\n".join([sentence.text for sentence in extracted_sentences])
125
126
127 def _build_hasheable_corpus(corpus):
128 return [tuple(doc) for doc in corpus]
129
130
131 def summarize_corpus(corpus, ratio=0.2):
132 """
133 Returns a list of the most important documents of a corpus using a
134 variation of the TextRank algorithm.
135 The input must have at least INPUT_MIN_LENGTH (%d) documents for the
136 summary to make sense.
137
138 The length of the output can be specified using the ratio parameter,
139 which determines how many documents will be chosen for the summary
140 (defaults at 20%% of the number of documents of the corpus).
141
142 The most important documents are returned as a list sorted by the
143 document score, highest first.
144
145 """ % INPUT_MIN_LENGTH
146 hashable_corpus = _build_hasheable_corpus(corpus)
147
148 # If the corpus is empty, the function ends.
149 if len(corpus) == 0:
150 logger.warning("Input corpus is empty.")
151 return
152
153 # Warns the user if there are too few documents.
154 if len(corpus) < INPUT_MIN_LENGTH:
155 logger.warning("Input corpus is expected to have at least " + str(INPUT_MIN_LENGTH) + " documents.")
156
157 graph = _build_graph(hashable_corpus)
158 _set_graph_edge_weights(graph)
159 _remove_unreachable_nodes(graph)
160
161 pagerank_scores = _pagerank(graph)
162
163 hashable_corpus.sort(key=lambda doc: pagerank_scores.get(doc, 0), reverse=True)
164
165 return [list(doc) for doc in hashable_corpus[:int(len(corpus) * ratio)]]
166
167
168 def summarize(text, ratio=0.2, word_count=None, split=False):
169 """
170 Returns a summarized version of the given text using a variation of
171 the TextRank algorithm.
172 The input must be longer than INPUT_MIN_LENGTH sentences for the
173 summary to make sense and must be given as a string.
174
175 The output summary will consist of the most representative sentences
176 and will also be returned as a string, divided by newlines. If the
177 split parameter is set to True, a list of sentences will be
178 returned.
179
180 The length of the output can be specified using the ratio and
181 word_count parameters:
182 ratio should be a number between 0 and 1 that determines the
183 percentage of the number of sentences of the original text to be
184 chosen for the summary (defaults at 0.2).
185 word_count determines how many words will the output contain.
186 If both parameters are provided, the ratio will be ignored.
187 """
188 # Gets a list of processed sentences.
189 sentences = _clean_text_by_sentences(text)
190
191 # If no sentence could be identified, the function ends.
192 if len(sentences) == 0:
193 logger.warning("Input text is empty.")
194 return
195
196 # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError).
197 if len(sentences) == 1:
198 logger.warning("Summarization not performed since the document has only one sentence.")
199 return text
200
201 # Warns if the text is too short.
202 if len(sentences) < INPUT_MIN_LENGTH:
203 logger.warning("Input text is expected to have at least " + str(INPUT_MIN_LENGTH) + " sentences.")
204
205 corpus = _build_corpus(sentences)
206
207 most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)
208
209 # Extracts the most important sentences with the selected criterion.
210 extracted_sentences = _extract_important_sentences(sentences, corpus, most_important_docs, word_count)
211
212 # Sorts the extracted sentences by apparition order in the original text.
213 extracted_sentences.sort(key=lambda s: s.index)
214
215 return _format_results(extracted_sentences, split)
216
[end of gensim/summarization/summarizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gensim/summarization/summarizer.py b/gensim/summarization/summarizer.py
--- a/gensim/summarization/summarizer.py
+++ b/gensim/summarization/summarizer.py
@@ -193,10 +193,9 @@
logger.warning("Input text is empty.")
return
- # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError).
+ # If only one sentence is present, the function raises an error (Avoids ZeroDivisionError).
if len(sentences) == 1:
- logger.warning("Summarization not performed since the document has only one sentence.")
- return text
+ raise ValueError("input must have more than one sentence")
# Warns if the text is too short.
if len(sentences) < INPUT_MIN_LENGTH:
|
{"golden_diff": "diff --git a/gensim/summarization/summarizer.py b/gensim/summarization/summarizer.py\n--- a/gensim/summarization/summarizer.py\n+++ b/gensim/summarization/summarizer.py\n@@ -193,10 +193,9 @@\n logger.warning(\"Input text is empty.\")\n return\n \n- # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError). \n+ # If only one sentence is present, the function raises an error (Avoids ZeroDivisionError). \n if len(sentences) == 1:\n- logger.warning(\"Summarization not performed since the document has only one sentence.\")\n- return text\n+ raise ValueError(\"input must have more than one sentence\")\n \n # Warns if the text is too short.\n if len(sentences) < INPUT_MIN_LENGTH:\n", "issue": "ZeroDivisionError: float division by zero in pagerank_weighted.py\nGot ZeroDivisionError: float division by zero in **pagerank_weighted.py**\n\nInput Text: BYAnonymousArabe\n\nError thrown at: `summarize(text_to_sum, word_count=50)`\n\n\n\nI know word_count is 50 and I'm just giving a word. But ideally it should output that single word instead of throwing an error isn't it?\n\nPlease throw some light\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html\n\nimport logging\nfrom gensim.summarization.pagerank_weighted import pagerank_weighted as _pagerank\nfrom gensim.summarization.textcleaner import clean_text_by_sentences as _clean_text_by_sentences\nfrom gensim.summarization.commons import build_graph as _build_graph\nfrom gensim.summarization.commons import remove_unreachable_nodes as _remove_unreachable_nodes\nfrom gensim.summarization.bm25 import get_bm25_weights as _bm25_weights\nfrom gensim.corpora import Dictionary\nfrom math import log10 as _log10\nfrom six.moves import xrange\n\n\nINPUT_MIN_LENGTH = 10\n\nWEIGHT_THRESHOLD = 1.e-3\n\nlogger = logging.getLogger(__name__)\n\n\ndef _set_graph_edge_weights(graph):\n documents = graph.nodes()\n weights = _bm25_weights(documents)\n\n for i in xrange(len(documents)):\n for j in xrange(len(documents)):\n if i == j or weights[i][j] < WEIGHT_THRESHOLD:\n continue\n\n sentence_1 = documents[i]\n sentence_2 = documents[j]\n\n edge_1 = (sentence_1, sentence_2)\n edge_2 = (sentence_2, sentence_1)\n\n if not graph.has_edge(edge_1):\n graph.add_edge(edge_1, weights[i][j])\n if not graph.has_edge(edge_2):\n graph.add_edge(edge_2, weights[j][i])\n\n # Handles the case in which all similarities are zero.\n # The resultant summary will consist of random sentences.\n if all(graph.edge_weight(edge) == 0 for edge in graph.edges()):\n _create_valid_graph(graph)\n\n\ndef _create_valid_graph(graph):\n nodes = graph.nodes()\n\n for i in xrange(len(nodes)):\n for j in xrange(len(nodes)):\n if i == j:\n continue\n\n edge = (nodes[i], nodes[j])\n\n if graph.has_edge(edge):\n graph.del_edge(edge)\n\n graph.add_edge(edge, 1)\n\n\ndef _get_doc_length(doc):\n return sum([item[1] for item in doc])\n\n\ndef _get_similarity(doc1, doc2, vec1, vec2):\n numerator = vec1.dot(vec2.transpose()).toarray()[0][0]\n length_1 = _get_doc_length(doc1)\n length_2 = _get_doc_length(doc2)\n\n denominator = _log10(length_1) + _log10(length_2) if length_1 > 0 and length_2 > 0 else 0\n\n return numerator / denominator if denominator != 0 else 0\n\n\ndef _build_corpus(sentences):\n split_tokens = [sentence.token.split() for sentence in sentences]\n dictionary = Dictionary(split_tokens)\n return [dictionary.doc2bow(token) for token in 
split_tokens]\n\n\ndef _get_important_sentences(sentences, corpus, important_docs):\n hashable_corpus = _build_hasheable_corpus(corpus)\n sentences_by_corpus = dict(zip(hashable_corpus, sentences))\n return [sentences_by_corpus[tuple(important_doc)] for important_doc in important_docs]\n\n\ndef _get_sentences_with_word_count(sentences, word_count):\n \"\"\" Given a list of sentences, returns a list of sentences with a\n total word count similar to the word count provided.\"\"\"\n length = 0\n selected_sentences = []\n\n # Loops until the word count is reached.\n for sentence in sentences:\n words_in_sentence = len(sentence.text.split())\n\n # Checks if the inclusion of the sentence gives a better approximation\n # to the word parameter.\n if abs(word_count - length - words_in_sentence) > abs(word_count - length):\n return selected_sentences\n\n selected_sentences.append(sentence)\n length += words_in_sentence\n\n return selected_sentences\n\n\ndef _extract_important_sentences(sentences, corpus, important_docs, word_count):\n important_sentences = _get_important_sentences(sentences, corpus, important_docs)\n\n # If no \"word_count\" option is provided, the number of sentences is\n # reduced by the provided ratio. Else, the ratio is ignored.\n return important_sentences if word_count is None else _get_sentences_with_word_count(important_sentences, word_count)\n\n\ndef _format_results(extracted_sentences, split):\n if split:\n return [sentence.text for sentence in extracted_sentences]\n return \"\\n\".join([sentence.text for sentence in extracted_sentences])\n\n\ndef _build_hasheable_corpus(corpus):\n return [tuple(doc) for doc in corpus]\n\n\ndef summarize_corpus(corpus, ratio=0.2):\n \"\"\"\n Returns a list of the most important documents of a corpus using a\n variation of the TextRank algorithm.\n The input must have at least INPUT_MIN_LENGTH (%d) documents for the\n summary to make sense.\n\n The length of the output can be specified using the ratio parameter,\n which determines how many documents will be chosen for the summary\n (defaults at 20%% of the number of documents of the corpus).\n\n The most important documents are returned as a list sorted by the\n document score, highest first.\n\n \"\"\" % INPUT_MIN_LENGTH\n hashable_corpus = _build_hasheable_corpus(corpus)\n\n # If the corpus is empty, the function ends.\n if len(corpus) == 0:\n logger.warning(\"Input corpus is empty.\")\n return\n\n # Warns the user if there are too few documents.\n if len(corpus) < INPUT_MIN_LENGTH:\n logger.warning(\"Input corpus is expected to have at least \" + str(INPUT_MIN_LENGTH) + \" documents.\")\n\n graph = _build_graph(hashable_corpus)\n _set_graph_edge_weights(graph)\n _remove_unreachable_nodes(graph)\n\n pagerank_scores = _pagerank(graph)\n\n hashable_corpus.sort(key=lambda doc: pagerank_scores.get(doc, 0), reverse=True)\n\n return [list(doc) for doc in hashable_corpus[:int(len(corpus) * ratio)]]\n\n\ndef summarize(text, ratio=0.2, word_count=None, split=False):\n \"\"\"\n Returns a summarized version of the given text using a variation of\n the TextRank algorithm.\n The input must be longer than INPUT_MIN_LENGTH sentences for the\n summary to make sense and must be given as a string.\n\n The output summary will consist of the most representative sentences\n and will also be returned as a string, divided by newlines. 
If the\n split parameter is set to True, a list of sentences will be\n returned.\n\n The length of the output can be specified using the ratio and\n word_count parameters:\n ratio should be a number between 0 and 1 that determines the\n percentage of the number of sentences of the original text to be\n chosen for the summary (defaults at 0.2).\n word_count determines how many words will the output contain.\n If both parameters are provided, the ratio will be ignored.\n \"\"\"\n # Gets a list of processed sentences.\n sentences = _clean_text_by_sentences(text)\n\n # If no sentence could be identified, the function ends.\n if len(sentences) == 0:\n logger.warning(\"Input text is empty.\")\n return\n\n # If only one sentence is present, the function return the input text (Avoids ZeroDivisionError). \n if len(sentences) == 1:\n logger.warning(\"Summarization not performed since the document has only one sentence.\")\n return text\n \n # Warns if the text is too short.\n if len(sentences) < INPUT_MIN_LENGTH:\n logger.warning(\"Input text is expected to have at least \" + str(INPUT_MIN_LENGTH) + \" sentences.\")\n\n corpus = _build_corpus(sentences)\n\n most_important_docs = summarize_corpus(corpus, ratio=ratio if word_count is None else 1)\n\n # Extracts the most important sentences with the selected criterion.\n extracted_sentences = _extract_important_sentences(sentences, corpus, most_important_docs, word_count)\n\n # Sorts the extracted sentences by apparition order in the original text.\n extracted_sentences.sort(key=lambda s: s.index)\n\n return _format_results(extracted_sentences, split)\n", "path": "gensim/summarization/summarizer.py"}]}
| 3,060 | 203 |
gh_patches_debug_21581
|
rasdani/github-patches
|
git_diff
|
elastic__apm-agent-python-1134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cloud metadata collection is leaking sockets
When collecting the meta data information from cloud providers a socket connection is opened but not closed. This causes `ResourceWarnings` when running pytest. The issue seems to be the socket that is opened [here](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/utils/cloud.py#L138).
**To Reproduce**
Create a file `test.py` with the following content:
```python
import elasticapm
def test():
c = elasticapm.Client({"SERVICE_NAME": "dev"})
c.build_metadata()
assert False
```
Run the tests with:
```cmd
pytest --pythonwarnings once test.py
```
You should see warnings similar to this one (you have to run this on a cloud hosted VM):
```cmd
test.py::test
/tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52428), raddr=('169.254.169.254', 80)>
socket.create_connection(("169.254.169.254", 80), 0.1)
test.py::test
/tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52430), raddr=('169.254.169.254', 80)>
socket.create_connection(("169.254.169.254", 80), 0.1)
test.py::test
/tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52432), raddr=('169.254.169.254', 80)>
data = cloud.aws_metadata()
test.py::test
/tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52434), raddr=('169.254.169.254', 80)>
data = cloud.aws_metadata()
test.py::test
/tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52436), raddr=('169.254.169.254', 80)>
socket.create_connection(("169.254.169.254", 80), 0.1)
test.py::test
/tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52438), raddr=('169.254.169.254', 80)>
socket.create_connection(("169.254.169.254", 80), 0.1)
```
**Environment (please complete the following information)**
- OS: Mac and Linux
- Python version: 3.6.6
- APM Server version:
- Agent version: 5.10.0
</issue>
<code>
[start of elasticapm/utils/cloud.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 import json
32 import os
33 import socket
34
35 import urllib3
36
37
38 def aws_metadata():
39 """
40 Fetch AWS metadata from the local metadata server. If metadata server is
41 not found, return an empty dictionary
42 """
43 http = urllib3.PoolManager()
44
45 try:
46 # This will throw an error if the metadata server isn't available,
47 # and will be quiet in the logs, unlike urllib3
48 socket.create_connection(("169.254.169.254", 80), 0.1)
49
50 try:
51 # This whole block is almost unnecessary. IMDSv1 will be supported
52 # indefinitely, so the only time this block is needed is if a
53 # security-conscious user has set the metadata service to require
54 # IMDSv2. Thus, the very expansive try:except: coverage.
55
56 # TODO: should we have a config option to completely disable IMDSv2 to reduce overhead?
57 ttl_header = {"X-aws-ec2-metadata-token-ttl-seconds": "300"}
58 token_url = "http://169.254.169.254/latest/api/token"
59 token_request = http.request("PUT", token_url, headers=ttl_header, timeout=1.0, retries=False)
60 token = token_request.data.decode("utf-8")
61 aws_token_header = {"X-aws-ec2-metadata-token": token} if token else {}
62 except Exception:
63 aws_token_header = {}
64 metadata = json.loads(
65 http.request(
66 "GET",
67 "http://169.254.169.254/latest/dynamic/instance-identity/document",
68 headers=aws_token_header,
69 timeout=1.0,
70 retries=False,
71 ).data.decode("utf-8")
72 )
73
74 return {
75 "account": {"id": metadata["accountId"]},
76 "instance": {"id": metadata["instanceId"]},
77 "availability_zone": metadata["availabilityZone"],
78 "machine": {"type": metadata["instanceType"]},
79 "provider": "aws",
80 "region": metadata["region"],
81 }
82
83 except Exception:
84 # Not on an AWS box
85 return {}
86
87
88 def gcp_metadata():
89 """
90 Fetch GCP metadata from the local metadata server. If metadata server is
91 not found, return an empty dictionary
92 """
93 headers = {"Metadata-Flavor": "Google"}
94 http = urllib3.PoolManager()
95
96 try:
97 # This will throw an error if the metadata server isn't available,
98 # and will be quiet in the logs, unlike urllib3
99 socket.getaddrinfo("metadata.google.internal", 80, 0, socket.SOCK_STREAM)
100
101 metadata = json.loads(
102 http.request(
103 "GET",
104 "http://metadata.google.internal/computeMetadata/v1/?recursive=true",
105 headers=headers,
106 timeout=1.0,
107 retries=False,
108 ).data.decode("utf-8")
109 )
110
111 availability_zone = os.path.split(metadata["instance"]["zone"])[1]
112
113 return {
114 "provider": "gcp",
115 "instance": {"id": str(metadata["instance"]["id"]), "name": metadata["instance"]["name"]},
116 "project": {"id": str(metadata["project"]["numericProjectId"]), "name": metadata["project"]["projectId"]},
117 "availability_zone": availability_zone,
118 "region": availability_zone.rsplit("-", 1)[0],
119 "machine": {"type": metadata["instance"]["machineType"].split("/")[-1]},
120 }
121
122 except Exception:
123 # Not on a gcp box
124 return {}
125
126
127 def azure_metadata():
128 """
129 Fetch Azure metadata from the local metadata server. If metadata server is
130 not found, return an empty dictionary
131 """
132 headers = {"Metadata": "true"}
133 http = urllib3.PoolManager()
134
135 try:
136 # This will throw an error if the metadata server isn't available,
137 # and will be quiet in the logs, unlike urllib3
138 socket.create_connection(("169.254.169.254", 80), 0.1)
139
140 # Can't use newest metadata service version, as it's not guaranteed
141 # to be available in all regions
142 metadata = json.loads(
143 http.request(
144 "GET",
145 "http://169.254.169.254/metadata/instance/compute?api-version=2019-08-15",
146 headers=headers,
147 timeout=1.0,
148 retries=False,
149 ).data.decode("utf-8")
150 )
151
152 ret = {
153 "account": {"id": metadata["subscriptionId"]},
154 "instance": {"id": metadata["vmId"], "name": metadata["name"]},
155 "project": {"name": metadata["resourceGroupName"]},
156 "availability_zone": metadata["zone"],
157 "machine": {"type": metadata["vmSize"]},
158 "provider": "azure",
159 "region": metadata["location"],
160 }
161
162 if not ret["availability_zone"]:
163 ret.pop("availability_zone")
164 return ret
165
166 except Exception:
167 # Not on an Azure box, maybe an azure app service?
168 return azure_app_service_metadata()
169
170
171 def azure_app_service_metadata():
172 ret = {"provider": "azure"}
173 website_owner_name = os.environ.get("WEBSITE_OWNER_NAME")
174 website_instance_id = os.environ.get("WEBSITE_INSTANCE_ID")
175 website_site_name = os.environ.get("WEBSITE_SITE_NAME")
176 website_resource_group = os.environ.get("WEBSITE_RESOURCE_GROUP")
177 if not all((website_owner_name, website_instance_id, website_site_name, website_resource_group)):
178 return {}
179 # Format of website_owner_name: {subscription id}+{app service plan resource group}-{region}webspace{.*}
180 if "+" not in website_owner_name:
181 return {}
182 try:
183 account_id, website_owner_name = website_owner_name.split("+")
184 ret["account"] = {"id": account_id}
185 region, _ = website_owner_name.split("webspace")
186 ret["region"] = region.rsplit("-", 1)[1]
187 except Exception:
188 return {}
189
190 ret["instance"] = {"id": website_instance_id, "name": website_site_name}
191 ret["project"] = {"name": website_resource_group}
192
193 return ret
194
[end of elasticapm/utils/cloud.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/utils/cloud.py b/elasticapm/utils/cloud.py
--- a/elasticapm/utils/cloud.py
+++ b/elasticapm/utils/cloud.py
@@ -45,7 +45,8 @@
try:
# This will throw an error if the metadata server isn't available,
# and will be quiet in the logs, unlike urllib3
- socket.create_connection(("169.254.169.254", 80), 0.1)
+ with socket.create_connection(("169.254.169.254", 80), 0.1):
+ pass
try:
# This whole block is almost unnecessary. IMDSv1 will be supported
@@ -135,7 +136,8 @@
try:
# This will throw an error if the metadata server isn't available,
# and will be quiet in the logs, unlike urllib3
- socket.create_connection(("169.254.169.254", 80), 0.1)
+ with socket.create_connection(("169.254.169.254", 80), 0.1):
+ pass
# Can't use newest metadata service version, as it's not guaranteed
# to be available in all regions
|
{"golden_diff": "diff --git a/elasticapm/utils/cloud.py b/elasticapm/utils/cloud.py\n--- a/elasticapm/utils/cloud.py\n+++ b/elasticapm/utils/cloud.py\n@@ -45,7 +45,8 @@\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n- socket.create_connection((\"169.254.169.254\", 80), 0.1)\n+ with socket.create_connection((\"169.254.169.254\", 80), 0.1):\n+ pass\n \n try:\n # This whole block is almost unnecessary. IMDSv1 will be supported\n@@ -135,7 +136,8 @@\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n- socket.create_connection((\"169.254.169.254\", 80), 0.1)\n+ with socket.create_connection((\"169.254.169.254\", 80), 0.1):\n+ pass\n \n # Can't use newest metadata service version, as it's not guaranteed\n # to be available in all regions\n", "issue": "Cloud metadata collection is leaking sockets\nWhen collecting the meta data information from cloud providers a socket connection is opened but not closed. This causes `ResourceWarnings` when running pytest. The issue seems to be the socket that is opened [here](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/utils/cloud.py#L138).\r\n\r\n**To Reproduce**\r\nCreate a file `test.py` with the following content:\r\n\r\n```python\r\nimport elasticapm\r\n\r\ndef test():\r\n c = elasticapm.Client({\"SERVICE_NAME\": \"dev\"})\r\n c.build_metadata()\r\n assert False\r\n```\r\n\r\nRun the tests with:\r\n```cmd\r\npytest --pythonwarnings once test.py\r\n```\r\n\r\nYou should see warnings similar to this one (you have to run this on a cloud hosted VM):\r\n```cmd\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52428), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:48: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52430), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52432), raddr=('169.254.169.254', 80)>\r\n data = cloud.aws_metadata()\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/base.py:393: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52434), raddr=('169.254.169.254', 80)>\r\n data = cloud.aws_metadata()\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=12, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52436), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n\r\ntest.py::test\r\n /tmp/venv/lib/python3.6/site-packages/elasticapm/utils/cloud.py:138: ResourceWarning: unclosed <socket.socket fd=11, family=AddressFamily.AF_INET, type=2049, proto=6, laddr=('10.5.128.4', 52438), raddr=('169.254.169.254', 80)>\r\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\r\n```\r\n**Environment (please complete the following 
information)**\r\n- OS: Mac and Linux\r\n- Python version: 3.6.6\r\n- APM Server version:\r\n- Agent version: 5.10.0\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport json\nimport os\nimport socket\n\nimport urllib3\n\n\ndef aws_metadata():\n \"\"\"\n Fetch AWS metadata from the local metadata server. If metadata server is\n not found, return an empty dictionary\n \"\"\"\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\n\n try:\n # This whole block is almost unnecessary. IMDSv1 will be supported\n # indefinitely, so the only time this block is needed is if a\n # security-conscious user has set the metadata service to require\n # IMDSv2. 
Thus, the very expansive try:except: coverage.\n\n # TODO: should we have a config option to completely disable IMDSv2 to reduce overhead?\n ttl_header = {\"X-aws-ec2-metadata-token-ttl-seconds\": \"300\"}\n token_url = \"http://169.254.169.254/latest/api/token\"\n token_request = http.request(\"PUT\", token_url, headers=ttl_header, timeout=1.0, retries=False)\n token = token_request.data.decode(\"utf-8\")\n aws_token_header = {\"X-aws-ec2-metadata-token\": token} if token else {}\n except Exception:\n aws_token_header = {}\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://169.254.169.254/latest/dynamic/instance-identity/document\",\n headers=aws_token_header,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n return {\n \"account\": {\"id\": metadata[\"accountId\"]},\n \"instance\": {\"id\": metadata[\"instanceId\"]},\n \"availability_zone\": metadata[\"availabilityZone\"],\n \"machine\": {\"type\": metadata[\"instanceType\"]},\n \"provider\": \"aws\",\n \"region\": metadata[\"region\"],\n }\n\n except Exception:\n # Not on an AWS box\n return {}\n\n\ndef gcp_metadata():\n \"\"\"\n Fetch GCP metadata from the local metadata server. If metadata server is\n not found, return an empty dictionary\n \"\"\"\n headers = {\"Metadata-Flavor\": \"Google\"}\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.getaddrinfo(\"metadata.google.internal\", 80, 0, socket.SOCK_STREAM)\n\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://metadata.google.internal/computeMetadata/v1/?recursive=true\",\n headers=headers,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n availability_zone = os.path.split(metadata[\"instance\"][\"zone\"])[1]\n\n return {\n \"provider\": \"gcp\",\n \"instance\": {\"id\": str(metadata[\"instance\"][\"id\"]), \"name\": metadata[\"instance\"][\"name\"]},\n \"project\": {\"id\": str(metadata[\"project\"][\"numericProjectId\"]), \"name\": metadata[\"project\"][\"projectId\"]},\n \"availability_zone\": availability_zone,\n \"region\": availability_zone.rsplit(\"-\", 1)[0],\n \"machine\": {\"type\": metadata[\"instance\"][\"machineType\"].split(\"/\")[-1]},\n }\n\n except Exception:\n # Not on a gcp box\n return {}\n\n\ndef azure_metadata():\n \"\"\"\n Fetch Azure metadata from the local metadata server. 
If metadata server is\n not found, return an empty dictionary\n \"\"\"\n headers = {\"Metadata\": \"true\"}\n http = urllib3.PoolManager()\n\n try:\n # This will throw an error if the metadata server isn't available,\n # and will be quiet in the logs, unlike urllib3\n socket.create_connection((\"169.254.169.254\", 80), 0.1)\n\n # Can't use newest metadata service version, as it's not guaranteed\n # to be available in all regions\n metadata = json.loads(\n http.request(\n \"GET\",\n \"http://169.254.169.254/metadata/instance/compute?api-version=2019-08-15\",\n headers=headers,\n timeout=1.0,\n retries=False,\n ).data.decode(\"utf-8\")\n )\n\n ret = {\n \"account\": {\"id\": metadata[\"subscriptionId\"]},\n \"instance\": {\"id\": metadata[\"vmId\"], \"name\": metadata[\"name\"]},\n \"project\": {\"name\": metadata[\"resourceGroupName\"]},\n \"availability_zone\": metadata[\"zone\"],\n \"machine\": {\"type\": metadata[\"vmSize\"]},\n \"provider\": \"azure\",\n \"region\": metadata[\"location\"],\n }\n\n if not ret[\"availability_zone\"]:\n ret.pop(\"availability_zone\")\n return ret\n\n except Exception:\n # Not on an Azure box, maybe an azure app service?\n return azure_app_service_metadata()\n\n\ndef azure_app_service_metadata():\n ret = {\"provider\": \"azure\"}\n website_owner_name = os.environ.get(\"WEBSITE_OWNER_NAME\")\n website_instance_id = os.environ.get(\"WEBSITE_INSTANCE_ID\")\n website_site_name = os.environ.get(\"WEBSITE_SITE_NAME\")\n website_resource_group = os.environ.get(\"WEBSITE_RESOURCE_GROUP\")\n if not all((website_owner_name, website_instance_id, website_site_name, website_resource_group)):\n return {}\n # Format of website_owner_name: {subscription id}+{app service plan resource group}-{region}webspace{.*}\n if \"+\" not in website_owner_name:\n return {}\n try:\n account_id, website_owner_name = website_owner_name.split(\"+\")\n ret[\"account\"] = {\"id\": account_id}\n region, _ = website_owner_name.split(\"webspace\")\n ret[\"region\"] = region.rsplit(\"-\", 1)[1]\n except Exception:\n return {}\n\n ret[\"instance\"] = {\"id\": website_instance_id, \"name\": website_site_name}\n ret[\"project\"] = {\"name\": website_resource_group}\n\n return ret\n", "path": "elasticapm/utils/cloud.py"}]}
| 3,717 | 309 |
gh_patches_debug_19733
|
rasdani/github-patches
|
git_diff
|
bids-standard__pybids-81
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fieldmaps - _magnitude2.nii[.gz] is optional
As per these lines:
https://github.com/INCF/pybids/blob/800d15053952991c9cd4a00cf0039288d489ca12/bids/grabbids/bids_layout.py#L143-L148
when a fieldmap type is identified as `'phasediff'` then pybids understands that there is always a `_magnitude2.nii[.gz]` file. However, this is maked as optional in the specification.
</issue>
<code>
[start of bids/grabbids/bids_layout.py]
1 import os
2 import json
3
4 from os.path import dirname
5 from os.path import abspath
6 from os.path import join as pathjoin
7
8 from .bids_validator import BIDSValidator
9 from grabbit import Layout
10
11 __all__ = ['BIDSLayout']
12
13
14 class BIDSLayout(Layout):
15
16 def __init__(self, path, config=None, validate=False,
17 index_associated=True, **kwargs):
18 self.validator = BIDSValidator(index_associated=index_associated)
19 self.validate = validate
20 if config is None:
21 root = dirname(abspath(__file__))
22 config = pathjoin(root, 'config', 'bids.json')
23 super(BIDSLayout, self).__init__(path, config,
24 dynamic_getters=True, **kwargs)
25
26 def _validate_file(self, f):
27 # If validate=True then checks files according to BIDS and
28 # returns False if file doesn't fit BIDS specification
29 if not self.validate:
30 return True
31 to_check = f.path
32 to_check = to_check.split(os.path.abspath(self.root), maxsplit=1)[1]
33
34 sep = os.path.sep
35 if to_check[:len(sep)] != sep:
36 to_check = sep + to_check
37 else:
38 None
39
40 return self.validator.is_bids(to_check)
41
42 def _get_nearest_helper(self, path, extension, type=None, **kwargs):
43 """ Helper function for grabbit get_nearest """
44 path = abspath(path)
45
46 if path not in self.files:
47 raise ValueError("File '%s' could not be found in the current BIDS"
48 " project." % path)
49
50 if not type:
51 # Constrain the search to .json files with the same type as target
52 type = self.files[path].entities['type']
53
54 tmp = self.get_nearest(path, extensions=extension, all_=True,
55 type=type, ignore_strict_entities=['type'],
56 **kwargs)
57
58 if len(tmp):
59 return tmp
60 else:
61 return None
62
63 def get_metadata(self, path, **kwargs):
64 ''' Returns metadata found in JSON sidecars for the specified file.
65 Args:
66 path (str): Path to the file to get metadata for.
67 kwargs (dict): Optional keyword arguments to pass onto
68 get_nearest().
69 Notes:
70 A dictionary containing metadata extracted from all matching .json
71 files is returned. In cases where the same key is found in multiple
72 files, the values in files closer to the input filename will take
73 precedence, per the inheritance rules in the BIDS specification.
74 '''
75 potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)
76
77 if not isinstance(potentialJSONs, list):
78 return potentialJSONs
79
80 merged_param_dict = {}
81 for json_file_path in reversed(potentialJSONs):
82 if os.path.exists(json_file_path):
83 param_dict = json.load(open(json_file_path, "r",
84 encoding='utf-8'))
85 merged_param_dict.update(param_dict)
86
87 return merged_param_dict
88
89 def get_bvec(self, path, **kwargs):
90 tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]
91 if isinstance(tmp, list):
92 return tmp[0]
93 else:
94 return tmp
95
96 def get_bval(self, path, **kwargs):
97 tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]
98 if isinstance(tmp, list):
99 return tmp[0]
100 else:
101 return tmp
102
103 def get_events(self, path, **kwargs):
104 tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs)
105 if isinstance(tmp, list):
106 return tmp[0]
107 else:
108 return tmp
109
110 def get_fieldmap(self, path, return_list=False):
111 fieldmaps = self._get_fieldmaps(path)
112
113 if return_list:
114 return fieldmaps
115 else:
116 if len(fieldmaps) == 1:
117 return fieldmaps[0]
118 elif len(fieldmaps) > 1:
119 raise ValueError("More than one fieldmap found, but the "
120 "'return_list' argument was set to False. "
121 "Either ensure that there is only one "
122 "fieldmap for this image, or set the "
123 "'return_list' argument to True and handle "
124 "the result as a list.")
125 else: # len(fieldmaps) == 0
126 return None
127
128 def _get_fieldmaps(self, path):
129 sub = os.path.split(path)[1].split("_")[0].split("sub-")[1]
130 fieldmap_set = []
131 type_ = '(phase1|phasediff|epi|fieldmap)'
132 for file in self.get(subject=sub, type=type_,
133 extensions=['nii.gz', 'nii']):
134 metadata = self.get_metadata(file.filename)
135 if metadata and "IntendedFor" in metadata.keys():
136 if isinstance(metadata["IntendedFor"], list):
137 intended_for = metadata["IntendedFor"]
138 else:
139 intended_for = [metadata["IntendedFor"]]
140 if any([path.endswith(suffix) for suffix in intended_for]):
141 cur_fieldmap = {}
142 if file.type == "phasediff":
143 cur_fieldmap = {"phasediff": file.filename,
144 "magnitude1": file.filename.replace(
145 "phasediff", "magnitude1"),
146 "magnitude2": file.filename.replace(
147 "phasediff", "magnitude2"),
148 "type": "phasediff"}
149 elif file.type == "phase1":
150 cur_fieldmap["phase1"] = file.filename
151 cur_fieldmap["magnitude1"] = \
152 file.filename.replace("phase1", "magnitude1")
153 cur_fieldmap["phase2"] = \
154 file.filename.replace("phase1", "phase2")
155 cur_fieldmap["magnitude2"] = \
156 file.filename.replace("phase1", "magnitude2")
157 cur_fieldmap["type"] = "phase"
158 elif file.type == "epi":
159 cur_fieldmap["epi"] = file.filename
160 cur_fieldmap["type"] = "epi"
161 elif file.type == "fieldmap":
162 cur_fieldmap["fieldmap"] = file.filename
163 cur_fieldmap["magnitude"] = \
164 file.filename.replace("fieldmap", "magnitude")
165 cur_fieldmap["type"] = "fieldmap"
166 fieldmap_set.append(cur_fieldmap)
167 return fieldmap_set
168
[end of bids/grabbids/bids_layout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py
--- a/bids/grabbids/bids_layout.py
+++ b/bids/grabbids/bids_layout.py
@@ -143,9 +143,11 @@
cur_fieldmap = {"phasediff": file.filename,
"magnitude1": file.filename.replace(
"phasediff", "magnitude1"),
- "magnitude2": file.filename.replace(
- "phasediff", "magnitude2"),
"type": "phasediff"}
+ magnitude2 = file.filename.replace(
+ "phasediff", "magnitude2")
+ if os.path.isfile(magnitude2):
+ cur_fieldmap['magnitude2'] = magnitude2
elif file.type == "phase1":
cur_fieldmap["phase1"] = file.filename
cur_fieldmap["magnitude1"] = \
|
{"golden_diff": "diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py\n--- a/bids/grabbids/bids_layout.py\n+++ b/bids/grabbids/bids_layout.py\n@@ -143,9 +143,11 @@\n cur_fieldmap = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n- \"magnitude2\": file.filename.replace(\n- \"phasediff\", \"magnitude2\"),\n \"type\": \"phasediff\"}\n+ magnitude2 = file.filename.replace(\n+ \"phasediff\", \"magnitude2\")\n+ if os.path.isfile(magnitude2):\n+ cur_fieldmap['magnitude2'] = magnitude2\n elif file.type == \"phase1\":\n cur_fieldmap[\"phase1\"] = file.filename\n cur_fieldmap[\"magnitude1\"] = \\\n", "issue": "Fieldmaps - _magnitude2.nii[.gz] is optional\nAs per these lines:\r\n\r\nhttps://github.com/INCF/pybids/blob/800d15053952991c9cd4a00cf0039288d489ca12/bids/grabbids/bids_layout.py#L143-L148\r\n\r\nwhen a fieldmap type is identified as `'phasediff'` then pybids understands that there is always a `_magnitude2.nii[.gz]` file. However, this is maked as optional in the specification.\n", "before_files": [{"content": "import os\nimport json\n\nfrom os.path import dirname\nfrom os.path import abspath\nfrom os.path import join as pathjoin\n\nfrom .bids_validator import BIDSValidator\nfrom grabbit import Layout\n\n__all__ = ['BIDSLayout']\n\n\nclass BIDSLayout(Layout):\n\n def __init__(self, path, config=None, validate=False,\n index_associated=True, **kwargs):\n self.validator = BIDSValidator(index_associated=index_associated)\n self.validate = validate\n if config is None:\n root = dirname(abspath(__file__))\n config = pathjoin(root, 'config', 'bids.json')\n super(BIDSLayout, self).__init__(path, config,\n dynamic_getters=True, **kwargs)\n\n def _validate_file(self, f):\n # If validate=True then checks files according to BIDS and\n # returns False if file doesn't fit BIDS specification\n if not self.validate:\n return True\n to_check = f.path\n to_check = to_check.split(os.path.abspath(self.root), maxsplit=1)[1]\n\n sep = os.path.sep\n if to_check[:len(sep)] != sep:\n to_check = sep + to_check\n else:\n None\n\n return self.validator.is_bids(to_check)\n\n def _get_nearest_helper(self, path, extension, type=None, **kwargs):\n \"\"\" Helper function for grabbit get_nearest \"\"\"\n path = abspath(path)\n\n if path not in self.files:\n raise ValueError(\"File '%s' could not be found in the current BIDS\"\n \" project.\" % path)\n\n if not type:\n # Constrain the search to .json files with the same type as target\n type = self.files[path].entities['type']\n\n tmp = self.get_nearest(path, extensions=extension, all_=True,\n type=type, ignore_strict_entities=['type'],\n **kwargs)\n\n if len(tmp):\n return tmp\n else:\n return None\n\n def get_metadata(self, path, **kwargs):\n ''' Returns metadata found in JSON sidecars for the specified file.\n Args:\n path (str): Path to the file to get metadata for.\n kwargs (dict): Optional keyword arguments to pass onto\n get_nearest().\n Notes:\n A dictionary containing metadata extracted from all matching .json\n files is returned. 
In cases where the same key is found in multiple\n files, the values in files closer to the input filename will take\n precedence, per the inheritance rules in the BIDS specification.\n '''\n potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)\n\n if not isinstance(potentialJSONs, list):\n return potentialJSONs\n\n merged_param_dict = {}\n for json_file_path in reversed(potentialJSONs):\n if os.path.exists(json_file_path):\n param_dict = json.load(open(json_file_path, \"r\",\n encoding='utf-8'))\n merged_param_dict.update(param_dict)\n\n return merged_param_dict\n\n def get_bvec(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_bval(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_events(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs)\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_fieldmap(self, path, return_list=False):\n fieldmaps = self._get_fieldmaps(path)\n\n if return_list:\n return fieldmaps\n else:\n if len(fieldmaps) == 1:\n return fieldmaps[0]\n elif len(fieldmaps) > 1:\n raise ValueError(\"More than one fieldmap found, but the \"\n \"'return_list' argument was set to False. \"\n \"Either ensure that there is only one \"\n \"fieldmap for this image, or set the \"\n \"'return_list' argument to True and handle \"\n \"the result as a list.\")\n else: # len(fieldmaps) == 0\n return None\n\n def _get_fieldmaps(self, path):\n sub = os.path.split(path)[1].split(\"_\")[0].split(\"sub-\")[1]\n fieldmap_set = []\n type_ = '(phase1|phasediff|epi|fieldmap)'\n for file in self.get(subject=sub, type=type_,\n extensions=['nii.gz', 'nii']):\n metadata = self.get_metadata(file.filename)\n if metadata and \"IntendedFor\" in metadata.keys():\n if isinstance(metadata[\"IntendedFor\"], list):\n intended_for = metadata[\"IntendedFor\"]\n else:\n intended_for = [metadata[\"IntendedFor\"]]\n if any([path.endswith(suffix) for suffix in intended_for]):\n cur_fieldmap = {}\n if file.type == \"phasediff\":\n cur_fieldmap = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n \"magnitude2\": file.filename.replace(\n \"phasediff\", \"magnitude2\"),\n \"type\": \"phasediff\"}\n elif file.type == \"phase1\":\n cur_fieldmap[\"phase1\"] = file.filename\n cur_fieldmap[\"magnitude1\"] = \\\n file.filename.replace(\"phase1\", \"magnitude1\")\n cur_fieldmap[\"phase2\"] = \\\n file.filename.replace(\"phase1\", \"phase2\")\n cur_fieldmap[\"magnitude2\"] = \\\n file.filename.replace(\"phase1\", \"magnitude2\")\n cur_fieldmap[\"type\"] = \"phase\"\n elif file.type == \"epi\":\n cur_fieldmap[\"epi\"] = file.filename\n cur_fieldmap[\"type\"] = \"epi\"\n elif file.type == \"fieldmap\":\n cur_fieldmap[\"fieldmap\"] = file.filename\n cur_fieldmap[\"magnitude\"] = \\\n file.filename.replace(\"fieldmap\", \"magnitude\")\n cur_fieldmap[\"type\"] = \"fieldmap\"\n fieldmap_set.append(cur_fieldmap)\n return fieldmap_set\n", "path": "bids/grabbids/bids_layout.py"}]}
| 2,481 | 199 |
gh_patches_debug_2789
|
rasdani/github-patches
|
git_diff
|
ivy-llc__ivy-18204
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
meshgrid
</issue>
<code>
[start of ivy/functional/frontends/paddle/tensor/creation.py]
1 # global
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes
4 from .tensor import Tensor
5 from ivy.functional.frontends.paddle.func_wrapper import (
6 to_ivy_arrays_and_back,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):
12 array = ivy.array(data, dtype=dtype, device=place)
13 return Tensor(array, dtype=dtype, place=place)
14
15
16 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle")
17 @to_ivy_arrays_and_back
18 def ones(shape, /, *, dtype=None, name=None):
19 dtype = "float32" if dtype is None else dtype
20 return ivy.ones(shape, dtype=dtype)
21
22
23 @with_unsupported_dtypes(
24 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
25 )
26 @to_ivy_arrays_and_back
27 def ones_like(x, /, *, dtype=None, name=None):
28 dtype = x.dtype if dtype is None else dtype
29 return ivy.ones_like(x, dtype=dtype)
30
31
32 @with_unsupported_dtypes({"2.5.0 and below": "int8"}, "paddle")
33 @to_ivy_arrays_and_back
34 def zeros(shape, /, *, dtype=None, name=None):
35 dtype = "float32" if dtype is None else dtype
36 return ivy.zeros(shape, dtype=dtype)
37
38
39 @with_unsupported_dtypes(
40 {"2.5.0 and below": ("uint8", "int8", "complex64", "complex128")}, "paddle"
41 )
42 @to_ivy_arrays_and_back
43 def zeros_like(x, /, *, dtype=None, name=None):
44 dtype = x.dtype if dtype is None else dtype
45 return ivy.zeros_like(x, dtype=dtype)
46
47
48 @to_ivy_arrays_and_back
49 def full(shape, fill_value, /, *, dtype=None, name=None):
50 dtype = "float32" if dtype is None else dtype
51 return ivy.full(shape, fill_value, dtype=dtype)
52
53
54 @to_ivy_arrays_and_back
55 def full_like(x, fill_value, /, *, dtype=None, name=None):
56 dtype = x.dtype if dtype is None else dtype
57 return ivy.full_like(x, fill_value, dtype=dtype)
58
59
60 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle")
61 @to_ivy_arrays_and_back
62 def arange(start, end=None, step=1, dtype=None, name=None):
63 return ivy.arange(start, end, step=step, dtype=dtype)
64
65
66 @to_ivy_arrays_and_back
67 def empty(shape, dtype=None):
68 return ivy.empty(shape=shape, dtype=dtype)
69
70
71 @to_ivy_arrays_and_back
72 def eye(num_rows, num_columns=None, dtype=None, name=None):
73 return ivy.eye(num_rows, num_columns, dtype=dtype)
74
75
76 @to_ivy_arrays_and_back
77 def empty_like(x, dtype=None, name=None):
78 return ivy.empty_like(x, dtype=dtype)
79
80
81 @with_unsupported_dtypes(
82 {
83 "2.5.0 and below": (
84 "uint8",
85 "int8",
86 "int16",
87 "float16",
88 "complex64",
89 "complex128",
90 "bool",
91 )
92 },
93 "paddle",
94 )
95 @to_ivy_arrays_and_back
96 def tril(x, diagonal=0, name=None):
97 return ivy.tril(x, k=diagonal)
98
99
100 @with_unsupported_dtypes(
101 {
102 "2.5.0 and below": (
103 "uint8",
104 "int8",
105 "int16",
106 "float16",
107 "complex64",
108 "complex128",
109 "bool",
110 )
111 },
112 "paddle",
113 )
114 @to_ivy_arrays_and_back
115 def triu(x, diagonal=0, name=None):
116 return ivy.triu(x, k=diagonal)
117
118
119 @with_supported_dtypes(
120 {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
121 )
122 @to_ivy_arrays_and_back
123 def diagflat(x, offset=0, name=None):
124 arr = ivy.diagflat(x, offset=offset)
125 return arr
126
[end of ivy/functional/frontends/paddle/tensor/creation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py
--- a/ivy/functional/frontends/paddle/tensor/creation.py
+++ b/ivy/functional/frontends/paddle/tensor/creation.py
@@ -123,3 +123,11 @@
def diagflat(x, offset=0, name=None):
arr = ivy.diagflat(x, offset=offset)
return arr
+
+
+@with_supported_dtypes(
+ {"2.5.0 and below": ("float32", "float64", "int32", "int64")}, "paddle"
+)
+@to_ivy_arrays_and_back
+def meshgrid(*args, **kwargs):
+ return ivy.meshgrid(*args, indexing="ij")
|
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/creation.py b/ivy/functional/frontends/paddle/tensor/creation.py\n--- a/ivy/functional/frontends/paddle/tensor/creation.py\n+++ b/ivy/functional/frontends/paddle/tensor/creation.py\n@@ -123,3 +123,11 @@\n def diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n+\r\n+\r\n+@with_supported_dtypes(\r\n+ {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n+)\r\n+@to_ivy_arrays_and_back\r\n+def meshgrid(*args, **kwargs):\r\n+ return ivy.meshgrid(*args, indexing=\"ij\")\n", "issue": "meshgrid\n\n", "before_files": [{"content": "# global\r\nimport ivy\r\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\r\nfrom .tensor import Tensor\r\nfrom ivy.functional.frontends.paddle.func_wrapper import (\r\n to_ivy_arrays_and_back,\r\n)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef to_tensor(data, /, *, dtype=None, place=None, stop_gradient=True):\r\n array = ivy.array(data, dtype=dtype, device=place)\r\n return Tensor(array, dtype=dtype, place=place)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef ones(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.ones(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef ones_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.ones_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": \"int8\"}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef zeros(shape, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.zeros(shape, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\"2.5.0 and below\": (\"uint8\", \"int8\", \"complex64\", \"complex128\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef zeros_like(x, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.zeros_like(x, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full(shape, fill_value, /, *, dtype=None, name=None):\r\n dtype = \"float32\" if dtype is None else dtype\r\n return ivy.full(shape, fill_value, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef full_like(x, fill_value, /, *, dtype=None, name=None):\r\n dtype = x.dtype if dtype is None else dtype\r\n return ivy.full_like(x, fill_value, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\r\n@to_ivy_arrays_and_back\r\ndef arange(start, end=None, step=1, dtype=None, name=None):\r\n return ivy.arange(start, end, step=step, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty(shape, dtype=None):\r\n return ivy.empty(shape=shape, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef eye(num_rows, num_columns=None, dtype=None, name=None):\r\n return ivy.eye(num_rows, num_columns, dtype=dtype)\r\n\r\n\r\n@to_ivy_arrays_and_back\r\ndef empty_like(x, dtype=None, name=None):\r\n return ivy.empty_like(x, dtype=dtype)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef tril(x, diagonal=0, name=None):\r\n return 
ivy.tril(x, k=diagonal)\r\n\r\n\r\n@with_unsupported_dtypes(\r\n {\r\n \"2.5.0 and below\": (\r\n \"uint8\",\r\n \"int8\",\r\n \"int16\",\r\n \"float16\",\r\n \"complex64\",\r\n \"complex128\",\r\n \"bool\",\r\n )\r\n },\r\n \"paddle\",\r\n)\r\n@to_ivy_arrays_and_back\r\ndef triu(x, diagonal=0, name=None):\r\n return ivy.triu(x, k=diagonal)\r\n\r\n\r\n@with_supported_dtypes(\r\n {\"2.5.0 and below\": (\"float32\", \"float64\", \"int32\", \"int64\")}, \"paddle\"\r\n)\r\n@to_ivy_arrays_and_back\r\ndef diagflat(x, offset=0, name=None):\r\n arr = ivy.diagflat(x, offset=offset)\r\n return arr\r\n", "path": "ivy/functional/frontends/paddle/tensor/creation.py"}]}
| 1,833 | 190 |
gh_patches_debug_2003
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-9604
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release google-cloud-storage
Hi @tseaver, could you help cut a release for google-cloud-storage?
cc: @JesseLovelace
</issue>
<code>
[start of storage/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-storage"
24 description = "Google Cloud Storage API client library"
25 version = "1.21.0"
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-auth >= 1.2.0",
33 "google-cloud-core >= 1.0.3, < 2.0dev",
34 "google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev",
35 ]
36 extras = {}
37
38
39 # Setup boilerplate below this line.
40
41 package_root = os.path.abspath(os.path.dirname(__file__))
42
43 readme_filename = os.path.join(package_root, "README.rst")
44 with io.open(readme_filename, encoding="utf-8") as readme_file:
45 readme = readme_file.read()
46
47 # Only include packages under the 'google' namespace. Do not include tests,
48 # benchmarks, etc.
49 packages = [
50 package for package in setuptools.find_packages() if package.startswith("google")
51 ]
52
53 # Determine which namespaces are needed.
54 namespaces = ["google"]
55 if "google.cloud" in packages:
56 namespaces.append("google.cloud")
57
58
59 setuptools.setup(
60 name=name,
61 version=version,
62 description=description,
63 long_description=readme,
64 author="Google LLC",
65 author_email="[email protected]",
66 license="Apache 2.0",
67 url="https://github.com/GoogleCloudPlatform/google-cloud-python",
68 classifiers=[
69 release_status,
70 "Intended Audience :: Developers",
71 "License :: OSI Approved :: Apache Software License",
72 "Programming Language :: Python",
73 "Programming Language :: Python :: 2",
74 "Programming Language :: Python :: 2.7",
75 "Programming Language :: Python :: 3",
76 "Programming Language :: Python :: 3.5",
77 "Programming Language :: Python :: 3.6",
78 "Programming Language :: Python :: 3.7",
79 "Operating System :: OS Independent",
80 "Topic :: Internet",
81 ],
82 platforms="Posix; MacOS X; Windows",
83 packages=packages,
84 namespace_packages=namespaces,
85 install_requires=dependencies,
86 extras_require=extras,
87 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
88 include_package_data=True,
89 zip_safe=False,
90 )
91
[end of storage/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/storage/setup.py b/storage/setup.py
--- a/storage/setup.py
+++ b/storage/setup.py
@@ -22,7 +22,7 @@
name = "google-cloud-storage"
description = "Google Cloud Storage API client library"
-version = "1.21.0"
+version = "1.22.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
|
{"golden_diff": "diff --git a/storage/setup.py b/storage/setup.py\n--- a/storage/setup.py\n+++ b/storage/setup.py\n@@ -22,7 +22,7 @@\n \n name = \"google-cloud-storage\"\n description = \"Google Cloud Storage API client library\"\n-version = \"1.21.0\"\n+version = \"1.22.0\"\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Release google-cloud-storage \nHi @tseaver, could you help cut a release for google-cloud-storage?\r\n\r\ncc: @JesseLovelace \n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-storage\"\ndescription = \"Google Cloud Storage API client library\"\nversion = \"1.21.0\"\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-auth >= 1.2.0\",\n \"google-cloud-core >= 1.0.3, < 2.0dev\",\n \"google-resumable-media >= 0.3.1, != 0.4.0, < 0.5dev\",\n]\nextras = {}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages() if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/GoogleCloudPlatform/google-cloud-python\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "storage/setup.py"}]}
| 1,423 | 101 |
gh_patches_debug_318
|
rasdani/github-patches
|
git_diff
|
TencentBlueKing__bk-user-164
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Department query API with ?lookup_field=name returns 404 when the department name contains "."
**Describe the problem you encountered**
Please describe the problem concisely; the clarity of the description determines how efficiently it can be resolved.
**Steps to reproduce**
1. Create a directory whose name contains a dot, e.g. 广东省.深圳市
2. Query via the API: http:://{host:port}/api/v2/departments/广东省.深圳市/?lookup_field=name
The query returns a 404.
Please describe how to reproduce the problem; if it is hard to describe in words, screenshots or videos can help.
**Expected behavior**
The expected normal behavior
**Version**
- Provide the specific version number of the user management service
- Is this an Enterprise Edition issue?
**If this is a SaaS page issue, please provide the operating system and browser information**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Additional context**
Anything else you think would help resolve the problem
</issue>
<code>
[start of src/api/bkuser_core/departments/urls.py]
1 # -*- coding: utf-8 -*-
2 """
3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at http://opensource.org/licenses/MIT
7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 specific language governing permissions and limitations under the License.
10 """
11 from bkuser_core.common.constants import LOOKUP_FIELD_NAME
12 from django.conf.urls import url
13
14 from . import views
15
16 PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
17
18 urlpatterns = [
19 url(
20 r"^api/v2/departments/$",
21 views.DepartmentViewSet.as_view(
22 {
23 "get": "list",
24 "post": "create",
25 }
26 ),
27 name="departments",
28 ),
29 url(
30 r"^api/v2/departments/%s/$" % PVAR_DEPARTMENT_ID,
31 views.DepartmentViewSet.as_view(
32 {
33 "get": "retrieve",
34 "post": "update",
35 "delete": "destroy",
36 "patch": "partial_update",
37 }
38 ),
39 name="departments.action",
40 ),
41 url(
42 r"^api/v2/departments/%s/restoration/$" % PVAR_DEPARTMENT_ID,
43 views.DepartmentViewSet.as_view(
44 {
45 "post": "restoration",
46 }
47 ),
48 name="departments.restoration",
49 ),
50 url(
51 r"^api/v2/departments/%s/ancestors/$" % PVAR_DEPARTMENT_ID,
52 views.DepartmentViewSet.as_view(
53 {
54 "get": "get_ancestor",
55 }
56 ),
57 name="departments.ancestors",
58 ),
59 url(
60 r"^api/v2/departments/%s/children/$" % PVAR_DEPARTMENT_ID,
61 views.DepartmentViewSet.as_view(
62 {
63 "get": "get_children",
64 }
65 ),
66 name="departments.children",
67 ),
68 url(
69 r"^api/v2/departments/%s/profiles/$" % PVAR_DEPARTMENT_ID,
70 views.DepartmentViewSet.as_view({"get": "get_profiles", "post": "add_profiles"}),
71 name="departments.profiles",
72 ),
73 #########
74 # Batch #
75 #########
76 url(
77 r"^api/v2/batch/departments/profiles/$",
78 views.BatchDepartmentsViewSet.as_view(
79 {
80 "get": "multiple_retrieve_profiles",
81 }
82 ),
83 name="department.batch",
84 ),
85 ########
86 # Edge #
87 ########
88 url(
89 r"^api/v2/edges/department_profile/$",
90 views.DepartmentProfileEdgeViewSet.as_view({"get": "list"}),
91 name="edge.department_profile",
92 ),
93 #############
94 # shortcuts #
95 #############
96 url(
97 r"^api/v2/shortcuts/departments/tops/$",
98 views.DepartmentViewSet.as_view({"get": "list_tops"}),
99 name="shortcuts.departments.list.tops",
100 ),
101 ]
102
[end of src/api/bkuser_core/departments/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py
--- a/src/api/bkuser_core/departments/urls.py
+++ b/src/api/bkuser_core/departments/urls.py
@@ -13,7 +13,7 @@
from . import views
-PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-]+)" % LOOKUP_FIELD_NAME
+PVAR_DEPARTMENT_ID = r"(?P<%s>[\w\-\.]+)" % LOOKUP_FIELD_NAME
urlpatterns = [
url(
|
{"golden_diff": "diff --git a/src/api/bkuser_core/departments/urls.py b/src/api/bkuser_core/departments/urls.py\n--- a/src/api/bkuser_core/departments/urls.py\n+++ b/src/api/bkuser_core/departments/urls.py\n@@ -13,7 +13,7 @@\n \n from . import views\n \n-PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n+PVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-\\.]+)\" % LOOKUP_FIELD_NAME\n \n urlpatterns = [\n url(\n", "issue": "\u90e8\u95e8\u67e5\u8be2\u63a5\u53e3 ?lookup_field=name\uff0c\u5f53\u90e8\u95e8\u540d\u79f0\u4e2d\u542b\u6709 \".\" \u65f6\u8fd4\u56de 404\n**\u7528\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898**\r\n\r\n\u8bf7\u7528\u7b80\u7ec3\u7684\u6587\u5b57\u63cf\u8ff0\u4f60\u9047\u5230\u7684\u95ee\u9898\uff0c\u95ee\u9898\u63cf\u8ff0\u7684\u6e05\u6670\u7a0b\u5ea6\u51b3\u5b9a\u4e86\u95ee\u9898\u88ab\u89e3\u51b3\u7684\u6548\u7387\u3002\r\n\r\n**\u91cd\u73b0\u65b9\u6cd5**\r\n1. \u521b\u5efa\u4e00\u4e2a\u76ee\u5f55\uff0c\u540d\u5b57\u5305\u542b\u70b9\uff0c\u5982\u3010\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02\u3011\r\n2. \u4f7f\u7528api\u67e5\u8be2\uff0c http:://{host:port}/api/v2/departments/\u5e7f\u4e1c\u7701.\u6df1\u5733\u5e02/?lookup_field=name\r\n\r\n\u67e5\u8be2\u7ed3\u679c\u662f404\r\n\r\n\u8bf7\u63cf\u8ff0\u95ee\u9898\u91cd\u73b0\u7684\u65b9\u6cd5\uff0c\u5982\u679c\u4e0d\u65b9\u4fbf\u63cf\u8ff0\uff0c\u53ef\u4ee5\u901a\u8fc7\u622a\u56fe\u6216\u8005\u89c6\u9891\u8f85\u52a9\u3002\r\n\r\n**\u9884\u671f\u884c\u4e3a**\r\n\r\n\u9884\u671f\u7684\u6b63\u5e38\u884c\u4e3a\r\n\r\n**\u7248\u672c**\r\n- \u63d0\u4f9b\u7528\u6237\u7ba1\u7406\u7684\u5177\u4f53\u7248\u672c\u53f7\r\n- \u662f\u5426\u662f\u4f01\u4e1a\u7248\u95ee\u9898\uff1f\r\n\r\n**\u5982\u679c\u662f SaaS \u9875\u9762\u95ee\u9898\uff0c\u8bf7\u63d0\u4f9b\u4f7f\u7528\u7684\u64cd\u4f5c\u7cfb\u7edf\u548c\u6d4f\u89c8\u5668\u4fe1\u606f**\r\n - OS: [e.g. iOS]\r\n - Browser [e.g. chrome, safari]\r\n - Version [e.g. 22]\r\n\r\n**\u989d\u5916\u4fe1\u606f**\r\n\r\n\u4efb\u4f55\u4f60\u89c9\u5f97\u6709\u52a9\u4e8e\u95ee\u9898\u89e3\u51b3\u7684\u5185\u5bb9\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom bkuser_core.common.constants import LOOKUP_FIELD_NAME\nfrom django.conf.urls import url\n\nfrom . 
import views\n\nPVAR_DEPARTMENT_ID = r\"(?P<%s>[\\w\\-]+)\" % LOOKUP_FIELD_NAME\n\nurlpatterns = [\n url(\n r\"^api/v2/departments/$\",\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"list\",\n \"post\": \"create\",\n }\n ),\n name=\"departments\",\n ),\n url(\n r\"^api/v2/departments/%s/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"retrieve\",\n \"post\": \"update\",\n \"delete\": \"destroy\",\n \"patch\": \"partial_update\",\n }\n ),\n name=\"departments.action\",\n ),\n url(\n r\"^api/v2/departments/%s/restoration/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"post\": \"restoration\",\n }\n ),\n name=\"departments.restoration\",\n ),\n url(\n r\"^api/v2/departments/%s/ancestors/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_ancestor\",\n }\n ),\n name=\"departments.ancestors\",\n ),\n url(\n r\"^api/v2/departments/%s/children/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view(\n {\n \"get\": \"get_children\",\n }\n ),\n name=\"departments.children\",\n ),\n url(\n r\"^api/v2/departments/%s/profiles/$\" % PVAR_DEPARTMENT_ID,\n views.DepartmentViewSet.as_view({\"get\": \"get_profiles\", \"post\": \"add_profiles\"}),\n name=\"departments.profiles\",\n ),\n #########\n # Batch #\n #########\n url(\n r\"^api/v2/batch/departments/profiles/$\",\n views.BatchDepartmentsViewSet.as_view(\n {\n \"get\": \"multiple_retrieve_profiles\",\n }\n ),\n name=\"department.batch\",\n ),\n ########\n # Edge #\n ########\n url(\n r\"^api/v2/edges/department_profile/$\",\n views.DepartmentProfileEdgeViewSet.as_view({\"get\": \"list\"}),\n name=\"edge.department_profile\",\n ),\n #############\n # shortcuts #\n #############\n url(\n r\"^api/v2/shortcuts/departments/tops/$\",\n views.DepartmentViewSet.as_view({\"get\": \"list_tops\"}),\n name=\"shortcuts.departments.list.tops\",\n ),\n]\n", "path": "src/api/bkuser_core/departments/urls.py"}]}
| 1,689 | 136 |
gh_patches_debug_35015
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-1546
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TN: Only scraping Joint committees
Running the [TN committee scraper](http://github.com/openstates/openstates/blob/7506cd3d6e0dacf891a23818d012ff0adf789265/openstates/tn/committees.py) locally, only the 4 joint committees are scraped:
```
18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/
18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/
18:35:45 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/fiscal-review
18:35:46 INFO billy: Save committee Fiscal Review
18:35:46 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/gov-opps
18:35:47 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/gov-opps.html
18:35:48 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/gov-opps.html
18:35:49 INFO billy: Save committee Government Operations
18:35:49 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/pension-ins.html
18:35:50 INFO billy: Save committee Pensions and Insurance
18:35:50 INFO scrapelib: GET - http://www.tn.gov/tacir/
18:35:52 INFO billy: Save committee TACIR
18:35:52 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/
18:35:52 INFO billy: imported 4 committee files
```
This matches the [bobsled report](http://bobsled.openstates.org/run-tn-2017-03-17.html) and [TN committees available on openstates.org](https://openstates.org/tn/committees/?chamber=)
</issue>
<code>
[start of openstates/tn/committees.py]
1 """
2 Archived Committee notes:
3
4 Senate committees only avail from 105th forward
5
6 Urls are inconsistent
7 'http://www.capitol.tn.gov/senate/archives/105GA/Committees/scommemb.htm'
8 'http://www.capitol.tn.gov/senate/archives/106GA/Committees/index.html'
9
10 'http://www.capitol.tn.gov/house/archives/99GA/Committees/hcommemb.htm'
11 'http://www.capitol.tn.gov/house/archives/100GA/hcommemb.htm'
12 'http://www.capitol.tn.gov/house/archives/101GA/hcommemb.htm'
13 'http://www.capitol.tn.gov/house/archives/102GA/Committees/HComm.htm'
14 'http://www.capitol.tn.gov/house/archives/103GA/hcommemb.htm'
15 'http://www.capitol.tn.gov/house/archives/104GA/hcommemb.htm'
16 'http://www.capitol.tn.gov/house/archives/105GA/Committees/hcommemb.htm'
17 'http://www.capitol.tn.gov/house/archives/106GA/Committees/index.html'
18
19 """
20 import re
21
22 from billy.scrape.committees import Committee, CommitteeScraper
23 import lxml.html
24 import requests
25
26
27 def fix_whitespace(s):
28 return re.sub(r'\s+', ' ', s)
29
30
31 class TNCommitteeScraper(CommitteeScraper):
32 jurisdiction = 'tn'
33 base_href = 'http://www.capitol.tn.gov'
34 chambers = {
35 'lower': 'house',
36 'upper': 'senate'
37 }
38
39 def scrape(self, chamber, term):
40 self.validate_term(term, latest_only=True)
41 url_chamber = self.chambers[chamber]
42 url = 'http://www.capitol.tn.gov/%s/committees/' % (url_chamber)
43 if chamber == 'upper':
44 self.scrape_senate_committees(url)
45 self.scrape_joint_committees()
46 else:
47 self.scrape_house_committees(url)
48
49 def scrape_senate_committees(self, url):
50 page = self.get(url).text
51 page = lxml.html.fromstring(page)
52 page.make_links_absolute(url)
53
54 find_expr = 'body/div/div/h1[text()="Senate Committees"]/' \
55 'following-sibling::div/div/div/div//a'
56 links = [(a.text_content(), a.attrib['href']) for a in
57 page.xpath(find_expr)]
58
59 for committee_name, link in links:
60 self._scrape_committee(committee_name, link, 'upper')
61
62 def scrape_house_committees(self, url):
63 html = self.get(url).text
64 doc = lxml.html.fromstring(html)
65 doc.make_links_absolute(url)
66
67 links = doc.xpath(
68 'body/div/div/h1[text()="House Committees"]/'
69 'following-sibling::div/div/div/div//a'
70 )
71
72 for a in links:
73 self._scrape_committee(a.text.strip(), a.get('href'), 'lower')
74
75 def _scrape_committee(self, committee_name, link, chamber):
76 """Scrape individual committee page and add members"""
77
78 page = self.get(link).text
79 page = lxml.html.fromstring(page)
80 page.make_links_absolute(link)
81
82 is_subcommittee = bool(page.xpath('//li/a[text()="Committee"]'))
83 if is_subcommittee:
84 com = Committee(
85 chamber,
86 re.sub(r'\s*Subcommittee\s*', '', committee_name),
87 committee_name
88 )
89 else:
90 com = Committee(chamber, committee_name)
91
92 OFFICER_SEARCH = '//h2[contains(text(), "Committee Officers")]/' \
93 'following-sibling::div/ul/li/a'
94 MEMBER_SEARCH = '//h2[contains(text(), "Committee Members")]/' \
95 'following-sibling::div/ul/li/a'
96 HOUSE_SEARCH = '//h2[contains(text(), "House Members")]/' \
97 'following-sibling::div/ul/li/a'
98 SENATE_SEARCH = '//h2[contains(text(), "House Members")]/' \
99 'following-sibling::div/ul/li/a'
100 for a in (page.xpath(OFFICER_SEARCH) + page.xpath(MEMBER_SEARCH)):
101
102 member_name = ' '.join([
103 x.strip() for x in
104 a.xpath('text()') + a.xpath('span/text()')
105 if x.strip()
106 ])
107 role = a.xpath('small')
108 if role:
109 role = role[0].xpath('text()')[0].strip()
110 else:
111 role = 'member'
112
113 com.add_member(member_name, role)
114
115 com.add_source(link)
116 self.save_committee(com)
117
118 #Scrapes joint committees
119 def scrape_joint_committees(self):
120 main_url = 'http://www.capitol.tn.gov/joint/'
121
122 page = self.get(main_url).text
123 page = lxml.html.fromstring(page)
124 page.make_links_absolute(main_url)
125
126 for el in page.xpath(
127 '//div/h2[text()="Committees"]/'
128 'following-sibling::div/div//a'
129 ):
130 com_name = el.text
131 com_link = el.attrib["href"]
132 self.scrape_joint_committee(com_name, com_link)
133
134 #Scrapes the individual joint committee - most of it is special case
135 def scrape_joint_committee(self, committee_name, url):
136 if 'state.tn.us' in url:
137 com = Committee('joint', committee_name)
138 try:
139 page = self.get(url).text
140 except requests.exceptions.ConnectionError:
141 self.logger.warning("Committee link is broken, skipping")
142 return
143
144 page = lxml.html.fromstring(page)
145
146 for el in page.xpath("//div[@class='Blurb']/table//tr[2 <= position() and position() < 10]/td[1]"):
147 if el.xpath('text()') == ['Vacant']:
148 continue
149
150 (member_name, ) = el.xpath('a/text()')
151 if el.xpath('text()'):
152 role = el.xpath('text()')[0].strip(' ,')
153 else:
154 role = 'member'
155
156 member_name = member_name.replace('Senator', '')
157 member_name = member_name.replace('Representative', '')
158 member_name = member_name.strip()
159 com.add_member(member_name, role)
160
161 com.add_source(url)
162 self.save_committee(com)
163
164 elif 'gov-opps' in url:
165 com = Committee('joint', committee_name)
166 page = self.get(url).text
167 page = lxml.html.fromstring(page)
168
169 links = ['senate', 'house']
170 for link in links:
171 chamber_link = self.base_href + '/' + link + '/committees/gov-opps.html'
172 chamber_page = self.get(chamber_link).text
173 chamber_page = lxml.html.fromstring(chamber_page)
174
175 OFFICER_SEARCH = '//h2[contains(text(), "Committee Officers")]/' \
176 'following-sibling::div/ul/li/a'
177 MEMBER_SEARCH = '//h2[contains(text(), "Committee Members")]/' \
178 'following-sibling::div/ul/li/a'
179 for a in (
180 chamber_page.xpath(OFFICER_SEARCH) +
181 chamber_page.xpath(MEMBER_SEARCH)
182 ):
183 member_name = ' '.join([
184 x.strip() for x in
185 a.xpath('.//text()')
186 if x.strip()
187 ])
188 role = a.xpath('small')
189 if role:
190 role = role[0].xpath('text()')[0].strip()
191 member_name = member_name.replace(role, '').strip()
192 else:
193 role = 'member'
194 com.add_member(member_name, role)
195
196 com.add_source(chamber_link)
197
198 com.add_source(url)
199 self.save_committee(com)
200
201 else:
202 self._scrape_committee(committee_name, url, 'joint')
203
[end of openstates/tn/committees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openstates/tn/committees.py b/openstates/tn/committees.py
--- a/openstates/tn/committees.py
+++ b/openstates/tn/committees.py
@@ -28,6 +28,10 @@
return re.sub(r'\s+', ' ', s)
+# All links in a section with a given title
+COMMITTEE_LINKS_TEMPLATE = '//h2[text()="{header}"]/parent::*//a'
+
+
class TNCommitteeScraper(CommitteeScraper):
jurisdiction = 'tn'
base_href = 'http://www.capitol.tn.gov'
@@ -51,8 +55,9 @@
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
- find_expr = 'body/div/div/h1[text()="Senate Committees"]/' \
- 'following-sibling::div/div/div/div//a'
+ standing = COMMITTEE_LINKS_TEMPLATE.format(header="Standing Committees")
+ select = COMMITTEE_LINKS_TEMPLATE.format(header="Select Committees")
+ find_expr = '{}|{}'.format(standing, select)
links = [(a.text_content(), a.attrib['href']) for a in
page.xpath(find_expr)]
@@ -64,10 +69,9 @@
doc = lxml.html.fromstring(html)
doc.make_links_absolute(url)
- links = doc.xpath(
- 'body/div/div/h1[text()="House Committees"]/'
- 'following-sibling::div/div/div/div//a'
- )
+ standing = COMMITTEE_LINKS_TEMPLATE.format(header="Committees & Subcommittees")
+ select = COMMITTEE_LINKS_TEMPLATE.format(header="Select Committees")
+ links = doc.xpath('{}|{}'.format(standing, select))
for a in links:
self._scrape_committee(a.text.strip(), a.get('href'), 'lower')
@@ -123,10 +127,7 @@
page = lxml.html.fromstring(page)
page.make_links_absolute(main_url)
- for el in page.xpath(
- '//div/h2[text()="Committees"]/'
- 'following-sibling::div/div//a'
- ):
+ for el in page.xpath(COMMITTEE_LINKS_TEMPLATE.format(header="Committees")):
com_name = el.text
com_link = el.attrib["href"]
self.scrape_joint_committee(com_name, com_link)
|
{"golden_diff": "diff --git a/openstates/tn/committees.py b/openstates/tn/committees.py\n--- a/openstates/tn/committees.py\n+++ b/openstates/tn/committees.py\n@@ -28,6 +28,10 @@\n return re.sub(r'\\s+', ' ', s)\n \n \n+# All links in a section with a given title\n+COMMITTEE_LINKS_TEMPLATE = '//h2[text()=\"{header}\"]/parent::*//a'\n+\n+\n class TNCommitteeScraper(CommitteeScraper):\n jurisdiction = 'tn'\n base_href = 'http://www.capitol.tn.gov'\n@@ -51,8 +55,9 @@\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n \n- find_expr = 'body/div/div/h1[text()=\"Senate Committees\"]/' \\\n- 'following-sibling::div/div/div/div//a'\n+ standing = COMMITTEE_LINKS_TEMPLATE.format(header=\"Standing Committees\")\n+ select = COMMITTEE_LINKS_TEMPLATE.format(header=\"Select Committees\")\n+ find_expr = '{}|{}'.format(standing, select)\n links = [(a.text_content(), a.attrib['href']) for a in\n page.xpath(find_expr)]\n \n@@ -64,10 +69,9 @@\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n \n- links = doc.xpath(\n- 'body/div/div/h1[text()=\"House Committees\"]/'\n- 'following-sibling::div/div/div/div//a'\n- )\n+ standing = COMMITTEE_LINKS_TEMPLATE.format(header=\"Committees & Subcommittees\")\n+ select = COMMITTEE_LINKS_TEMPLATE.format(header=\"Select Committees\")\n+ links = doc.xpath('{}|{}'.format(standing, select))\n \n for a in links:\n self._scrape_committee(a.text.strip(), a.get('href'), 'lower')\n@@ -123,10 +127,7 @@\n page = lxml.html.fromstring(page)\n page.make_links_absolute(main_url)\n \n- for el in page.xpath(\n- '//div/h2[text()=\"Committees\"]/'\n- 'following-sibling::div/div//a'\n- ):\n+ for el in page.xpath(COMMITTEE_LINKS_TEMPLATE.format(header=\"Committees\")):\n com_name = el.text\n com_link = el.attrib[\"href\"]\n self.scrape_joint_committee(com_name, com_link)\n", "issue": "TN: Only scraping Joint committees\nRunning the [TN committee scraper](http://github.com/openstates/openstates/blob/7506cd3d6e0dacf891a23818d012ff0adf789265/openstates/tn/committees.py) locally, only the 4 joint committees are scraped:\r\n\r\n```\r\n18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/\r\n18:35:44 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/\r\n18:35:45 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/fiscal-review\r\n18:35:46 INFO billy: Save committee Fiscal Review\r\n18:35:46 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/gov-opps\r\n18:35:47 INFO scrapelib: GET - http://www.capitol.tn.gov/senate/committees/gov-opps.html\r\n18:35:48 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/gov-opps.html\r\n18:35:49 INFO billy: Save committee Government Operations\r\n18:35:49 INFO scrapelib: GET - http://www.capitol.tn.gov/joint/committees/pension-ins.html\r\n18:35:50 INFO billy: Save committee Pensions and Insurance\r\n18:35:50 INFO scrapelib: GET - http://www.tn.gov/tacir/\r\n18:35:52 INFO billy: Save committee TACIR\r\n18:35:52 INFO scrapelib: GET - http://www.capitol.tn.gov/house/committees/\r\n18:35:52 INFO billy: imported 4 committee files\r\n```\r\n\r\nThis matches the [bobsled report](http://bobsled.openstates.org/run-tn-2017-03-17.html) and [TN committees available on openstates.org](https://openstates.org/tn/committees/?chamber=)\n", "before_files": [{"content": "\"\"\"\nArchived Committee notes:\n\nSenate committees only avail from 105th forward\n\nUrls are 
inconsistent\n'http://www.capitol.tn.gov/senate/archives/105GA/Committees/scommemb.htm'\n'http://www.capitol.tn.gov/senate/archives/106GA/Committees/index.html'\n\n'http://www.capitol.tn.gov/house/archives/99GA/Committees/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/100GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/101GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/102GA/Committees/HComm.htm'\n'http://www.capitol.tn.gov/house/archives/103GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/104GA/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/105GA/Committees/hcommemb.htm'\n'http://www.capitol.tn.gov/house/archives/106GA/Committees/index.html'\n\n\"\"\"\nimport re\n\nfrom billy.scrape.committees import Committee, CommitteeScraper\nimport lxml.html\nimport requests\n\n\ndef fix_whitespace(s):\n return re.sub(r'\\s+', ' ', s)\n\n\nclass TNCommitteeScraper(CommitteeScraper):\n jurisdiction = 'tn'\n base_href = 'http://www.capitol.tn.gov'\n chambers = {\n 'lower': 'house',\n 'upper': 'senate'\n }\n\n def scrape(self, chamber, term):\n self.validate_term(term, latest_only=True)\n url_chamber = self.chambers[chamber]\n url = 'http://www.capitol.tn.gov/%s/committees/' % (url_chamber)\n if chamber == 'upper':\n self.scrape_senate_committees(url)\n self.scrape_joint_committees()\n else:\n self.scrape_house_committees(url)\n\n def scrape_senate_committees(self, url):\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(url)\n\n find_expr = 'body/div/div/h1[text()=\"Senate Committees\"]/' \\\n 'following-sibling::div/div/div/div//a'\n links = [(a.text_content(), a.attrib['href']) for a in\n page.xpath(find_expr)]\n\n for committee_name, link in links:\n self._scrape_committee(committee_name, link, 'upper')\n\n def scrape_house_committees(self, url):\n html = self.get(url).text\n doc = lxml.html.fromstring(html)\n doc.make_links_absolute(url)\n\n links = doc.xpath(\n 'body/div/div/h1[text()=\"House Committees\"]/'\n 'following-sibling::div/div/div/div//a'\n )\n\n for a in links:\n self._scrape_committee(a.text.strip(), a.get('href'), 'lower')\n\n def _scrape_committee(self, committee_name, link, chamber):\n \"\"\"Scrape individual committee page and add members\"\"\"\n\n page = self.get(link).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(link)\n\n is_subcommittee = bool(page.xpath('//li/a[text()=\"Committee\"]'))\n if is_subcommittee:\n com = Committee(\n chamber,\n re.sub(r'\\s*Subcommittee\\s*', '', committee_name),\n committee_name\n )\n else:\n com = Committee(chamber, committee_name)\n\n OFFICER_SEARCH = '//h2[contains(text(), \"Committee Officers\")]/' \\\n 'following-sibling::div/ul/li/a'\n MEMBER_SEARCH = '//h2[contains(text(), \"Committee Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n HOUSE_SEARCH = '//h2[contains(text(), \"House Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n SENATE_SEARCH = '//h2[contains(text(), \"House Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n for a in (page.xpath(OFFICER_SEARCH) + page.xpath(MEMBER_SEARCH)):\n\n member_name = ' '.join([\n x.strip() for x in\n a.xpath('text()') + a.xpath('span/text()')\n if x.strip()\n ])\n role = a.xpath('small')\n if role:\n role = role[0].xpath('text()')[0].strip()\n else:\n role = 'member'\n\n com.add_member(member_name, role)\n\n com.add_source(link)\n self.save_committee(com)\n\n #Scrapes joint committees\n def scrape_joint_committees(self):\n main_url = 'http://www.capitol.tn.gov/joint/'\n\n page 
= self.get(main_url).text\n page = lxml.html.fromstring(page)\n page.make_links_absolute(main_url)\n\n for el in page.xpath(\n '//div/h2[text()=\"Committees\"]/'\n 'following-sibling::div/div//a'\n ):\n com_name = el.text\n com_link = el.attrib[\"href\"]\n self.scrape_joint_committee(com_name, com_link)\n\n #Scrapes the individual joint committee - most of it is special case\n def scrape_joint_committee(self, committee_name, url):\n if 'state.tn.us' in url:\n com = Committee('joint', committee_name)\n try:\n page = self.get(url).text\n except requests.exceptions.ConnectionError:\n self.logger.warning(\"Committee link is broken, skipping\")\n return\n\n page = lxml.html.fromstring(page)\n\n for el in page.xpath(\"//div[@class='Blurb']/table//tr[2 <= position() and position() < 10]/td[1]\"):\n if el.xpath('text()') == ['Vacant']:\n continue\n\n (member_name, ) = el.xpath('a/text()')\n if el.xpath('text()'):\n role = el.xpath('text()')[0].strip(' ,')\n else:\n role = 'member'\n\n member_name = member_name.replace('Senator', '')\n member_name = member_name.replace('Representative', '')\n member_name = member_name.strip()\n com.add_member(member_name, role)\n\n com.add_source(url)\n self.save_committee(com)\n\n elif 'gov-opps' in url:\n com = Committee('joint', committee_name)\n page = self.get(url).text\n page = lxml.html.fromstring(page)\n\n links = ['senate', 'house']\n for link in links:\n chamber_link = self.base_href + '/' + link + '/committees/gov-opps.html'\n chamber_page = self.get(chamber_link).text\n chamber_page = lxml.html.fromstring(chamber_page)\n \n OFFICER_SEARCH = '//h2[contains(text(), \"Committee Officers\")]/' \\\n 'following-sibling::div/ul/li/a'\n MEMBER_SEARCH = '//h2[contains(text(), \"Committee Members\")]/' \\\n 'following-sibling::div/ul/li/a'\n for a in (\n chamber_page.xpath(OFFICER_SEARCH) + \n chamber_page.xpath(MEMBER_SEARCH)\n ):\n member_name = ' '.join([\n x.strip() for x in\n a.xpath('.//text()')\n if x.strip()\n ])\n role = a.xpath('small')\n if role:\n role = role[0].xpath('text()')[0].strip()\n member_name = member_name.replace(role, '').strip()\n else:\n role = 'member'\n com.add_member(member_name, role)\n\n com.add_source(chamber_link)\n\n com.add_source(url)\n self.save_committee(com)\n\n else:\n self._scrape_committee(committee_name, url, 'joint')\n", "path": "openstates/tn/committees.py"}]}
| 3,276 | 529 |
gh_patches_debug_2253
|
rasdani/github-patches
|
git_diff
|
coala__coala-bears-1082
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GofmtBear: Add advanced asciinema
The coala bear GofmtBear does not have a proper asciinema.
`gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has.
I'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style.
</issue>
<code>
[start of bears/go/GofmtBear.py]
1 from coalib.bearlib.abstractions.Linter import linter
2 from coalib.bears.requirements.GoRequirement import GoRequirement
3
4
5 @linter(executable='gofmt',
6 use_stdin=True,
7 output_format='corrected',
8 result_message='Formatting can be improved.')
9 class GofmtBear:
10 """
11 Suggest better formatting options in Go code. Basic checks like alignment,
12 indentation, and redundant parentheses are provided.
13
14 This is done using the ``gofmt`` utility. For more information visit
15 <https://golang.org/cmd/gofmt/>.
16 """
17 LANGUAGES = {'Go'}
18 REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}
19 AUTHORS = {'The coala developers'}
20 AUTHORS_EMAILS = {'[email protected]'}
21 LICENSE = 'AGPL-3.0'
22 CAN_FIX = {'Formatting'}
23
24 @staticmethod
25 def create_arguments(filename, file, config_file):
26 return ()
27
[end of bears/go/GofmtBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py
--- a/bears/go/GofmtBear.py
+++ b/bears/go/GofmtBear.py
@@ -20,6 +20,7 @@
AUTHORS_EMAILS = {'[email protected]'}
LICENSE = 'AGPL-3.0'
CAN_FIX = {'Formatting'}
+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'
@staticmethod
def create_arguments(filename, file, config_file):
|
{"golden_diff": "diff --git a/bears/go/GofmtBear.py b/bears/go/GofmtBear.py\n--- a/bears/go/GofmtBear.py\n+++ b/bears/go/GofmtBear.py\n@@ -20,6 +20,7 @@\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n+ ASCIINEMA_URL = 'https://asciinema.org/a/94812'\n \n @staticmethod\n def create_arguments(filename, file, config_file):\n", "issue": "GofmtBear: Add advanced asciinema\nThe coala bear GofmtBear does not have a proper asciinema.\r\n\r\n`gofmt` is a command line tool that automatically solves formatting / styling issues to the absolute coding style that Go has.\r\n\r\nI'm planning to use a working code as the sample where it's filled with mixed indentation (spaces and tabs), semicolons and demonstrate how gofmt formats the code automatically and correctly to the absolute Go coding style.\r\n\n", "before_files": [{"content": "from coalib.bearlib.abstractions.Linter import linter\nfrom coalib.bears.requirements.GoRequirement import GoRequirement\n\n\n@linter(executable='gofmt',\n use_stdin=True,\n output_format='corrected',\n result_message='Formatting can be improved.')\nclass GofmtBear:\n \"\"\"\n Suggest better formatting options in Go code. Basic checks like alignment,\n indentation, and redundant parentheses are provided.\n\n This is done using the ``gofmt`` utility. For more information visit\n <https://golang.org/cmd/gofmt/>.\n \"\"\"\n LANGUAGES = {'Go'}\n REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}\n AUTHORS = {'The coala developers'}\n AUTHORS_EMAILS = {'[email protected]'}\n LICENSE = 'AGPL-3.0'\n CAN_FIX = {'Formatting'}\n\n @staticmethod\n def create_arguments(filename, file, config_file):\n return ()\n", "path": "bears/go/GofmtBear.py"}]}
| 900 | 128 |
gh_patches_debug_31762
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-agent-1042
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows CPU times can add up to > 100%
We're collecting PercentUserTime and PercentIdleTime from [Win32_PerfFormattedData_PerfOS_Processor](http://msdn.microsoft.com/en-us/library/windows/desktop/aa394271%28v=vs.85%29.aspx) which shouldn't ever add up to more than 100%, but it seems like it does.
Maybe we need to use a different metric other than PercentIdleTime?
</issue>
<code>
[start of checks/system/win32.py]
1 from checks import Check
2
3 try:
4 import wmi
5 w = wmi.WMI()
6 except Exception:
7 wmi, w = None, None
8
9 # Device WMI drive types
10 class DriveType(object):
11 UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)
12 B2MB = float(1048576)
13 KB2MB = B2KB = float(1024)
14
15 def should_ignore_disk(name, blacklist_re):
16 # blacklist_re is a compiled regex, compilation done at config loading time
17 return name =='_total' or blacklist_re is not None and blacklist_re.match(name)
18
19 class Processes(Check):
20 def __init__(self, logger):
21 Check.__init__(self, logger)
22 self.gauge('system.proc.queue_length')
23 self.gauge('system.proc.count')
24
25 def check(self, agentConfig):
26 try:
27 os = w.Win32_PerfFormattedData_PerfOS_System()[0]
28 except AttributeError:
29 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \
30 ' No process metrics will be returned.')
31 return
32
33 try:
34 cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name="_Total")[0]
35 except AttributeError:
36 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \
37 ' No process metrics will be returned.')
38 return
39 if os.ProcessorQueueLength is not None:
40 self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)
41 if os.Processes is not None:
42 self.save_sample('system.proc.count', os.Processes)
43
44 return self.get_metrics()
45
46 class Memory(Check):
47 def __init__(self, logger):
48 Check.__init__(self, logger)
49 self.logger = logger
50 self.gauge('system.mem.free')
51 self.gauge('system.mem.used')
52 self.gauge('system.mem.total')
53 self.gauge('system.mem.cached')
54 self.gauge('system.mem.committed')
55 self.gauge('system.mem.paged')
56 self.gauge('system.mem.nonpaged')
57
58 def check(self, agentConfig):
59 try:
60 os = w.Win32_OperatingSystem()[0]
61 except AttributeError:
62 self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')
63 return
64
65 if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:
66 total = int(os.TotalVisibleMemorySize) / KB2MB
67 free = int(os.FreePhysicalMemory) / KB2MB
68 self.save_sample('system.mem.total', total)
69 self.save_sample('system.mem.free', free)
70 self.save_sample('system.mem.used', total - free)
71
72 mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]
73 if mem.CacheBytes is not None:
74 self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)
75 if mem.CommittedBytes is not None:
76 self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)
77 if mem.PoolPagedBytes is not None:
78 self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)
79 if mem.PoolNonpagedBytes is not None:
80 self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)
81
82 return self.get_metrics()
83
84 class Cpu(Check):
85 def __init__(self, logger):
86 Check.__init__(self, logger)
87 self.logger = logger
88 self.gauge('system.cpu.user')
89 self.gauge('system.cpu.idle')
90 self.gauge('system.cpu.interrupt')
91 self.gauge('system.cpu.system')
92
93 def check(self, agentConfig):
94 try:
95 cpu = w.Win32_PerfFormattedData_PerfOS_Processor()
96 except AttributeError:
97 self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' \
98 ' No CPU metrics will be returned.')
99 return
100
101 cpu_user = self._average_metric(cpu, 'PercentUserTime')
102 if cpu_user:
103 self.save_sample('system.cpu.user', cpu_user)
104
105 cpu_idle = self._average_metric(cpu, 'PercentIdleTime')
106 if cpu_idle:
107 self.save_sample('system.cpu.idle', cpu_idle)
108
109 cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')
110 if cpu_interrupt is not None:
111 self.save_sample('system.cpu.interrupt', cpu_interrupt)
112
113 cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime')
114 if cpu_privileged is not None:
115 self.save_sample('system.cpu.system', cpu_privileged)
116
117 return self.get_metrics()
118
119 def _average_metric(self, wmi_class, wmi_prop):
120 ''' Sum all of the values of a metric from a WMI class object, excluding
121 the value for "_Total"
122 '''
123 val = 0
124 counter = 0
125 for wmi_object in wmi_class:
126 if wmi_object.Name == '_Total':
127 # Skip the _Total value
128 continue
129
130 if getattr(wmi_object, wmi_prop) is not None:
131 counter += 1
132 val += float(getattr(wmi_object, wmi_prop))
133
134 if counter > 0:
135 return val / counter
136
137 return val
138
139
140 class Network(Check):
141 def __init__(self, logger):
142 Check.__init__(self, logger)
143 self.logger = logger
144 self.gauge('system.net.bytes_rcvd')
145 self.gauge('system.net.bytes_sent')
146
147 def check(self, agentConfig):
148 try:
149 net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()
150 except AttributeError:
151 self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \
152 ' No network metrics will be returned')
153 return
154
155 for iface in net:
156 name = self.normalize_device_name(iface.name)
157 if iface.BytesReceivedPerSec is not None:
158 self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,
159 device_name=name)
160 if iface.BytesSentPerSec is not None:
161 self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,
162 device_name=name)
163 return self.get_metrics()
164
165 class Disk(Check):
166 def __init__(self, logger):
167 Check.__init__(self, logger)
168 self.logger = logger
169 self.gauge('system.disk.free')
170 self.gauge('system.disk.total')
171 self.gauge('system.disk.in_use')
172 self.gauge('system.disk.used')
173
174 def check(self, agentConfig):
175 try:
176 disk = w.Win32_LogicalDisk()
177 except AttributeError:
178 self.logger.info('Missing Win32_LogicalDisk WMI class.' \
179 ' No disk metrics will be returned.')
180 return
181
182 blacklist_re = agentConfig.get('device_blacklist_re', None)
183 for device in disk:
184 name = self.normalize_device_name(device.name)
185 if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):
186 continue
187 if device.FreeSpace is not None and device.Size is not None:
188 free = float(device.FreeSpace) / B2KB
189 total = float(device.Size) / B2KB
190 used = total - free
191 self.save_sample('system.disk.free', free, device_name=name)
192 self.save_sample('system.disk.total', total, device_name=name)
193 self.save_sample('system.disk.used', used, device_name=name)
194 self.save_sample('system.disk.in_use', (used / total),
195 device_name=name)
196 return self.get_metrics()
197
198 class IO(Check):
199 def __init__(self, logger):
200 Check.__init__(self, logger)
201 self.logger = logger
202 self.gauge('system.io.wkb_s')
203 self.gauge('system.io.w_s')
204 self.gauge('system.io.rkb_s')
205 self.gauge('system.io.r_s')
206 self.gauge('system.io.avg_q_sz')
207
208 def check(self, agentConfig):
209 try:
210 disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()
211 except AttributeError:
212 self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' \
213 ' No I/O metrics will be returned.')
214 return
215 blacklist_re = agentConfig.get('device_blacklist_re', None)
216 for device in disk:
217 name = self.normalize_device_name(device.name)
218 if should_ignore_disk(name, blacklist_re):
219 continue
220 if device.DiskWriteBytesPerSec is not None:
221 self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,
222 device_name=name)
223 if device.DiskWritesPerSec is not None:
224 self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),
225 device_name=name)
226 if device.DiskReadBytesPerSec is not None:
227 self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,
228 device_name=name)
229 if device.DiskReadsPerSec is not None:
230 self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),
231 device_name=name)
232 if device.CurrentDiskQueueLength is not None:
233 self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,
234 device_name=name)
235 return self.get_metrics()
236
[end of checks/system/win32.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checks/system/win32.py b/checks/system/win32.py
--- a/checks/system/win32.py
+++ b/checks/system/win32.py
@@ -1,5 +1,9 @@
+# project
from checks import Check
+# 3rd party
+import psutil
+
try:
import wmi
w = wmi.WMI()
@@ -85,10 +89,10 @@
def __init__(self, logger):
Check.__init__(self, logger)
self.logger = logger
- self.gauge('system.cpu.user')
- self.gauge('system.cpu.idle')
+ self.counter('system.cpu.user')
+ self.counter('system.cpu.idle')
self.gauge('system.cpu.interrupt')
- self.gauge('system.cpu.system')
+ self.counter('system.cpu.system')
def check(self, agentConfig):
try:
@@ -98,21 +102,15 @@
' No CPU metrics will be returned.')
return
- cpu_user = self._average_metric(cpu, 'PercentUserTime')
- if cpu_user:
- self.save_sample('system.cpu.user', cpu_user)
-
- cpu_idle = self._average_metric(cpu, 'PercentIdleTime')
- if cpu_idle:
- self.save_sample('system.cpu.idle', cpu_idle)
-
cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')
if cpu_interrupt is not None:
self.save_sample('system.cpu.interrupt', cpu_interrupt)
- cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime')
- if cpu_privileged is not None:
- self.save_sample('system.cpu.system', cpu_privileged)
+ cpu_percent = psutil.cpu_times()
+
+ self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)
+ self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)
+ self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)
return self.get_metrics()
|
{"golden_diff": "diff --git a/checks/system/win32.py b/checks/system/win32.py\n--- a/checks/system/win32.py\n+++ b/checks/system/win32.py\n@@ -1,5 +1,9 @@\n+# project\n from checks import Check\n \n+# 3rd party\n+import psutil\n+\n try:\n import wmi\n w = wmi.WMI()\n@@ -85,10 +89,10 @@\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n- self.gauge('system.cpu.user')\n- self.gauge('system.cpu.idle')\n+ self.counter('system.cpu.user')\n+ self.counter('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n- self.gauge('system.cpu.system')\n+ self.counter('system.cpu.system')\n \n def check(self, agentConfig):\n try:\n@@ -98,21 +102,15 @@\n ' No CPU metrics will be returned.')\n return\n \n- cpu_user = self._average_metric(cpu, 'PercentUserTime')\n- if cpu_user:\n- self.save_sample('system.cpu.user', cpu_user)\n-\n- cpu_idle = self._average_metric(cpu, 'PercentIdleTime')\n- if cpu_idle:\n- self.save_sample('system.cpu.idle', cpu_idle)\n-\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n \n- cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime')\n- if cpu_privileged is not None:\n- self.save_sample('system.cpu.system', cpu_privileged)\n+ cpu_percent = psutil.cpu_times()\n+\n+ self.save_sample('system.cpu.user', 100 * cpu_percent.user / psutil.NUM_CPUS)\n+ self.save_sample('system.cpu.idle', 100 * cpu_percent.idle / psutil.NUM_CPUS)\n+ self.save_sample('system.cpu.system', 100 * cpu_percent.system/ psutil.NUM_CPUS)\n \n return self.get_metrics()\n", "issue": "Windows CPU times can add up to > 100%\nWe're collecting PercentUserTime and PercentIdleTime from [Win32_PerfFormattedData_PerfOS_Processor](http://msdn.microsoft.com/en-us/library/windows/desktop/aa394271%28v=vs.85%29.aspx) which shouldn't ever add up to more than 100%, but it seems like it does.\n\nMaybe we need to use a different metric other than PercentIdleTime?\n\n", "before_files": [{"content": "from checks import Check\n\ntry:\n import wmi\n w = wmi.WMI()\nexcept Exception:\n wmi, w = None, None\n\n# Device WMI drive types\nclass DriveType(object):\n UNKNOWN, NOROOT, REMOVEABLE, LOCAL, NETWORK, CD, RAM = (0, 1, 2, 3, 4, 5, 6)\nB2MB = float(1048576)\nKB2MB = B2KB = float(1024)\n\ndef should_ignore_disk(name, blacklist_re):\n # blacklist_re is a compiled regex, compilation done at config loading time\n return name =='_total' or blacklist_re is not None and blacklist_re.match(name)\n\nclass Processes(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.gauge('system.proc.queue_length')\n self.gauge('system.proc.count')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_PerfFormattedData_PerfOS_System()[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_System WMI class.' \\\n ' No process metrics will be returned.')\n return\n\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor(name=\"_Total\")[0]\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No process metrics will be returned.')\n return\n if os.ProcessorQueueLength is not None:\n self.save_sample('system.proc.queue_length', os.ProcessorQueueLength)\n if os.Processes is not None:\n self.save_sample('system.proc.count', os.Processes)\n\n return self.get_metrics()\n\nclass Memory(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.mem.free')\n self.gauge('system.mem.used')\n self.gauge('system.mem.total')\n self.gauge('system.mem.cached')\n self.gauge('system.mem.committed')\n self.gauge('system.mem.paged')\n self.gauge('system.mem.nonpaged')\n\n def check(self, agentConfig):\n try:\n os = w.Win32_OperatingSystem()[0]\n except AttributeError:\n self.logger.info('Missing Win32_OperatingSystem. No memory metrics will be returned.')\n return\n\n if os.TotalVisibleMemorySize is not None and os.FreePhysicalMemory is not None:\n total = int(os.TotalVisibleMemorySize) / KB2MB\n free = int(os.FreePhysicalMemory) / KB2MB\n self.save_sample('system.mem.total', total)\n self.save_sample('system.mem.free', free)\n self.save_sample('system.mem.used', total - free)\n\n mem = w.Win32_PerfFormattedData_PerfOS_Memory()[0]\n if mem.CacheBytes is not None:\n self.save_sample('system.mem.cached', int(mem.CacheBytes) / B2MB)\n if mem.CommittedBytes is not None:\n self.save_sample('system.mem.committed', int(mem.CommittedBytes) / B2MB)\n if mem.PoolPagedBytes is not None:\n self.save_sample('system.mem.paged', int(mem.PoolPagedBytes) / B2MB)\n if mem.PoolNonpagedBytes is not None:\n self.save_sample('system.mem.nonpaged', int(mem.PoolNonpagedBytes) / B2MB)\n\n return self.get_metrics()\n\nclass Cpu(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.cpu.user')\n self.gauge('system.cpu.idle')\n self.gauge('system.cpu.interrupt')\n self.gauge('system.cpu.system')\n\n def check(self, agentConfig):\n try:\n cpu = w.Win32_PerfFormattedData_PerfOS_Processor()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfOS_Processor WMI class.' 
\\\n ' No CPU metrics will be returned.')\n return\n\n cpu_user = self._average_metric(cpu, 'PercentUserTime')\n if cpu_user:\n self.save_sample('system.cpu.user', cpu_user)\n\n cpu_idle = self._average_metric(cpu, 'PercentIdleTime')\n if cpu_idle:\n self.save_sample('system.cpu.idle', cpu_idle)\n\n cpu_interrupt = self._average_metric(cpu, 'PercentInterruptTime')\n if cpu_interrupt is not None:\n self.save_sample('system.cpu.interrupt', cpu_interrupt)\n\n cpu_privileged = self._average_metric(cpu, 'PercentPrivilegedTime')\n if cpu_privileged is not None:\n self.save_sample('system.cpu.system', cpu_privileged)\n\n return self.get_metrics()\n\n def _average_metric(self, wmi_class, wmi_prop):\n ''' Sum all of the values of a metric from a WMI class object, excluding\n the value for \"_Total\"\n '''\n val = 0\n counter = 0\n for wmi_object in wmi_class:\n if wmi_object.Name == '_Total':\n # Skip the _Total value\n continue\n\n if getattr(wmi_object, wmi_prop) is not None:\n counter += 1\n val += float(getattr(wmi_object, wmi_prop))\n\n if counter > 0:\n return val / counter\n\n return val\n\n\nclass Network(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.net.bytes_rcvd')\n self.gauge('system.net.bytes_sent')\n\n def check(self, agentConfig):\n try:\n net = w.Win32_PerfFormattedData_Tcpip_NetworkInterface()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_Tcpip_NetworkInterface WMI class.' \\\n ' No network metrics will be returned')\n return\n\n for iface in net:\n name = self.normalize_device_name(iface.name)\n if iface.BytesReceivedPerSec is not None:\n self.save_sample('system.net.bytes_rcvd', iface.BytesReceivedPerSec,\n device_name=name)\n if iface.BytesSentPerSec is not None:\n self.save_sample('system.net.bytes_sent', iface.BytesSentPerSec,\n device_name=name)\n return self.get_metrics()\n\nclass Disk(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.disk.free')\n self.gauge('system.disk.total')\n self.gauge('system.disk.in_use')\n self.gauge('system.disk.used')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_LogicalDisk WMI class.' \\\n ' No disk metrics will be returned.')\n return\n\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if device.DriveType in (DriveType.CD, DriveType.UNKNOWN) or should_ignore_disk(name, blacklist_re):\n continue\n if device.FreeSpace is not None and device.Size is not None:\n free = float(device.FreeSpace) / B2KB\n total = float(device.Size) / B2KB\n used = total - free\n self.save_sample('system.disk.free', free, device_name=name)\n self.save_sample('system.disk.total', total, device_name=name)\n self.save_sample('system.disk.used', used, device_name=name)\n self.save_sample('system.disk.in_use', (used / total),\n device_name=name)\n return self.get_metrics()\n\nclass IO(Check):\n def __init__(self, logger):\n Check.__init__(self, logger)\n self.logger = logger\n self.gauge('system.io.wkb_s')\n self.gauge('system.io.w_s')\n self.gauge('system.io.rkb_s')\n self.gauge('system.io.r_s')\n self.gauge('system.io.avg_q_sz')\n\n def check(self, agentConfig):\n try:\n disk = w.Win32_PerfFormattedData_PerfDisk_LogicalDisk()\n except AttributeError:\n self.logger.info('Missing Win32_PerfFormattedData_PerfDisk_LogicalDiskUnable WMI class.' 
\\\n ' No I/O metrics will be returned.')\n return\n blacklist_re = agentConfig.get('device_blacklist_re', None)\n for device in disk:\n name = self.normalize_device_name(device.name)\n if should_ignore_disk(name, blacklist_re):\n continue\n if device.DiskWriteBytesPerSec is not None:\n self.save_sample('system.io.wkb_s', int(device.DiskWriteBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskWritesPerSec is not None:\n self.save_sample('system.io.w_s', int(device.DiskWritesPerSec),\n device_name=name)\n if device.DiskReadBytesPerSec is not None:\n self.save_sample('system.io.rkb_s', int(device.DiskReadBytesPerSec) / B2KB,\n device_name=name)\n if device.DiskReadsPerSec is not None:\n self.save_sample('system.io.r_s', int(device.DiskReadsPerSec),\n device_name=name)\n if device.CurrentDiskQueueLength is not None:\n self.save_sample('system.io.avg_q_sz', device.CurrentDiskQueueLength,\n device_name=name)\n return self.get_metrics()\n", "path": "checks/system/win32.py"}]}
| 3,363 | 478 |
gh_patches_debug_41237
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-1655
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Secrets framework scan not working in 2.0.407
**Describe the bug**
This is to duplicate #1536 which has been closed. I don't believe the issue has been resolved.
```
bash-5.1# pwd
/test
bash-5.1# checkov -d . --framework all
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.0.407
bash-5.1# ls -la
total 4
drwxrwxrwx 1 root root 4096 Aug 25 16:35 .
drwxr-xr-x 1 root root 4096 Sep 12 16:29 ..
-rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt
-rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf
-rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml
bash-5.1# cat providers.tf
access_key = "sfffglfgjldfjdlrt5454002lfofd"bash-5.1# cat password.txt
password: mypassword
b64password: bXlwYXNzd29yZAo=
mypassword=secretkey
bash-5.1# cat secret_password.yaml
pwd: correcthorsebatterystaple
bash-5.1# checkov -d ./ --framework secrets
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.0.407
bash-5.1# ls -la
total 4
drwxrwxrwx 1 root root 4096 Aug 25 16:35 .
drwxr-xr-x 1 root root 4096 Sep 12 16:29 ..
-rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt
-rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf
-rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml
```
**To Reproduce**
Steps to reproduce the behavior:
1. See above and details in #1536
**Expected behavior**
I expect secrets to be detected.
**Desktop (please complete the following information):**
- running the checkov container
- Checkov Version: 2.0.204
**Additional context**
Add any other context about the problem here (e.g. code snippets).
</issue>
<code>
[start of checkov/secrets/plugins/entropy_keyword_combinator.py]
1 from detect_secrets.core.potential_secret import PotentialSecret
2 from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString
3 from detect_secrets.plugins.keyword import KeywordDetector
4 from detect_secrets.plugins.base import BasePlugin
5 from typing import Generator, Any, Set
6
7
8 class EntropyKeywordCombinator(BasePlugin):
9 secret_type = None
10
11 def __init__(self, limit: float) -> None:
12 self.high_entropy_scanners = (Base64HighEntropyString(limit=limit), HexHighEntropyString(limit=limit))
13 self.keyword_scanner = KeywordDetector()
14
15 def analyze_line(
16 self,
17 filename: str,
18 line: str,
19 line_number: int = 0,
20 **kwargs: Any
21 ) -> Set[PotentialSecret]:
22 """
23 This method first runs the keyword plugin. If it finds a match - it runs the entropy scanners, and if
24 one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.
25 """
26 keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)
27 if len(keyword_matches):
28 for entropy_scanner in self.high_entropy_scanners:
29 matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)
30 if len(matches) > 0:
31 return matches
32 return set([])
33
34 def analyze_string(self, string: str) -> Generator[str, None, None]:
35 raise NotImplementedError()
36
[end of checkov/secrets/plugins/entropy_keyword_combinator.py]
[start of checkov/secrets/runner.py]
1 import linecache
2 import logging
3 import os
4 import re
5 import time
6 from typing import Optional, List
7
8 from detect_secrets import SecretsCollection
9 from detect_secrets.core.potential_secret import PotentialSecret
10 from detect_secrets.settings import transient_settings
11 from typing_extensions import TypedDict
12
13 from checkov.common.bridgecrew.platform_integration import bc_integration
14 from checkov.common.comment.enum import COMMENT_REGEX
15 from checkov.common.graph.graph_builder.utils import run_function_multithreaded
16 from checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS
17 from checkov.common.models.enums import CheckResult
18 from checkov.common.output.record import Record
19 from checkov.common.output.report import Report
20 from checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths
21 from checkov.common.runners.base_runner import ignored_directories
22 from checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR
23 from checkov.runner_filter import RunnerFilter
24
25 SECRET_TYPE_TO_ID = {
26 'Artifactory Credentials': 'CKV_SECRET_1',
27 'AWS Access Key': 'CKV_SECRET_2',
28 'Azure Storage Account access key': 'CKV_SECRET_3',
29 'Basic Auth Credentials': 'CKV_SECRET_4',
30 'Cloudant Credentials': 'CKV_SECRET_5',
31 'Base64 High Entropy String': 'CKV_SECRET_6',
32 'IBM Cloud IAM Key': 'CKV_SECRET_7',
33 'IBM COS HMAC Credentials': 'CKV_SECRET_8',
34 'JSON Web Token': 'CKV_SECRET_9',
35 # 'Secret Keyword': 'CKV_SECRET_10',
36 'Mailchimp Access Key': 'CKV_SECRET_11',
37 'NPM tokens': 'CKV_SECRET_12',
38 'Private Key': 'CKV_SECRET_13',
39 'Slack Token': 'CKV_SECRET_14',
40 'SoftLayer Credentials': 'CKV_SECRET_15',
41 'Square OAuth Secret': 'CKV_SECRET_16',
42 'Stripe Access Key': 'CKV_SECRET_17',
43 'Twilio API Key': 'CKV_SECRET_18',
44 'Hex High Entropy String': 'CKV_SECRET_19'
45 }
46 CHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}
47
48 PROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']
49
50
51 class _CheckResult(TypedDict, total=False):
52 result: CheckResult
53 suppress_comment: str
54
55
56 class Runner(BaseRunner):
57 check_type = 'secrets'
58
59 def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
60 collect_skip_comments=True) -> Report:
61 current_dir = os.path.dirname(os.path.realpath(__file__))
62 secrets = SecretsCollection()
63 with transient_settings({
64 # Only run scans with only these plugins.
65 'plugins_used': [
66 {
67 'name': 'AWSKeyDetector'
68 },
69 {
70 'name': 'ArtifactoryDetector'
71 },
72 {
73 'name': 'AzureStorageKeyDetector'
74 },
75 {
76 'name': 'BasicAuthDetector'
77 },
78 {
79 'name': 'CloudantDetector'
80 },
81 {
82 'name': 'IbmCloudIamDetector'
83 },
84 {
85 'name': 'MailchimpDetector'
86 },
87 {
88 'name': 'PrivateKeyDetector'
89 },
90 {
91 'name': 'SlackDetector'
92 },
93 {
94 'name': 'SoftlayerDetector'
95 },
96 {
97 'name': 'SquareOAuthDetector'
98 },
99 {
100 'name': 'StripeDetector'
101 },
102 {
103 'name': 'TwilioKeyDetector'
104 },
105 {
106 'name': 'EntropyKeywordCombinator',
107 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',
108 'limit': 4.5
109 }
110 ]
111 }) as settings:
112 report = Report(self.check_type)
113 # Implement non IaC files (including .terraform dir)
114 files_to_scan = files or []
115 excluded_paths = (runner_filter.excluded_paths or []) + ignored_directories + [DEFAULT_EXTERNAL_MODULES_DIR]
116 if root_folder:
117 for root, d_names, f_names in os.walk(root_folder):
118 filter_ignored_paths(root, d_names, excluded_paths)
119 filter_ignored_paths(root, f_names, excluded_paths)
120 for file in f_names:
121 if file not in PROHIBITED_FILES and f".{file.split('.')[-1]}" in SUPPORTED_FILE_EXTENSIONS:
122 files_to_scan.append(os.path.join(root, file))
123 logging.info(f'Secrets scanning will scan {len(files_to_scan)} files')
124
125 settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])
126
127 def _scan_file(file_paths: List[str]):
128 for file_path in file_paths:
129 start = time.time()
130 try:
131 secrets.scan_file(file_path)
132 except Exception as err:
133 logging.warning(f"Secret scanning:could not process file {file_path}, {err}")
134 continue
135 end = time.time()
136 scan_time = end - start
137 if scan_time > 10:
138 logging.info(f'Scanned {file_path}, took {scan_time} seconds')
139
140 run_function_multithreaded(_scan_file, files_to_scan, 1, num_of_workers=os.cpu_count())
141
142 for _, secret in iter(secrets):
143 check_id = SECRET_TYPE_TO_ID.get(secret.type)
144 bc_check_id = bc_integration.ckv_to_bc_id_mapping.get(check_id) if bc_integration.ckv_to_bc_id_mapping else None
145 if not check_id:
146 continue
147 if runner_filter.checks and not runner_filter.should_run_check(check_id, bc_check_id):
148 continue
149 result: _CheckResult = {'result': CheckResult.FAILED}
150 line_text = linecache.getline(secret.filename, secret.line_number)
151 if line_text != "" and line_text.split()[0] == 'git_commit':
152 continue
153 result = self.search_for_suppression(
154 check_id=check_id,
155 bc_check_id=bc_check_id,
156 secret=secret,
157 skipped_checks=runner_filter.skip_checks,
158 ) or result
159 report.add_resource(f'{secret.filename}:{secret.secret_hash}')
160 report.add_record(Record(
161 check_id=check_id,
162 bc_check_id=bc_check_id,
163 check_name=secret.type,
164 check_result=result,
165 code_block=[(secret.line_number, line_text)],
166 file_path=f'/{os.path.relpath(secret.filename, root_folder)}',
167 file_line_range=[secret.line_number, secret.line_number + 1],
168 resource=secret.secret_hash,
169 check_class=None,
170 evaluations=None,
171 file_abs_path=os.path.abspath(secret.filename)
172 ))
173
174 return report
175
176 @staticmethod
177 def search_for_suppression(
178 check_id: str,
179 bc_check_id: str,
180 secret: PotentialSecret,
181 skipped_checks: List[str]
182 ) -> Optional[_CheckResult]:
183 if (check_id in skipped_checks or bc_check_id in skipped_checks) and check_id in CHECK_ID_TO_SECRET_TYPE.keys():
184 return {
185 "result": CheckResult.SKIPPED,
186 "suppress_comment": f"Secret scan {check_id} is skipped"
187 }
188 # Check for suppression comment in the line before, the line of, and the line after the secret
189 for line_number in [secret.line_number, secret.line_number - 1, secret.line_number + 1]:
190 lt = linecache.getline(secret.filename, line_number)
191 skip_search = re.search(COMMENT_REGEX, lt)
192 if skip_search and (skip_search.group(2) == check_id or skip_search.group(2) == bc_check_id):
193 return {
194 "result": CheckResult.SKIPPED,
195 "suppress_comment": skip_search.group(3)[1:] if skip_search.group(3) else "No comment provided"
196 }
197 return None
198
[end of checkov/secrets/runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/secrets/plugins/entropy_keyword_combinator.py b/checkov/secrets/plugins/entropy_keyword_combinator.py
--- a/checkov/secrets/plugins/entropy_keyword_combinator.py
+++ b/checkov/secrets/plugins/entropy_keyword_combinator.py
@@ -1,7 +1,7 @@
from detect_secrets.core.potential_secret import PotentialSecret
from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString
from detect_secrets.plugins.keyword import KeywordDetector
-from detect_secrets.plugins.base import BasePlugin
+from detect_secrets.plugins.base import BasePlugin
from typing import Generator, Any, Set
@@ -24,10 +24,10 @@
one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.
"""
keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)
- if len(keyword_matches):
+ if keyword_matches:
for entropy_scanner in self.high_entropy_scanners:
matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)
- if len(matches) > 0:
+ if matches:
return matches
return set([])
diff --git a/checkov/secrets/runner.py b/checkov/secrets/runner.py
--- a/checkov/secrets/runner.py
+++ b/checkov/secrets/runner.py
@@ -45,6 +45,7 @@
}
CHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}
+ENTROPY_KEYWORD_LIMIT = 3
PROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']
@@ -56,8 +57,14 @@
class Runner(BaseRunner):
check_type = 'secrets'
- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),
- collect_skip_comments=True) -> Report:
+ def run(
+ self,
+ root_folder: str,
+ external_checks_dir: Optional[List[str]] = None,
+ files: Optional[List[str]] = None,
+ runner_filter: RunnerFilter = RunnerFilter(),
+ collect_skip_comments: bool = True
+ ) -> Report:
current_dir = os.path.dirname(os.path.realpath(__file__))
secrets = SecretsCollection()
with transient_settings({
@@ -105,7 +112,7 @@
{
'name': 'EntropyKeywordCombinator',
'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',
- 'limit': 4.5
+ 'limit': ENTROPY_KEYWORD_LIMIT
}
]
}) as settings:
@@ -124,7 +131,7 @@
settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])
- def _scan_file(file_paths: List[str]):
+ def _scan_file(file_paths: List[str]) -> None:
for file_path in file_paths:
start = time.time()
try:
|
{"golden_diff": "diff --git a/checkov/secrets/plugins/entropy_keyword_combinator.py b/checkov/secrets/plugins/entropy_keyword_combinator.py\n--- a/checkov/secrets/plugins/entropy_keyword_combinator.py\n+++ b/checkov/secrets/plugins/entropy_keyword_combinator.py\n@@ -1,7 +1,7 @@\n from detect_secrets.core.potential_secret import PotentialSecret\n from detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString\n from detect_secrets.plugins.keyword import KeywordDetector\n-from detect_secrets.plugins.base import BasePlugin\n+from detect_secrets.plugins.base import BasePlugin\n from typing import Generator, Any, Set\n \n \n@@ -24,10 +24,10 @@\n one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.\n \"\"\"\n keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)\n- if len(keyword_matches):\n+ if keyword_matches:\n for entropy_scanner in self.high_entropy_scanners:\n matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)\n- if len(matches) > 0:\n+ if matches:\n return matches\n return set([])\n \ndiff --git a/checkov/secrets/runner.py b/checkov/secrets/runner.py\n--- a/checkov/secrets/runner.py\n+++ b/checkov/secrets/runner.py\n@@ -45,6 +45,7 @@\n }\n CHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}\n \n+ENTROPY_KEYWORD_LIMIT = 3\n PROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']\n \n \n@@ -56,8 +57,14 @@\n class Runner(BaseRunner):\n check_type = 'secrets'\n \n- def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n- collect_skip_comments=True) -> Report:\n+ def run(\n+ self,\n+ root_folder: str,\n+ external_checks_dir: Optional[List[str]] = None,\n+ files: Optional[List[str]] = None,\n+ runner_filter: RunnerFilter = RunnerFilter(),\n+ collect_skip_comments: bool = True\n+ ) -> Report:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n secrets = SecretsCollection()\n with transient_settings({\n@@ -105,7 +112,7 @@\n {\n 'name': 'EntropyKeywordCombinator',\n 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',\n- 'limit': 4.5\n+ 'limit': ENTROPY_KEYWORD_LIMIT\n }\n ]\n }) as settings:\n@@ -124,7 +131,7 @@\n \n settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])\n \n- def _scan_file(file_paths: List[str]):\n+ def _scan_file(file_paths: List[str]) -> None:\n for file_path in file_paths:\n start = time.time()\n try:\n", "issue": "Secrets framework scan not working in 2.0.407\n**Describe the bug**\r\nThis is to duplicate #1536 which has been closed. I don't believe the issue has been resolved.\r\n\r\n```\r\nbash-5.1# pwd\r\n/test\r\nbash-5.1# checkov -d . 
--framework all\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nBy bridgecrew.io | version: 2.0.407\r\n\r\nbash-5.1# ls -la\r\ntotal 4\r\ndrwxrwxrwx 1 root root 4096 Aug 25 16:35 .\r\ndrwxr-xr-x 1 root root 4096 Sep 12 16:29 ..\r\n-rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt\r\n-rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf\r\n-rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml\r\n\r\nbash-5.1# cat providers.tf\r\naccess_key = \"sfffglfgjldfjdlrt5454002lfofd\"bash-5.1# cat password.txt\r\npassword: mypassword\r\nb64password: bXlwYXNzd29yZAo=\r\nmypassword=secretkey\r\n\r\nbash-5.1# cat secret_password.yaml\r\npwd: correcthorsebatterystaple\r\n\r\nbash-5.1# checkov -d ./ --framework secrets\r\n\r\n _ _\r\n ___| |__ ___ ___| | _______ __\r\n / __| '_ \\ / _ \\/ __| |/ / _ \\ \\ / /\r\n | (__| | | | __/ (__| < (_) \\ V /\r\n \\___|_| |_|\\___|\\___|_|\\_\\___/ \\_/\r\n\r\nBy bridgecrew.io | version: 2.0.407\r\n\r\nbash-5.1# ls -la\r\ntotal 4\r\ndrwxrwxrwx 1 root root 4096 Aug 25 16:35 .\r\ndrwxr-xr-x 1 root root 4096 Sep 12 16:29 ..\r\n-rwxrwxrwx 1 root root 73 Aug 25 16:26 password.txt\r\n-rwxrwxrwx 1 root root 44 Aug 25 16:34 providers.tf\r\n-rwxrwxrwx 1 root root 30 Aug 25 16:35 secret_password.yaml\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. See above and details in #1536 \r\n\r\n**Expected behavior**\r\nI expect secrets to be detected.\r\n\r\n**Desktop (please complete the following information):**\r\n - running the checkov container\r\n - Checkov Version: 2.0.204\r\n\r\n**Additional context**\r\nAdd any other context about the problem here (e.g. code snippets).\r\n\n", "before_files": [{"content": "from detect_secrets.core.potential_secret import PotentialSecret\nfrom detect_secrets.plugins.high_entropy_strings import Base64HighEntropyString, HexHighEntropyString\nfrom detect_secrets.plugins.keyword import KeywordDetector\nfrom detect_secrets.plugins.base import BasePlugin\nfrom typing import Generator, Any, Set\n\n\nclass EntropyKeywordCombinator(BasePlugin):\n secret_type = None\n\n def __init__(self, limit: float) -> None:\n self.high_entropy_scanners = (Base64HighEntropyString(limit=limit), HexHighEntropyString(limit=limit))\n self.keyword_scanner = KeywordDetector()\n\n def analyze_line(\n self,\n filename: str,\n line: str,\n line_number: int = 0,\n **kwargs: Any\n ) -> Set[PotentialSecret]:\n \"\"\"\n This method first runs the keyword plugin. 
If it finds a match - it runs the entropy scanners, and if\n one of the entropy scanners find a match (on a line which was already matched by keyword plugin) - it is returned.\n \"\"\"\n keyword_matches = self.keyword_scanner.analyze_line(filename, line, line_number, **kwargs)\n if len(keyword_matches):\n for entropy_scanner in self.high_entropy_scanners:\n matches = entropy_scanner.analyze_line(filename, line, line_number, **kwargs)\n if len(matches) > 0:\n return matches\n return set([])\n\n def analyze_string(self, string: str) -> Generator[str, None, None]:\n raise NotImplementedError()\n", "path": "checkov/secrets/plugins/entropy_keyword_combinator.py"}, {"content": "import linecache\nimport logging\nimport os\nimport re\nimport time\nfrom typing import Optional, List\n\nfrom detect_secrets import SecretsCollection\nfrom detect_secrets.core.potential_secret import PotentialSecret\nfrom detect_secrets.settings import transient_settings\nfrom typing_extensions import TypedDict\n\nfrom checkov.common.bridgecrew.platform_integration import bc_integration\nfrom checkov.common.comment.enum import COMMENT_REGEX\nfrom checkov.common.graph.graph_builder.utils import run_function_multithreaded\nfrom checkov.common.models.consts import SUPPORTED_FILE_EXTENSIONS\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.output.record import Record\nfrom checkov.common.output.report import Report\nfrom checkov.common.runners.base_runner import BaseRunner, filter_ignored_paths\nfrom checkov.common.runners.base_runner import ignored_directories\nfrom checkov.common.util.consts import DEFAULT_EXTERNAL_MODULES_DIR\nfrom checkov.runner_filter import RunnerFilter\n\nSECRET_TYPE_TO_ID = {\n 'Artifactory Credentials': 'CKV_SECRET_1',\n 'AWS Access Key': 'CKV_SECRET_2',\n 'Azure Storage Account access key': 'CKV_SECRET_3',\n 'Basic Auth Credentials': 'CKV_SECRET_4',\n 'Cloudant Credentials': 'CKV_SECRET_5',\n 'Base64 High Entropy String': 'CKV_SECRET_6',\n 'IBM Cloud IAM Key': 'CKV_SECRET_7',\n 'IBM COS HMAC Credentials': 'CKV_SECRET_8',\n 'JSON Web Token': 'CKV_SECRET_9',\n # 'Secret Keyword': 'CKV_SECRET_10',\n 'Mailchimp Access Key': 'CKV_SECRET_11',\n 'NPM tokens': 'CKV_SECRET_12',\n 'Private Key': 'CKV_SECRET_13',\n 'Slack Token': 'CKV_SECRET_14',\n 'SoftLayer Credentials': 'CKV_SECRET_15',\n 'Square OAuth Secret': 'CKV_SECRET_16',\n 'Stripe Access Key': 'CKV_SECRET_17',\n 'Twilio API Key': 'CKV_SECRET_18',\n 'Hex High Entropy String': 'CKV_SECRET_19'\n}\nCHECK_ID_TO_SECRET_TYPE = {v: k for k, v in SECRET_TYPE_TO_ID.items()}\n\nPROHIBITED_FILES = ['Pipfile.lock', 'yarn.lock', 'package-lock.json', 'requirements.txt']\n\n\nclass _CheckResult(TypedDict, total=False):\n result: CheckResult\n suppress_comment: str\n\n\nclass Runner(BaseRunner):\n check_type = 'secrets'\n\n def run(self, root_folder, external_checks_dir=None, files=None, runner_filter=RunnerFilter(),\n collect_skip_comments=True) -> Report:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n secrets = SecretsCollection()\n with transient_settings({\n # Only run scans with only these plugins.\n 'plugins_used': [\n {\n 'name': 'AWSKeyDetector'\n },\n {\n 'name': 'ArtifactoryDetector'\n },\n {\n 'name': 'AzureStorageKeyDetector'\n },\n {\n 'name': 'BasicAuthDetector'\n },\n {\n 'name': 'CloudantDetector'\n },\n {\n 'name': 'IbmCloudIamDetector'\n },\n {\n 'name': 'MailchimpDetector'\n },\n {\n 'name': 'PrivateKeyDetector'\n },\n {\n 'name': 'SlackDetector'\n },\n {\n 'name': 'SoftlayerDetector'\n },\n {\n 'name': 
'SquareOAuthDetector'\n },\n {\n 'name': 'StripeDetector'\n },\n {\n 'name': 'TwilioKeyDetector'\n },\n {\n 'name': 'EntropyKeywordCombinator',\n 'path': f'file://{current_dir}/plugins/entropy_keyword_combinator.py',\n 'limit': 4.5\n }\n ]\n }) as settings:\n report = Report(self.check_type)\n # Implement non IaC files (including .terraform dir)\n files_to_scan = files or []\n excluded_paths = (runner_filter.excluded_paths or []) + ignored_directories + [DEFAULT_EXTERNAL_MODULES_DIR]\n if root_folder:\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_paths(root, d_names, excluded_paths)\n filter_ignored_paths(root, f_names, excluded_paths)\n for file in f_names:\n if file not in PROHIBITED_FILES and f\".{file.split('.')[-1]}\" in SUPPORTED_FILE_EXTENSIONS:\n files_to_scan.append(os.path.join(root, file))\n logging.info(f'Secrets scanning will scan {len(files_to_scan)} files')\n\n settings.disable_filters(*['detect_secrets.filters.heuristic.is_indirect_reference'])\n\n def _scan_file(file_paths: List[str]):\n for file_path in file_paths:\n start = time.time()\n try:\n secrets.scan_file(file_path)\n except Exception as err:\n logging.warning(f\"Secret scanning:could not process file {file_path}, {err}\")\n continue\n end = time.time()\n scan_time = end - start\n if scan_time > 10:\n logging.info(f'Scanned {file_path}, took {scan_time} seconds')\n\n run_function_multithreaded(_scan_file, files_to_scan, 1, num_of_workers=os.cpu_count())\n\n for _, secret in iter(secrets):\n check_id = SECRET_TYPE_TO_ID.get(secret.type)\n bc_check_id = bc_integration.ckv_to_bc_id_mapping.get(check_id) if bc_integration.ckv_to_bc_id_mapping else None\n if not check_id:\n continue\n if runner_filter.checks and not runner_filter.should_run_check(check_id, bc_check_id):\n continue\n result: _CheckResult = {'result': CheckResult.FAILED}\n line_text = linecache.getline(secret.filename, secret.line_number)\n if line_text != \"\" and line_text.split()[0] == 'git_commit':\n continue\n result = self.search_for_suppression(\n check_id=check_id,\n bc_check_id=bc_check_id,\n secret=secret,\n skipped_checks=runner_filter.skip_checks,\n ) or result\n report.add_resource(f'{secret.filename}:{secret.secret_hash}')\n report.add_record(Record(\n check_id=check_id,\n bc_check_id=bc_check_id,\n check_name=secret.type,\n check_result=result,\n code_block=[(secret.line_number, line_text)],\n file_path=f'/{os.path.relpath(secret.filename, root_folder)}',\n file_line_range=[secret.line_number, secret.line_number + 1],\n resource=secret.secret_hash,\n check_class=None,\n evaluations=None,\n file_abs_path=os.path.abspath(secret.filename)\n ))\n\n return report\n\n @staticmethod\n def search_for_suppression(\n check_id: str,\n bc_check_id: str,\n secret: PotentialSecret,\n skipped_checks: List[str]\n ) -> Optional[_CheckResult]:\n if (check_id in skipped_checks or bc_check_id in skipped_checks) and check_id in CHECK_ID_TO_SECRET_TYPE.keys():\n return {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": f\"Secret scan {check_id} is skipped\"\n }\n # Check for suppression comment in the line before, the line of, and the line after the secret\n for line_number in [secret.line_number, secret.line_number - 1, secret.line_number + 1]:\n lt = linecache.getline(secret.filename, line_number)\n skip_search = re.search(COMMENT_REGEX, lt)\n if skip_search and (skip_search.group(2) == check_id or skip_search.group(2) == bc_check_id):\n return {\n \"result\": CheckResult.SKIPPED,\n \"suppress_comment\": 
skip_search.group(3)[1:] if skip_search.group(3) else \"No comment provided\"\n }\n return None\n", "path": "checkov/secrets/runner.py"}]}
| 3,939 | 689 |
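
The golden diff above fixes bridgecrewio__checkov-1655 mainly by lowering the entropy limit handed to the combined keyword/entropy plugin from 4.5 to `ENTROPY_KEYWORD_LIMIT = 3`. As a rough, self-contained illustration of why short human-chosen secrets slip past a high threshold — the helper name and formula below are assumptions for illustration and do not reproduce detect_secrets' internal charset-based calculation:

```python
import math
from collections import Counter


def shannon_entropy(candidate: str) -> float:
    """Bits of entropy per character over the string's own symbol distribution."""
    counts = Counter(candidate)
    length = len(candidate)
    return -sum((n / length) * math.log2(n / length) for n in counts.values())


# Strings taken from the bug report; all of them land well under the old 4.5 limit.
for candidate in ("mypassword", "correcthorsebatterystaple", "bXlwYXNzd29yZAo="):
    print(f"{candidate}: {shannon_entropy(candidate):.2f}")
```

With a cutoff of 4.5, none of these example values would clear the entropy bar even after the keyword scanner matched the line, which is consistent with the "no findings" behaviour described in the issue.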
gh_patches_debug_9694
|
rasdani/github-patches
|
git_diff
|
pretix__pretix-883
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
rich_text: tel schema
When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.
rich_text: tel schema
When providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.
</issue>
<code>
[start of src/pretix/base/templatetags/rich_text.py]
1 import urllib.parse
2
3 import bleach
4 import markdown
5 from bleach import DEFAULT_CALLBACKS
6 from django import template
7 from django.conf import settings
8 from django.core import signing
9 from django.urls import reverse
10 from django.utils.http import is_safe_url
11 from django.utils.safestring import mark_safe
12
13 register = template.Library()
14
15 ALLOWED_TAGS = [
16 'a',
17 'abbr',
18 'acronym',
19 'b',
20 'blockquote',
21 'br',
22 'code',
23 'em',
24 'i',
25 'li',
26 'ol',
27 'strong',
28 'ul',
29 'p',
30 'table',
31 'tbody',
32 'thead',
33 'tr',
34 'td',
35 'th',
36 'div',
37 'span',
38 'hr',
39 'h1',
40 'h2',
41 'h3',
42 'h4',
43 'h5',
44 'h6',
45 'pre',
46 # Update doc/user/markdown.rst if you change this!
47 ]
48
49 ALLOWED_ATTRIBUTES = {
50 'a': ['href', 'title'],
51 'abbr': ['title'],
52 'acronym': ['title'],
53 'table': ['width'],
54 'td': ['width', 'align'],
55 'div': ['class'],
56 'p': ['class'],
57 'span': ['class'],
58 # Update doc/user/markdown.rst if you change this!
59 }
60
61 ALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']
62
63
64 def safelink_callback(attrs, new=False):
65 url = attrs.get((None, 'href'), '/')
66 if not is_safe_url(url) and not url.startswith('mailto:'):
67 signer = signing.Signer(salt='safe-redirect')
68 attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
69 attrs[None, 'target'] = '_blank'
70 attrs[None, 'rel'] = 'noopener'
71 return attrs
72
73
74 def abslink_callback(attrs, new=False):
75 attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))
76 attrs[None, 'target'] = '_blank'
77 attrs[None, 'rel'] = 'noopener'
78 return attrs
79
80
81 def markdown_compile(source):
82 return bleach.clean(
83 markdown.markdown(
84 source,
85 extensions=[
86 'markdown.extensions.sane_lists',
87 # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. with mails
88 ]
89 ),
90 tags=ALLOWED_TAGS,
91 attributes=ALLOWED_ATTRIBUTES,
92 protocols=ALLOWED_PROTOCOLS,
93 )
94
95
96 @register.filter
97 def rich_text(text: str, **kwargs):
98 """
99 Processes markdown and cleans HTML in a text input.
100 """
101 text = str(text)
102 body_md = bleach.linkify(
103 markdown_compile(text),
104 callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])
105 )
106 return mark_safe(body_md)
107
[end of src/pretix/base/templatetags/rich_text.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py
--- a/src/pretix/base/templatetags/rich_text.py
+++ b/src/pretix/base/templatetags/rich_text.py
@@ -63,7 +63,7 @@
def safelink_callback(attrs, new=False):
url = attrs.get((None, 'href'), '/')
- if not is_safe_url(url) and not url.startswith('mailto:'):
+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):
signer = signing.Signer(salt='safe-redirect')
attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))
attrs[None, 'target'] = '_blank'
|
{"golden_diff": "diff --git a/src/pretix/base/templatetags/rich_text.py b/src/pretix/base/templatetags/rich_text.py\n--- a/src/pretix/base/templatetags/rich_text.py\n+++ b/src/pretix/base/templatetags/rich_text.py\n@@ -63,7 +63,7 @@\n \n def safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n- if not is_safe_url(url) and not url.startswith('mailto:'):\n+ if not is_safe_url(url) and not url.startswith('mailto:') and not url.startswith('tel:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n", "issue": "rich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\nrich_text: tel schema\nWhen providing email links using the mailto schema in richtext description like `[Email us!](mailto:[email protected])`, resulting in [Email us!](mailto:[email protected]), Pretix creates the correct `<a>` tag. However, users also use their mobile phone. It would be awesome to also be able to use the `tel` schema like `[Call us!](tel:+1-202-555-0102)`. At the moment, pretix just creates an `<a>` tag without an `href`, so the Telephone app is not opened.\n", "before_files": [{"content": "import urllib.parse\n\nimport bleach\nimport markdown\nfrom bleach import DEFAULT_CALLBACKS\nfrom django import template\nfrom django.conf import settings\nfrom django.core import signing\nfrom django.urls import reverse\nfrom django.utils.http import is_safe_url\nfrom django.utils.safestring import mark_safe\n\nregister = template.Library()\n\nALLOWED_TAGS = [\n 'a',\n 'abbr',\n 'acronym',\n 'b',\n 'blockquote',\n 'br',\n 'code',\n 'em',\n 'i',\n 'li',\n 'ol',\n 'strong',\n 'ul',\n 'p',\n 'table',\n 'tbody',\n 'thead',\n 'tr',\n 'td',\n 'th',\n 'div',\n 'span',\n 'hr',\n 'h1',\n 'h2',\n 'h3',\n 'h4',\n 'h5',\n 'h6',\n 'pre',\n # Update doc/user/markdown.rst if you change this!\n]\n\nALLOWED_ATTRIBUTES = {\n 'a': ['href', 'title'],\n 'abbr': ['title'],\n 'acronym': ['title'],\n 'table': ['width'],\n 'td': ['width', 'align'],\n 'div': ['class'],\n 'p': ['class'],\n 'span': ['class'],\n # Update doc/user/markdown.rst if you change this!\n}\n\nALLOWED_PROTOCOLS = ['http', 'https', 'mailto', 'tel']\n\n\ndef safelink_callback(attrs, new=False):\n url = attrs.get((None, 'href'), '/')\n if not is_safe_url(url) and not url.startswith('mailto:'):\n signer = signing.Signer(salt='safe-redirect')\n attrs[None, 'href'] = reverse('redirect') + '?url=' + urllib.parse.quote(signer.sign(url))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef abslink_callback(attrs, new=False):\n attrs[None, 'href'] = urllib.parse.urljoin(settings.SITE_URL, attrs.get((None, 'href'), '/'))\n attrs[None, 'target'] = '_blank'\n attrs[None, 'rel'] = 'noopener'\n return attrs\n\n\ndef markdown_compile(source):\n return bleach.clean(\n markdown.markdown(\n source,\n extensions=[\n 'markdown.extensions.sane_lists',\n # 'markdown.extensions.nl2br', # TODO: Enable, but check backwards-compatibility issues e.g. 
with mails\n ]\n ),\n tags=ALLOWED_TAGS,\n attributes=ALLOWED_ATTRIBUTES,\n protocols=ALLOWED_PROTOCOLS,\n )\n\n\[email protected]\ndef rich_text(text: str, **kwargs):\n \"\"\"\n Processes markdown and cleans HTML in a text input.\n \"\"\"\n text = str(text)\n body_md = bleach.linkify(\n markdown_compile(text),\n callbacks=DEFAULT_CALLBACKS + ([safelink_callback] if kwargs.get('safelinks', True) else [abslink_callback])\n )\n return mark_safe(body_md)\n", "path": "src/pretix/base/templatetags/rich_text.py"}]}
| 1,702 | 195 |
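
The pretix fix above boils down to letting `tel:` URLs bypass the safe-redirect wrapper the same way `mailto:` already does. A minimal standalone sketch of that predicate is shown below; the function and constant names are hypothetical and only mirror the shape of `safelink_callback` from the listing, with the safe-URL check stubbed out as a parameter.

```python
NON_HTTP_SCHEMES = ("mailto:", "tel:")  # direct-action schemes that must keep their href


def needs_redirect_wrapper(url: str, is_safe_url) -> bool:
    # Wrap only URLs that are neither on our own site nor a mailto:/tel: link;
    # str.startswith accepts a tuple, so both schemes are covered in one check.
    return not is_safe_url(url) and not url.startswith(NON_HTTP_SCHEMES)


# A tel: link should be left alone, an external http link should be wrapped.
print(needs_redirect_wrapper("tel:+1-202-555-0102", lambda u: False))   # False
print(needs_redirect_wrapper("https://example.com", lambda u: False))   # True
```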
gh_patches_debug_37392
|
rasdani/github-patches
|
git_diff
|
wemake-services__wemake-python-styleguide-1670
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Stray WPS513 warning.
# Bug report
`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.
## What's wrong
The following snippet:
```python
if private_key is not None:
if isinstance(private_key, PKey):
key = private_key
else:
if isinstance(private_key, str): # WPS513 Found implicit `elif` condition
stream = io.StringIO()
stream.write(private_key)
stream.seek(0)
private_key = stream
key = RSAKey.from_private_key(private_key)
```
triggers a stray WPS513 warning. Seems like the assignment to `key` is not taken into account.
## How is that should be
`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.
</issue>
<code>
[start of wemake_python_styleguide/visitors/tokenize/conditions.py]
1 import tokenize
2 from typing import ClassVar, FrozenSet
3
4 from typing_extensions import final
5
6 from wemake_python_styleguide.violations.refactoring import (
7 ImplicitElifViolation,
8 )
9 from wemake_python_styleguide.visitors.base import BaseTokenVisitor
10
11
12 @final
13 class IfElseVisitor(BaseTokenVisitor):
14 """
15 Checks if tokens tokens.
16
17 We use ``tokenize`` instead of ``ast`` because
18
19 .. code:: python
20
21 if some:
22 ...
23 else:
24 if other:
25 ...
26
27 has the same ``ast`` representation as:
28
29 .. code:: python
30
31 if some:
32 ...
33 elif other:
34 ...
35
36 That's why we have to use ``tokenize`` to find
37 the raw tokens inside the text.
38
39 """
40
41 _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((
42 tokenize.NEWLINE,
43 tokenize.NL,
44 tokenize.COLON,
45 tokenize.INDENT,
46 ))
47
48 def visit_name(self, token: tokenize.TokenInfo) -> None:
49 """
50 Checks that ``if`` nodes are defined correctly.
51
52 Raises:
53 ImplicitElifViolation
54
55 """
56 self._check_implicit_elif(token)
57
58 def _does_else_belong_to_if(self, start_index: int) -> bool:
59 previous_token = self.file_tokens[start_index - 1]
60
61 if previous_token.type != tokenize.DEDENT:
62 # This is not the first token on the line, which means that it can
63 # also be "embedded" else: x if A else B
64 return False
65
66 for token in reversed(self.file_tokens[:start_index - 1]):
67 if token.type != tokenize.NAME:
68 continue
69
70 # Here we rely upon an intuition that in Python else have to be
71 # on the same level (same indentation) as parent statement.
72 if token.start[1] == previous_token.start[1]:
73 return token.string in {'if', 'elif'}
74
75 return False
76
77 def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
78 if token.string != 'else':
79 return
80
81 index = self.file_tokens.index(token)
82
83 # `else` token can belong also to `for` and `try/except` statement,
84 # which can trigger false positive for that violation.
85 if not self._does_else_belong_to_if(index):
86 return
87
88 # There's a bug in coverage, I am not sure how to make it work.
89 for next_token in self.file_tokens[index + 1:]: # pragma: no cover
90 if next_token.exact_type in self._allowed_token_types:
91 continue
92 elif next_token.string == 'if':
93 self.add_violation(ImplicitElifViolation(next_token))
94 return
95
[end of wemake_python_styleguide/visitors/tokenize/conditions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py
--- a/wemake_python_styleguide/visitors/tokenize/conditions.py
+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py
@@ -1,5 +1,5 @@
import tokenize
-from typing import ClassVar, FrozenSet
+from typing import ClassVar, FrozenSet, Sequence
from typing_extensions import final
@@ -74,21 +74,63 @@
return False
- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
- if token.string != 'else':
- return
+ def _if_has_code_below(
+ self,
+ remaining_tokens: Sequence[tokenize.TokenInfo],
+ ) -> bool:
+ """
+ Checks code immediately below an if statement to remove false positives.
+
+ Checks that, below an if that comes immediately after an else, there is
+ more code to be considered so as not to throw an incorrect violation.
+ """
+ index = 1
+
+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:
+ index += 1
+
+ context_count = 1
- index = self.file_tokens.index(token)
+ while context_count:
+ next_token = remaining_tokens[index]
+ if next_token.exact_type == tokenize.INDENT:
+ context_count += 1
+ if next_token.exact_type == tokenize.DEDENT:
+ context_count -= 1
+ index += 1
+
+ return remaining_tokens[index].exact_type != tokenize.DEDENT
+
+ def _check_complex_else(
+ self,
+ tokens: Sequence[tokenize.TokenInfo],
+ current_token: tokenize.TokenInfo,
+ index: int,
+ ) -> None:
+ complex_else = self._if_has_code_below(tokens[index + 1:])
+ if not complex_else:
+ self.add_violation(ImplicitElifViolation(current_token))
+
+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:
+ is_not_else = token.string != 'else'
# `else` token can belong also to `for` and `try/except` statement,
# which can trigger false positive for that violation.
- if not self._does_else_belong_to_if(index):
+ belongs_to_if = self._does_else_belong_to_if(index)
+
+ return is_not_else or not belongs_to_if
+
+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:
+ token_index = self.file_tokens.index(token)
+
+ if self._is_invalid_token(token_index, token):
return
# There's a bug in coverage, I am not sure how to make it work.
- for next_token in self.file_tokens[index + 1:]: # pragma: no cover
+ next_tokens = self.file_tokens[token_index + 1:]
+ for index, next_token in enumerate(next_tokens): # pragma: no cover
if next_token.exact_type in self._allowed_token_types:
continue
elif next_token.string == 'if':
- self.add_violation(ImplicitElifViolation(next_token))
+ self._check_complex_else(next_tokens, next_token, index)
return
|
{"golden_diff": "diff --git a/wemake_python_styleguide/visitors/tokenize/conditions.py b/wemake_python_styleguide/visitors/tokenize/conditions.py\n--- a/wemake_python_styleguide/visitors/tokenize/conditions.py\n+++ b/wemake_python_styleguide/visitors/tokenize/conditions.py\n@@ -1,5 +1,5 @@\n import tokenize\n-from typing import ClassVar, FrozenSet\n+from typing import ClassVar, FrozenSet, Sequence\n \n from typing_extensions import final\n \n@@ -74,21 +74,63 @@\n \n return False\n \n- def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n- if token.string != 'else':\n- return\n+ def _if_has_code_below(\n+ self,\n+ remaining_tokens: Sequence[tokenize.TokenInfo],\n+ ) -> bool:\n+ \"\"\"\n+ Checks code immediately below an if statement to remove false positives.\n+\n+ Checks that, below an if that comes immediately after an else, there is\n+ more code to be considered so as not to throw an incorrect violation.\n+ \"\"\"\n+ index = 1\n+\n+ while remaining_tokens[index - 1].exact_type != tokenize.INDENT:\n+ index += 1\n+\n+ context_count = 1\n \n- index = self.file_tokens.index(token)\n+ while context_count:\n+ next_token = remaining_tokens[index]\n+ if next_token.exact_type == tokenize.INDENT:\n+ context_count += 1\n+ if next_token.exact_type == tokenize.DEDENT:\n+ context_count -= 1\n+ index += 1\n+\n+ return remaining_tokens[index].exact_type != tokenize.DEDENT\n+\n+ def _check_complex_else(\n+ self,\n+ tokens: Sequence[tokenize.TokenInfo],\n+ current_token: tokenize.TokenInfo,\n+ index: int,\n+ ) -> None:\n+ complex_else = self._if_has_code_below(tokens[index + 1:])\n+ if not complex_else:\n+ self.add_violation(ImplicitElifViolation(current_token))\n+\n+ def _is_invalid_token(self, index: int, token: tokenize.TokenInfo) -> bool:\n+ is_not_else = token.string != 'else'\n \n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n- if not self._does_else_belong_to_if(index):\n+ belongs_to_if = self._does_else_belong_to_if(index)\n+\n+ return is_not_else or not belongs_to_if\n+\n+ def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n+ token_index = self.file_tokens.index(token)\n+\n+ if self._is_invalid_token(token_index, token):\n return\n \n # There's a bug in coverage, I am not sure how to make it work.\n- for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n+ next_tokens = self.file_tokens[token_index + 1:]\n+ for index, next_token in enumerate(next_tokens): # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n- self.add_violation(ImplicitElifViolation(next_token))\n+ self._check_complex_else(next_tokens, next_token, index)\n return\n", "issue": "Stray WPS513 warning.\n# Bug report\r\n\r\n`WPS513` is emitted on `if`/`else`/`if` chains that contain other statements in the last `if` block.\r\n\r\n## What's wrong\r\n\r\nThe following snippet:\r\n\r\n```python\r\nif private_key is not None:\r\n if isinstance(private_key, PKey):\r\n key = private_key\r\n else:\r\n if isinstance(private_key, str): # WPS513 Found implicit `elif` condition\r\n stream = io.StringIO()\r\n stream.write(private_key)\r\n stream.seek(0)\r\n private_key = stream\r\n key = RSAKey.from_private_key(private_key)\r\n```\r\n\r\ntriggers a stray WPS513 warning. 
Seems like the assignment to `key` is not taken into account.\r\n\r\n## How is that should be\r\n\r\n`WPS513` should only be emitted if the dangling `if` statement is the only one present in the `else` block.\n", "before_files": [{"content": "import tokenize\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations.refactoring import (\n ImplicitElifViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseTokenVisitor\n\n\n@final\nclass IfElseVisitor(BaseTokenVisitor):\n \"\"\"\n Checks if tokens tokens.\n\n We use ``tokenize`` instead of ``ast`` because\n\n .. code:: python\n\n if some:\n ...\n else:\n if other:\n ...\n\n has the same ``ast`` representation as:\n\n .. code:: python\n\n if some:\n ...\n elif other:\n ...\n\n That's why we have to use ``tokenize`` to find\n the raw tokens inside the text.\n\n \"\"\"\n\n _allowed_token_types: ClassVar[FrozenSet[int]] = frozenset((\n tokenize.NEWLINE,\n tokenize.NL,\n tokenize.COLON,\n tokenize.INDENT,\n ))\n\n def visit_name(self, token: tokenize.TokenInfo) -> None:\n \"\"\"\n Checks that ``if`` nodes are defined correctly.\n\n Raises:\n ImplicitElifViolation\n\n \"\"\"\n self._check_implicit_elif(token)\n\n def _does_else_belong_to_if(self, start_index: int) -> bool:\n previous_token = self.file_tokens[start_index - 1]\n\n if previous_token.type != tokenize.DEDENT:\n # This is not the first token on the line, which means that it can\n # also be \"embedded\" else: x if A else B\n return False\n\n for token in reversed(self.file_tokens[:start_index - 1]):\n if token.type != tokenize.NAME:\n continue\n\n # Here we rely upon an intuition that in Python else have to be\n # on the same level (same indentation) as parent statement.\n if token.start[1] == previous_token.start[1]:\n return token.string in {'if', 'elif'}\n\n return False\n\n def _check_implicit_elif(self, token: tokenize.TokenInfo) -> None:\n if token.string != 'else':\n return\n\n index = self.file_tokens.index(token)\n\n # `else` token can belong also to `for` and `try/except` statement,\n # which can trigger false positive for that violation.\n if not self._does_else_belong_to_if(index):\n return\n\n # There's a bug in coverage, I am not sure how to make it work.\n for next_token in self.file_tokens[index + 1:]: # pragma: no cover\n if next_token.exact_type in self._allowed_token_types:\n continue\n elif next_token.string == 'if':\n self.add_violation(ImplicitElifViolation(next_token))\n return\n", "path": "wemake_python_styleguide/visitors/tokenize/conditions.py"}]}
| 1,553 | 757 |
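
The wemake-python-styleguide fix above decides whether a dangling `if` is the only statement in its `else` block by walking the token stream and balancing INDENT/DEDENT tokens. The snippet below, using only the standard library, prints that token structure for the pattern from the bug report; it is a sketch of the idea, not the plugin's actual implementation.

```python
import tokenize
from io import BytesIO

snippet = (
    "if outer:\n"
    "    x = 1\n"
    "else:\n"
    "    if inner:\n"
    "        y = 2\n"
    "    z = 3\n"  # extra statement after the nested if: not an implicit elif
)

# Once the DEDENT closing the nested `if` body appears, any token other than a
# further DEDENT means the `else` block still has more code below the `if`,
# so no ImplicitElifViolation should be reported.
for tok in tokenize.tokenize(BytesIO(snippet.encode("utf-8")).readline):
    if tok.type in {tokenize.NAME, tokenize.INDENT, tokenize.DEDENT}:
        print(tokenize.tok_name[tok.type], repr(tok.string))
```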
gh_patches_debug_16949
|
rasdani/github-patches
|
git_diff
|
fonttools__fonttools-579
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[feaLib] Lexer for double quote strings should not stop at newline?
I have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec
> A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters ("). Newlines embedded within the string are removed from the character sequence to be stored.
Looking at a ttx dump these strings have ` ` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('"\r\n')`
If I remove `\r\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\r\n` in the double quote lexer.
[feaLib] Lexer for double quote strings should not stop at newline?
I have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec
> A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters ("). Newlines embedded within the string are removed from the character sequence to be stored.
Looking at a ttx dump these strings have ` ` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('"\r\n')`
If I remove `\r\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\r\n` in the double quote lexer.
</issue>
<code>
[start of Lib/fontTools/feaLib/lexer.py]
1 from __future__ import print_function, division, absolute_import
2 from __future__ import unicode_literals
3 from fontTools.misc.py23 import *
4 from fontTools.feaLib.error import FeatureLibError
5 import re
6 import os
7
8
9 class Lexer(object):
10 NUMBER = "NUMBER"
11 FLOAT = "FLOAT"
12 STRING = "STRING"
13 NAME = "NAME"
14 FILENAME = "FILENAME"
15 GLYPHCLASS = "GLYPHCLASS"
16 CID = "CID"
17 SYMBOL = "SYMBOL"
18 COMMENT = "COMMENT"
19 NEWLINE = "NEWLINE"
20
21 CHAR_WHITESPACE_ = " \t"
22 CHAR_NEWLINE_ = "\r\n"
23 CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
24 CHAR_DIGIT_ = "0123456789"
25 CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
26 CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
27 CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
28 CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/"
29
30 RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.]+$")
31
32 MODE_NORMAL_ = "NORMAL"
33 MODE_FILENAME_ = "FILENAME"
34
35 def __init__(self, text, filename):
36 self.filename_ = filename
37 self.line_ = 1
38 self.pos_ = 0
39 self.line_start_ = 0
40 self.text_ = text
41 self.text_length_ = len(text)
42 self.mode_ = Lexer.MODE_NORMAL_
43
44 def __iter__(self):
45 return self
46
47 def next(self): # Python 2
48 return self.__next__()
49
50 def __next__(self): # Python 3
51 while True:
52 token_type, token, location = self.next_()
53 if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}:
54 return (token_type, token, location)
55
56 def next_(self):
57 self.scan_over_(Lexer.CHAR_WHITESPACE_)
58 column = self.pos_ - self.line_start_ + 1
59 location = (self.filename_, self.line_, column)
60 start = self.pos_
61 text = self.text_
62 limit = len(text)
63 if start >= limit:
64 raise StopIteration()
65 cur_char = text[start]
66 next_char = text[start + 1] if start + 1 < limit else None
67
68 if cur_char == "\n":
69 self.pos_ += 1
70 self.line_ += 1
71 self.line_start_ = self.pos_
72 return (Lexer.NEWLINE, None, location)
73 if cur_char == "\r":
74 self.pos_ += (2 if next_char == "\n" else 1)
75 self.line_ += 1
76 self.line_start_ = self.pos_
77 return (Lexer.NEWLINE, None, location)
78 if cur_char == "#":
79 self.scan_until_(Lexer.CHAR_NEWLINE_)
80 return (Lexer.COMMENT, text[start:self.pos_], location)
81
82 if self.mode_ is Lexer.MODE_FILENAME_:
83 if cur_char != "(":
84 raise FeatureLibError("Expected '(' before file name",
85 location)
86 self.scan_until_(")")
87 cur_char = text[self.pos_] if self.pos_ < limit else None
88 if cur_char != ")":
89 raise FeatureLibError("Expected ')' after file name",
90 location)
91 self.pos_ += 1
92 self.mode_ = Lexer.MODE_NORMAL_
93 return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location)
94
95 if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
96 self.pos_ += 1
97 self.scan_over_(Lexer.CHAR_DIGIT_)
98 return (Lexer.CID, int(text[start + 1:self.pos_], 10), location)
99 if cur_char == "@":
100 self.pos_ += 1
101 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
102 glyphclass = text[start + 1:self.pos_]
103 if len(glyphclass) < 1:
104 raise FeatureLibError("Expected glyph class name", location)
105 if len(glyphclass) > 63:
106 raise FeatureLibError(
107 "Glyph class names must not be longer than 63 characters",
108 location)
109 if not Lexer.RE_GLYPHCLASS.match(glyphclass):
110 raise FeatureLibError(
111 "Glyph class names must consist of letters, digits, "
112 "underscore, or period", location)
113 return (Lexer.GLYPHCLASS, glyphclass, location)
114 if cur_char in Lexer.CHAR_NAME_START_:
115 self.pos_ += 1
116 self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
117 token = text[start:self.pos_]
118 if token == "include":
119 self.mode_ = Lexer.MODE_FILENAME_
120 return (Lexer.NAME, token, location)
121 if cur_char == "0" and next_char in "xX":
122 self.pos_ += 2
123 self.scan_over_(Lexer.CHAR_HEXDIGIT_)
124 return (Lexer.NUMBER, int(text[start:self.pos_], 16), location)
125 if cur_char in Lexer.CHAR_DIGIT_:
126 self.scan_over_(Lexer.CHAR_DIGIT_)
127 if self.pos_ >= limit or text[self.pos_] != ".":
128 return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
129 self.scan_over_(".")
130 self.scan_over_(Lexer.CHAR_DIGIT_)
131 return (Lexer.FLOAT, float(text[start:self.pos_]), location)
132 if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
133 self.pos_ += 1
134 self.scan_over_(Lexer.CHAR_DIGIT_)
135 if self.pos_ >= limit or text[self.pos_] != ".":
136 return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)
137 self.scan_over_(".")
138 self.scan_over_(Lexer.CHAR_DIGIT_)
139 return (Lexer.FLOAT, float(text[start:self.pos_]), location)
140 if cur_char in Lexer.CHAR_SYMBOL_:
141 self.pos_ += 1
142 return (Lexer.SYMBOL, cur_char, location)
143 if cur_char == '"':
144 self.pos_ += 1
145 self.scan_until_('"\r\n')
146 if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
147 self.pos_ += 1
148 return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
149 else:
150 raise FeatureLibError("Expected '\"' to terminate string",
151 location)
152 raise FeatureLibError("Unexpected character: '%s'" % cur_char,
153 location)
154
155 def scan_over_(self, valid):
156 p = self.pos_
157 while p < self.text_length_ and self.text_[p] in valid:
158 p += 1
159 self.pos_ = p
160
161 def scan_until_(self, stop_at):
162 p = self.pos_
163 while p < self.text_length_ and self.text_[p] not in stop_at:
164 p += 1
165 self.pos_ = p
166
167
168 class IncludingLexer(object):
169 def __init__(self, featurefile):
170 self.lexers_ = [self.make_lexer_(featurefile)]
171
172 def __iter__(self):
173 return self
174
175 def next(self): # Python 2
176 return self.__next__()
177
178 def __next__(self): # Python 3
179 while self.lexers_:
180 lexer = self.lexers_[-1]
181 try:
182 token_type, token, location = lexer.next()
183 except StopIteration:
184 self.lexers_.pop()
185 continue
186 if token_type is Lexer.NAME and token == "include":
187 fname_type, fname_token, fname_location = lexer.next()
188 if fname_type is not Lexer.FILENAME:
189 raise FeatureLibError("Expected file name", fname_location)
190 #semi_type, semi_token, semi_location = lexer.next()
191 #if semi_type is not Lexer.SYMBOL or semi_token != ";":
192 # raise FeatureLibError("Expected ';'", semi_location)
193 curpath = os.path.dirname(lexer.filename_)
194 path = os.path.join(curpath, fname_token)
195 if len(self.lexers_) >= 5:
196 raise FeatureLibError("Too many recursive includes",
197 fname_location)
198 self.lexers_.append(self.make_lexer_(path, fname_location))
199 continue
200 else:
201 return (token_type, token, location)
202 raise StopIteration()
203
204 @staticmethod
205 def make_lexer_(file_or_path, location=None):
206 if hasattr(file_or_path, "read"):
207 fileobj, closing = file_or_path, False
208 else:
209 filename, closing = file_or_path, True
210 try:
211 fileobj = open(filename, "r", encoding="utf-8")
212 except IOError as err:
213 raise FeatureLibError(str(err), location)
214 data = fileobj.read()
215 filename = fileobj.name if hasattr(fileobj, "name") else "<features>"
216 if closing:
217 fileobj.close()
218 return Lexer(data, filename)
219
[end of Lib/fontTools/feaLib/lexer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/Lib/fontTools/feaLib/lexer.py b/Lib/fontTools/feaLib/lexer.py
--- a/Lib/fontTools/feaLib/lexer.py
+++ b/Lib/fontTools/feaLib/lexer.py
@@ -142,10 +142,12 @@
return (Lexer.SYMBOL, cur_char, location)
if cur_char == '"':
self.pos_ += 1
- self.scan_until_('"\r\n')
+ self.scan_until_('"')
if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
self.pos_ += 1
- return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)
+ # strip newlines embedded within a string
+ string = re.sub("[\r\n]", "", text[start + 1:self.pos_ - 1])
+ return (Lexer.STRING, string, location)
else:
raise FeatureLibError("Expected '\"' to terminate string",
location)
|
{"golden_diff": "diff --git a/Lib/fontTools/feaLib/lexer.py b/Lib/fontTools/feaLib/lexer.py\n--- a/Lib/fontTools/feaLib/lexer.py\n+++ b/Lib/fontTools/feaLib/lexer.py\n@@ -142,10 +142,12 @@\n return (Lexer.SYMBOL, cur_char, location)\n if cur_char == '\"':\n self.pos_ += 1\n- self.scan_until_('\"\\r\\n')\n+ self.scan_until_('\"')\n if self.pos_ < self.text_length_ and self.text_[self.pos_] == '\"':\n self.pos_ += 1\n- return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)\n+ # strip newlines embedded within a string\n+ string = re.sub(\"[\\r\\n]\", \"\", text[start + 1:self.pos_ - 1])\n+ return (Lexer.STRING, string, location)\n else:\n raise FeatureLibError(\"Expected '\\\"' to terminate string\",\n location)\n", "issue": "[feaLib] Lexer for double quote strings should not stop at newline?\nI have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec \n\n> A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters (\"). Newlines embedded within the string are removed from the character sequence to be stored.\n\nLooking at a ttx dump these strings have ` ` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('\"\\r\\n')`\nIf I remove `\\r\\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\\r\\n` in the double quote lexer. \n\n[feaLib] Lexer for double quote strings should not stop at newline?\nI have fonts and fea files with a name table where some nameid strings have newlines in the string. According to the spec \n\n> A string is composed of 1-byte ASCII characters enclosed by ASCII double quote characters (\"). Newlines embedded within the string are removed from the character sequence to be stored.\n\nLooking at a ttx dump these strings have ` ` inserted for the newlines so ttx seems to have no issue, but loading the fea file with feaLib fails because in lexer.py there is `self.scan_until_('\"\\r\\n')`\nIf I remove `\\r\\n` it loads fine, but then test_string fails in lexer_test.py because it doesn't raise an Exception at the embedded newline. Maybe I'm missing something, but I don't see a reason for the `\\r\\n` in the double quote lexer. 
\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom __future__ import unicode_literals\nfrom fontTools.misc.py23 import *\nfrom fontTools.feaLib.error import FeatureLibError\nimport re\nimport os\n\n\nclass Lexer(object):\n NUMBER = \"NUMBER\"\n FLOAT = \"FLOAT\"\n STRING = \"STRING\"\n NAME = \"NAME\"\n FILENAME = \"FILENAME\"\n GLYPHCLASS = \"GLYPHCLASS\"\n CID = \"CID\"\n SYMBOL = \"SYMBOL\"\n COMMENT = \"COMMENT\"\n NEWLINE = \"NEWLINE\"\n\n CHAR_WHITESPACE_ = \" \\t\"\n CHAR_NEWLINE_ = \"\\r\\n\"\n CHAR_SYMBOL_ = \",;:-+'{}[]<>()=\"\n CHAR_DIGIT_ = \"0123456789\"\n CHAR_HEXDIGIT_ = \"0123456789ABCDEFabcdef\"\n CHAR_LETTER_ = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n CHAR_NAME_START_ = CHAR_LETTER_ + \"_+*:.^~!\\\\\"\n CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + \"_.+*:^~!/\"\n\n RE_GLYPHCLASS = re.compile(r\"^[A-Za-z_0-9.]+$\")\n\n MODE_NORMAL_ = \"NORMAL\"\n MODE_FILENAME_ = \"FILENAME\"\n\n def __init__(self, text, filename):\n self.filename_ = filename\n self.line_ = 1\n self.pos_ = 0\n self.line_start_ = 0\n self.text_ = text\n self.text_length_ = len(text)\n self.mode_ = Lexer.MODE_NORMAL_\n\n def __iter__(self):\n return self\n\n def next(self): # Python 2\n return self.__next__()\n\n def __next__(self): # Python 3\n while True:\n token_type, token, location = self.next_()\n if token_type not in {Lexer.COMMENT, Lexer.NEWLINE}:\n return (token_type, token, location)\n\n def next_(self):\n self.scan_over_(Lexer.CHAR_WHITESPACE_)\n column = self.pos_ - self.line_start_ + 1\n location = (self.filename_, self.line_, column)\n start = self.pos_\n text = self.text_\n limit = len(text)\n if start >= limit:\n raise StopIteration()\n cur_char = text[start]\n next_char = text[start + 1] if start + 1 < limit else None\n\n if cur_char == \"\\n\":\n self.pos_ += 1\n self.line_ += 1\n self.line_start_ = self.pos_\n return (Lexer.NEWLINE, None, location)\n if cur_char == \"\\r\":\n self.pos_ += (2 if next_char == \"\\n\" else 1)\n self.line_ += 1\n self.line_start_ = self.pos_\n return (Lexer.NEWLINE, None, location)\n if cur_char == \"#\":\n self.scan_until_(Lexer.CHAR_NEWLINE_)\n return (Lexer.COMMENT, text[start:self.pos_], location)\n\n if self.mode_ is Lexer.MODE_FILENAME_:\n if cur_char != \"(\":\n raise FeatureLibError(\"Expected '(' before file name\",\n location)\n self.scan_until_(\")\")\n cur_char = text[self.pos_] if self.pos_ < limit else None\n if cur_char != \")\":\n raise FeatureLibError(\"Expected ')' after file name\",\n location)\n self.pos_ += 1\n self.mode_ = Lexer.MODE_NORMAL_\n return (Lexer.FILENAME, text[start + 1:self.pos_ - 1], location)\n\n if cur_char == \"\\\\\" and next_char in Lexer.CHAR_DIGIT_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.CID, int(text[start + 1:self.pos_], 10), location)\n if cur_char == \"@\":\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)\n glyphclass = text[start + 1:self.pos_]\n if len(glyphclass) < 1:\n raise FeatureLibError(\"Expected glyph class name\", location)\n if len(glyphclass) > 63:\n raise FeatureLibError(\n \"Glyph class names must not be longer than 63 characters\",\n location)\n if not Lexer.RE_GLYPHCLASS.match(glyphclass):\n raise FeatureLibError(\n \"Glyph class names must consist of letters, digits, \"\n \"underscore, or period\", location)\n return (Lexer.GLYPHCLASS, glyphclass, location)\n if cur_char in Lexer.CHAR_NAME_START_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)\n 
token = text[start:self.pos_]\n if token == \"include\":\n self.mode_ = Lexer.MODE_FILENAME_\n return (Lexer.NAME, token, location)\n if cur_char == \"0\" and next_char in \"xX\":\n self.pos_ += 2\n self.scan_over_(Lexer.CHAR_HEXDIGIT_)\n return (Lexer.NUMBER, int(text[start:self.pos_], 16), location)\n if cur_char in Lexer.CHAR_DIGIT_:\n self.scan_over_(Lexer.CHAR_DIGIT_)\n if self.pos_ >= limit or text[self.pos_] != \".\":\n return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)\n self.scan_over_(\".\")\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.FLOAT, float(text[start:self.pos_]), location)\n if cur_char == \"-\" and next_char in Lexer.CHAR_DIGIT_:\n self.pos_ += 1\n self.scan_over_(Lexer.CHAR_DIGIT_)\n if self.pos_ >= limit or text[self.pos_] != \".\":\n return (Lexer.NUMBER, int(text[start:self.pos_], 10), location)\n self.scan_over_(\".\")\n self.scan_over_(Lexer.CHAR_DIGIT_)\n return (Lexer.FLOAT, float(text[start:self.pos_]), location)\n if cur_char in Lexer.CHAR_SYMBOL_:\n self.pos_ += 1\n return (Lexer.SYMBOL, cur_char, location)\n if cur_char == '\"':\n self.pos_ += 1\n self.scan_until_('\"\\r\\n')\n if self.pos_ < self.text_length_ and self.text_[self.pos_] == '\"':\n self.pos_ += 1\n return (Lexer.STRING, text[start + 1:self.pos_ - 1], location)\n else:\n raise FeatureLibError(\"Expected '\\\"' to terminate string\",\n location)\n raise FeatureLibError(\"Unexpected character: '%s'\" % cur_char,\n location)\n\n def scan_over_(self, valid):\n p = self.pos_\n while p < self.text_length_ and self.text_[p] in valid:\n p += 1\n self.pos_ = p\n\n def scan_until_(self, stop_at):\n p = self.pos_\n while p < self.text_length_ and self.text_[p] not in stop_at:\n p += 1\n self.pos_ = p\n\n\nclass IncludingLexer(object):\n def __init__(self, featurefile):\n self.lexers_ = [self.make_lexer_(featurefile)]\n\n def __iter__(self):\n return self\n\n def next(self): # Python 2\n return self.__next__()\n\n def __next__(self): # Python 3\n while self.lexers_:\n lexer = self.lexers_[-1]\n try:\n token_type, token, location = lexer.next()\n except StopIteration:\n self.lexers_.pop()\n continue\n if token_type is Lexer.NAME and token == \"include\":\n fname_type, fname_token, fname_location = lexer.next()\n if fname_type is not Lexer.FILENAME:\n raise FeatureLibError(\"Expected file name\", fname_location)\n #semi_type, semi_token, semi_location = lexer.next()\n #if semi_type is not Lexer.SYMBOL or semi_token != \";\":\n # raise FeatureLibError(\"Expected ';'\", semi_location)\n curpath = os.path.dirname(lexer.filename_)\n path = os.path.join(curpath, fname_token)\n if len(self.lexers_) >= 5:\n raise FeatureLibError(\"Too many recursive includes\",\n fname_location)\n self.lexers_.append(self.make_lexer_(path, fname_location))\n continue\n else:\n return (token_type, token, location)\n raise StopIteration()\n\n @staticmethod\n def make_lexer_(file_or_path, location=None):\n if hasattr(file_or_path, \"read\"):\n fileobj, closing = file_or_path, False\n else:\n filename, closing = file_or_path, True\n try:\n fileobj = open(filename, \"r\", encoding=\"utf-8\")\n except IOError as err:\n raise FeatureLibError(str(err), location)\n data = fileobj.read()\n filename = fileobj.name if hasattr(fileobj, \"name\") else \"<features>\"\n if closing:\n fileobj.close()\n return Lexer(data, filename)\n", "path": "Lib/fontTools/feaLib/lexer.py"}]}
| 3,489 | 228 |
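For readers skimming the fontTools record above, the whole fix reduces to one lexing rule: scan to the closing quote instead of stopping at `\r`/`\n`, then strip any embedded newlines, which is exactly what the quoted FEA spec asks for. The sketch below reproduces the before/after behaviour outside the library; the `lex_string_*` helpers are illustrative stand-ins, not fontTools API.

```python
import re

def lex_string_old(text, pos):
    # old rule: stop at '"', '\r' or '\n'; an embedded newline leaves the
    # string unterminated, so the lexer raises (the reported failure)
    end = pos
    while end < len(text) and text[end] not in '"\r\n':
        end += 1
    if end >= len(text) or text[end] != '"':
        raise ValueError("Expected '\"' to terminate string")
    return text[pos:end], end + 1

def lex_string_new(text, pos):
    # patched rule: scan to the closing quote, then drop embedded newlines
    end = pos
    while end < len(text) and text[end] != '"':
        end += 1
    if end >= len(text):
        raise ValueError("Expected '\"' to terminate string")
    return re.sub(r"[\r\n]", "", text[pos:end]), end + 1

sample = 'first line\nsecond line" trailing'
print(lex_string_new(sample, 0))   # ('first linesecond line', 23)
# lex_string_old(sample, 0) raises ValueError instead
```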
gh_patches_debug_20297
|
rasdani/github-patches
|
git_diff
|
mozmeao__snippets-service-1063
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Snippets ICal AttributeError exception
Snippets ICal feed raises:
`AttributeError: 'ASRSnippet' object has no attribute 'locales'`
Fix the error and add tests catch this on the unit test level. We did catch this on the acceptance tests level though :sweat_smile:
</issue>
<code>
[start of snippets/base/urls.py]
1 from django.urls import path
2
3 from watchman import views as watchman_views
4
5 from snippets.base import views
6 from snippets.base import feed
7
8
9 urlpatterns = [
10 path('', views.HomeView.as_view()),
11 path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'
12 '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',
13 views.fetch_snippets, name='base.fetch_snippets'),
14 path('preview/', views.preview_snippet, name='base.preview'),
15 path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),
16 path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),
17 path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),
18 path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),
19 path('healthz/', watchman_views.ping, name='watchman.ping'),
20 path('readiness/', watchman_views.status, name='watchman.status'),
21 path('feeds/snippets.ics', feed.SnippetsFeed()),
22 ]
23
[end of snippets/base/urls.py]
[start of snippets/base/feed.py]
1 import operator
2 from datetime import timedelta
3 from distutils.util import strtobool
4 from textwrap import dedent
5 from urllib.parse import urlparse
6
7 from django.conf import settings
8 from django.db.models import Q
9
10 import django_filters
11 from django_ical.views import ICalFeed
12
13 from snippets.base import models
14
15
16 class ASRSnippetFilter(django_filters.FilterSet):
17 name = django_filters.CharFilter(lookup_expr='icontains')
18 locale = django_filters.CharFilter(method='filter_locale')
19 only_scheduled = django_filters.ChoiceFilter(
20 method='filter_scheduled', choices=(('true', 'Yes'),
21 ('false', 'No'),
22 ('all', 'All')))
23
24 def filter_locale(self, queryset, name, value):
25 if not value:
26 return queryset
27
28 locales = value.split(',')
29 return queryset.filter(
30 operator.or_(
31 *[Q(locale__code=',{},'.format(locale)) for locale in locales]
32 )
33 )
34
35 def filter_scheduled(self, queryset, name, value):
36 if value == 'all':
37 return queryset
38
39 value = strtobool(value)
40
41 if value:
42 return queryset.exclude(publish_start=None, publish_end=None)
43
44 return queryset.filter(publish_start=None, publish_end=None)
45
46 class Meta:
47 model = models.ASRSnippet
48 fields = []
49
50
51 class SnippetsFeed(ICalFeed):
52 timezone = 'UTC'
53 title = 'Snippets'
54
55 def __call__(self, request, *args, **kwargs):
56 self.request = request
57 return super().__call__(request, *args, **kwargs)
58
59 @property
60 def product_id(self):
61 return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,
62 self.request.GET.urlencode())
63
64 def items(self):
65 queryset = (models.ASRSnippet.objects
66 .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])
67 .order_by('publish_start'))
68 filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)
69 return filtr.qs
70
71 def item_title(self, item):
72 return item.name
73
74 def item_link(self, item):
75 return item.get_admin_url()
76
77 def item_description(self, item):
78 description = dedent('''\
79 Channels: {}
80 Locales: {}'
81 Preview Link: {}
82 '''.format(', '.join(item.channels),
83 ', '.join(item.locales.values_list('name', flat=True)),
84 item.get_preview_url()))
85 return description
86
87 def item_start_datetime(self, item):
88 return item.publish_start or item.created
89
90 def item_end_datetime(self, item):
91 return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))
92
93 def item_created(self, item):
94 return item.created
95
96 def item_updateddate(self, item):
97 return item.modified
98
[end of snippets/base/feed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/snippets/base/feed.py b/snippets/base/feed.py
--- a/snippets/base/feed.py
+++ b/snippets/base/feed.py
@@ -77,10 +77,10 @@
def item_description(self, item):
description = dedent('''\
Channels: {}
- Locales: {}'
+ Locale: {}'
Preview Link: {}
'''.format(', '.join(item.channels),
- ', '.join(item.locales.values_list('name', flat=True)),
+ item.locale,
item.get_preview_url()))
return description
diff --git a/snippets/base/urls.py b/snippets/base/urls.py
--- a/snippets/base/urls.py
+++ b/snippets/base/urls.py
@@ -18,5 +18,5 @@
path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),
path('healthz/', watchman_views.ping, name='watchman.ping'),
path('readiness/', watchman_views.status, name='watchman.status'),
- path('feeds/snippets.ics', feed.SnippetsFeed()),
+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),
]
|
{"golden_diff": "diff --git a/snippets/base/feed.py b/snippets/base/feed.py\n--- a/snippets/base/feed.py\n+++ b/snippets/base/feed.py\n@@ -77,10 +77,10 @@\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n- Locales: {}'\n+ Locale: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n- ', '.join(item.locales.values_list('name', flat=True)),\n+ item.locale,\n item.get_preview_url()))\n return description\n \ndiff --git a/snippets/base/urls.py b/snippets/base/urls.py\n--- a/snippets/base/urls.py\n+++ b/snippets/base/urls.py\n@@ -18,5 +18,5 @@\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n- path('feeds/snippets.ics', feed.SnippetsFeed()),\n+ path('feeds/snippets.ics', feed.SnippetsFeed(), name='ical-feed'),\n ]\n", "issue": "Fix Snippets ICal AttributeError exception \nSnippets ICal feed raises:\r\n\r\n`AttributeError: 'ASRSnippet' object has no attribute 'locales'`\r\n\r\nFix the error and add tests catch this on the unit test level. We did catch this on the acceptance tests level though :sweat_smile: \r\n\n", "before_files": [{"content": "from django.urls import path\n\nfrom watchman import views as watchman_views\n\nfrom snippets.base import views\nfrom snippets.base import feed\n\n\nurlpatterns = [\n path('', views.HomeView.as_view()),\n path('<int:startpage_version>/<name>/<version>/<appbuildid>/<build_target>/'\n '<locale>/<channel>/<os_version>/<distribution>/<distribution_version>/',\n views.fetch_snippets, name='base.fetch_snippets'),\n path('preview/', views.preview_snippet, name='base.preview'),\n path('preview-asr/<str:uuid>/', views.preview_asr_snippet, name='asr-preview'),\n path('show/<int:snippet_id>/', views.show_snippet, name='base.show'),\n path('show/uuid/<str:snippet_id>/', views.show_snippet, {'uuid': True}, name='base.show_uuid'),\n path('csp-violation-capture', views.csp_violation_capture, name='csp-violation-capture'),\n path('healthz/', watchman_views.ping, name='watchman.ping'),\n path('readiness/', watchman_views.status, name='watchman.status'),\n path('feeds/snippets.ics', feed.SnippetsFeed()),\n]\n", "path": "snippets/base/urls.py"}, {"content": "import operator\nfrom datetime import timedelta\nfrom distutils.util import strtobool\nfrom textwrap import dedent\nfrom urllib.parse import urlparse\n\nfrom django.conf import settings\nfrom django.db.models import Q\n\nimport django_filters\nfrom django_ical.views import ICalFeed\n\nfrom snippets.base import models\n\n\nclass ASRSnippetFilter(django_filters.FilterSet):\n name = django_filters.CharFilter(lookup_expr='icontains')\n locale = django_filters.CharFilter(method='filter_locale')\n only_scheduled = django_filters.ChoiceFilter(\n method='filter_scheduled', choices=(('true', 'Yes'),\n ('false', 'No'),\n ('all', 'All')))\n\n def filter_locale(self, queryset, name, value):\n if not value:\n return queryset\n\n locales = value.split(',')\n return queryset.filter(\n operator.or_(\n *[Q(locale__code=',{},'.format(locale)) for locale in locales]\n )\n )\n\n def filter_scheduled(self, queryset, name, value):\n if value == 'all':\n return queryset\n\n value = strtobool(value)\n\n if value:\n return queryset.exclude(publish_start=None, publish_end=None)\n\n return queryset.filter(publish_start=None, publish_end=None)\n\n class Meta:\n model = models.ASRSnippet\n fields = []\n\n\nclass 
SnippetsFeed(ICalFeed):\n timezone = 'UTC'\n title = 'Snippets'\n\n def __call__(self, request, *args, **kwargs):\n self.request = request\n return super().__call__(request, *args, **kwargs)\n\n @property\n def product_id(self):\n return '//{}/Snippets?{}'.format(urlparse(settings.SITE_URL).netloc,\n self.request.GET.urlencode())\n\n def items(self):\n queryset = (models.ASRSnippet.objects\n .filter(for_qa=False, status=models.STATUS_CHOICES['Published'])\n .order_by('publish_start'))\n filtr = ASRSnippetFilter(self.request.GET, queryset=queryset)\n return filtr.qs\n\n def item_title(self, item):\n return item.name\n\n def item_link(self, item):\n return item.get_admin_url()\n\n def item_description(self, item):\n description = dedent('''\\\n Channels: {}\n Locales: {}'\n Preview Link: {}\n '''.format(', '.join(item.channels),\n ', '.join(item.locales.values_list('name', flat=True)),\n item.get_preview_url()))\n return description\n\n def item_start_datetime(self, item):\n return item.publish_start or item.created\n\n def item_end_datetime(self, item):\n return item.publish_end or (self.item_start_datetime(item) + timedelta(days=365))\n\n def item_created(self, item):\n return item.created\n\n def item_updateddate(self, item):\n return item.modified\n", "path": "snippets/base/feed.py"}]}
| 1,722 | 269 |
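The snippets-service record above fails because `ASRSnippet` has a single `locale` field rather than a `locales` relation; the patch reads `item.locale` directly and names the route `ical-feed` so tests can `reverse()` it. A minimal unit-level check could look like the sketch below; it assumes the project's Django test settings are loaded, and the `SimpleNamespace` stub plus its field values are made up for illustration.

```python
from types import SimpleNamespace

from snippets.base.feed import SnippetsFeed

# Stub carrying only the attributes item_description() reads after the fix.
item = SimpleNamespace(
    channels=['release', 'beta'],
    locale='fr',
    get_preview_url=lambda: 'https://example.com/preview/',
)

description = SnippetsFeed().item_description(item)
assert 'Channels: release, beta' in description
assert 'Locale: fr' in description

# A request-level test could additionally fetch the newly named route:
#   self.client.get(reverse('ical-feed'))
```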
gh_patches_debug_29258
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-4250
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/Chat/examples/ray/mmmt_prompt.py]
1 import argparse
2 import os
3 import socket
4 from functools import partial
5
6 import pandas as pd
7 import ray
8 import torch
9 from coati.quant import llama_load_quant, low_resource_init
10 from coati.ray.detached_trainer_ppo import DetachedPPOTrainer
11 from coati.ray.experience_maker_holder import ExperienceMakerHolder
12 from coati.ray.utils import (
13 get_actor_from_args,
14 get_critic_from_args,
15 get_receivers_per_sender,
16 get_reward_model_from_args,
17 get_strategy_from_args,
18 )
19 from torch.utils.data import DataLoader
20 from transformers import AutoConfig, AutoTokenizer
21 from transformers.modeling_utils import no_init_weights
22
23
24 def get_free_port():
25 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
26 s.bind(('', 0))
27 return s.getsockname()[1]
28
29
30 def get_local_ip():
31 with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
32 s.connect(('8.8.8.8', 80))
33 return s.getsockname()[0]
34
35
36 def main(args):
37 master_addr = str(get_local_ip())
38 # trainer_env_info
39 trainer_port = str(get_free_port())
40 env_info_trainers = [{
41 'local_rank': '0',
42 'rank': str(rank),
43 'world_size': str(args.num_trainers),
44 'master_port': trainer_port,
45 'master_addr': master_addr
46 } for rank in range(args.num_trainers)]
47
48 # maker_env_info
49 maker_port = str(get_free_port())
50 env_info_makers = [{
51 'local_rank': '0',
52 'rank': str(rank),
53 'world_size': str(args.num_makers),
54 'master_port': maker_port,
55 'master_addr': master_addr
56 } for rank in range(args.num_makers)]
57
58 # configure tokenizer
59 tokenizer = AutoTokenizer.from_pretrained(args.pretrain)
60 tokenizer.pad_token = tokenizer.eos_token
61
62 def model_fn():
63 actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()
64 critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()
65 reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()
66 if args.initial_model_quant_ckpt is not None and args.model == 'llama':
67 # quantize initial model
68 actor_cfg = AutoConfig.from_pretrained(args.pretrain)
69 with low_resource_init(), no_init_weights():
70 initial_model = get_actor_from_args(args.model, config=actor_cfg)
71 initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits,
72 args.quant_group_size).cuda().requires_grad_(False)
73 else:
74 initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()
75 return actor, critic, reward_model, initial_model
76
77 # configure Experience Maker
78 experience_holder_refs = [
79 ExperienceMakerHolder.options(name=f"maker{i}", num_gpus=1, max_concurrency=2).remote(
80 detached_trainer_name_list=[
81 f'trainer{x}'
82 for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False)
83 ],
84 strategy_fn=partial(get_strategy_from_args, args.maker_strategy),
85 model_fn=model_fn,
86 env_info=env_info_maker,
87 kl_coef=0.1,
88 debug=args.debug,
89 update_lora_weights=not (args.lora_rank == 0),
90 # sync_models_from_trainers=True,
91 # generation kwargs:
92 max_length=512,
93 do_sample=True,
94 temperature=1.0,
95 top_k=50,
96 pad_token_id=tokenizer.pad_token_id,
97 eos_token_id=tokenizer.eos_token_id,
98 eval_performance=True,
99 use_cache=True,
100 )
101 for i, env_info_maker in enumerate(env_info_makers)
102 ]
103
104 def trainer_model_fn():
105 actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda()
106 critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda()
107 return actor, critic
108
109 # configure Trainer
110 trainer_refs = [
111 DetachedPPOTrainer.options(name=f"trainer{i}", num_gpus=1, max_concurrency=2).remote(
112 experience_maker_holder_name_list=[
113 f"maker{x}"
114 for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True)
115 ],
116 strategy_fn=partial(get_strategy_from_args, args.trainer_strategy),
117 model_fn=trainer_model_fn,
118 env_info=env_info_trainer,
119 train_batch_size=args.train_batch_size,
120 buffer_limit=16,
121 eval_performance=True,
122 debug=args.debug,
123 update_lora_weights=not (args.lora_rank == 0),
124 )
125 for i, env_info_trainer in enumerate(env_info_trainers)
126 ]
127
128 dataset_size = args.experience_batch_size * 4
129
130 def build_dataloader():
131
132 def tokenize_fn(texts):
133 batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)
134 return {k: v.cuda() for k, v in batch.items()}
135
136 dataset = pd.read_csv(args.prompt_path)['prompt']
137 dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn)
138 return dataloader
139
140 # uncomment this function if sync_models_from_trainers is True
141 # ray.get([
142 # trainer_ref.sync_models_to_remote_makers.remote()
143 # for trainer_ref in trainer_refs
144 # ])
145
146 wait_tasks = []
147
148 for experience_holder_ref in experience_holder_refs:
149 wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps))
150
151 total_steps = args.experience_batch_size * args.experience_steps * \
152 args.num_makers // (args.num_trainers * args.train_batch_size)
153 for trainer_ref in trainer_refs:
154 wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs))
155
156 ray.get(wait_tasks)
157
158
159 if __name__ == '__main__':
160 parser = argparse.ArgumentParser()
161 parser.add_argument('--prompt_path', type=str, default=None)
162 parser.add_argument('--num_makers', type=int, default=1)
163 parser.add_argument('--num_trainers', type=int, default=1)
164 parser.add_argument('--trainer_strategy',
165 choices=[
166 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',
167 'colossalai_zero2_cpu'
168 ],
169 default='ddp')
170 parser.add_argument('--maker_strategy', choices=['naive'], default='naive')
171 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
172 parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
173 parser.add_argument('--pretrain', type=str, default=None)
174 parser.add_argument('--critic_pretrain', type=str, default=None)
175 parser.add_argument('--experience_steps', type=int, default=4)
176 parser.add_argument('--experience_batch_size', type=int, default=8)
177 parser.add_argument('--train_epochs', type=int, default=1)
178 parser.add_argument('--update_steps', type=int, default=2)
179 parser.add_argument('--train_batch_size', type=int, default=8)
180 parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
181
182 parser.add_argument('--initial_model_quant_ckpt', type=str, default=None)
183 parser.add_argument('--quant_bits', type=int, default=4)
184 parser.add_argument('--quant_group_size', type=int, default=128)
185 parser.add_argument('--debug', action='store_true')
186 args = parser.parse_args()
187
188 ray.init(namespace=os.environ["RAY_NAMESPACE"], runtime_env={"env_vars": dict(os.environ)})
189 main(args)
190
[end of applications/Chat/examples/ray/mmmt_prompt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py
--- a/applications/Chat/examples/ray/mmmt_prompt.py
+++ b/applications/Chat/examples/ray/mmmt_prompt.py
@@ -87,8 +87,8 @@
kl_coef=0.1,
debug=args.debug,
update_lora_weights=not (args.lora_rank == 0),
- # sync_models_from_trainers=True,
- # generation kwargs:
+ # sync_models_from_trainers=True,
+ # generation kwargs:
max_length=512,
do_sample=True,
temperature=1.0,
@@ -161,12 +161,10 @@
parser.add_argument('--prompt_path', type=str, default=None)
parser.add_argument('--num_makers', type=int, default=1)
parser.add_argument('--num_trainers', type=int, default=1)
- parser.add_argument('--trainer_strategy',
- choices=[
- 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',
- 'colossalai_zero2_cpu'
- ],
- default='ddp')
+ parser.add_argument(
+ '--trainer_strategy',
+ choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', 'colossalai_zero2_cpu'],
+ default='ddp')
parser.add_argument('--maker_strategy', choices=['naive'], default='naive')
parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])
|
{"golden_diff": "diff --git a/applications/Chat/examples/ray/mmmt_prompt.py b/applications/Chat/examples/ray/mmmt_prompt.py\n--- a/applications/Chat/examples/ray/mmmt_prompt.py\n+++ b/applications/Chat/examples/ray/mmmt_prompt.py\n@@ -87,8 +87,8 @@\n kl_coef=0.1,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n- # sync_models_from_trainers=True,\n- # generation kwargs:\n+ # sync_models_from_trainers=True,\n+ # generation kwargs:\n max_length=512,\n do_sample=True,\n temperature=1.0,\n@@ -161,12 +161,10 @@\n parser.add_argument('--prompt_path', type=str, default=None)\n parser.add_argument('--num_makers', type=int, default=1)\n parser.add_argument('--num_trainers', type=int, default=1)\n- parser.add_argument('--trainer_strategy',\n- choices=[\n- 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',\n- 'colossalai_zero2_cpu'\n- ],\n- default='ddp')\n+ parser.add_argument(\n+ '--trainer_strategy',\n+ choices=['ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu', 'colossalai_zero2_cpu'],\n+ default='ddp')\n parser.add_argument('--maker_strategy', choices=['naive'], default='naive')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport os\nimport socket\nfrom functools import partial\n\nimport pandas as pd\nimport ray\nimport torch\nfrom coati.quant import llama_load_quant, low_resource_init\nfrom coati.ray.detached_trainer_ppo import DetachedPPOTrainer\nfrom coati.ray.experience_maker_holder import ExperienceMakerHolder\nfrom coati.ray.utils import (\n get_actor_from_args,\n get_critic_from_args,\n get_receivers_per_sender,\n get_reward_model_from_args,\n get_strategy_from_args,\n)\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoConfig, AutoTokenizer\nfrom transformers.modeling_utils import no_init_weights\n\n\ndef get_free_port():\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind(('', 0))\n return s.getsockname()[1]\n\n\ndef get_local_ip():\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:\n s.connect(('8.8.8.8', 80))\n return s.getsockname()[0]\n\n\ndef main(args):\n master_addr = str(get_local_ip())\n # trainer_env_info\n trainer_port = str(get_free_port())\n env_info_trainers = [{\n 'local_rank': '0',\n 'rank': str(rank),\n 'world_size': str(args.num_trainers),\n 'master_port': trainer_port,\n 'master_addr': master_addr\n } for rank in range(args.num_trainers)]\n\n # maker_env_info\n maker_port = str(get_free_port())\n env_info_makers = [{\n 'local_rank': '0',\n 'rank': str(rank),\n 'world_size': str(args.num_makers),\n 'master_port': maker_port,\n 'master_addr': master_addr\n } for rank in range(args.num_makers)]\n\n # configure tokenizer\n tokenizer = AutoTokenizer.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n\n def model_fn():\n actor = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()\n critic = get_critic_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()\n reward_model = get_reward_model_from_args(args.model, args.critic_pretrain).requires_grad_(False).half().cuda()\n if args.initial_model_quant_ckpt is not None and args.model == 'llama':\n # quantize initial model\n actor_cfg = 
AutoConfig.from_pretrained(args.pretrain)\n with low_resource_init(), no_init_weights():\n initial_model = get_actor_from_args(args.model, config=actor_cfg)\n initial_model.model = llama_load_quant(initial_model.model, args.initial_model_quant_ckpt, args.quant_bits,\n args.quant_group_size).cuda().requires_grad_(False)\n else:\n initial_model = get_actor_from_args(args.model, args.pretrain).requires_grad_(False).half().cuda()\n return actor, critic, reward_model, initial_model\n\n # configure Experience Maker\n experience_holder_refs = [\n ExperienceMakerHolder.options(name=f\"maker{i}\", num_gpus=1, max_concurrency=2).remote(\n detached_trainer_name_list=[\n f'trainer{x}'\n for x in get_receivers_per_sender(i, args.num_makers, args.num_trainers, allow_idle_sender=False)\n ],\n strategy_fn=partial(get_strategy_from_args, args.maker_strategy),\n model_fn=model_fn,\n env_info=env_info_maker,\n kl_coef=0.1,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n # sync_models_from_trainers=True,\n # generation kwargs:\n max_length=512,\n do_sample=True,\n temperature=1.0,\n top_k=50,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n eval_performance=True,\n use_cache=True,\n )\n for i, env_info_maker in enumerate(env_info_makers)\n ]\n\n def trainer_model_fn():\n actor = get_actor_from_args(args.model, args.pretrain, lora_rank=args.lora_rank).half().cuda()\n critic = get_critic_from_args(args.model, args.critic_pretrain, lora_rank=args.lora_rank).half().cuda()\n return actor, critic\n\n # configure Trainer\n trainer_refs = [\n DetachedPPOTrainer.options(name=f\"trainer{i}\", num_gpus=1, max_concurrency=2).remote(\n experience_maker_holder_name_list=[\n f\"maker{x}\"\n for x in get_receivers_per_sender(i, args.num_trainers, args.num_makers, allow_idle_sender=True)\n ],\n strategy_fn=partial(get_strategy_from_args, args.trainer_strategy),\n model_fn=trainer_model_fn,\n env_info=env_info_trainer,\n train_batch_size=args.train_batch_size,\n buffer_limit=16,\n eval_performance=True,\n debug=args.debug,\n update_lora_weights=not (args.lora_rank == 0),\n )\n for i, env_info_trainer in enumerate(env_info_trainers)\n ]\n\n dataset_size = args.experience_batch_size * 4\n\n def build_dataloader():\n\n def tokenize_fn(texts):\n batch = tokenizer(texts, return_tensors='pt', max_length=96, padding='max_length', truncation=True)\n return {k: v.cuda() for k, v in batch.items()}\n\n dataset = pd.read_csv(args.prompt_path)['prompt']\n dataloader = DataLoader(dataset=dataset, batch_size=dataset_size, shuffle=True, collate_fn=tokenize_fn)\n return dataloader\n\n # uncomment this function if sync_models_from_trainers is True\n # ray.get([\n # trainer_ref.sync_models_to_remote_makers.remote()\n # for trainer_ref in trainer_refs\n # ])\n\n wait_tasks = []\n\n for experience_holder_ref in experience_holder_refs:\n wait_tasks.append(experience_holder_ref.workingloop.remote(build_dataloader, num_steps=args.experience_steps))\n\n total_steps = args.experience_batch_size * args.experience_steps * \\\n args.num_makers // (args.num_trainers * args.train_batch_size)\n for trainer_ref in trainer_refs:\n wait_tasks.append(trainer_ref.fit.remote(total_steps, args.update_steps, args.train_epochs))\n\n ray.get(wait_tasks)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prompt_path', type=str, default=None)\n parser.add_argument('--num_makers', type=int, default=1)\n parser.add_argument('--num_trainers', type=int, default=1)\n 
parser.add_argument('--trainer_strategy',\n choices=[\n 'ddp', 'colossalai_gemini', 'colossalai_zero2', 'colossalai_gemini_cpu',\n 'colossalai_zero2_cpu'\n ],\n default='ddp')\n parser.add_argument('--maker_strategy', choices=['naive'], default='naive')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--critic_model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--critic_pretrain', type=str, default=None)\n parser.add_argument('--experience_steps', type=int, default=4)\n parser.add_argument('--experience_batch_size', type=int, default=8)\n parser.add_argument('--train_epochs', type=int, default=1)\n parser.add_argument('--update_steps', type=int, default=2)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n\n parser.add_argument('--initial_model_quant_ckpt', type=str, default=None)\n parser.add_argument('--quant_bits', type=int, default=4)\n parser.add_argument('--quant_group_size', type=int, default=128)\n parser.add_argument('--debug', action='store_true')\n args = parser.parse_args()\n\n ray.init(namespace=os.environ[\"RAY_NAMESPACE\"], runtime_env={\"env_vars\": dict(os.environ)})\n main(args)\n", "path": "applications/Chat/examples/ray/mmmt_prompt.py"}]}
| 2,842 | 408 |
gh_patches_debug_12493
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-1082
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to open Smashcast streams
I get the following error whenever I try to open a Smashcast stream
```
streamlink smashcast.tv/greatvaluesmash best
[cli][info] Found matching plugin hitbox for URL smashcast.tv/greatvaluesmash
Traceback (most recent call last):
File "C:\Program Files (x86)\Streamlink\bin\streamlink-script.py", line 15, in
<module>
main()
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 103
8, in main
handle_url()
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 482
, in handle_url
streams = fetch_streams(plugin)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink_cli\main.py", line 394
, in fetch_streams
sorting_excludes=args.stream_sorting_excludes)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugin\plugin.py", lin
e 345, in get_streams
return self.streams(*args, **kwargs)
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugin\plugin.py", lin
e 248, in streams
ostreams = self._get_streams()
File "C:\Program Files (x86)\Streamlink\pkgs\streamlink\plugins\hitbox.py", li
ne 181, in _get_streams
if livestream["media_hosted_media"]:
KeyError: 'media_hosted_media'
```
Tried with multiple different streams and using a hitbox url instead of smashcast and it still occurs. It still works perfectly fine on Livestreamer, so I'm not really sure what's up. Asked a friend to test it out to see if it's not just me and he had the same error.
Using the latest version (0.7.0) on Windows 7, and my friend was using Windows 8.1.
</issue>
<code>
[start of src/streamlink/plugins/hitbox.py]
1 import re
2
3 from itertools import chain
4
5 from streamlink.compat import urlparse
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import StreamMapper, http, validate
8 from streamlink.stream import HLSStream, HTTPStream, RTMPStream
9 from streamlink.utils import absolute_url
10
11 HLS_PLAYLIST_BASE = "http://www.smashcast.tv{0}"
12 LIVE_API = "http://www.smashcast.tv/api/media/live/{0}?showHidden=true&liveonly=false"
13 PLAYER_API = "http://www.smashcast.tv/api/player/config/{0}/{1}?embed=false&showHidden=true"
14 SWF_BASE = "http://edge.vie.hitbox.tv/static/player/flowplayer/"
15 SWF_URL = SWF_BASE + "flowplayer.commercial-3.2.16.swf"
16 VOD_BASE_URL = "http://www.smashcast.tv/"
17
18 _quality_re = re.compile(r"(\d+p)$")
19 _url_re = re.compile(r"""
20 http(s)?://(www\.)?(hitbox|smashcast).tv
21 /(?P<channel>[^/]+)
22 (?:
23 (?:/videos)?/(?P<media_id>[^/]+)
24 )?
25 """, re.VERBOSE)
26
27 _live_schema = validate.Schema(
28 {
29 "livestream": [{
30 "media_user_name": validate.text,
31 validate.optional("media_hosted_media"): object,
32 "media_is_live": validate.all(
33 validate.text,
34 validate.transform(int),
35 validate.transform(bool)
36 ),
37 "media_id": validate.text
38 }],
39 },
40 validate.get("livestream"),
41 validate.length(1),
42 validate.get(0)
43 )
44 _player_schema = validate.Schema(
45 {
46 "clip": {
47 "baseUrl": validate.any(None, validate.text),
48 "bitrates": validate.all(
49 validate.filter(lambda b: b.get("url") and b.get("label")),
50 [{
51 "label": validate.text,
52 "url": validate.text,
53 }],
54 )
55 },
56 validate.optional("playlist"): [{
57 validate.optional("connectionProvider"): validate.text,
58 validate.optional("netConnectionUrl"): validate.text,
59 validate.optional("bitrates"): [{
60 "label": validate.text,
61 "url": validate.text,
62 "provider": validate.text
63 }]
64 }],
65 "plugins": validate.all(
66 dict,
67 validate.filter(lambda k, v: k in ["rtmp", "rtmpHitbox", "hls"]),
68 {
69 validate.text: {
70 validate.optional("netConnectionUrl"): validate.text,
71 "url": validate.text
72 }
73 }
74 )
75 }
76 )
77
78
79 class Hitbox(Plugin):
80 @classmethod
81 def can_handle_url(cls, url):
82 return _url_re.match(url)
83
84 def _get_quality(self, label):
85 match = _quality_re.search(label)
86 if match:
87 return match.group(1)
88
89 return "live"
90
91 def _create_hls_streams(self, bitrate):
92 url = bitrate["url"]
93 quality = self._get_quality(bitrate["label"])
94
95 if not url.startswith("http"):
96 url = HLS_PLAYLIST_BASE.format(url)
97
98 if bitrate["label"] == "Auto":
99 try:
100 streams = HLSStream.parse_variant_playlist(self.session, url)
101 return streams.items()
102 except IOError as err:
103 self.logger.warning("Failed to extract HLS streams: {0}", err)
104 else:
105 return quality, HLSStream(self.session, url)
106
107 def _create_rtmp_stream(self, rtmp, swf_url, bitrate):
108 quality = self._get_quality(bitrate["label"])
109 url = bitrate["url"]
110 stream = RTMPStream(self.session, {
111 "rtmp": rtmp,
112 "pageUrl": self.url,
113 "playpath": url,
114 "swfVfy": swf_url,
115 "live": True
116 })
117
118 return quality, stream
119
120 def _get_live_streams(self, player):
121 mappers = []
122 swf_url = SWF_URL
123 for playlist in player.get("playlist", []):
124 bitrates = playlist.get("bitrates")
125 provider = playlist.get("connectionProvider")
126 rtmp = None
127
128 if bitrates:
129 rtmp = playlist.get("netConnectionUrl")
130 elif provider and provider in player["plugins"]:
131 provider = player["plugins"][provider]
132 swf_name = provider["url"]
133 swf_url = SWF_BASE + swf_name
134 rtmp = provider["netConnectionUrl"]
135 bitrates = player["clip"]["bitrates"]
136 else:
137 continue
138
139 mapper = StreamMapper(
140 cmp=lambda provider, bitrate: bitrate["provider"].startswith(provider)
141 )
142 mapper.map("hls", self._create_hls_streams)
143 mapper.map("rtmp", self._create_rtmp_stream, rtmp, swf_url)
144 mappers.append(mapper(bitrates))
145
146 return chain.from_iterable(mappers)
147
148 def _create_video_stream(self, cls, base_url, bitrate):
149 url = absolute_url(base_url, bitrate["url"])
150 if bitrate["label"].lower() == "auto":
151 try:
152 return cls.parse_variant_playlist(self.session, url).items()
153 except IOError as err:
154 self.logger.warning("Failed to extract HLS streams: {0}", err)
155 return
156
157 quality = self._get_quality(bitrate["label"])
158 return quality, cls(self.session, url)
159
160 def _get_video_streams(self, player):
161 base_url = player["clip"]["baseUrl"] or VOD_BASE_URL
162 mapper = StreamMapper(
163 cmp=lambda ext, bitrate: urlparse(bitrate["url"]).path.endswith(ext)
164 )
165 mapper.map(".m3u8", self._create_video_stream, HLSStream, base_url)
166 mapper.map(".mp4", self._create_video_stream, HTTPStream, base_url)
167 mapper.map(".flv", self._create_video_stream, HTTPStream, base_url)
168
169 return mapper(player["clip"]["bitrates"])
170
171 def _get_streams(self):
172 match = _url_re.match(self.url)
173 if not match:
174 return
175
176 channel, media_id = match.group("channel", "media_id")
177 self.logger.debug("Matched URL: channel={0}, media_id={1}".format(channel, media_id))
178 if not media_id:
179 res = http.get(LIVE_API.format(channel))
180 livestream = http.json(res, schema=_live_schema)
181 if livestream["media_hosted_media"]:
182 hosted = _live_schema.validate(livestream["media_hosted_media"])
183 self.logger.info("{0} is hosting {1}", livestream["media_user_name"], hosted["media_user_name"])
184 livestream = hosted
185
186 if not livestream["media_is_live"]:
187 return
188
189 media_id = livestream["media_id"]
190 media_type = "live"
191 else:
192 media_type = "video"
193
194 res = http.get(PLAYER_API.format(media_type, media_id))
195 player = http.json(res, schema=_player_schema)
196
197 if media_type == "live":
198 return self._get_live_streams(player)
199 else:
200 return self._get_video_streams(player)
201
202
203 __plugin__ = Hitbox
204
[end of src/streamlink/plugins/hitbox.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/hitbox.py b/src/streamlink/plugins/hitbox.py
--- a/src/streamlink/plugins/hitbox.py
+++ b/src/streamlink/plugins/hitbox.py
@@ -178,7 +178,7 @@
if not media_id:
res = http.get(LIVE_API.format(channel))
livestream = http.json(res, schema=_live_schema)
- if livestream["media_hosted_media"]:
+ if livestream.get("media_hosted_media"):
hosted = _live_schema.validate(livestream["media_hosted_media"])
self.logger.info("{0} is hosting {1}", livestream["media_user_name"], hosted["media_user_name"])
livestream = hosted
|
{"golden_diff": "diff --git a/src/streamlink/plugins/hitbox.py b/src/streamlink/plugins/hitbox.py\n--- a/src/streamlink/plugins/hitbox.py\n+++ b/src/streamlink/plugins/hitbox.py\n@@ -178,7 +178,7 @@\n if not media_id:\n res = http.get(LIVE_API.format(channel))\n livestream = http.json(res, schema=_live_schema)\n- if livestream[\"media_hosted_media\"]:\n+ if livestream.get(\"media_hosted_media\"):\n hosted = _live_schema.validate(livestream[\"media_hosted_media\"])\n self.logger.info(\"{0} is hosting {1}\", livestream[\"media_user_name\"], hosted[\"media_user_name\"])\n livestream = hosted\n", "issue": "Unable to open Smashcast streams\nI get the following error whenever I try to open a Smashcast stream \r\n\r\n```\r\nstreamlink smashcast.tv/greatvaluesmash best\r\n[cli][info] Found matching plugin hitbox for URL smashcast.tv/greatvaluesmash\r\nTraceback (most recent call last):\r\n File \"C:\\Program Files (x86)\\Streamlink\\bin\\streamlink-script.py\", line 15, in\r\n <module>\r\n main()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 103\r\n8, in main\r\n handle_url()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 482\r\n, in handle_url\r\n streams = fetch_streams(plugin)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink_cli\\main.py\", line 394\r\n, in fetch_streams\r\n sorting_excludes=args.stream_sorting_excludes)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", lin\r\ne 345, in get_streams\r\n return self.streams(*args, **kwargs)\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugin\\plugin.py\", lin\r\ne 248, in streams\r\n ostreams = self._get_streams()\r\n File \"C:\\Program Files (x86)\\Streamlink\\pkgs\\streamlink\\plugins\\hitbox.py\", li\r\nne 181, in _get_streams\r\n if livestream[\"media_hosted_media\"]:\r\nKeyError: 'media_hosted_media'\r\n```\r\n\r\nTried with multiple different streams and using a hitbox url instead of smashcast and it still occurs. It still works perfectly fine on Livestreamer, so I'm not really sure what's up. Asked a friend to test it out to see if it's not just me and he had the same error. \r\n\r\nUsing the latest version (0.7.0) on Windows 7, and my friend was using Windows 8.1. 
\n", "before_files": [{"content": "import re\n\nfrom itertools import chain\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import StreamMapper, http, validate\nfrom streamlink.stream import HLSStream, HTTPStream, RTMPStream\nfrom streamlink.utils import absolute_url\n\nHLS_PLAYLIST_BASE = \"http://www.smashcast.tv{0}\"\nLIVE_API = \"http://www.smashcast.tv/api/media/live/{0}?showHidden=true&liveonly=false\"\nPLAYER_API = \"http://www.smashcast.tv/api/player/config/{0}/{1}?embed=false&showHidden=true\"\nSWF_BASE = \"http://edge.vie.hitbox.tv/static/player/flowplayer/\"\nSWF_URL = SWF_BASE + \"flowplayer.commercial-3.2.16.swf\"\nVOD_BASE_URL = \"http://www.smashcast.tv/\"\n\n_quality_re = re.compile(r\"(\\d+p)$\")\n_url_re = re.compile(r\"\"\"\n http(s)?://(www\\.)?(hitbox|smashcast).tv\n /(?P<channel>[^/]+)\n (?:\n (?:/videos)?/(?P<media_id>[^/]+)\n )?\n\"\"\", re.VERBOSE)\n\n_live_schema = validate.Schema(\n {\n \"livestream\": [{\n \"media_user_name\": validate.text,\n validate.optional(\"media_hosted_media\"): object,\n \"media_is_live\": validate.all(\n validate.text,\n validate.transform(int),\n validate.transform(bool)\n ),\n \"media_id\": validate.text\n }],\n },\n validate.get(\"livestream\"),\n validate.length(1),\n validate.get(0)\n)\n_player_schema = validate.Schema(\n {\n \"clip\": {\n \"baseUrl\": validate.any(None, validate.text),\n \"bitrates\": validate.all(\n validate.filter(lambda b: b.get(\"url\") and b.get(\"label\")),\n [{\n \"label\": validate.text,\n \"url\": validate.text,\n }],\n )\n },\n validate.optional(\"playlist\"): [{\n validate.optional(\"connectionProvider\"): validate.text,\n validate.optional(\"netConnectionUrl\"): validate.text,\n validate.optional(\"bitrates\"): [{\n \"label\": validate.text,\n \"url\": validate.text,\n \"provider\": validate.text\n }]\n }],\n \"plugins\": validate.all(\n dict,\n validate.filter(lambda k, v: k in [\"rtmp\", \"rtmpHitbox\", \"hls\"]),\n {\n validate.text: {\n validate.optional(\"netConnectionUrl\"): validate.text,\n \"url\": validate.text\n }\n }\n )\n }\n)\n\n\nclass Hitbox(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def _get_quality(self, label):\n match = _quality_re.search(label)\n if match:\n return match.group(1)\n\n return \"live\"\n\n def _create_hls_streams(self, bitrate):\n url = bitrate[\"url\"]\n quality = self._get_quality(bitrate[\"label\"])\n\n if not url.startswith(\"http\"):\n url = HLS_PLAYLIST_BASE.format(url)\n\n if bitrate[\"label\"] == \"Auto\":\n try:\n streams = HLSStream.parse_variant_playlist(self.session, url)\n return streams.items()\n except IOError as err:\n self.logger.warning(\"Failed to extract HLS streams: {0}\", err)\n else:\n return quality, HLSStream(self.session, url)\n\n def _create_rtmp_stream(self, rtmp, swf_url, bitrate):\n quality = self._get_quality(bitrate[\"label\"])\n url = bitrate[\"url\"]\n stream = RTMPStream(self.session, {\n \"rtmp\": rtmp,\n \"pageUrl\": self.url,\n \"playpath\": url,\n \"swfVfy\": swf_url,\n \"live\": True\n })\n\n return quality, stream\n\n def _get_live_streams(self, player):\n mappers = []\n swf_url = SWF_URL\n for playlist in player.get(\"playlist\", []):\n bitrates = playlist.get(\"bitrates\")\n provider = playlist.get(\"connectionProvider\")\n rtmp = None\n\n if bitrates:\n rtmp = playlist.get(\"netConnectionUrl\")\n elif provider and provider in player[\"plugins\"]:\n provider = player[\"plugins\"][provider]\n swf_name = provider[\"url\"]\n 
swf_url = SWF_BASE + swf_name\n rtmp = provider[\"netConnectionUrl\"]\n bitrates = player[\"clip\"][\"bitrates\"]\n else:\n continue\n\n mapper = StreamMapper(\n cmp=lambda provider, bitrate: bitrate[\"provider\"].startswith(provider)\n )\n mapper.map(\"hls\", self._create_hls_streams)\n mapper.map(\"rtmp\", self._create_rtmp_stream, rtmp, swf_url)\n mappers.append(mapper(bitrates))\n\n return chain.from_iterable(mappers)\n\n def _create_video_stream(self, cls, base_url, bitrate):\n url = absolute_url(base_url, bitrate[\"url\"])\n if bitrate[\"label\"].lower() == \"auto\":\n try:\n return cls.parse_variant_playlist(self.session, url).items()\n except IOError as err:\n self.logger.warning(\"Failed to extract HLS streams: {0}\", err)\n return\n\n quality = self._get_quality(bitrate[\"label\"])\n return quality, cls(self.session, url)\n\n def _get_video_streams(self, player):\n base_url = player[\"clip\"][\"baseUrl\"] or VOD_BASE_URL\n mapper = StreamMapper(\n cmp=lambda ext, bitrate: urlparse(bitrate[\"url\"]).path.endswith(ext)\n )\n mapper.map(\".m3u8\", self._create_video_stream, HLSStream, base_url)\n mapper.map(\".mp4\", self._create_video_stream, HTTPStream, base_url)\n mapper.map(\".flv\", self._create_video_stream, HTTPStream, base_url)\n\n return mapper(player[\"clip\"][\"bitrates\"])\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n if not match:\n return\n\n channel, media_id = match.group(\"channel\", \"media_id\")\n self.logger.debug(\"Matched URL: channel={0}, media_id={1}\".format(channel, media_id))\n if not media_id:\n res = http.get(LIVE_API.format(channel))\n livestream = http.json(res, schema=_live_schema)\n if livestream[\"media_hosted_media\"]:\n hosted = _live_schema.validate(livestream[\"media_hosted_media\"])\n self.logger.info(\"{0} is hosting {1}\", livestream[\"media_user_name\"], hosted[\"media_user_name\"])\n livestream = hosted\n\n if not livestream[\"media_is_live\"]:\n return\n\n media_id = livestream[\"media_id\"]\n media_type = \"live\"\n else:\n media_type = \"video\"\n\n res = http.get(PLAYER_API.format(media_type, media_id))\n player = http.json(res, schema=_player_schema)\n\n if media_type == \"live\":\n return self._get_live_streams(player)\n else:\n return self._get_video_streams(player)\n\n\n__plugin__ = Hitbox\n", "path": "src/streamlink/plugins/hitbox.py"}]}
| 3,078 | 159 |
gh_patches_debug_29324
|
rasdani/github-patches
|
git_diff
|
dask__distributed-228
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No such file or directory "bokeh" - in release 1.9.2 (conda install)
Get this error in my logs:
```
scheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786
scheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI
scheduler_1 | Traceback (most recent call last):
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 950, in __init__
scheduler_1 | restore_signals, start_new_session)
scheduler_1 | File "/opt/conda/envs/fire/lib/python3.5/subprocess.py", line 1544, in _execute_child
scheduler_1 | raise child_exception_type(errno_num, err_msg)
scheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'
scheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler
scheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700
scheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700
```
</issue>
<code>
[start of distributed/cli/dscheduler.py]
1 from __future__ import print_function, division, absolute_import
2
3 import logging
4 import os
5 import socket
6 import subprocess
7 from sys import argv, exit
8 from time import sleep
9
10 import click
11
12 import distributed
13 from distributed import Scheduler
14 from distributed.utils import get_ip
15 from distributed.http import HTTPScheduler
16 from distributed.cli.utils import check_python_3
17 from tornado.ioloop import IOLoop
18
19 logger = logging.getLogger('distributed.scheduler')
20
21 ip = get_ip()
22
23 import signal
24
25 bokeh_proc = [False]
26
27
28 def handle_signal(sig, frame):
29 if bokeh_proc[0]:
30 bokeh_proc[0].terminate()
31 IOLoop.instance().add_callback(IOLoop.instance().stop)
32
33 signal.signal(signal.SIGINT, handle_signal)
34 signal.signal(signal.SIGTERM, handle_signal)
35
36
37 @click.command()
38 @click.argument('center', type=str, default='')
39 @click.option('--port', type=int, default=8786, help="Serving port")
40 @click.option('--http-port', type=int, default=9786, help="HTTP port")
41 @click.option('--bokeh-port', type=int, default=8787, help="HTTP port")
42 @click.option('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,
43 required=False, help="Launch Bokeh Web UI")
44 @click.option('--host', type=str, default=ip,
45 help="Serving host defaults to %s" % ip)
46 @click.option('--show/--no-show', default=False, help="Show web UI")
47 def main(center, host, port, http_port, bokeh_port, show, _bokeh):
48 ip = socket.gethostbyname(host)
49 loop = IOLoop.current()
50 scheduler = Scheduler(center, ip=ip,
51 services={('http', http_port): HTTPScheduler})
52 if center:
53 loop.run_sync(scheduler.sync_center)
54 scheduler.start(port)
55
56 if _bokeh:
57 try:
58 import bokeh
59 import distributed.bokeh
60 hosts = ['%s:%d' % (h, bokeh_port) for h in
61 ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]
62 dirname = os.path.dirname(distributed.__file__)
63 paths = [os.path.join(dirname, 'bokeh', name)
64 for name in ['status', 'tasks']]
65 args = (['bokeh', 'serve'] + paths +
66 ['--log-level', 'warning',
67 '--check-unused-sessions=50',
68 '--unused-session-lifetime=1',
69 '--port', str(bokeh_port)] +
70 sum([['--host', host] for host in hosts], []))
71 if show:
72 args.append('--show')
73 bokeh_proc[0] = subprocess.Popen(args)
74
75 logger.info(" Start Bokeh UI at: http://%s:%d/status/"
76 % (ip, bokeh_port))
77 except ImportError:
78 logger.info("Please install Bokeh to get Web UI")
79 except Exception as e:
80 logger.warn("Could not start Bokeh web UI", exc_info=True)
81
82 loop.start()
83 loop.close()
84 scheduler.stop()
85 bokeh_proc[0].terminate()
86
87 logger.info("End scheduler at %s:%d", ip, port)
88
89
90 def go():
91 check_python_3()
92 main()
93
94
95 if __name__ == '__main__':
96 go()
97
[end of distributed/cli/dscheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py
--- a/distributed/cli/dscheduler.py
+++ b/distributed/cli/dscheduler.py
@@ -1,9 +1,9 @@
from __future__ import print_function, division, absolute_import
import logging
+import multiprocessing
import os
import socket
-import subprocess
from sys import argv, exit
from time import sleep
@@ -22,12 +22,8 @@
import signal
-bokeh_proc = [False]
-
def handle_signal(sig, frame):
- if bokeh_proc[0]:
- bokeh_proc[0].terminate()
IOLoop.instance().add_callback(IOLoop.instance().stop)
signal.signal(signal.SIGINT, handle_signal)
@@ -70,7 +66,10 @@
sum([['--host', host] for host in hosts], []))
if show:
args.append('--show')
- bokeh_proc[0] = subprocess.Popen(args)
+ from bokeh.command.bootstrap import main
+ proc = multiprocessing.Process(target=main, args=(args,))
+ proc.daemon = True
+ proc.start()
logger.info(" Start Bokeh UI at: http://%s:%d/status/"
% (ip, bokeh_port))
@@ -82,7 +81,7 @@
loop.start()
loop.close()
scheduler.stop()
- bokeh_proc[0].terminate()
+ proc.terminate()
logger.info("End scheduler at %s:%d", ip, port)
|
{"golden_diff": "diff --git a/distributed/cli/dscheduler.py b/distributed/cli/dscheduler.py\n--- a/distributed/cli/dscheduler.py\n+++ b/distributed/cli/dscheduler.py\n@@ -1,9 +1,9 @@\n from __future__ import print_function, division, absolute_import\n \n import logging\n+import multiprocessing\n import os\n import socket\n-import subprocess\n from sys import argv, exit\n from time import sleep\n \n@@ -22,12 +22,8 @@\n \n import signal\n \n-bokeh_proc = [False]\n-\n \n def handle_signal(sig, frame):\n- if bokeh_proc[0]:\n- bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n \n signal.signal(signal.SIGINT, handle_signal)\n@@ -70,7 +66,10 @@\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n- bokeh_proc[0] = subprocess.Popen(args)\n+ from bokeh.command.bootstrap import main\n+ proc = multiprocessing.Process(target=main, args=(args,))\n+ proc.daemon = True\n+ proc.start()\n \n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n@@ -82,7 +81,7 @@\n loop.start()\n loop.close()\n scheduler.stop()\n- bokeh_proc[0].terminate()\n+ proc.terminate()\n \n logger.info(\"End scheduler at %s:%d\", ip, port)\n", "issue": "No such file or directory \"bokeh\" - in release 1.9.2 (conda install)\nGet this error in my logs:\n\n```\nscheduler_1 | distributed.scheduler - INFO - http at: 172.17.0.4:9786\nscheduler_1 | distributed.scheduler - WARNING - Could not start Bokeh web UI\nscheduler_1 | Traceback (most recent call last):\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/site-packages/distributed/cli/dscheduler.py\", line scheduler_1 | bokeh_proc[0] = subprocess.Popen(args)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 950, in __init__\nscheduler_1 | restore_signals, start_new_session)\nscheduler_1 | File \"/opt/conda/envs/fire/lib/python3.5/subprocess.py\", line 1544, in _execute_child\nscheduler_1 | raise child_exception_type(errno_num, err_msg)\nscheduler_1 | FileNotFoundError: [Errno 2] No such file or directory: 'bokeh'\nscheduler_1 | distributed.core - INFO - Connection from 172.17.0.6:60119 to Scheduler\nscheduler_1 | distributed.scheduler - INFO - Register 172.17.0.6:5700\nscheduler_1 | distributed.scheduler - INFO - Starting worker compute stream, 172.17.0.6:5700\n```\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport logging\nimport os\nimport socket\nimport subprocess\nfrom sys import argv, exit\nfrom time import sleep\n\nimport click\n\nimport distributed\nfrom distributed import Scheduler\nfrom distributed.utils import get_ip\nfrom distributed.http import HTTPScheduler\nfrom distributed.cli.utils import check_python_3\nfrom tornado.ioloop import IOLoop\n\nlogger = logging.getLogger('distributed.scheduler')\n\nip = get_ip()\n\nimport signal\n\nbokeh_proc = [False]\n\n\ndef handle_signal(sig, frame):\n if bokeh_proc[0]:\n bokeh_proc[0].terminate()\n IOLoop.instance().add_callback(IOLoop.instance().stop)\n\nsignal.signal(signal.SIGINT, handle_signal)\nsignal.signal(signal.SIGTERM, handle_signal)\n\n\[email protected]()\[email protected]('center', type=str, default='')\[email protected]('--port', type=int, default=8786, help=\"Serving port\")\[email protected]('--http-port', type=int, default=9786, help=\"HTTP port\")\[email protected]('--bokeh-port', type=int, default=8787, help=\"HTTP port\")\[email protected]('--bokeh/--no-bokeh', '_bokeh', default=True, show_default=True,\n required=False, 
help=\"Launch Bokeh Web UI\")\[email protected]('--host', type=str, default=ip,\n help=\"Serving host defaults to %s\" % ip)\[email protected]('--show/--no-show', default=False, help=\"Show web UI\")\ndef main(center, host, port, http_port, bokeh_port, show, _bokeh):\n ip = socket.gethostbyname(host)\n loop = IOLoop.current()\n scheduler = Scheduler(center, ip=ip,\n services={('http', http_port): HTTPScheduler})\n if center:\n loop.run_sync(scheduler.sync_center)\n scheduler.start(port)\n\n if _bokeh:\n try:\n import bokeh\n import distributed.bokeh\n hosts = ['%s:%d' % (h, bokeh_port) for h in\n ['localhost', '127.0.0.1', ip, socket.gethostname(), host]]\n dirname = os.path.dirname(distributed.__file__)\n paths = [os.path.join(dirname, 'bokeh', name)\n for name in ['status', 'tasks']]\n args = (['bokeh', 'serve'] + paths +\n ['--log-level', 'warning',\n '--check-unused-sessions=50',\n '--unused-session-lifetime=1',\n '--port', str(bokeh_port)] +\n sum([['--host', host] for host in hosts], []))\n if show:\n args.append('--show')\n bokeh_proc[0] = subprocess.Popen(args)\n\n logger.info(\" Start Bokeh UI at: http://%s:%d/status/\"\n % (ip, bokeh_port))\n except ImportError:\n logger.info(\"Please install Bokeh to get Web UI\")\n except Exception as e:\n logger.warn(\"Could not start Bokeh web UI\", exc_info=True)\n\n loop.start()\n loop.close()\n scheduler.stop()\n bokeh_proc[0].terminate()\n\n logger.info(\"End scheduler at %s:%d\", ip, port)\n\n\ndef go():\n check_python_3()\n main()\n\n\nif __name__ == '__main__':\n go()\n", "path": "distributed/cli/dscheduler.py"}]}
| 1,799 | 337 |
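The fix above avoids shelling out to a `bokeh` executable (the source of the FileNotFoundError) by running the Bokeh server in-process via `bokeh.command.bootstrap.main`. A minimal sketch of that pattern; the application paths and the port are placeholder values:

```python
# Sketch: run "bokeh serve" in a daemon process instead of a subprocess,
# so no `bokeh` console script needs to exist on PATH.
import multiprocessing


def start_bokeh(paths, port=8787):
    from bokeh.command.bootstrap import main  # same entry point the `bokeh` CLI uses
    args = ['bokeh', 'serve'] + list(paths) + ['--port', str(port)]
    proc = multiprocessing.Process(target=main, args=(args,))
    proc.daemon = True   # terminates together with the parent scheduler process
    proc.start()
    return proc          # caller keeps the handle so it can call proc.terminate()
```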
gh_patches_debug_12036
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-354
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix TFExample codec
</issue>
<code>
[start of record_codec/__init__.py]
[end of record_codec/__init__.py]
[start of record_codec/tf_example_codec.py]
1 from enum import Enum
2 import tensorflow as tf
3
4
5 class TFExampleCodec(object):
6 def __init__(self, feature_columns):
7 self._f_name2type = {}
8 self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)
9 for f_col in feature_columns:
10 self._f_name2type[f_col.key] = f_col.dtype
11
12 def encode(self, example):
13 f_dict = {}
14 for f_name, f_value in example:
15 f_type = self._f_name2type[f_name]
16 if f_type == tf.string:
17 f_dict[f_name] = tf.train.Feature(
18 bytes_list=tf.train.BytesList(value=f_value)
19 )
20 elif f_type == tf.float32:
21 f_dict[f_name] = tf.train.Feature(
22 float_list=tf.train.FloatList(value=f_value.flatten())
23 )
24 elif f_type == tf.int64:
25 f_dict[f_name] = tf.train.Feature(
26 int64_list=tf.train.Int64List(value=f_value.flatten())
27 )
28 else:
29 raise ValueError(
30 "not supported tensorflow data type: " + str(f_type)
31 )
32
33 example = tf.train.Example(features=tf.train.Features(feature=f_dict))
34 return example.SerializeToString()
35
36 def decode(self, raw):
37 return tf.parse_single_example(raw, self._example_spec)
38
[end of record_codec/tf_example_codec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/record_codec/__init__.py b/record_codec/__init__.py
--- a/record_codec/__init__.py
+++ b/record_codec/__init__.py
@@ -0,0 +1,3 @@
+from record_codec.tf_example_codec import TFExampleCodec
+
+__all__=[TFExampleCodec]
diff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py
--- a/record_codec/tf_example_codec.py
+++ b/record_codec/tf_example_codec.py
@@ -4,10 +4,8 @@
class TFExampleCodec(object):
def __init__(self, feature_columns):
- self._f_name2type = {}
self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)
- for f_col in feature_columns:
- self._f_name2type[f_col.key] = f_col.dtype
+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}
def encode(self, example):
f_dict = {}
|
{"golden_diff": "diff --git a/record_codec/__init__.py b/record_codec/__init__.py\n--- a/record_codec/__init__.py\n+++ b/record_codec/__init__.py\n@@ -0,0 +1,3 @@\n+from record_codec.tf_example_codec import TFExampleCodec\n+\n+__all__=[TFExampleCodec]\ndiff --git a/record_codec/tf_example_codec.py b/record_codec/tf_example_codec.py\n--- a/record_codec/tf_example_codec.py\n+++ b/record_codec/tf_example_codec.py\n@@ -4,10 +4,8 @@\n \n class TFExampleCodec(object):\n def __init__(self, feature_columns):\n- self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n- for f_col in feature_columns:\n- self._f_name2type[f_col.key] = f_col.dtype\n+ self._f_name2type = {f_col.key: f_col.dtype for f_col in feature_columns}\n \n def encode(self, example):\n f_dict = {}\n", "issue": "Fix TFExample codec\n\n", "before_files": [{"content": "", "path": "record_codec/__init__.py"}, {"content": "from enum import Enum\nimport tensorflow as tf\n\n\nclass TFExampleCodec(object):\n def __init__(self, feature_columns):\n self._f_name2type = {}\n self._example_spec = tf.feature_column.make_parse_example_spec(feature_columns)\n for f_col in feature_columns:\n self._f_name2type[f_col.key] = f_col.dtype\n\n def encode(self, example):\n f_dict = {}\n for f_name, f_value in example:\n f_type = self._f_name2type[f_name]\n if f_type == tf.string:\n f_dict[f_name] = tf.train.Feature(\n bytes_list=tf.train.BytesList(value=f_value)\n )\n elif f_type == tf.float32:\n f_dict[f_name] = tf.train.Feature(\n float_list=tf.train.FloatList(value=f_value.flatten())\n )\n elif f_type == tf.int64:\n f_dict[f_name] = tf.train.Feature(\n int64_list=tf.train.Int64List(value=f_value.flatten())\n )\n else:\n raise ValueError(\n \"not supported tensorflow data type: \" + str(f_type)\n )\n\n example = tf.train.Example(features=tf.train.Features(feature=f_dict))\n return example.SerializeToString()\n\n def decode(self, raw):\n return tf.parse_single_example(raw, self._example_spec)\n", "path": "record_codec/tf_example_codec.py"}]}
| 914 | 233 |
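For context on how the patched package is meant to be consumed, here is a hypothetical round trip with `TFExampleCodec`; the single `age` feature column is made up, and the TF 1.x-style parsing matches the repository code:

```python
# Hypothetical usage of TFExampleCodec; the "age" feature is illustrative only.
import numpy as np
import tensorflow as tf

from record_codec import TFExampleCodec  # exported by the patched __init__.py

columns = [tf.feature_column.numeric_column("age", dtype=tf.int64)]
codec = TFExampleCodec(columns)

raw = codec.encode([("age", np.array([42]))])  # serialized tf.train.Example bytes
parsed = codec.decode(raw)                     # dict of tensors keyed by feature name
```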
gh_patches_debug_18359
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-6361
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`pants_requirement` should include the `; python_version<'3'` environment marker
Until pants can run on python 3, the `pants_requirement` should add an environment marker to restrict the python environment it applies to. After pants only supports `>=3.5`, a change should be made to the environment marker.
</issue>
<code>
[start of src/python/pants/backend/python/pants_requirement.py]
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import absolute_import, division, print_function, unicode_literals
6
7 import os
8 from builtins import object
9
10 from pants.backend.python.python_requirement import PythonRequirement
11 from pants.base.build_environment import pants_version
12
13
14 class PantsRequirement(object):
15 """Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.
16
17 This requirement is useful for custom plugin authors who want to build and test their plugin with
18 pants itself. Using the resulting target as a dependency of their plugin target ensures the
19 dependency stays true to the surrounding repo's version of pants.
20
21 NB: The requirement generated is for official pants releases on pypi; so may not be appropriate
22 for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.
23
24 :API: public
25 """
26
27 def __init__(self, parse_context):
28 self._parse_context = parse_context
29
30 def __call__(self, name=None):
31 """
32 :param string name: The name to use for the target, defaults to the parent dir name.
33 """
34 name = name or os.path.basename(self._parse_context.rel_path)
35 requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))
36 self._parse_context.create_object('python_requirement_library',
37 name=name,
38 requirements=[requirement])
39
[end of src/python/pants/backend/python/pants_requirement.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py
--- a/src/python/pants/backend/python/pants_requirement.py
+++ b/src/python/pants/backend/python/pants_requirement.py
@@ -32,7 +32,14 @@
:param string name: The name to use for the target, defaults to the parent dir name.
"""
name = name or os.path.basename(self._parse_context.rel_path)
- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))
+
+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of
+ # https://github.com/pantsbuild/pants/issues/6062
+ env_marker = "python_version>='2.7' and python_version<'3'"
+
+ requirement = PythonRequirement(requirement="pantsbuild.pants=={version} ; {env_marker}"
+ .format(version=pants_version(), env_marker=env_marker))
+
self._parse_context.create_object('python_requirement_library',
name=name,
requirements=[requirement])
|
{"golden_diff": "diff --git a/src/python/pants/backend/python/pants_requirement.py b/src/python/pants/backend/python/pants_requirement.py\n--- a/src/python/pants/backend/python/pants_requirement.py\n+++ b/src/python/pants/backend/python/pants_requirement.py\n@@ -32,7 +32,14 @@\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n- requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n+\n+ # TODO(John Sirois): Modify to constraint to >=3.5,<4 as part of\n+ # https://github.com/pantsbuild/pants/issues/6062\n+ env_marker = \"python_version>='2.7' and python_version<'3'\"\n+\n+ requirement = PythonRequirement(requirement=\"pantsbuild.pants=={version} ; {env_marker}\"\n+ .format(version=pants_version(), env_marker=env_marker))\n+\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "issue": "`pants_requirement` should include the `; python_version<'3'` environment marker\nUntil pants can run on python 3, the `pants_requirement` should add an environment marker to restict the python environment it applies to. After pants only supports `>=3.5` a change should be made to the environment marker.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom builtins import object\n\nfrom pants.backend.python.python_requirement import PythonRequirement\nfrom pants.base.build_environment import pants_version\n\n\nclass PantsRequirement(object):\n \"\"\"Exports a `python_requirement_library` pointing at the active pants' corresponding sdist.\n\n This requirement is useful for custom plugin authors who want to build and test their plugin with\n pants itself. Using the resulting target as a dependency of their plugin target ensures the\n dependency stays true to the surrounding repo's version of pants.\n\n NB: The requirement generated is for official pants releases on pypi; so may not be appropriate\n for use in a repo that tracks `pantsbuild/pants` or otherwise uses custom pants sdists.\n\n :API: public\n \"\"\"\n\n def __init__(self, parse_context):\n self._parse_context = parse_context\n\n def __call__(self, name=None):\n \"\"\"\n :param string name: The name to use for the target, defaults to the parent dir name.\n \"\"\"\n name = name or os.path.basename(self._parse_context.rel_path)\n requirement = PythonRequirement(requirement='pantsbuild.pants=={}'.format(pants_version()))\n self._parse_context.create_object('python_requirement_library',\n name=name,\n requirements=[requirement])\n", "path": "src/python/pants/backend/python/pants_requirement.py"}]}
| 1,015 | 253 |
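The patched target generates a pinned requirement with a PEP 508 environment marker so pip only installs it on Python 2.7; a sketch of the resulting requirement string, with a made-up version number:

```python
# Sketch of the requirement string produced by the patched pants_requirement;
# "1.9.0" stands in for whatever pants_version() returns.
env_marker = "python_version>='2.7' and python_version<'3'"
requirement = "pantsbuild.pants=={version} ; {env_marker}".format(
    version="1.9.0", env_marker=env_marker)
print(requirement)
# pantsbuild.pants==1.9.0 ; python_version>='2.7' and python_version<'3'
```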
gh_patches_debug_19531
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-733
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adapt page usage to the new refarm pages interface
At https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.
Adapt site code to the new interface.
</issue>
<code>
[start of shopelectro/context.py]
1 from functools import partial
2
3 from django.conf import settings
4 from django.shortcuts import get_object_or_404
5
6 from catalog import newcontext
7 from images.models import Image
8 from pages import models as pages_models, newcontext as pages_newcontext
9 from shopelectro import models, request_data
10
11
12 # @todo #255:60m Improve `SortingOption` interface.
13 # Now it's located in context and this is wrong.
14 # Maybe refactor `CATEGORY_SORTING_OPTIONS`.
15 class SortingOption:
16 def __init__(self, index=0):
17 options = settings.CATEGORY_SORTING_OPTIONS[index]
18 self.label = options['label']
19 self.field = options['field']
20 self.direction = options['direction']
21
22 @property
23 def directed_field(self):
24 return self.direction + self.field
25
26
27 class Page(newcontext.Context):
28
29 def __init__(self, page, tags: newcontext.Tags):
30 self._page = page
31 self._tags = tags
32
33 def context(self):
34 def template_context(page, tag_titles, tags):
35 return {
36 'page': page,
37 'tag_titles': tag_titles,
38 'tags': tags,
39 }
40
41 tags_qs = self._tags.qs()
42 self._page.get_template_render_context = partial(
43 template_context, self._page, tags_qs.as_title(), tags_qs
44 )
45
46 return {
47 'page': self._page,
48 }
49
50
51 class Catalog(newcontext.Context):
52
53 def __init__(self, request_data_: request_data.Catalog):
54 self.request_data = request_data_
55
56 @property
57 def page(self):
58 return get_object_or_404(
59 pages_models.ModelPage,
60 slug=self.request_data.slug
61 )
62
63 @property
64 def category(self):
65 return self.page.model
66
67 def context(self) -> dict:
68 all_tags = newcontext.Tags(models.Tag.objects.all())
69
70 selected_tags = newcontext.tags.ParsedTags(
71 tags=all_tags,
72 raw_tags=self.request_data.tags,
73 )
74 if self.request_data.tags:
75 selected_tags = newcontext.tags.Checked404Tags(selected_tags)
76
77 products = (
78 models.Product.objects.active()
79 .filter_descendants(self.category)
80 .tagged_or_all(selected_tags.qs())
81 .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)
82 )
83
84 """
85 We have to use separated variable for pagination.
86
87 Because paginated QuerySet can not used as QuerySet.
88 It's not the most strong place of Django ORM, of course.
89 :return: ProductsContext with paginated QuerySet inside
90 """
91 # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.
92 # Rename Checked404Tags to ExistingOr404.
93 paginated = newcontext.products.PaginatedProducts(
94 products=products,
95 url=self.request_data.request.path,
96 page_number=self.request_data.pagination_page_number,
97 per_page=self.request_data.pagination_per_page,
98 )
99
100 images = newcontext.products.ProductImages(paginated.products, Image.objects.all())
101 brands = newcontext.products.ProductBrands(paginated.products, all_tags)
102 grouped_tags = newcontext.tags.GroupedTags(
103 tags=newcontext.tags.TagsByProducts(all_tags, products)
104 )
105 page = Page(self.page, selected_tags)
106 category = newcontext.category.Context(self.category)
107 params = {
108 'view_type': self.request_data.get_view_type(),
109 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),
110 'limits': settings.CATEGORY_STEP_MULTIPLIERS,
111 'sort': self.request_data.sorting_index,
112 }
113
114 return {
115 **params,
116 **pages_newcontext.Contexts([
117 page, category, paginated,
118 images, brands, grouped_tags
119 ]).context()
120 }
121
[end of shopelectro/context.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shopelectro/context.py b/shopelectro/context.py
--- a/shopelectro/context.py
+++ b/shopelectro/context.py
@@ -1,5 +1,3 @@
-from functools import partial
-
from django.conf import settings
from django.shortcuts import get_object_or_404
@@ -31,18 +29,15 @@
self._tags = tags
def context(self):
- def template_context(page, tag_titles, tags):
- return {
- 'page': page,
- 'tag_titles': tag_titles,
- 'tags': tags,
- }
-
tags_qs = self._tags.qs()
- self._page.get_template_render_context = partial(
- template_context, self._page, tags_qs.as_title(), tags_qs
- )
-
+ # use dirty patch here, because it's the most simple method
+ # to make shared templates work.
+ # For example `templates/layout/metadata.html`.
+ self._page.display = {
+ 'page': self._page,
+ 'tag_titles': tags_qs.as_title(),
+ 'tags': tags_qs,
+ }
return {
'page': self._page,
}
|
{"golden_diff": "diff --git a/shopelectro/context.py b/shopelectro/context.py\n--- a/shopelectro/context.py\n+++ b/shopelectro/context.py\n@@ -1,5 +1,3 @@\n-from functools import partial\n-\n from django.conf import settings\n from django.shortcuts import get_object_or_404\n \n@@ -31,18 +29,15 @@\n self._tags = tags\n \n def context(self):\n- def template_context(page, tag_titles, tags):\n- return {\n- 'page': page,\n- 'tag_titles': tag_titles,\n- 'tags': tags,\n- }\n-\n tags_qs = self._tags.qs()\n- self._page.get_template_render_context = partial(\n- template_context, self._page, tags_qs.as_title(), tags_qs\n- )\n-\n+ # use dirty patch here, because it's the most simple method\n+ # to make shared templates work.\n+ # For example `templates/layout/metadata.html`.\n+ self._page.display = {\n+ 'page': self._page,\n+ 'tag_titles': tags_qs.as_title(),\n+ 'tags': tags_qs,\n+ }\n return {\n 'page': self._page,\n }\n", "issue": "Adapt page usage to the new refarm pages interface\nAt https://github.com/fidals/refarm-site/issues/240 we changed page templates and page context interfaces.\r\n\r\nAdapt site code to the new interface.\r\n\n", "before_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\n\nfrom catalog import newcontext\nfrom images.models import Image\nfrom pages import models as pages_models, newcontext as pages_newcontext\nfrom shopelectro import models, request_data\n\n\n# @todo #255:60m Improve `SortingOption` interface.\n# Now it's located in context and this is wrong.\n# Maybe refactor `CATEGORY_SORTING_OPTIONS`.\nclass SortingOption:\n def __init__(self, index=0):\n options = settings.CATEGORY_SORTING_OPTIONS[index]\n self.label = options['label']\n self.field = options['field']\n self.direction = options['direction']\n\n @property\n def directed_field(self):\n return self.direction + self.field\n\n\nclass Page(newcontext.Context):\n\n def __init__(self, page, tags: newcontext.Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n }\n\n\nclass Catalog(newcontext.Context):\n\n def __init__(self, request_data_: request_data.Catalog):\n self.request_data = request_data_\n\n @property\n def page(self):\n return get_object_or_404(\n pages_models.ModelPage,\n slug=self.request_data.slug\n )\n\n @property\n def category(self):\n return self.page.model\n\n def context(self) -> dict:\n all_tags = newcontext.Tags(models.Tag.objects.all())\n\n selected_tags = newcontext.tags.ParsedTags(\n tags=all_tags,\n raw_tags=self.request_data.tags,\n )\n if self.request_data.tags:\n selected_tags = newcontext.tags.Checked404Tags(selected_tags)\n\n products = (\n models.Product.objects.active()\n .filter_descendants(self.category)\n .tagged_or_all(selected_tags.qs())\n .order_by(SortingOption(index=self.request_data.sorting_index).directed_field)\n )\n\n \"\"\"\n We have to use separated variable for pagination.\n\n Because paginated QuerySet can not used as QuerySet.\n It's not the most strong place of Django ORM, of course.\n :return: ProductsContext with paginated QuerySet inside\n \"\"\"\n # @todo #683:30m Remove *Tags and *Products suffixes from catalog.newcontext classes.\n # Rename Checked404Tags 
to ExistingOr404.\n paginated = newcontext.products.PaginatedProducts(\n products=products,\n url=self.request_data.request.path,\n page_number=self.request_data.pagination_page_number,\n per_page=self.request_data.pagination_per_page,\n )\n\n images = newcontext.products.ProductImages(paginated.products, Image.objects.all())\n brands = newcontext.products.ProductBrands(paginated.products, all_tags)\n grouped_tags = newcontext.tags.GroupedTags(\n tags=newcontext.tags.TagsByProducts(all_tags, products)\n )\n page = Page(self.page, selected_tags)\n category = newcontext.category.Context(self.category)\n params = {\n 'view_type': self.request_data.get_view_type(),\n 'sorting_options': settings.CATEGORY_SORTING_OPTIONS.values(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': self.request_data.sorting_index,\n }\n\n return {\n **params,\n **pages_newcontext.Contexts([\n page, category, paginated,\n images, brands, grouped_tags\n ]).context()\n }\n", "path": "shopelectro/context.py"}]}
| 1,674 | 271 |
gh_patches_debug_41380
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-462
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Jinja2 async integration
Since https://github.com/pallets/jinja/issues/765, Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).
We need to update to only instrument at the point the `render_async` method becomes available.
</issue>
<code>
[start of src/scout_apm/instruments/jinja2.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import logging
5
6 import wrapt
7
8 from scout_apm.core.tracked_request import TrackedRequest
9
10 try:
11 from jinja2 import Template
12 except ImportError: # pragma: no cover
13 Template = None
14
15 # The async_ module can only be shipped on Python 3.6+
16 try:
17 from scout_apm.async_.instruments.jinja2 import wrapped_render_async
18 except ImportError:
19 wrapped_render_async = None
20
21
22 logger = logging.getLogger(__name__)
23
24
25 have_patched_template_render = False
26 have_patched_template_render_async = False
27
28
29 def ensure_installed():
30 global have_patched_template_render
31 global have_patched_template_render_async
32
33 logger.info("Ensuring Jinja2 instrumentation is installed.")
34
35 if Template is None:
36 logger.info("Unable to import jinja2.Template")
37 return
38
39 if not have_patched_template_render:
40 try:
41 Template.render = wrapped_render(Template.render)
42 except Exception as exc:
43 logger.warning(
44 "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
45 )
46 else:
47 have_patched_template_render = True
48
49 if not have_patched_template_render_async and wrapped_render_async is not None:
50 try:
51 Template.render_async = wrapped_render_async(Template.render_async)
52 except Exception as exc:
53 logger.warning(
54 "Unable to instrument jinja2.Template.render_async: %r",
55 exc,
56 exc_info=exc,
57 )
58 else:
59 have_patched_template_render_async = True
60
61
62 @wrapt.decorator
63 def wrapped_render(wrapped, instance, args, kwargs):
64 tracked_request = TrackedRequest.instance()
65 span = tracked_request.start_span(operation="Template/Render")
66 span.tag("name", instance.name)
67 try:
68 return wrapped(*args, **kwargs)
69 finally:
70 tracked_request.stop_span()
71
[end of src/scout_apm/instruments/jinja2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py
--- a/src/scout_apm/instruments/jinja2.py
+++ b/src/scout_apm/instruments/jinja2.py
@@ -2,11 +2,17 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
+import sys
import wrapt
from scout_apm.core.tracked_request import TrackedRequest
+try:
+ from jinja2 import Environment
+except ImportError: # pragma: no cover
+ Environment = None
+
try:
from jinja2 import Template
except ImportError: # pragma: no cover
@@ -22,13 +28,14 @@
logger = logging.getLogger(__name__)
+have_patched_environment_init = False
have_patched_template_render = False
have_patched_template_render_async = False
def ensure_installed():
+ global have_patched_environment_init
global have_patched_template_render
- global have_patched_template_render_async
logger.info("Ensuring Jinja2 instrumentation is installed.")
@@ -36,27 +43,27 @@
logger.info("Unable to import jinja2.Template")
return
- if not have_patched_template_render:
+ if not have_patched_environment_init:
try:
- Template.render = wrapped_render(Template.render)
+ Environment.__init__ = wrapped_environment_init(Environment.__init__)
except Exception as exc:
logger.warning(
- "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
+ "Unable to instrument jinja2.Environment.__init__: %r",
+ exc,
+ exc_info=exc,
)
else:
- have_patched_template_render = True
+ have_patched_environment_init = True
- if not have_patched_template_render_async and wrapped_render_async is not None:
+ if not have_patched_template_render:
try:
- Template.render_async = wrapped_render_async(Template.render_async)
+ Template.render = wrapped_render(Template.render)
except Exception as exc:
logger.warning(
- "Unable to instrument jinja2.Template.render_async: %r",
- exc,
- exc_info=exc,
+ "Unable to instrument jinja2.Template.render: %r", exc, exc_info=exc
)
else:
- have_patched_template_render_async = True
+ have_patched_template_render = True
@wrapt.decorator
@@ -68,3 +75,33 @@
return wrapped(*args, **kwargs)
finally:
tracked_request.stop_span()
+
+
[email protected]
+def wrapped_environment_init(wrapped, instance, args, kwargs):
+ """
+ Delayed wrapping of render_async(), since Template won't have this method
+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is
+ done conditionally in Environment.__init__:
+ https://github.com/pallets/jinja/issues/765
+ """
+ global have_patched_template_render_async
+ result = wrapped(*args, **kwargs)
+
+ if (
+ wrapped_render_async is not None
+ and not have_patched_template_render_async
+ and "jinja2.asyncsupport" in sys.modules
+ ):
+ try:
+ Template.render_async = wrapped_render_async(Template.render_async)
+ except Exception as exc:
+ logger.warning(
+ "Unable to instrument jinja2.Template.render_async: %r",
+ exc,
+ exc_info=exc,
+ )
+ else:
+ have_patched_template_render_async = True
+
+ return result
|
{"golden_diff": "diff --git a/src/scout_apm/instruments/jinja2.py b/src/scout_apm/instruments/jinja2.py\n--- a/src/scout_apm/instruments/jinja2.py\n+++ b/src/scout_apm/instruments/jinja2.py\n@@ -2,11 +2,17 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import logging\n+import sys\n \n import wrapt\n \n from scout_apm.core.tracked_request import TrackedRequest\n \n+try:\n+ from jinja2 import Environment\n+except ImportError: # pragma: no cover\n+ Environment = None\n+\n try:\n from jinja2 import Template\n except ImportError: # pragma: no cover\n@@ -22,13 +28,14 @@\n logger = logging.getLogger(__name__)\n \n \n+have_patched_environment_init = False\n have_patched_template_render = False\n have_patched_template_render_async = False\n \n \n def ensure_installed():\n+ global have_patched_environment_init\n global have_patched_template_render\n- global have_patched_template_render_async\n \n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n \n@@ -36,27 +43,27 @@\n logger.info(\"Unable to import jinja2.Template\")\n return\n \n- if not have_patched_template_render:\n+ if not have_patched_environment_init:\n try:\n- Template.render = wrapped_render(Template.render)\n+ Environment.__init__ = wrapped_environment_init(Environment.__init__)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n+ \"Unable to instrument jinja2.Environment.__init__: %r\",\n+ exc,\n+ exc_info=exc,\n )\n else:\n- have_patched_template_render = True\n+ have_patched_environment_init = True\n \n- if not have_patched_template_render_async and wrapped_render_async is not None:\n+ if not have_patched_template_render:\n try:\n- Template.render_async = wrapped_render_async(Template.render_async)\n+ Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n- \"Unable to instrument jinja2.Template.render_async: %r\",\n- exc,\n- exc_info=exc,\n+ \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n- have_patched_template_render_async = True\n+ have_patched_template_render = True\n \n \n @wrapt.decorator\n@@ -68,3 +75,33 @@\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n+\n+\[email protected]\n+def wrapped_environment_init(wrapped, instance, args, kwargs):\n+ \"\"\"\n+ Delayed wrapping of render_async(), since Template won't have this method\n+ until after jinja2.asyncsupport is imported, which since Jinja2 2.11.0 is\n+ done conditionally in Environment.__init__:\n+ https://github.com/pallets/jinja/issues/765\n+ \"\"\"\n+ global have_patched_template_render_async\n+ result = wrapped(*args, **kwargs)\n+\n+ if (\n+ wrapped_render_async is not None\n+ and not have_patched_template_render_async\n+ and \"jinja2.asyncsupport\" in sys.modules\n+ ):\n+ try:\n+ Template.render_async = wrapped_render_async(Template.render_async)\n+ except Exception as exc:\n+ logger.warning(\n+ \"Unable to instrument jinja2.Template.render_async: %r\",\n+ exc,\n+ exc_info=exc,\n+ )\n+ else:\n+ have_patched_template_render_async = True\n+\n+ return result\n", "issue": "Fix Jinja2 async integration\nSince https://github.com/pallets/jinja/issues/765 , Jinja2 only lazily imports and adds the `render_async` function that we instrument (since #398).\r\n\r\nWe need to update to only instrument at the point the `render_async` method becomes available.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, 
division, print_function, unicode_literals\n\nimport logging\n\nimport wrapt\n\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from jinja2 import Template\nexcept ImportError: # pragma: no cover\n Template = None\n\n# The async_ module can only be shipped on Python 3.6+\ntry:\n from scout_apm.async_.instruments.jinja2 import wrapped_render_async\nexcept ImportError:\n wrapped_render_async = None\n\n\nlogger = logging.getLogger(__name__)\n\n\nhave_patched_template_render = False\nhave_patched_template_render_async = False\n\n\ndef ensure_installed():\n global have_patched_template_render\n global have_patched_template_render_async\n\n logger.info(\"Ensuring Jinja2 instrumentation is installed.\")\n\n if Template is None:\n logger.info(\"Unable to import jinja2.Template\")\n return\n\n if not have_patched_template_render:\n try:\n Template.render = wrapped_render(Template.render)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render: %r\", exc, exc_info=exc\n )\n else:\n have_patched_template_render = True\n\n if not have_patched_template_render_async and wrapped_render_async is not None:\n try:\n Template.render_async = wrapped_render_async(Template.render_async)\n except Exception as exc:\n logger.warning(\n \"Unable to instrument jinja2.Template.render_async: %r\",\n exc,\n exc_info=exc,\n )\n else:\n have_patched_template_render_async = True\n\n\[email protected]\ndef wrapped_render(wrapped, instance, args, kwargs):\n tracked_request = TrackedRequest.instance()\n span = tracked_request.start_span(operation=\"Template/Render\")\n span.tag(\"name\", instance.name)\n try:\n return wrapped(*args, **kwargs)\n finally:\n tracked_request.stop_span()\n", "path": "src/scout_apm/instruments/jinja2.py"}]}
| 1,175 | 836 |
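Condensed sketch of the deferred-patch pattern used in the diff above: `Template.render_async` only exists after `jinja2.asyncsupport` has been imported, which `Environment.__init__` does conditionally in Jinja2 2.11+, so the wrapper is applied lazily from a patched `__init__`. The pass-through `wrapped_render_async` below is a stand-in for the real span-tracking wrapper:

```python
# Sketch (Jinja2 2.x): patch Template.render_async only once jinja2.asyncsupport loads.
import sys

import wrapt
from jinja2 import Environment, Template

patched_async = False


@wrapt.decorator
def wrapped_render_async(wrapped, instance, args, kwargs):
    return wrapped(*args, **kwargs)  # stand-in for the real instrumentation


@wrapt.decorator
def wrapped_environment_init(wrapped, instance, args, kwargs):
    global patched_async
    result = wrapped(*args, **kwargs)
    if not patched_async and "jinja2.asyncsupport" in sys.modules:
        Template.render_async = wrapped_render_async(Template.render_async)
        patched_async = True
    return result


Environment.__init__ = wrapped_environment_init(Environment.__init__)
```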
gh_patches_debug_2580
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-4127
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Setuptools 69.0.0 breaks Astropy's setup
### setuptools version
setuptools==69.0.0
### Python version
3.12
### OS
Ubuntu
### Additional environment information
_No response_
### Description
About 15h ago, Astropy's CI started failing to build with
```
ImportError: cannot import name 'newer_group' from 'setuptools.dep_util'
```
This seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).
Nonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended?
### Expected behavior
a deprecation warning instead of a hard error?
### How to Reproduce
```shell
$ python -c "from setuptools.dep_util import newer_group"
```
### Output
```console
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)
```
</issue>
<code>
[start of setuptools/dep_util.py]
1 import warnings
2
3 from ._distutils import _modified
4
5
6 def __getattr__(name):
7 if name not in ['newer_pairwise_group']:
8 raise AttributeError(name)
9 warnings.warn(
10 "dep_util is Deprecated. Use functions from setuptools.modified instead.",
11 DeprecationWarning,
12 stacklevel=2,
13 )
14 return getattr(_modified, name)
15
[end of setuptools/dep_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py
--- a/setuptools/dep_util.py
+++ b/setuptools/dep_util.py
@@ -4,7 +4,7 @@
def __getattr__(name):
- if name not in ['newer_pairwise_group']:
+ if name not in ['newer_group', 'newer_pairwise_group']:
raise AttributeError(name)
warnings.warn(
"dep_util is Deprecated. Use functions from setuptools.modified instead.",
|
{"golden_diff": "diff --git a/setuptools/dep_util.py b/setuptools/dep_util.py\n--- a/setuptools/dep_util.py\n+++ b/setuptools/dep_util.py\n@@ -4,7 +4,7 @@\n \n \n def __getattr__(name):\n- if name not in ['newer_pairwise_group']:\n+ if name not in ['newer_group', 'newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n", "issue": "[BUG] Setuptools 69.0.0 breaks Astropy's setup\n### setuptools version\n\nsetuptools==69.0.0\n\n### Python version\n\n3.12\n\n### OS\n\nUbuntu\n\n### Additional environment information\n\n_No response_\n\n### Description\n\nAbout 15h ago, Astropy's CI started failing to build with\r\n```\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util'\r\n```\r\nThis seems to correspond to an [intentional change in setuptools 69](https://setuptools.pypa.io/en/latest/history.html#features).\r\nNonetheless, from reading the PR that introduced the change (https://github.com/pypa/setuptools/pull/4069), I'm not sure that this was supposed to break immediately. Was this intended ?\n\n### Expected behavior\n\na deprecation warning instead of a hard error ?\n\n### How to Reproduce\n\n```shell\r\n$ python -c \"from setuptools.dep_util import newer_group\"\r\n```\n\n### Output\n\n```console\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\nImportError: cannot import name 'newer_group' from 'setuptools.dep_util' (/private/tmp/venv/lib/python3.12/site-packages/setuptools/dep_util.py)\r\n```\r\n\n", "before_files": [{"content": "import warnings\n\nfrom ._distutils import _modified\n\n\ndef __getattr__(name):\n if name not in ['newer_pairwise_group']:\n raise AttributeError(name)\n warnings.warn(\n \"dep_util is Deprecated. Use functions from setuptools.modified instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return getattr(_modified, name)\n", "path": "setuptools/dep_util.py"}]}
| 913 | 107 |
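The shim above uses the PEP 562 module-level `__getattr__` hook (Python 3.7+); the regression was only that `'newer_group'` was missing from the allow-list. A generic sketch of the pattern, with hypothetical `mypkg.legacy` and `mypkg.modern` module names:

```python
# mypkg/legacy.py -- PEP 562 deprecation shim; mypkg.modern is a hypothetical
# module that now hosts the forwarded functions.
import warnings

from mypkg import modern

_FORWARDED = {'newer_group', 'newer_pairwise_group'}


def __getattr__(name):
    if name not in _FORWARDED:
        raise AttributeError(name)
    warnings.warn(
        "mypkg.legacy is deprecated; import from mypkg.modern instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return getattr(modern, name)
```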
gh_patches_debug_21137
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-1746
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
describe-stream vs describe-stream-summary on non-existent streams
**Problem**
For *non-existent* Kinesis streams:
* the command `awslocal kinesis describe-stream` returns a ResourceNotFoundException (as expected)
* whereas `describe-stream-summary` returns stream details (as if it existed).
**Expected behaviour**
It's expected that both commands return a ResourceNotFoundException for non-existent streams. It has been verified that this is the case with the AWS CLI against real AWS.
**Reproduction**
```
✗ docker run -d -e SERVICES=s3,iam,kinesis localstack/localstack:0.10.2
513dcffc9a7e8f7b4c12b20972119c5fd797f269ae4cbfe62d8bc9f66a6116eb
✗ docker container ls | head -n2
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
513dcffc9a7e localstack/localstack:0.10.2 "docker-entrypoint.sh" 44 seconds ago Up 42 seconds 4567-4597/tcp, 8080/tcp nostalgic_mcclintock
✗ docker exec -it nostalgic_mcclintock sh
/opt/code/localstack # awslocal kinesis describe-stream --stream-name idontexist
An error occurred (ResourceNotFoundException) when calling the DescribeStream operation: Stream idontexist under account 000000000000 not found.
/opt/code/localstack # awslocal kinesis describe-stream-summary --stream-name idontexist
{
"StreamDescriptionSummary": {
"StreamName": "idontexist",
"StreamARN": "idontexist",
(...)
"OpenShardCount": 0,
"ConsumerCount": 0
}
}
/opt/code/localstack # awslocal kinesis list-streams
{
"StreamNames": []
}
/opt/code/localstack #
```
</issue>
<code>
[start of localstack/services/kinesis/kinesis_listener.py]
1 import json
2 import random
3 from requests.models import Response
4 from localstack import config
5 from localstack.utils.common import to_str
6 from localstack.utils.analytics import event_publisher
7 from localstack.services.awslambda import lambda_api
8 from localstack.services.generic_proxy import ProxyListener
9
10 # action headers
11 ACTION_PREFIX = 'Kinesis_20131202'
12 ACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX
13 ACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX
14 ACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX
15 ACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX
16 ACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX
17
18
19 class ProxyListenerKinesis(ProxyListener):
20
21 def forward_request(self, method, path, data, headers):
22 data = json.loads(to_str(data))
23 action = headers.get('X-Amz-Target')
24
25 if action == '%s.DescribeStreamSummary' % ACTION_PREFIX:
26 stream_arn = data.get('StreamARN') or data['StreamName']
27 # TODO fix values below
28 result = {
29 'StreamDescriptionSummary': {
30 'ConsumerCount': 0,
31 'EnhancedMonitoring': [],
32 'KeyId': 'string',
33 'OpenShardCount': 0,
34 'RetentionPeriodHours': 1,
35 'StreamARN': stream_arn,
36 # 'StreamCreationTimestamp': number,
37 'StreamName': data['StreamName'],
38 'StreamStatus': 'ACTIVE'
39 }
40 }
41 return result
42 if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:
43 consumer_arn = data.get('ConsumerARN') or data['ConsumerName']
44 consumer_name = data.get('ConsumerName') or data['ConsumerARN']
45 result = {
46 'ConsumerDescription': {
47 'ConsumerARN': consumer_arn,
48 # 'ConsumerCreationTimestamp': number,
49 'ConsumerName': consumer_name,
50 'ConsumerStatus': 'ACTIVE',
51 'StreamARN': data.get('StreamARN')
52 }
53 }
54 return result
55
56 if random.random() < config.KINESIS_ERROR_PROBABILITY:
57 action = headers.get('X-Amz-Target')
58 if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:
59 return kinesis_error_response(data, action)
60 return True
61
62 def return_response(self, method, path, data, headers, response):
63 action = headers.get('X-Amz-Target')
64 data = json.loads(to_str(data))
65
66 records = []
67 if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
68 event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM
69 else event_publisher.EVENT_KINESIS_DELETE_STREAM)
70 payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
71 if action == ACTION_CREATE_STREAM:
72 payload['s'] = data.get('ShardCount')
73 event_publisher.fire_event(event_type, payload=payload)
74 elif action == ACTION_PUT_RECORD:
75 response_body = json.loads(to_str(response.content))
76 event_record = {
77 'data': data['Data'],
78 'partitionKey': data['PartitionKey'],
79 'sequenceNumber': response_body.get('SequenceNumber')
80 }
81 event_records = [event_record]
82 stream_name = data['StreamName']
83 lambda_api.process_kinesis_records(event_records, stream_name)
84 elif action == ACTION_PUT_RECORDS:
85 event_records = []
86 response_body = json.loads(to_str(response.content))
87 if 'Records' in response_body:
88 response_records = response_body['Records']
89 records = data['Records']
90 for index in range(0, len(records)):
91 record = records[index]
92 event_record = {
93 'data': record['Data'],
94 'partitionKey': record['PartitionKey'],
95 'sequenceNumber': response_records[index].get('SequenceNumber')
96 }
97 event_records.append(event_record)
98 stream_name = data['StreamName']
99 lambda_api.process_kinesis_records(event_records, stream_name)
100 elif action == ACTION_UPDATE_SHARD_COUNT:
101 # Currently kinesalite, which backs the Kinesis implementation for localstack, does
102 # not support UpdateShardCount:
103 # https://github.com/mhart/kinesalite/issues/61
104 #
105 # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
106 # applies Kinesis resources. A Terraform run fails when this is not present.
107 #
108 # The code that follows just returns a successful response, bypassing the 400
109 # response that kinesalite returns.
110 #
111 response = Response()
112 response.status_code = 200
113 content = {
114 'CurrentShardCount': 1,
115 'StreamName': data['StreamName'],
116 'TargetShardCount': data['TargetShardCount']
117 }
118 response.encoding = 'UTF-8'
119 response._content = json.dumps(content)
120 return response
121
122
123 # instantiate listener
124 UPDATE_KINESIS = ProxyListenerKinesis()
125
126
127 def kinesis_error_response(data, action):
128 error_response = Response()
129
130 if action == ACTION_PUT_RECORD:
131 error_response.status_code = 400
132 content = {
133 'ErrorCode': 'ProvisionedThroughputExceededException',
134 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'
135 }
136 else:
137 error_response.status_code = 200
138 content = {'FailedRecordCount': 1, 'Records': []}
139 for record in data.get('Records', []):
140 content['Records'].append({
141 'ErrorCode': 'ProvisionedThroughputExceededException',
142 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'
143 })
144
145 error_response._content = json.dumps(content)
146 return error_response
147
[end of localstack/services/kinesis/kinesis_listener.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py
--- a/localstack/services/kinesis/kinesis_listener.py
+++ b/localstack/services/kinesis/kinesis_listener.py
@@ -22,23 +22,6 @@
data = json.loads(to_str(data))
action = headers.get('X-Amz-Target')
- if action == '%s.DescribeStreamSummary' % ACTION_PREFIX:
- stream_arn = data.get('StreamARN') or data['StreamName']
- # TODO fix values below
- result = {
- 'StreamDescriptionSummary': {
- 'ConsumerCount': 0,
- 'EnhancedMonitoring': [],
- 'KeyId': 'string',
- 'OpenShardCount': 0,
- 'RetentionPeriodHours': 1,
- 'StreamARN': stream_arn,
- # 'StreamCreationTimestamp': number,
- 'StreamName': data['StreamName'],
- 'StreamStatus': 'ACTIVE'
- }
- }
- return result
if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:
consumer_arn = data.get('ConsumerARN') or data['ConsumerName']
consumer_name = data.get('ConsumerName') or data['ConsumerARN']
|
{"golden_diff": "diff --git a/localstack/services/kinesis/kinesis_listener.py b/localstack/services/kinesis/kinesis_listener.py\n--- a/localstack/services/kinesis/kinesis_listener.py\n+++ b/localstack/services/kinesis/kinesis_listener.py\n@@ -22,23 +22,6 @@\n data = json.loads(to_str(data))\n action = headers.get('X-Amz-Target')\n \n- if action == '%s.DescribeStreamSummary' % ACTION_PREFIX:\n- stream_arn = data.get('StreamARN') or data['StreamName']\n- # TODO fix values below\n- result = {\n- 'StreamDescriptionSummary': {\n- 'ConsumerCount': 0,\n- 'EnhancedMonitoring': [],\n- 'KeyId': 'string',\n- 'OpenShardCount': 0,\n- 'RetentionPeriodHours': 1,\n- 'StreamARN': stream_arn,\n- # 'StreamCreationTimestamp': number,\n- 'StreamName': data['StreamName'],\n- 'StreamStatus': 'ACTIVE'\n- }\n- }\n- return result\n if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n", "issue": "describe-stream vs describe-stream-summary on non-existent streams\n**Problem**\r\nFor *non-existent* Kinesis streams:\r\n* the command `awslocal kinesis describe-stream` returns a ResourceNotFoundException (as expected) \r\n* whereas `describe-stream-summary` returns stream details (as if it existed).\r\n\r\n**Expected behaviour**\r\nIt's expected that both commands return a ResourceNotFoundException for non-existent streams. It has been verified that this is the case with AWS CLI with real AWS.\r\n\r\n**Reproduction**\r\n```\r\n\u2717 docker run -d -e SERVICES=s3,iam,kinesis localstack/localstack:0.10.2\r\n513dcffc9a7e8f7b4c12b20972119c5fd797f269ae4cbfe62d8bc9f66a6116eb\r\n\r\n\u2717 docker container ls | head -n2\r\nCONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES\r\n513dcffc9a7e localstack/localstack:0.10.2 \"docker-entrypoint.sh\" 44 seconds ago Up 42 seconds 4567-4597/tcp, 8080/tcp nostalgic_mcclintock\r\n\r\n\u2717 docker exec -it nostalgic_mcclintock sh\r\n/opt/code/localstack # awslocal kinesis describe-stream --stream-name idontexist\r\nAn error occurred (ResourceNotFoundException) when calling the DescribeStream operation: Stream idontexist under account 000000000000 not found.\r\n\r\n/opt/code/localstack # awslocal kinesis describe-stream-summary --stream-name idontexist\r\n{\r\n \"StreamDescriptionSummary\": {\r\n \"StreamName\": \"idontexist\",\r\n \"StreamARN\": \"idontexist\",\r\n (...)\r\n \"OpenShardCount\": 0,\r\n \"ConsumerCount\": 0\r\n }\r\n}\r\n/opt/code/localstack # awslocal kinesis list-streams\r\n{\r\n \"StreamNames\": []\r\n}\r\n/opt/code/localstack # \r\n```\n", "before_files": [{"content": "import json\nimport random\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.utils.common import to_str\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.generic_proxy import ProxyListener\n\n# action headers\nACTION_PREFIX = 'Kinesis_20131202'\nACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX\nACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX\nACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX\nACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX\nACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX\n\n\nclass ProxyListenerKinesis(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n data = json.loads(to_str(data))\n action = headers.get('X-Amz-Target')\n\n if action == '%s.DescribeStreamSummary' 
% ACTION_PREFIX:\n stream_arn = data.get('StreamARN') or data['StreamName']\n # TODO fix values below\n result = {\n 'StreamDescriptionSummary': {\n 'ConsumerCount': 0,\n 'EnhancedMonitoring': [],\n 'KeyId': 'string',\n 'OpenShardCount': 0,\n 'RetentionPeriodHours': 1,\n 'StreamARN': stream_arn,\n # 'StreamCreationTimestamp': number,\n 'StreamName': data['StreamName'],\n 'StreamStatus': 'ACTIVE'\n }\n }\n return result\n if action == '%s.DescribeStreamConsumer' % ACTION_PREFIX:\n consumer_arn = data.get('ConsumerARN') or data['ConsumerName']\n consumer_name = data.get('ConsumerName') or data['ConsumerARN']\n result = {\n 'ConsumerDescription': {\n 'ConsumerARN': consumer_arn,\n # 'ConsumerCreationTimestamp': number,\n 'ConsumerName': consumer_name,\n 'ConsumerStatus': 'ACTIVE',\n 'StreamARN': data.get('StreamARN')\n }\n }\n return result\n\n if random.random() < config.KINESIS_ERROR_PROBABILITY:\n action = headers.get('X-Amz-Target')\n if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:\n return kinesis_error_response(data, action)\n return True\n\n def return_response(self, method, path, data, headers, response):\n action = headers.get('X-Amz-Target')\n data = json.loads(to_str(data))\n\n records = []\n if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):\n event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM\n else event_publisher.EVENT_KINESIS_DELETE_STREAM)\n payload = {'n': event_publisher.get_hash(data.get('StreamName'))}\n if action == ACTION_CREATE_STREAM:\n payload['s'] = data.get('ShardCount')\n event_publisher.fire_event(event_type, payload=payload)\n elif action == ACTION_PUT_RECORD:\n response_body = json.loads(to_str(response.content))\n event_record = {\n 'data': data['Data'],\n 'partitionKey': data['PartitionKey'],\n 'sequenceNumber': response_body.get('SequenceNumber')\n }\n event_records = [event_record]\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_PUT_RECORDS:\n event_records = []\n response_body = json.loads(to_str(response.content))\n if 'Records' in response_body:\n response_records = response_body['Records']\n records = data['Records']\n for index in range(0, len(records)):\n record = records[index]\n event_record = {\n 'data': record['Data'],\n 'partitionKey': record['PartitionKey'],\n 'sequenceNumber': response_records[index].get('SequenceNumber')\n }\n event_records.append(event_record)\n stream_name = data['StreamName']\n lambda_api.process_kinesis_records(event_records, stream_name)\n elif action == ACTION_UPDATE_SHARD_COUNT:\n # Currently kinesalite, which backs the Kinesis implementation for localstack, does\n # not support UpdateShardCount:\n # https://github.com/mhart/kinesalite/issues/61\n #\n # [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it\n # applies Kinesis resources. 
A Terraform run fails when this is not present.\n #\n # The code that follows just returns a successful response, bypassing the 400\n # response that kinesalite returns.\n #\n response = Response()\n response.status_code = 200\n content = {\n 'CurrentShardCount': 1,\n 'StreamName': data['StreamName'],\n 'TargetShardCount': data['TargetShardCount']\n }\n response.encoding = 'UTF-8'\n response._content = json.dumps(content)\n return response\n\n\n# instantiate listener\nUPDATE_KINESIS = ProxyListenerKinesis()\n\n\ndef kinesis_error_response(data, action):\n error_response = Response()\n\n if action == ACTION_PUT_RECORD:\n error_response.status_code = 400\n content = {\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n }\n else:\n error_response.status_code = 200\n content = {'FailedRecordCount': 1, 'Records': []}\n for record in data.get('Records', []):\n content['Records'].append({\n 'ErrorCode': 'ProvisionedThroughputExceededException',\n 'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'\n })\n\n error_response._content = json.dumps(content)\n return error_response\n", "path": "localstack/services/kinesis/kinesis_listener.py"}]}
| 2,573 | 283 |
gh_patches_debug_10442
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-4975
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of examples/language/bert/finetune.py]
1 import argparse
2 from typing import Callable, List, Union
3
4 import evaluate
5 import torch
6 import torch.distributed as dist
7 import torch.nn as nn
8 from data import GLUEDataBuilder
9 from torch.optim import Optimizer
10 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
11 from torch.utils.data import DataLoader
12 from tqdm import tqdm
13 from transformers import (
14 AlbertForSequenceClassification,
15 AutoConfig,
16 BertForSequenceClassification,
17 get_linear_schedule_with_warmup,
18 )
19
20 import colossalai
21 from colossalai.booster import Booster
22 from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
23 from colossalai.cluster import DistCoordinator
24 from colossalai.nn.optimizer import HybridAdam
25 from colossalai.utils import get_current_device
26
27 # ==============================
28 # Prepare Hyperparameters
29 # ==============================
30 NUM_EPOCHS = 3
31 BATCH_SIZE = 32
32 LEARNING_RATE = 2.4e-5
33 WEIGHT_DECAY = 0.01
34 WARMUP_FRACTION = 0.1
35
36 output_transform_fn = lambda x: x
37 criterion = lambda x: x.loss
38
39
40 def move_to_cuda(batch):
41 return {k: v.cuda() for k, v in batch.items()}
42
43
44 @torch.no_grad()
45 def evaluate_model(
46 model: nn.Module,
47 criterion,
48 test_dataloader: Union[DataLoader, List[DataLoader]],
49 num_labels: int,
50 task_name: str,
51 eval_splits: List[str],
52 booster: Booster,
53 coordinator: DistCoordinator,
54 ):
55 metric = evaluate.load("glue", task_name, process_id=coordinator.rank, num_process=coordinator.world_size)
56 model.eval()
57
58 def evaluate_subset(dataloader: DataLoader):
59 use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
60 is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
61
62 accum_loss = torch.zeros(1, device=get_current_device())
63 for batch in dataloader:
64 batch = move_to_cuda(batch)
65 labels = batch["labels"]
66 if use_pipeline:
67 pg_mesh = booster.plugin.pg_mesh
68 pp_group = booster.plugin.pp_group
69 current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group)
70 current_rank = dist.get_rank()
71 batch = iter([batch])
72 outputs = booster.execute_pipeline(batch, model, criterion, return_loss=True, return_outputs=True)
73
74 if is_pp_last_stage:
75 logits = outputs["outputs"]["logits"]
76 val_loss = outputs["loss"]
77 accum_loss.add_(val_loss)
78
79 if num_labels > 1:
80 preds = torch.argmax(logits, axis=1)
81 elif num_labels == 1:
82 preds = logits.squeeze()
83
84 dist.broadcast_object_list([preds, val_loss], src=current_pp_group_ranks[-1], group=pp_group)
85
86 metric.add_batch(predictions=preds, references=labels)
87 elif current_rank in current_pp_group_ranks:
88 object_list = [None, None]
89 dist.broadcast_object_list(object_list, src=current_pp_group_ranks[-1], group=pp_group)
90
91 metric.add_batch(predictions=object_list[0].to(get_current_device()), references=labels)
92 accum_loss.add_(object_list[1].to(get_current_device()))
93
94 else:
95 batch = move_to_cuda(batch)
96 outputs = model(**batch)
97 val_loss, logits = outputs[:2]
98 accum_loss.add_(val_loss)
99
100 if num_labels > 1:
101 preds = torch.argmax(logits, axis=1)
102 elif num_labels == 1:
103 preds = logits.squeeze()
104
105 metric.add_batch(predictions=preds, references=labels)
106
107 results = metric.compute()
108 dist.all_reduce(accum_loss.div_(len(dataloader)))
109 if coordinator.is_master() and results is not None:
110 results["loss"] = accum_loss.item() / coordinator.world_size
111
112 return results
113
114 if isinstance(test_dataloader, DataLoader):
115 return evaluate_subset(test_dataloader)
116 else:
117 assert len(test_dataloader) == len(eval_splits)
118 final_results = {}
119 for split, sub_loader in zip(eval_splits, test_dataloader):
120 results = evaluate_subset(sub_loader)
121 final_results.update({f"{k}_{split}": v for k, v in results.items()})
122 return final_results
123
124
125 def train_epoch(
126 epoch: int,
127 model: nn.Module,
128 optimizer: Optimizer,
129 _criterion: Callable,
130 lr_scheduler: LRScheduler,
131 train_dataloader: DataLoader,
132 booster: Booster,
133 coordinator: DistCoordinator,
134 ):
135 use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
136 is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
137 print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)
138 total_step = len(train_dataloader)
139
140 model.train()
141 optimizer.zero_grad()
142 train_dataloader_iter = iter(train_dataloader)
143 with tqdm(range(total_step), desc=f"Epoch [{epoch + 1}/{NUM_EPOCHS}]", disable=not print_flag) as pbar:
144 # Forward pass
145 for _ in pbar:
146 if use_pipeline:
147 outputs = booster.execute_pipeline(
148 train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True
149 )
150 # Backward and optimize
151 if is_pp_last_stage:
152 loss = outputs["loss"]
153 pbar.set_postfix({"loss": loss.item()})
154 else:
155 data = next(train_dataloader_iter)
156 data = move_to_cuda(data)
157 outputs = model(**data)
158 loss = _criterion(outputs, None)
159 # Backward
160 booster.backward(loss, optimizer)
161 pbar.set_postfix({"loss": loss.item()})
162
163 optimizer.step()
164 optimizer.zero_grad()
165 lr_scheduler.step()
166
167
168 def main():
169 # ==============================
170 # Parse Arguments
171 # ==============================
172 parser = argparse.ArgumentParser()
173 parser.add_argument("-t", "--task", default="mrpc", help="GLUE task to run")
174 parser.add_argument(
175 "-p",
176 "--plugin",
177 type=str,
178 default="torch_ddp",
179 choices=["torch_ddp", "torch_ddp_fp16", "gemini", "low_level_zero", "hybrid_parallel"],
180 help="plugin to use",
181 )
182 parser.add_argument(
183 "--model_type",
184 type=str,
185 default="bert",
186 help="bert or albert",
187 )
188 parser.add_argument("--target_f1", type=float, default=None, help="target f1 score. Raise exception if not reached")
189 parser.add_argument("--use_lazy_init", type=bool, default=False, help="for initiating lazy init context")
190 args = parser.parse_args()
191
192 if args.model_type == "bert":
193 model_name = "bert-base-uncased"
194 elif args.model_type == "albert":
195 model_name = "albert-xxlarge-v2"
196 else:
197 raise RuntimeError
198
199 # ==============================
200 # Launch Distributed Environment
201 # ==============================
202 colossalai.launch_from_torch(config={}, seed=42)
203 coordinator = DistCoordinator()
204
205 lr = LEARNING_RATE * coordinator.world_size
206
207 # ==============================
208 # Instantiate Plugin and Booster
209 # ==============================
210 booster_kwargs = {}
211 if args.plugin == "torch_ddp_fp16":
212 booster_kwargs["mixed_precision"] = "fp16"
213 if args.plugin.startswith("torch_ddp"):
214 plugin = TorchDDPPlugin()
215 elif args.plugin == "gemini":
216 plugin = GeminiPlugin(initial_scale=2**5)
217 elif args.plugin == "low_level_zero":
218 plugin = LowLevelZeroPlugin(initial_scale=2**5)
219 elif args.plugin == "hybrid_parallel":
220 # modify the param accordingly for finetuning test cases
221 plugin = HybridParallelPlugin(
222 tp_size=1,
223 pp_size=2,
224 num_microbatches=None,
225 microbatch_size=1,
226 enable_all_optimization=True,
227 zero_stage=1,
228 precision="fp16",
229 initial_scale=1,
230 )
231
232 booster = Booster(plugin=plugin, **booster_kwargs)
233
234 # ==============================
235 # Prepare Dataloader
236 # ==============================
237 data_builder = GLUEDataBuilder(
238 model_name, plugin, args.task, train_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE
239 )
240 train_dataloader = data_builder.train_dataloader()
241 test_dataloader = data_builder.test_dataloader()
242
243 # ====================================
244 # Prepare model, optimizer
245 # ====================================
246 # bert pretrained model
247
248 cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)
249
250 if model_name == "bert-base-uncased":
251 model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()
252 elif model_name == "albert-xxlarge-v2":
253 model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)
254 else:
255 raise RuntimeError
256
257 # optimizer
258 no_decay = ["bias", "LayerNorm.weight"]
259 optimizer_grouped_parameters = [
260 {
261 "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
262 "weight_decay": WEIGHT_DECAY,
263 },
264 {
265 "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
266 "weight_decay": 0.0,
267 },
268 ]
269
270 optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)
271
272 # lr scheduler
273 total_steps = len(train_dataloader) * NUM_EPOCHS
274 num_warmup_steps = int(WARMUP_FRACTION * total_steps)
275 lr_scheduler = get_linear_schedule_with_warmup(
276 optimizer,
277 num_warmup_steps=num_warmup_steps,
278 num_training_steps=total_steps,
279 )
280
281 def _criterion(outputs, inputs):
282 outputs = output_transform_fn(outputs)
283 loss = criterion(outputs)
284 return loss
285
286 # ==============================
287 # Boost with ColossalAI
288 # ==============================
289 model, optimizer, _criterion, _, lr_scheduler = booster.boost(
290 model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler
291 )
292
293 # ==============================
294 # Train model
295 # ==============================
296 for epoch in range(NUM_EPOCHS):
297 train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)
298
299 results = evaluate_model(
300 model,
301 _criterion,
302 test_dataloader,
303 data_builder.num_labels,
304 args.task,
305 data_builder.eval_splits,
306 booster,
307 coordinator,
308 )
309
310 if coordinator.is_master():
311 print(results)
312 if args.target_f1 is not None and "f1" in results:
313 assert results["f1"] >= args.target_f1, f'f1 score {results["f1"]} is lower than target {args.target_f1}'
314
315
316 if __name__ == "__main__":
317 main()
318
[end of examples/language/bert/finetune.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/language/bert/finetune.py b/examples/language/bert/finetune.py
--- a/examples/language/bert/finetune.py
+++ b/examples/language/bert/finetune.py
@@ -248,7 +248,7 @@
cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)
if model_name == "bert-base-uncased":
- model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()
+ model = BertForSequenceClassification.from_pretrained(model_name, config=cfg, cache_dir="./").cuda()
elif model_name == "albert-xxlarge-v2":
model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)
else:
|
{"golden_diff": "diff --git a/examples/language/bert/finetune.py b/examples/language/bert/finetune.py\n--- a/examples/language/bert/finetune.py\n+++ b/examples/language/bert/finetune.py\n@@ -248,7 +248,7 @@\n cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)\n \n if model_name == \"bert-base-uncased\":\n- model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()\n+ model = BertForSequenceClassification.from_pretrained(model_name, config=cfg, cache_dir=\"./\").cuda()\n elif model_name == \"albert-xxlarge-v2\":\n model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)\n else:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nfrom typing import Callable, List, Union\n\nimport evaluate\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom data import GLUEDataBuilder\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\nfrom transformers import (\n AlbertForSequenceClassification,\n AutoConfig,\n BertForSequenceClassification,\n get_linear_schedule_with_warmup,\n)\n\nimport colossalai\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin\nfrom colossalai.cluster import DistCoordinator\nfrom colossalai.nn.optimizer import HybridAdam\nfrom colossalai.utils import get_current_device\n\n# ==============================\n# Prepare Hyperparameters\n# ==============================\nNUM_EPOCHS = 3\nBATCH_SIZE = 32\nLEARNING_RATE = 2.4e-5\nWEIGHT_DECAY = 0.01\nWARMUP_FRACTION = 0.1\n\noutput_transform_fn = lambda x: x\ncriterion = lambda x: x.loss\n\n\ndef move_to_cuda(batch):\n return {k: v.cuda() for k, v in batch.items()}\n\n\[email protected]_grad()\ndef evaluate_model(\n model: nn.Module,\n criterion,\n test_dataloader: Union[DataLoader, List[DataLoader]],\n num_labels: int,\n task_name: str,\n eval_splits: List[str],\n booster: Booster,\n coordinator: DistCoordinator,\n):\n metric = evaluate.load(\"glue\", task_name, process_id=coordinator.rank, num_process=coordinator.world_size)\n model.eval()\n\n def evaluate_subset(dataloader: DataLoader):\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n\n accum_loss = torch.zeros(1, device=get_current_device())\n for batch in dataloader:\n batch = move_to_cuda(batch)\n labels = batch[\"labels\"]\n if use_pipeline:\n pg_mesh = booster.plugin.pg_mesh\n pp_group = booster.plugin.pp_group\n current_pp_group_ranks = pg_mesh.get_ranks_in_group(pp_group)\n current_rank = dist.get_rank()\n batch = iter([batch])\n outputs = booster.execute_pipeline(batch, model, criterion, return_loss=True, return_outputs=True)\n\n if is_pp_last_stage:\n logits = outputs[\"outputs\"][\"logits\"]\n val_loss = outputs[\"loss\"]\n accum_loss.add_(val_loss)\n\n if num_labels > 1:\n preds = torch.argmax(logits, axis=1)\n elif num_labels == 1:\n preds = logits.squeeze()\n\n dist.broadcast_object_list([preds, val_loss], src=current_pp_group_ranks[-1], group=pp_group)\n\n metric.add_batch(predictions=preds, references=labels)\n elif current_rank in current_pp_group_ranks:\n object_list = [None, None]\n dist.broadcast_object_list(object_list, 
src=current_pp_group_ranks[-1], group=pp_group)\n\n metric.add_batch(predictions=object_list[0].to(get_current_device()), references=labels)\n accum_loss.add_(object_list[1].to(get_current_device()))\n\n else:\n batch = move_to_cuda(batch)\n outputs = model(**batch)\n val_loss, logits = outputs[:2]\n accum_loss.add_(val_loss)\n\n if num_labels > 1:\n preds = torch.argmax(logits, axis=1)\n elif num_labels == 1:\n preds = logits.squeeze()\n\n metric.add_batch(predictions=preds, references=labels)\n\n results = metric.compute()\n dist.all_reduce(accum_loss.div_(len(dataloader)))\n if coordinator.is_master() and results is not None:\n results[\"loss\"] = accum_loss.item() / coordinator.world_size\n\n return results\n\n if isinstance(test_dataloader, DataLoader):\n return evaluate_subset(test_dataloader)\n else:\n assert len(test_dataloader) == len(eval_splits)\n final_results = {}\n for split, sub_loader in zip(eval_splits, test_dataloader):\n results = evaluate_subset(sub_loader)\n final_results.update({f\"{k}_{split}\": v for k, v in results.items()})\n return final_results\n\n\ndef train_epoch(\n epoch: int,\n model: nn.Module,\n optimizer: Optimizer,\n _criterion: Callable,\n lr_scheduler: LRScheduler,\n train_dataloader: DataLoader,\n booster: Booster,\n coordinator: DistCoordinator,\n):\n use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1\n is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()\n print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)\n total_step = len(train_dataloader)\n\n model.train()\n optimizer.zero_grad()\n train_dataloader_iter = iter(train_dataloader)\n with tqdm(range(total_step), desc=f\"Epoch [{epoch + 1}/{NUM_EPOCHS}]\", disable=not print_flag) as pbar:\n # Forward pass\n for _ in pbar:\n if use_pipeline:\n outputs = booster.execute_pipeline(\n train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True\n )\n # Backward and optimize\n if is_pp_last_stage:\n loss = outputs[\"loss\"]\n pbar.set_postfix({\"loss\": loss.item()})\n else:\n data = next(train_dataloader_iter)\n data = move_to_cuda(data)\n outputs = model(**data)\n loss = _criterion(outputs, None)\n # Backward\n booster.backward(loss, optimizer)\n pbar.set_postfix({\"loss\": loss.item()})\n\n optimizer.step()\n optimizer.zero_grad()\n lr_scheduler.step()\n\n\ndef main():\n # ==============================\n # Parse Arguments\n # ==============================\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--task\", default=\"mrpc\", help=\"GLUE task to run\")\n parser.add_argument(\n \"-p\",\n \"--plugin\",\n type=str,\n default=\"torch_ddp\",\n choices=[\"torch_ddp\", \"torch_ddp_fp16\", \"gemini\", \"low_level_zero\", \"hybrid_parallel\"],\n help=\"plugin to use\",\n )\n parser.add_argument(\n \"--model_type\",\n type=str,\n default=\"bert\",\n help=\"bert or albert\",\n )\n parser.add_argument(\"--target_f1\", type=float, default=None, help=\"target f1 score. 
Raise exception if not reached\")\n parser.add_argument(\"--use_lazy_init\", type=bool, default=False, help=\"for initiating lazy init context\")\n args = parser.parse_args()\n\n if args.model_type == \"bert\":\n model_name = \"bert-base-uncased\"\n elif args.model_type == \"albert\":\n model_name = \"albert-xxlarge-v2\"\n else:\n raise RuntimeError\n\n # ==============================\n # Launch Distributed Environment\n # ==============================\n colossalai.launch_from_torch(config={}, seed=42)\n coordinator = DistCoordinator()\n\n lr = LEARNING_RATE * coordinator.world_size\n\n # ==============================\n # Instantiate Plugin and Booster\n # ==============================\n booster_kwargs = {}\n if args.plugin == \"torch_ddp_fp16\":\n booster_kwargs[\"mixed_precision\"] = \"fp16\"\n if args.plugin.startswith(\"torch_ddp\"):\n plugin = TorchDDPPlugin()\n elif args.plugin == \"gemini\":\n plugin = GeminiPlugin(initial_scale=2**5)\n elif args.plugin == \"low_level_zero\":\n plugin = LowLevelZeroPlugin(initial_scale=2**5)\n elif args.plugin == \"hybrid_parallel\":\n # modify the param accordingly for finetuning test cases\n plugin = HybridParallelPlugin(\n tp_size=1,\n pp_size=2,\n num_microbatches=None,\n microbatch_size=1,\n enable_all_optimization=True,\n zero_stage=1,\n precision=\"fp16\",\n initial_scale=1,\n )\n\n booster = Booster(plugin=plugin, **booster_kwargs)\n\n # ==============================\n # Prepare Dataloader\n # ==============================\n data_builder = GLUEDataBuilder(\n model_name, plugin, args.task, train_batch_size=BATCH_SIZE, eval_batch_size=BATCH_SIZE\n )\n train_dataloader = data_builder.train_dataloader()\n test_dataloader = data_builder.test_dataloader()\n\n # ====================================\n # Prepare model, optimizer\n # ====================================\n # bert pretrained model\n\n cfg = AutoConfig.from_pretrained(model_name, num_labels=data_builder.num_labels)\n\n if model_name == \"bert-base-uncased\":\n model = BertForSequenceClassification.from_pretrained(model_name, config=cfg).cuda()\n elif model_name == \"albert-xxlarge-v2\":\n model = AlbertForSequenceClassification.from_pretrained(model_name, config=cfg)\n else:\n raise RuntimeError\n\n # optimizer\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": WEIGHT_DECAY,\n },\n {\n \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)\n\n # lr scheduler\n total_steps = len(train_dataloader) * NUM_EPOCHS\n num_warmup_steps = int(WARMUP_FRACTION * total_steps)\n lr_scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=num_warmup_steps,\n num_training_steps=total_steps,\n )\n\n def _criterion(outputs, inputs):\n outputs = output_transform_fn(outputs)\n loss = criterion(outputs)\n return loss\n\n # ==============================\n # Boost with ColossalAI\n # ==============================\n model, optimizer, _criterion, _, lr_scheduler = booster.boost(\n model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler\n )\n\n # ==============================\n # Train model\n # ==============================\n for epoch in range(NUM_EPOCHS):\n train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)\n\n results = 
evaluate_model(\n model,\n _criterion,\n test_dataloader,\n data_builder.num_labels,\n args.task,\n data_builder.eval_splits,\n booster,\n coordinator,\n )\n\n if coordinator.is_master():\n print(results)\n if args.target_f1 is not None and \"f1\" in results:\n assert results[\"f1\"] >= args.target_f1, f'f1 score {results[\"f1\"]} is lower than target {args.target_f1}'\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "examples/language/bert/finetune.py"}]}
| 3,826 | 171 |
gh_patches_debug_21331
|
rasdani/github-patches
|
git_diff
|
rasterio__rasterio-426
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch to the canonical click plugins implementation
The plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6.
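For illustration, a minimal sketch of the `click-plugins` pattern this would mean for `rio` (assuming the existing `rasterio.rio_commands` and `rasterio.rio_plugins` entry points are kept as-is):

```python
from pkg_resources import iter_entry_points

import click
from click_plugins import with_plugins


# with_plugins attaches each entry point as a subcommand of the click group,
# replacing the deprecated cligj.plugins.group() helper.
@with_plugins(ep for ep in list(iter_entry_points('rasterio.rio_commands')) +
              list(iter_entry_points('rasterio.rio_plugins')))
@click.group()
def main_group():
    """Rasterio command line interface."""
```

`click-plugins` would then also need to be added to `install_requires` in `setup.py`.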
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # Two environmental variables influence this script.
4 #
5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,
6 # libraries, and data files.
7 #
8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the
9 # source or binary distribution. This is essential when creating self-contained
10 # binary wheels.
11
12 import logging
13 import os
14 import pprint
15 import shutil
16 import subprocess
17 import sys
18
19 from setuptools import setup
20 from setuptools.extension import Extension
21
22 logging.basicConfig()
23 log = logging.getLogger()
24
25 # python -W all setup.py ...
26 if 'all' in sys.warnoptions:
27 log.level = logging.DEBUG
28
29 def check_output(cmd):
30 # since subprocess.check_output doesn't exist in 2.6
31 # we wrap it here.
32 try:
33 out = subprocess.check_output(cmd)
34 return out.decode('utf')
35 except AttributeError:
36 # For some reasone check_output doesn't exist
37 # So fall back on Popen
38 p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
39 out, err = p.communicate()
40 return out
41
42 def copy_data_tree(datadir, destdir):
43 try:
44 shutil.rmtree(destdir)
45 except OSError:
46 pass
47 shutil.copytree(datadir, destdir)
48
49 # Parse the version from the rasterio module.
50 with open('rasterio/__init__.py') as f:
51 for line in f:
52 if line.find("__version__") >= 0:
53 version = line.split("=")[1].strip()
54 version = version.strip('"')
55 version = version.strip("'")
56 continue
57
58 with open('VERSION.txt', 'w') as f:
59 f.write(version)
60
61 # Use Cython if available.
62 try:
63 from Cython.Build import cythonize
64 except ImportError:
65 cythonize = None
66
67 # By default we'll try to get options via gdal-config. On systems without,
68 # options will need to be set in setup.cfg or on the setup command line.
69 include_dirs = []
70 library_dirs = []
71 libraries = []
72 extra_link_args = []
73 gdal_output = [None]*3
74
75 try:
76 import numpy
77 include_dirs.append(numpy.get_include())
78 except ImportError:
79 log.critical("Numpy and its headers are required to run setup(). Exiting.")
80 sys.exit(1)
81
82 try:
83 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')
84 for i, flag in enumerate(("--cflags", "--libs", "--datadir")):
85 gdal_output[i] = check_output([gdal_config, flag]).strip()
86
87 for item in gdal_output[0].split():
88 if item.startswith("-I"):
89 include_dirs.extend(item[2:].split(":"))
90 for item in gdal_output[1].split():
91 if item.startswith("-L"):
92 library_dirs.extend(item[2:].split(":"))
93 elif item.startswith("-l"):
94 libraries.append(item[2:])
95 else:
96 # e.g. -framework GDAL
97 extra_link_args.append(item)
98
99 except Exception as e:
100 if os.name == "nt":
101 log.info(("Building on Windows requires extra options to setup.py to locate needed GDAL files.\n"
102 "More information is available in the README."))
103 else:
104 log.warning("Failed to get options via gdal-config: %s", str(e))
105
106
107 # Conditionally copy the GDAL data. To be used in conjunction with
108 # the bdist_wheel command to make self-contained binary wheels.
109 if os.environ.get('PACKAGE_DATA'):
110 destdir = 'rasterio/gdal_data'
111 if gdal_output[2]:
112 log.info("Copying gdal data from %s" % gdal_output[2])
113 copy_data_tree(gdal_output[2], destdir)
114 else:
115 # check to see if GDAL_DATA is defined
116 gdal_data = os.environ.get('GDAL_DATA', None)
117 if gdal_data:
118 log.info("Copying gdal_data from %s" % gdal_data)
119 copy_data_tree(gdal_data, destdir)
120
121 # Conditionally copy PROJ.4 data.
122 projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')
123 if os.path.exists(projdatadir):
124 log.info("Copying proj_data from %s" % projdatadir)
125 copy_data_tree(projdatadir, 'rasterio/proj_data')
126
127 ext_options = dict(
128 include_dirs=include_dirs,
129 library_dirs=library_dirs,
130 libraries=libraries,
131 extra_link_args=extra_link_args)
132
133 if not os.name == "nt":
134 # These options fail on Windows if using Visual Studio
135 ext_options['extra_compile_args'] = ['-Wno-unused-parameter',
136 '-Wno-unused-function']
137
138 log.debug('ext_options:\n%s', pprint.pformat(ext_options))
139
140 # When building from a repo, Cython is required.
141 if os.path.exists("MANIFEST.in") and "clean" not in sys.argv:
142 log.info("MANIFEST.in found, presume a repo, cythonizing...")
143 if not cythonize:
144 log.critical(
145 "Cython.Build.cythonize not found. "
146 "Cython is required to build from a repo.")
147 sys.exit(1)
148 ext_modules = cythonize([
149 Extension(
150 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),
151 Extension(
152 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),
153 Extension(
154 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),
155 Extension(
156 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),
157 Extension(
158 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),
159 Extension(
160 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),
161 Extension(
162 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),
163 Extension(
164 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),
165 Extension(
166 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),
167 ], quiet=True)
168
169 # If there's no manifest template, as in an sdist, we just specify .c files.
170 else:
171 ext_modules = [
172 Extension(
173 'rasterio._base', ['rasterio/_base.c'], **ext_options),
174 Extension(
175 'rasterio._io', ['rasterio/_io.c'], **ext_options),
176 Extension(
177 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),
178 Extension(
179 'rasterio._features', ['rasterio/_features.c'], **ext_options),
180 Extension(
181 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),
182 Extension(
183 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),
184 Extension(
185 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),
186 Extension(
187 'rasterio._err', ['rasterio/_err.c'], **ext_options),
188 Extension(
189 'rasterio._example', ['rasterio/_example.c'], **ext_options),
190 ]
191
192 with open('README.rst') as f:
193 readme = f.read()
194
195 # Runtime requirements.
196 inst_reqs = [
197 'affine>=1.0',
198 'cligj>=0.2.0',
199 'Numpy>=1.7',
200 'snuggs>=1.3.1']
201
202 if sys.version_info < (3, 4):
203 inst_reqs.append('enum34')
204
205 setup_args = dict(
206 name='rasterio',
207 version=version,
208 description="Fast and direct raster I/O for use with Numpy and SciPy",
209 long_description=readme,
210 classifiers=[
211 'Development Status :: 4 - Beta',
212 'Intended Audience :: Developers',
213 'Intended Audience :: Information Technology',
214 'Intended Audience :: Science/Research',
215 'License :: OSI Approved :: BSD License',
216 'Programming Language :: C',
217 'Programming Language :: Python :: 2.6',
218 'Programming Language :: Python :: 2.7',
219 'Programming Language :: Python :: 3.3',
220 'Programming Language :: Python :: 3.4',
221 'Topic :: Multimedia :: Graphics :: Graphics Conversion',
222 'Topic :: Scientific/Engineering :: GIS'],
223 keywords='raster gdal',
224 author='Sean Gillies',
225 author_email='[email protected]',
226 url='https://github.com/mapbox/rasterio',
227 license='BSD',
228 package_dir={'': '.'},
229 packages=['rasterio', 'rasterio.rio'],
230 entry_points='''
231 [console_scripts]
232 rio=rasterio.rio.main:main_group
233
234 [rasterio.rio_commands]
235 bounds=rasterio.rio.features:bounds
236 calc=rasterio.rio.calc:calc
237 convert=rasterio.rio.convert:convert
238 edit-info=rasterio.rio.info:edit
239 env=rasterio.rio.info:env
240 info=rasterio.rio.info:info
241 insp=rasterio.rio.info:insp
242 mask=rasterio.rio.features:mask
243 merge=rasterio.rio.merge:merge
244 overview=rasterio.rio.overview:overview
245 rasterize=rasterio.rio.features:rasterize
246 sample=rasterio.rio.sample:sample
247 shapes=rasterio.rio.features:shapes
248 stack=rasterio.rio.bands:stack
249 warp=rasterio.rio.warp:warp
250 transform=rasterio.rio.info:transform
251 ''',
252 include_package_data=True,
253 ext_modules=ext_modules,
254 zip_safe=False,
255 install_requires=inst_reqs,
256 extras_require={
257 'ipython': ['ipython>=2.0']})
258
259 if os.environ.get('PACKAGE_DATA'):
260 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}
261
262 setup(**setup_args)
263
[end of setup.py]
[start of rasterio/rio/main.py]
1 """
2 Main click group for CLI
3 """
4
5
6 import logging
7 from pkg_resources import iter_entry_points
8 import sys
9
10 import click
11 import cligj
12 import cligj.plugins
13
14 import rasterio
15
16
17 def configure_logging(verbosity):
18 log_level = max(10, 30 - 10*verbosity)
19 logging.basicConfig(stream=sys.stderr, level=log_level)
20
21
22 @cligj.plugins.group(plugins=(
23 ep for ep in list(iter_entry_points('rasterio.rio_commands')) +
24 list(iter_entry_points('rasterio.rio_plugins'))))
25 @cligj.verbose_opt
26 @cligj.quiet_opt
27 @click.version_option(version=rasterio.__version__, message='%(version)s')
28 @click.pass_context
29 def main_group(ctx, verbose, quiet):
30
31 """
32 Rasterio command line interface.
33 """
34
35 verbosity = verbose - quiet
36 configure_logging(verbosity)
37 ctx.obj = {}
38 ctx.obj['verbosity'] = verbosity
39
[end of rasterio/rio/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rasterio/rio/main.py b/rasterio/rio/main.py
--- a/rasterio/rio/main.py
+++ b/rasterio/rio/main.py
@@ -8,8 +8,8 @@
import sys
import click
+from click_plugins import with_plugins
import cligj
-import cligj.plugins
import rasterio
@@ -19,9 +19,9 @@
logging.basicConfig(stream=sys.stderr, level=log_level)
[email protected](plugins=(
- ep for ep in list(iter_entry_points('rasterio.rio_commands')) +
- list(iter_entry_points('rasterio.rio_plugins'))))
+@with_plugins(ep for ep in list(iter_entry_points('rasterio.rio_commands')) +
+ list(iter_entry_points('rasterio.rio_plugins')))
[email protected]()
@cligj.verbose_opt
@cligj.quiet_opt
@click.version_option(version=rasterio.__version__, message='%(version)s')
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -197,7 +197,8 @@
'affine>=1.0',
'cligj>=0.2.0',
'Numpy>=1.7',
- 'snuggs>=1.3.1']
+ 'snuggs>=1.3.1',
+ 'click-plugins']
if sys.version_info < (3, 4):
inst_reqs.append('enum34')
|
{"golden_diff": "diff --git a/rasterio/rio/main.py b/rasterio/rio/main.py\n--- a/rasterio/rio/main.py\n+++ b/rasterio/rio/main.py\n@@ -8,8 +8,8 @@\n import sys\n \n import click\n+from click_plugins import with_plugins\n import cligj\n-import cligj.plugins\n \n import rasterio\n \n@@ -19,9 +19,9 @@\n logging.basicConfig(stream=sys.stderr, level=log_level)\n \n \[email protected](plugins=(\n- ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n- list(iter_entry_points('rasterio.rio_plugins'))))\n+@with_plugins(ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n+ list(iter_entry_points('rasterio.rio_plugins')))\[email protected]()\n @cligj.verbose_opt\n @cligj.quiet_opt\n @click.version_option(version=rasterio.__version__, message='%(version)s')\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -197,7 +197,8 @@\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n- 'snuggs>=1.3.1']\n+ 'snuggs>=1.3.1',\n+ 'click-plugins']\n \n if sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n", "issue": "Switch to the canonical click plugins implementation\nThe plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6.\n\nSwitch to the canonical click plugins implementation\nThe plugin functionality from `cligj.plugins` landed in its own click extension package called [click-plugins](https://github.com/click-contrib/click-plugins). The cligj implementation will be deprecated according to https://github.com/mapbox/cligj/issues/6.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport logging\nimport os\nimport pprint\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig()\nlog = logging.getLogger()\n\n# python -W all setup.py ...\nif 'all' in sys.warnoptions:\n log.level = logging.DEBUG\n\ndef check_output(cmd):\n # since subprocess.check_output doesn't exist in 2.6\n # we wrap it here.\n try:\n out = subprocess.check_output(cmd)\n return out.decode('utf')\n except AttributeError:\n # For some reasone check_output doesn't exist\n # So fall back on Popen\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n# Parse the version from the rasterio module.\nwith open('rasterio/__init__.py') as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open('VERSION.txt', 'w') as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n cythonize = None\n\n# By default we'll try to get options via gdal-config. 
On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal_output = [None]*3\n\ntry:\n import numpy\n include_dirs.append(numpy.get_include())\nexcept ImportError:\n log.critical(\"Numpy and its headers are required to run setup(). Exiting.\")\n sys.exit(1)\n\ntry:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\")):\n gdal_output[i] = check_output([gdal_config, flag]).strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n\nexcept Exception as e:\n if os.name == \"nt\":\n log.info((\"Building on Windows requires extra options to setup.py to locate needed GDAL files.\\n\"\n \"More information is available in the README.\"))\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n\n# Conditionally copy the GDAL data. To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ.4 data.\n projdatadir = os.environ.get('PROJ_LIB', '/usr/local/share/proj')\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\next_options = dict(\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries,\n extra_link_args=extra_link_args)\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\n# When building from a repo, Cython is required.\nif os.path.exists(\"MANIFEST.in\") and \"clean\" not in sys.argv:\n log.info(\"MANIFEST.in found, presume a repo, cythonizing...\")\n if not cythonize:\n log.critical(\n \"Cython.Build.cythonize not found. 
\"\n \"Cython is required to build from a repo.\")\n sys.exit(1)\n ext_modules = cythonize([\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n ], quiet=True)\n\n# If there's no manifest template, as in an sdist, we just specify .c files.\nelse:\n ext_modules = [\n Extension(\n 'rasterio._base', ['rasterio/_base.c'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.c'], **ext_options),\n Extension(\n 'rasterio._copy', ['rasterio/_copy.c'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.c'], **ext_options),\n Extension(\n 'rasterio._drivers', ['rasterio/_drivers.c'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.cpp'], **ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.cpp', 'rasterio/rasterfill.cpp'], **ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.c'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.c'], **ext_options),\n ]\n\nwith open('README.rst') as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n 'affine>=1.0',\n 'cligj>=0.2.0',\n 'Numpy>=1.7',\n 'snuggs>=1.3.1']\n\nif sys.version_info < (3, 4):\n inst_reqs.append('enum34')\n\nsetup_args = dict(\n name='rasterio',\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: C',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Multimedia :: Graphics :: Graphics Conversion',\n 'Topic :: Scientific/Engineering :: GIS'],\n keywords='raster gdal',\n author='Sean Gillies',\n author_email='[email protected]',\n url='https://github.com/mapbox/rasterio',\n license='BSD',\n package_dir={'': '.'},\n packages=['rasterio', 'rasterio.rio'],\n entry_points='''\n [console_scripts]\n rio=rasterio.rio.main:main_group\n\n [rasterio.rio_commands]\n bounds=rasterio.rio.features:bounds\n calc=rasterio.rio.calc:calc\n convert=rasterio.rio.convert:convert\n edit-info=rasterio.rio.info:edit\n env=rasterio.rio.info:env\n info=rasterio.rio.info:info\n insp=rasterio.rio.info:insp\n mask=rasterio.rio.features:mask\n merge=rasterio.rio.merge:merge\n overview=rasterio.rio.overview:overview\n rasterize=rasterio.rio.features:rasterize\n sample=rasterio.rio.sample:sample\n shapes=rasterio.rio.features:shapes\n stack=rasterio.rio.bands:stack\n warp=rasterio.rio.warp:warp\n transform=rasterio.rio.info:transform\n ''',\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require={\n 'ipython': 
['ipython>=2.0']})\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}, {"content": "\"\"\"\nMain click group for CLI\n\"\"\"\n\n\nimport logging\nfrom pkg_resources import iter_entry_points\nimport sys\n\nimport click\nimport cligj\nimport cligj.plugins\n\nimport rasterio\n\n\ndef configure_logging(verbosity):\n log_level = max(10, 30 - 10*verbosity)\n logging.basicConfig(stream=sys.stderr, level=log_level)\n\n\[email protected](plugins=(\n ep for ep in list(iter_entry_points('rasterio.rio_commands')) +\n list(iter_entry_points('rasterio.rio_plugins'))))\[email protected]_opt\[email protected]_opt\[email protected]_option(version=rasterio.__version__, message='%(version)s')\[email protected]_context\ndef main_group(ctx, verbose, quiet):\n\n \"\"\"\n Rasterio command line interface.\n \"\"\"\n\n verbosity = verbose - quiet\n configure_logging(verbosity)\n ctx.obj = {}\n ctx.obj['verbosity'] = verbosity\n", "path": "rasterio/rio/main.py"}]}
| 3,898 | 337 |
gh_patches_debug_23752
|
rasdani/github-patches
|
git_diff
|
Azure__azure-cli-extensions-3136
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Transition to GA: databox
Command module `databox` has been released for a long time and uses the stable SDK version `2019-09-01`.
Please check the [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove the `experimental` tag if necessary.
</issue>
<code>
[start of src/databox/azext_databox/commands.py]
1 # --------------------------------------------------------------------------------------------
2 # Copyright (c) Microsoft Corporation. All rights reserved.
3 # Licensed under the MIT License. See License.txt in the project root for license information.
4 # --------------------------------------------------------------------------------------------
5
6 # pylint: disable=line-too-long
7 # pylint: disable=too-many-lines
8 # pylint: disable=too-many-statements
9 # pylint: disable=too-many-locals
10 from azext_databox._validators import validate_create_input_parameters
11 from azure.cli.core.commands import CliCommandType
12
13
14 def load_command_table(self, _):
15
16 from azext_databox._client_factory import cf_jobs
17 databox_jobs = CliCommandType(
18 operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',
19 client_factory=cf_jobs)
20 with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:
21 g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)
22 g.custom_command('update', 'update_databox_job')
23 g.custom_command('delete', 'delete_databox_job', confirmation=True)
24 g.custom_show_command('show', 'get_databox_job')
25 g.custom_command('list', 'list_databox_job')
26 g.custom_command('cancel', 'cancel_databox_job', confirmation=True)
27 g.custom_command('list-credentials', 'list_credentials_databox_job')
28
[end of src/databox/azext_databox/commands.py]
[start of src/databox/setup.py]
1 #!/usr/bin/env python
2
3 # --------------------------------------------------------------------------------------------
4 # Copyright (c) Microsoft Corporation. All rights reserved.
5 # Licensed under the MIT License. See License.txt in the project root for license information.
6 # --------------------------------------------------------------------------------------------
7
8
9 from codecs import open
10 from setuptools import setup, find_packages
11 try:
12 from azure_bdist_wheel import cmdclass
13 except ImportError:
14 from distutils import log as logger
15 logger.warn("Wheel is not available, disabling bdist_wheel hook")
16
17 # TODO: Confirm this is the right version number you want and it matches your
18 # HISTORY.rst entry.
19 VERSION = '0.1.0'
20
21 # The full list of classifiers is available at
22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers
23 CLASSIFIERS = [
24 'Development Status :: 4 - Beta',
25 'Intended Audience :: Developers',
26 'Intended Audience :: System Administrators',
27 'Programming Language :: Python',
28 'Programming Language :: Python :: 3',
29 'Programming Language :: Python :: 3.4',
30 'Programming Language :: Python :: 3.5',
31 'Programming Language :: Python :: 3.6',
32 'Programming Language :: Python :: 3.7',
33 'Programming Language :: Python :: 3.8',
34 'License :: OSI Approved :: MIT License',
35 ]
36
37 # TODO: Add any additional SDK dependencies here
38 DEPENDENCIES = []
39
40 with open('README.md', 'r', encoding='utf-8') as f:
41 README = f.read()
42 with open('HISTORY.rst', 'r', encoding='utf-8') as f:
43 HISTORY = f.read()
44
45 setup(
46 name='databox',
47 version=VERSION,
48 description='Microsoft Azure Command-Line Tools DataBox Extension',
49 # TODO: Update author and email, if applicable
50 author='Microsoft Corporation',
51 author_email='[email protected]',
52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',
53 long_description=README + '\n\n' + HISTORY,
54 license='MIT',
55 classifiers=CLASSIFIERS,
56 packages=find_packages(),
57 install_requires=DEPENDENCIES,
58 package_data={'azext_databox': ['azext_metadata.json']},
59 )
60
[end of src/databox/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py
--- a/src/databox/azext_databox/commands.py
+++ b/src/databox/azext_databox/commands.py
@@ -17,7 +17,7 @@
databox_jobs = CliCommandType(
operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',
client_factory=cf_jobs)
- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:
+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:
g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)
g.custom_command('update', 'update_databox_job')
g.custom_command('delete', 'delete_databox_job', confirmation=True)
diff --git a/src/databox/setup.py b/src/databox/setup.py
--- a/src/databox/setup.py
+++ b/src/databox/setup.py
@@ -16,7 +16,7 @@
# TODO: Confirm this is the right version number you want and it matches your
# HISTORY.rst entry.
-VERSION = '0.1.0'
+VERSION = '0.1.1'
# The full list of classifiers is available at
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
|
{"golden_diff": "diff --git a/src/databox/azext_databox/commands.py b/src/databox/azext_databox/commands.py\n--- a/src/databox/azext_databox/commands.py\n+++ b/src/databox/azext_databox/commands.py\n@@ -17,7 +17,7 @@\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n- with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n+ with self.command_group('databox job', databox_jobs, client_factory=cf_jobs) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\ndiff --git a/src/databox/setup.py b/src/databox/setup.py\n--- a/src/databox/setup.py\n+++ b/src/databox/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.0'\n+VERSION = '0.1.1'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "Transition to GA: databox\nCommand module `databox` has been released for a long time and is using stable sdk version `2019-09-01`.\r\n\r\nPlease check [Extension GA guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/onboarding_guide.md#preview-extension-to-ga-extension) and remove `experimental` tag if necessary.\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n# pylint: disable=too-many-locals\nfrom azext_databox._validators import validate_create_input_parameters\nfrom azure.cli.core.commands import CliCommandType\n\n\ndef load_command_table(self, _):\n\n from azext_databox._client_factory import cf_jobs\n databox_jobs = CliCommandType(\n operations_tmpl='azext_databox.vendored_sdks.databox.operations._jobs_operations#JobsOperations.{}',\n client_factory=cf_jobs)\n with self.command_group('databox job', databox_jobs, client_factory=cf_jobs, is_experimental=True) as g:\n g.custom_command('create', 'create_databox_job', validator=validate_create_input_parameters)\n g.custom_command('update', 'update_databox_job')\n g.custom_command('delete', 'delete_databox_job', confirmation=True)\n g.custom_show_command('show', 'get_databox_job')\n g.custom_command('list', 'list_databox_job')\n g.custom_command('cancel', 'cancel_databox_job', confirmation=True)\n g.custom_command('list-credentials', 'list_credentials_databox_job')\n", "path": "src/databox/azext_databox/commands.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.0'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='databox',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools DataBox Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/databox',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_databox': ['azext_metadata.json']},\n)\n", "path": "src/databox/setup.py"}]}
| 1,601 | 347 |
gh_patches_debug_5162
|
rasdani/github-patches
|
git_diff
|
pretix__pretix-558
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Voucher not being counted correctly
In the voucher by tag overview, the numbers sometimes just don't add up ;-)

So the progress bar is showing nothing, because its class is set to 200%. Okay, that's kinda expected to not work ;-)
Explanation for the numbers:
- 2: Two vouchers are created with this tag
- 4: Those two vouchers have been redeemed twice each.
So I guess we have to decide whether the overview should show the number of vouchers or the number of possible voucher redemptions.
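
As a rough standalone sketch (plain Python with made-up voucher dicts, not the actual Django queryset from the view), the arithmetic behind the screenshot looks like this:

```python
# Hypothetical data matching the screenshot: two vouchers with this tag,
# each allowed two redemptions and already fully redeemed.
vouchers = [
    {"tag": "demo", "max_usages": 2, "redeemed": 2},
    {"tag": "demo", "max_usages": 2, "redeemed": 2},
]

redeemed = sum(v["redeemed"] for v in vouchers)                         # 4
total_by_voucher_count = len(vouchers)                                  # 2
total_by_possible_redemptions = sum(v["max_usages"] for v in vouchers)  # 4

print(int(redeemed / total_by_voucher_count * 100))         # 200 -> overflows the progress bar
print(int(redeemed / total_by_possible_redemptions * 100))  # 100 -> stays within 0-100
```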
</issue>
<code>
[start of src/pretix/control/views/vouchers.py]
1 import csv
2 import io
3
4 from django.conf import settings
5 from django.contrib import messages
6 from django.core.urlresolvers import resolve, reverse
7 from django.db import transaction
8 from django.db.models import Count, Q, Sum
9 from django.http import (
10 Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,
11 JsonResponse,
12 )
13 from django.utils.timezone import now
14 from django.utils.translation import ugettext_lazy as _
15 from django.views.generic import (
16 CreateView, DeleteView, ListView, TemplateView, UpdateView, View,
17 )
18
19 from pretix.base.models import Voucher
20 from pretix.base.models.vouchers import _generate_random_code
21 from pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm
22 from pretix.control.permissions import EventPermissionRequiredMixin
23 from pretix.control.signals import voucher_form_class
24
25
26 class VoucherList(EventPermissionRequiredMixin, ListView):
27 model = Voucher
28 context_object_name = 'vouchers'
29 paginate_by = 30
30 template_name = 'pretixcontrol/vouchers/index.html'
31 permission = 'can_view_vouchers'
32
33 def get_queryset(self):
34 qs = self.request.event.vouchers.all().select_related('item', 'variation')
35 if self.request.GET.get("search", "") != "":
36 s = self.request.GET.get("search", "").strip()
37 qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))
38 if self.request.GET.get("tag", "") != "":
39 s = self.request.GET.get("tag", "")
40 qs = qs.filter(tag__icontains=s)
41 if self.request.GET.get("status", "") != "":
42 s = self.request.GET.get("status", "")
43 if s == 'v':
44 qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=0)
45 elif s == 'r':
46 qs = qs.filter(redeemed__gt=0)
47 elif s == 'e':
48 qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=0)
49 if self.request.GET.get("subevent", "") != "":
50 s = self.request.GET.get("subevent", "")
51 qs = qs.filter(subevent_id=s)
52 return qs
53
54 def get(self, request, *args, **kwargs):
55 if request.GET.get("download", "") == "yes":
56 return self._download_csv()
57 return super().get(request, *args, **kwargs)
58
59 def _download_csv(self):
60 output = io.StringIO()
61 writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=",")
62
63 headers = [
64 _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),
65 _('Price effect'), _('Value'), _('Tag'), _('Redeemed'), _('Maximum usages')
66 ]
67 writer.writerow(headers)
68
69 for v in self.get_queryset():
70 if v.item:
71 if v.variation:
72 prod = '%s – %s' % (str(v.item.name), str(v.variation.name))
73 else:
74 prod = '%s' % str(v.item.name)
75 elif v.quota:
76 prod = _('Any product in quota "{quota}"').format(quota=str(v.quota.name))
77 row = [
78 v.code,
79 v.valid_until.isoformat() if v.valid_until else "",
80 prod,
81 _("Yes") if v.block_quota else _("No"),
82 _("Yes") if v.allow_ignore_quota else _("No"),
83 v.get_price_mode_display(),
84 str(v.value) if v.value is not None else "",
85 v.tag,
86 str(v.redeemed),
87 str(v.max_usages)
88 ]
89 writer.writerow(row)
90
91 r = HttpResponse(output.getvalue().encode("utf-8"), content_type='text/csv')
92 r['Content-Disposition'] = 'attachment; filename="vouchers.csv"'
93 return r
94
95
96 class VoucherTags(EventPermissionRequiredMixin, TemplateView):
97 template_name = 'pretixcontrol/vouchers/tags.html'
98 permission = 'can_view_vouchers'
99
100 def get_context_data(self, **kwargs):
101 ctx = super().get_context_data(**kwargs)
102
103 tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(
104 total=Count('id'),
105 redeemed=Sum('redeemed')
106 )
107 for t in tags:
108 t['percentage'] = int((t['redeemed'] / t['total']) * 100)
109
110 ctx['tags'] = tags
111 return ctx
112
113
114 class VoucherDelete(EventPermissionRequiredMixin, DeleteView):
115 model = Voucher
116 template_name = 'pretixcontrol/vouchers/delete.html'
117 permission = 'can_change_vouchers'
118 context_object_name = 'voucher'
119
120 def get_object(self, queryset=None) -> Voucher:
121 try:
122 return self.request.event.vouchers.get(
123 id=self.kwargs['voucher']
124 )
125 except Voucher.DoesNotExist:
126 raise Http404(_("The requested voucher does not exist."))
127
128 def get(self, request, *args, **kwargs):
129 if self.get_object().redeemed > 0:
130 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))
131 return HttpResponseRedirect(self.get_success_url())
132 return super().get(request, *args, **kwargs)
133
134 @transaction.atomic
135 def delete(self, request, *args, **kwargs):
136 self.object = self.get_object()
137 success_url = self.get_success_url()
138
139 if self.object.redeemed > 0:
140 messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))
141 else:
142 self.object.log_action('pretix.voucher.deleted', user=self.request.user)
143 self.object.delete()
144 messages.success(request, _('The selected voucher has been deleted.'))
145 return HttpResponseRedirect(success_url)
146
147 def get_success_url(self) -> str:
148 return reverse('control:event.vouchers', kwargs={
149 'organizer': self.request.event.organizer.slug,
150 'event': self.request.event.slug,
151 })
152
153
154 class VoucherUpdate(EventPermissionRequiredMixin, UpdateView):
155 model = Voucher
156 template_name = 'pretixcontrol/vouchers/detail.html'
157 permission = 'can_change_vouchers'
158 context_object_name = 'voucher'
159
160 def get_form_class(self):
161 form_class = VoucherForm
162 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
163 if response:
164 form_class = response
165 return form_class
166
167 def get_object(self, queryset=None) -> VoucherForm:
168 url = resolve(self.request.path_info)
169 try:
170 return self.request.event.vouchers.get(
171 id=url.kwargs['voucher']
172 )
173 except Voucher.DoesNotExist:
174 raise Http404(_("The requested voucher does not exist."))
175
176 @transaction.atomic
177 def form_valid(self, form):
178 messages.success(self.request, _('Your changes have been saved.'))
179 if form.has_changed():
180 self.object.log_action(
181 'pretix.voucher.changed', user=self.request.user, data={
182 k: form.cleaned_data.get(k) for k in form.changed_data
183 }
184 )
185 return super().form_valid(form)
186
187 def get_success_url(self) -> str:
188 return reverse('control:event.vouchers', kwargs={
189 'organizer': self.request.event.organizer.slug,
190 'event': self.request.event.slug,
191 })
192
193
194 class VoucherCreate(EventPermissionRequiredMixin, CreateView):
195 model = Voucher
196 template_name = 'pretixcontrol/vouchers/detail.html'
197 permission = 'can_change_vouchers'
198 context_object_name = 'voucher'
199
200 def get_form_class(self):
201 form_class = VoucherForm
202 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
203 if response:
204 form_class = response
205 return form_class
206
207 def get_success_url(self) -> str:
208 return reverse('control:event.vouchers', kwargs={
209 'organizer': self.request.event.organizer.slug,
210 'event': self.request.event.slug,
211 })
212
213 def get_form_kwargs(self):
214 kwargs = super().get_form_kwargs()
215 kwargs['instance'] = Voucher(event=self.request.event)
216 return kwargs
217
218 @transaction.atomic
219 def form_valid(self, form):
220 form.instance.event = self.request.event
221 messages.success(self.request, _('The new voucher has been created: {code}').format(code=form.instance.code))
222 ret = super().form_valid(form)
223 form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)
224 return ret
225
226 def post(self, request, *args, **kwargs):
227 # TODO: Transform this into an asynchronous call?
228 with request.event.lock():
229 return super().post(request, *args, **kwargs)
230
231
232 class VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):
233 model = Voucher
234 template_name = 'pretixcontrol/vouchers/bulk.html'
235 permission = 'can_change_vouchers'
236 context_object_name = 'voucher'
237
238 def get_success_url(self) -> str:
239 return reverse('control:event.vouchers', kwargs={
240 'organizer': self.request.event.organizer.slug,
241 'event': self.request.event.slug,
242 })
243
244 def get_form_kwargs(self):
245 kwargs = super().get_form_kwargs()
246 kwargs['instance'] = Voucher(event=self.request.event)
247 return kwargs
248
249 @transaction.atomic
250 def form_valid(self, form):
251 for o in form.save(self.request.event):
252 o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)
253 messages.success(self.request, _('The new vouchers have been created.'))
254 return HttpResponseRedirect(self.get_success_url())
255
256 def get_form_class(self):
257 form_class = VoucherBulkForm
258 for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):
259 if response:
260 form_class = response
261 return form_class
262
263 def get_context_data(self, **kwargs):
264 ctx = super().get_context_data(**kwargs)
265 ctx['code_length'] = settings.ENTROPY['voucher_code']
266 return ctx
267
268 def post(self, request, *args, **kwargs):
269 # TODO: Transform this into an asynchronous call?
270 with request.event.lock():
271 return super().post(request, *args, **kwargs)
272
273
274 class VoucherRNG(EventPermissionRequiredMixin, View):
275 permission = 'can_change_vouchers'
276
277 def get(self, request, *args, **kwargs):
278 codes = set()
279 try:
280 num = int(request.GET.get('num', '5'))
281 except ValueError: # NOQA
282 return HttpResponseBadRequest()
283
284 prefix = request.GET.get('prefix')
285 while len(codes) < num:
286 new_codes = set()
287 for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER
288 new_codes.add(_generate_random_code(prefix=prefix))
289 new_codes -= set([v['code'] for v in Voucher.objects.filter(code__in=new_codes).values('code')])
290 codes |= new_codes
291
292 return JsonResponse({
293 'codes': list(codes)
294 })
295
296 def get_success_url(self) -> str:
297 return reverse('control:event.vouchers', kwargs={
298 'organizer': self.request.event.organizer.slug,
299 'event': self.request.event.slug,
300 })
301
[end of src/pretix/control/views/vouchers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py
--- a/src/pretix/control/views/vouchers.py
+++ b/src/pretix/control/views/vouchers.py
@@ -101,7 +101,7 @@
ctx = super().get_context_data(**kwargs)
tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(
- total=Count('id'),
+ total=Sum('max_usages'),
redeemed=Sum('redeemed')
)
for t in tags:
|
{"golden_diff": "diff --git a/src/pretix/control/views/vouchers.py b/src/pretix/control/views/vouchers.py\n--- a/src/pretix/control/views/vouchers.py\n+++ b/src/pretix/control/views/vouchers.py\n@@ -101,7 +101,7 @@\n ctx = super().get_context_data(**kwargs)\n \n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n- total=Count('id'),\n+ total=Sum('max_usages'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n", "issue": "Voucher not being counted correctly\nIn the voucher by tag overview, the number sometimes just don't add up ;-)\r\n\r\n\r\n\r\nSo the progressbar is showing nothing, because it's class is set to 200%. Okay, that's kinda expected to not work ;-)\r\n\r\nExplanation for the numbers:\r\n- 2: Two vouchers are created with this tag\r\n- 4: Those two vouchers have been redeem twice each.\r\n\r\nSo I guess we have to decide if the overview should show the amount of vouchers or the amount of possible voucher redemptions.\n", "before_files": [{"content": "import csv\nimport io\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.urlresolvers import resolve, reverse\nfrom django.db import transaction\nfrom django.db.models import Count, Q, Sum\nfrom django.http import (\n Http404, HttpResponse, HttpResponseBadRequest, HttpResponseRedirect,\n JsonResponse,\n)\nfrom django.utils.timezone import now\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (\n CreateView, DeleteView, ListView, TemplateView, UpdateView, View,\n)\n\nfrom pretix.base.models import Voucher\nfrom pretix.base.models.vouchers import _generate_random_code\nfrom pretix.control.forms.vouchers import VoucherBulkForm, VoucherForm\nfrom pretix.control.permissions import EventPermissionRequiredMixin\nfrom pretix.control.signals import voucher_form_class\n\n\nclass VoucherList(EventPermissionRequiredMixin, ListView):\n model = Voucher\n context_object_name = 'vouchers'\n paginate_by = 30\n template_name = 'pretixcontrol/vouchers/index.html'\n permission = 'can_view_vouchers'\n\n def get_queryset(self):\n qs = self.request.event.vouchers.all().select_related('item', 'variation')\n if self.request.GET.get(\"search\", \"\") != \"\":\n s = self.request.GET.get(\"search\", \"\").strip()\n qs = qs.filter(Q(code__icontains=s) | Q(tag__icontains=s) | Q(comment__icontains=s))\n if self.request.GET.get(\"tag\", \"\") != \"\":\n s = self.request.GET.get(\"tag\", \"\")\n qs = qs.filter(tag__icontains=s)\n if self.request.GET.get(\"status\", \"\") != \"\":\n s = self.request.GET.get(\"status\", \"\")\n if s == 'v':\n qs = qs.filter(Q(valid_until__isnull=True) | Q(valid_until__gt=now())).filter(redeemed=0)\n elif s == 'r':\n qs = qs.filter(redeemed__gt=0)\n elif s == 'e':\n qs = qs.filter(Q(valid_until__isnull=False) & Q(valid_until__lt=now())).filter(redeemed=0)\n if self.request.GET.get(\"subevent\", \"\") != \"\":\n s = self.request.GET.get(\"subevent\", \"\")\n qs = qs.filter(subevent_id=s)\n return qs\n\n def get(self, request, *args, **kwargs):\n if request.GET.get(\"download\", \"\") == \"yes\":\n return self._download_csv()\n return super().get(request, *args, **kwargs)\n\n def _download_csv(self):\n output = io.StringIO()\n writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC, delimiter=\",\")\n\n headers = [\n _('Voucher code'), _('Valid until'), _('Product'), _('Reserve quota'), _('Bypass quota'),\n _('Price effect'), _('Value'), _('Tag'), _('Redeemed'), _('Maximum usages')\n ]\n 
writer.writerow(headers)\n\n for v in self.get_queryset():\n if v.item:\n if v.variation:\n prod = '%s \u2013 %s' % (str(v.item.name), str(v.variation.name))\n else:\n prod = '%s' % str(v.item.name)\n elif v.quota:\n prod = _('Any product in quota \"{quota}\"').format(quota=str(v.quota.name))\n row = [\n v.code,\n v.valid_until.isoformat() if v.valid_until else \"\",\n prod,\n _(\"Yes\") if v.block_quota else _(\"No\"),\n _(\"Yes\") if v.allow_ignore_quota else _(\"No\"),\n v.get_price_mode_display(),\n str(v.value) if v.value is not None else \"\",\n v.tag,\n str(v.redeemed),\n str(v.max_usages)\n ]\n writer.writerow(row)\n\n r = HttpResponse(output.getvalue().encode(\"utf-8\"), content_type='text/csv')\n r['Content-Disposition'] = 'attachment; filename=\"vouchers.csv\"'\n return r\n\n\nclass VoucherTags(EventPermissionRequiredMixin, TemplateView):\n template_name = 'pretixcontrol/vouchers/tags.html'\n permission = 'can_view_vouchers'\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n\n tags = self.request.event.vouchers.order_by('tag').filter(tag__isnull=False).values('tag').annotate(\n total=Count('id'),\n redeemed=Sum('redeemed')\n )\n for t in tags:\n t['percentage'] = int((t['redeemed'] / t['total']) * 100)\n\n ctx['tags'] = tags\n return ctx\n\n\nclass VoucherDelete(EventPermissionRequiredMixin, DeleteView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/delete.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_object(self, queryset=None) -> Voucher:\n try:\n return self.request.event.vouchers.get(\n id=self.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n def get(self, request, *args, **kwargs):\n if self.get_object().redeemed > 0:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n return HttpResponseRedirect(self.get_success_url())\n return super().get(request, *args, **kwargs)\n\n @transaction.atomic\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n success_url = self.get_success_url()\n\n if self.object.redeemed > 0:\n messages.error(request, _('A voucher can not be deleted if it already has been redeemed.'))\n else:\n self.object.log_action('pretix.voucher.deleted', user=self.request.user)\n self.object.delete()\n messages.success(request, _('The selected voucher has been deleted.'))\n return HttpResponseRedirect(success_url)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherUpdate(EventPermissionRequiredMixin, UpdateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_object(self, queryset=None) -> VoucherForm:\n url = resolve(self.request.path_info)\n try:\n return self.request.event.vouchers.get(\n id=url.kwargs['voucher']\n )\n except Voucher.DoesNotExist:\n raise Http404(_(\"The requested voucher does not exist.\"))\n\n @transaction.atomic\n def form_valid(self, form):\n messages.success(self.request, _('Your changes have been saved.'))\n if form.has_changed():\n self.object.log_action(\n 
'pretix.voucher.changed', user=self.request.user, data={\n k: form.cleaned_data.get(k) for k in form.changed_data\n }\n )\n return super().form_valid(form)\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n\nclass VoucherCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/detail.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_form_class(self):\n form_class = VoucherForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n messages.success(self.request, _('The new voucher has been created: {code}').format(code=form.instance.code))\n ret = super().form_valid(form)\n form.instance.log_action('pretix.voucher.added', data=dict(form.cleaned_data), user=self.request.user)\n return ret\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherBulkCreate(EventPermissionRequiredMixin, CreateView):\n model = Voucher\n template_name = 'pretixcontrol/vouchers/bulk.html'\n permission = 'can_change_vouchers'\n context_object_name = 'voucher'\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['instance'] = Voucher(event=self.request.event)\n return kwargs\n\n @transaction.atomic\n def form_valid(self, form):\n for o in form.save(self.request.event):\n o.log_action('pretix.voucher.added', data=form.cleaned_data, user=self.request.user)\n messages.success(self.request, _('The new vouchers have been created.'))\n return HttpResponseRedirect(self.get_success_url())\n\n def get_form_class(self):\n form_class = VoucherBulkForm\n for receiver, response in voucher_form_class.send(self.request.event, cls=form_class):\n if response:\n form_class = response\n return form_class\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n ctx['code_length'] = settings.ENTROPY['voucher_code']\n return ctx\n\n def post(self, request, *args, **kwargs):\n # TODO: Transform this into an asynchronous call?\n with request.event.lock():\n return super().post(request, *args, **kwargs)\n\n\nclass VoucherRNG(EventPermissionRequiredMixin, View):\n permission = 'can_change_vouchers'\n\n def get(self, request, *args, **kwargs):\n codes = set()\n try:\n num = int(request.GET.get('num', '5'))\n except ValueError: # NOQA\n return HttpResponseBadRequest()\n\n prefix = request.GET.get('prefix')\n while len(codes) < num:\n new_codes = set()\n for i in range(min(num - len(codes), 500)): # Work around SQLite's SQLITE_MAX_VARIABLE_NUMBER\n new_codes.add(_generate_random_code(prefix=prefix))\n new_codes -= set([v['code'] for v in 
Voucher.objects.filter(code__in=new_codes).values('code')])\n codes |= new_codes\n\n return JsonResponse({\n 'codes': list(codes)\n })\n\n def get_success_url(self) -> str:\n return reverse('control:event.vouchers', kwargs={\n 'organizer': self.request.event.organizer.slug,\n 'event': self.request.event.slug,\n })\n", "path": "src/pretix/control/views/vouchers.py"}]}
| 3,991 | 134 |
gh_patches_debug_37030
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2713
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
yum.repos.d parser is too tolerant
The yum.repos.d parser will accept almost anything, including binary data.
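
To illustrate the point with a toy string (this is not the insights-core parser itself, just the same split-on-'=' idea placed next to a stricter INI parser for comparison):

```python
import configparser

junk = "\x00\x7f this is not a repo file\njust = maybe\n"

# Naive approach similar in spirit to the current parser: any line containing '='
# silently becomes a key/value pair, so binary noise goes unnoticed.
naive = {}
for line in junk.splitlines():
    if "=" in line:
        key, value = line.split("=", 1)
        naive[key.strip()] = value.strip()
print(naive)  # {'just': 'maybe'}

# A stricter parser refuses input that is not INI-shaped at all.
strict = configparser.ConfigParser()
try:
    strict.read_string(junk)
except configparser.Error as exc:
    print("rejected:", exc)
```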
</issue>
<code>
[start of insights/parsers/yum_repos_d.py]
1 from .. import Parser, parser, get_active_lines, LegacyItemAccess
2 from insights.specs import Specs
3
4
5 @parser(Specs.yum_repos_d)
6 class YumReposD(LegacyItemAccess, Parser):
7 """Class to parse the files under ``yum.repos.d`` """
8
9 def get(self, key):
10 return self.data.get(key)
11
12 def parse_content(self, content):
13 '''
14 Return an object contains a dict.
15 {
16 "rhel-source": {
17 "gpgcheck": "1",
18 "gpgkey": ["file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release",
19 "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak"]
20 "enabled": "0",
21 "name": "Red Hat Enterprise Linux $releasever - $basearch - Source",
22 "baseurl": "ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/"
23 }
24 }
25 ----------------------------------------------------
26 There are several files in 'yum.repos.d' directory, which have the same
27 format. For example:
28 --------one of the files : rhel-source.repo---------
29 [rhel-source]
30 name=Red Hat Enterprise Linux $releasever - $basearch - Source
31 baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/
32 enabled=0
33 gpgcheck=1
34 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
35 file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
36 '''
37 repos_dict = {}
38 section_dict = {}
39 key = None
40 for line in get_active_lines(content):
41 if line.startswith('['):
42 section_dict = {}
43 repos_dict[line[1:-1]] = section_dict
44 elif '=' in line:
45 key, value = [s.strip() for s in line.split("=", 1)]
46 if key in ('baseurl', 'gpgkey'):
47 section_dict[key] = [v.strip() for v in value.split(",")]
48 else:
49 section_dict[key] = value
50 else:
51 if key and isinstance(section_dict[key], list):
52 section_dict[key].extend(v.strip() for v in line.split(","))
53 # Otherwise ignore line if no key or we don't store multiple values
54 self.data = repos_dict
55
56 def __iter__(self):
57 for repo in self.data:
58 yield repo
59
[end of insights/parsers/yum_repos_d.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py
--- a/insights/parsers/yum_repos_d.py
+++ b/insights/parsers/yum_repos_d.py
@@ -1,5 +1,39 @@
-from .. import Parser, parser, get_active_lines, LegacyItemAccess
+import re
+import string
+
+from .. import Parser, parser, LegacyItemAccess
from insights.specs import Specs
+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,
+ skip_none, String, WithIndent, WS)
+
+
+header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ")
+sep_chars = set(":=")
+key_chars = header_chars - sep_chars - set(" ")
+value_chars = set(string.printable) - set("\n\r")
+
+LeftEnd = WS >> Char("[") << WS
+RightEnd = WS >> Char("]") << WS
+Header = LeftEnd >> String(header_chars) << RightEnd
+Key = WS >> String(key_chars) << WS
+Sep = InSet(sep_chars)
+Value = WS >> HangingString(value_chars)
+KVPair = WithIndent(Key + Opt(Sep >> Value))
+Comment = WS >> (OneLineComment("#") | OneLineComment(";")).map(lambda x: None)
+
+Line = Comment | KVPair.map(tuple)
+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)
+Doc = Many(Comment | Sect).map(skip_none).map(dict)
+Top = Doc << WS << EOF
+
+
+def parse_yum_repos(content):
+ doc = Top(content)
+ for k, v in doc.items():
+ for special in ("baseurl", "gpgkey"):
+ if special in v:
+ v[special] = [i.strip() for i in re.split(",| ", v[special])]
+ return doc
@parser(Specs.yum_repos_d)
@@ -10,7 +44,7 @@
return self.data.get(key)
def parse_content(self, content):
- '''
+ """
Return an object contains a dict.
{
"rhel-source": {
@@ -33,25 +67,8 @@
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
- '''
- repos_dict = {}
- section_dict = {}
- key = None
- for line in get_active_lines(content):
- if line.startswith('['):
- section_dict = {}
- repos_dict[line[1:-1]] = section_dict
- elif '=' in line:
- key, value = [s.strip() for s in line.split("=", 1)]
- if key in ('baseurl', 'gpgkey'):
- section_dict[key] = [v.strip() for v in value.split(",")]
- else:
- section_dict[key] = value
- else:
- if key and isinstance(section_dict[key], list):
- section_dict[key].extend(v.strip() for v in line.split(","))
- # Otherwise ignore line if no key or we don't store multiple values
- self.data = repos_dict
+ """
+ self.data = parse_yum_repos("\n".join(content))
def __iter__(self):
for repo in self.data:
|
{"golden_diff": "diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py\n--- a/insights/parsers/yum_repos_d.py\n+++ b/insights/parsers/yum_repos_d.py\n@@ -1,5 +1,39 @@\n-from .. import Parser, parser, get_active_lines, LegacyItemAccess\n+import re\n+import string\n+\n+from .. import Parser, parser, LegacyItemAccess\n from insights.specs import Specs\n+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,\n+ skip_none, String, WithIndent, WS)\n+\n+\n+header_chars = (set(string.printable) - set(string.whitespace) - set(\"[]\")) | set(\" \")\n+sep_chars = set(\":=\")\n+key_chars = header_chars - sep_chars - set(\" \")\n+value_chars = set(string.printable) - set(\"\\n\\r\")\n+\n+LeftEnd = WS >> Char(\"[\") << WS\n+RightEnd = WS >> Char(\"]\") << WS\n+Header = LeftEnd >> String(header_chars) << RightEnd\n+Key = WS >> String(key_chars) << WS\n+Sep = InSet(sep_chars)\n+Value = WS >> HangingString(value_chars)\n+KVPair = WithIndent(Key + Opt(Sep >> Value))\n+Comment = WS >> (OneLineComment(\"#\") | OneLineComment(\";\")).map(lambda x: None)\n+\n+Line = Comment | KVPair.map(tuple)\n+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)\n+Doc = Many(Comment | Sect).map(skip_none).map(dict)\n+Top = Doc << WS << EOF\n+\n+\n+def parse_yum_repos(content):\n+ doc = Top(content)\n+ for k, v in doc.items():\n+ for special in (\"baseurl\", \"gpgkey\"):\n+ if special in v:\n+ v[special] = [i.strip() for i in re.split(\",| \", v[special])]\n+ return doc\n \n \n @parser(Specs.yum_repos_d)\n@@ -10,7 +44,7 @@\n return self.data.get(key)\n \n def parse_content(self, content):\n- '''\n+ \"\"\"\n Return an object contains a dict.\n {\n \"rhel-source\": {\n@@ -33,25 +67,8 @@\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n- '''\n- repos_dict = {}\n- section_dict = {}\n- key = None\n- for line in get_active_lines(content):\n- if line.startswith('['):\n- section_dict = {}\n- repos_dict[line[1:-1]] = section_dict\n- elif '=' in line:\n- key, value = [s.strip() for s in line.split(\"=\", 1)]\n- if key in ('baseurl', 'gpgkey'):\n- section_dict[key] = [v.strip() for v in value.split(\",\")]\n- else:\n- section_dict[key] = value\n- else:\n- if key and isinstance(section_dict[key], list):\n- section_dict[key].extend(v.strip() for v in line.split(\",\"))\n- # Otherwise ignore line if no key or we don't store multiple values\n- self.data = repos_dict\n+ \"\"\"\n+ self.data = parse_yum_repos(\"\\n\".join(content))\n \n def __iter__(self):\n for repo in self.data:\n", "issue": "yum.repos.d parser is too tolerant\nThe yum repos d parser will accept almost anything, including binary.\n", "before_files": [{"content": "from .. 
import Parser, parser, get_active_lines, LegacyItemAccess\nfrom insights.specs import Specs\n\n\n@parser(Specs.yum_repos_d)\nclass YumReposD(LegacyItemAccess, Parser):\n \"\"\"Class to parse the files under ``yum.repos.d`` \"\"\"\n\n def get(self, key):\n return self.data.get(key)\n\n def parse_content(self, content):\n '''\n Return an object contains a dict.\n {\n \"rhel-source\": {\n \"gpgcheck\": \"1\",\n \"gpgkey\": [\"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\",\n \"file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\"]\n \"enabled\": \"0\",\n \"name\": \"Red Hat Enterprise Linux $releasever - $basearch - Source\",\n \"baseurl\": \"ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\"\n }\n }\n ----------------------------------------------------\n There are several files in 'yum.repos.d' directory, which have the same\n format. For example:\n --------one of the files : rhel-source.repo---------\n [rhel-source]\n name=Red Hat Enterprise Linux $releasever - $basearch - Source\n baseurl=ftp://ftp.redhat.com/pub/redhat/linux/enterprise/$releasever/en/os/SRPMS/\n enabled=0\n gpgcheck=1\n gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release\n file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak\n '''\n repos_dict = {}\n section_dict = {}\n key = None\n for line in get_active_lines(content):\n if line.startswith('['):\n section_dict = {}\n repos_dict[line[1:-1]] = section_dict\n elif '=' in line:\n key, value = [s.strip() for s in line.split(\"=\", 1)]\n if key in ('baseurl', 'gpgkey'):\n section_dict[key] = [v.strip() for v in value.split(\",\")]\n else:\n section_dict[key] = value\n else:\n if key and isinstance(section_dict[key], list):\n section_dict[key].extend(v.strip() for v in line.split(\",\"))\n # Otherwise ignore line if no key or we don't store multiple values\n self.data = repos_dict\n\n def __iter__(self):\n for repo in self.data:\n yield repo\n", "path": "insights/parsers/yum_repos_d.py"}]}
| 1,231 | 801 |
gh_patches_debug_16524
|
rasdani/github-patches
|
git_diff
|
keras-team__autokeras-325
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Temp folder conflict
<!---
**If you are reporting a bug:**
* Verify that your issue is not being currently addressed by other issues or pull requests.
* Please note that Auto-Keras is only compatible with **Python 3.6**.
* Tag the issue with the `bug report` tag.
-->
### Bug Description
<!---
-->
The generated temp folder will conflict when multiple users on the same server use the package.
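
A rough standalone sketch of two possible ways to avoid the collision (illustrative only; the project may prefer a different scheme, such as appending a random suffix):

```python
import getpass
import os
import tempfile

# A single shared path such as /tmp/autokeras is owned by whichever user creates it
# first, so other users on the same machine run into permission errors.

# Option 1: scope the directory by user name.
per_user = os.path.join(tempfile.gettempdir(), "autokeras_" + getpass.getuser())
os.makedirs(per_user, exist_ok=True)

# Option 2: let tempfile generate a fresh, uniquely named directory per run.
unique = tempfile.mkdtemp(prefix="autokeras_")

print(per_user)
print(unique)
```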
</issue>
<code>
[start of autokeras/utils.py]
1 import csv
2 import os
3 import pickle
4 import sys
5 import tempfile
6 import zipfile
7
8 import warnings
9 import imageio
10 import numpy
11 import requests
12 from skimage.transform import resize
13 import torch
14 import subprocess
15
16 from autokeras.constant import Constant
17
18
19 class NoImprovementError(Exception):
20 def __init__(self, message):
21 self.message = message
22
23
24 def ensure_dir(directory):
25 """Create directory if it does not exist."""
26 if not os.path.exists(directory):
27 os.makedirs(directory)
28
29
30 def ensure_file_dir(path):
31 """Create path if it does not exist."""
32 ensure_dir(os.path.dirname(path))
33
34
35 def has_file(path):
36 """Check if the given path exists."""
37 return os.path.exists(path)
38
39
40 def pickle_from_file(path):
41 """Load the pickle file from the provided path and returns the object."""
42 return pickle.load(open(path, 'rb'))
43
44
45 def pickle_to_file(obj, path):
46 """Save the pickle file to the specified path."""
47 pickle.dump(obj, open(path, 'wb'))
48
49
50 def get_device():
51 """ If CUDA is available, use CUDA device, else use CPU device.
52
53 When choosing from CUDA devices, this function will choose the one with max memory available.
54
55 Returns: string device name.
56 """
57 # TODO: could use gputil in the future
58 device = 'cpu'
59 if torch.cuda.is_available():
60 try:
61 # smi_out=
62 # Free : xxxxxx MiB
63 # Free : xxxxxx MiB
64 # ....
65 smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)
66 if isinstance(smi_out, bytes):
67 smi_out = smi_out.decode('utf-8')
68 print(smi_out)
69 except subprocess.SubprocessError:
70 warnings.warn('Cuda device successfully detected. However, nvidia-smi cannot be invoked')
71 return 'cpu'
72 visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')
73 if len(visible_devices) == 1 and visible_devices[0] == '':
74 visible_devices = []
75 visible_devices = [int(x) for x in visible_devices]
76 memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]
77 for cuda_index, _ in enumerate(memory_available):
78 if cuda_index not in visible_devices and visible_devices:
79 memory_available[cuda_index] = 0
80
81 if memory_available:
82 if max(memory_available) != 0:
83 device = 'cuda:' + str(memory_available.index(max(memory_available)))
84 return device
85
86
87 def temp_folder_generator():
88 """Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras)."""
89 sys_temp = tempfile.gettempdir()
90 path = os.path.join(sys_temp, 'autokeras')
91 ensure_dir(path)
92 return path
93
94
95 def download_file(file_link, file_path):
96 """Download the file specified in `file_link` and saves it in `file_path`."""
97 if not os.path.exists(file_path):
98 with open(file_path, "wb") as f:
99 print("Downloading %s" % file_path)
100 response = requests.get(file_link, stream=True)
101 total_length = response.headers.get('content-length')
102
103 if total_length is None: # no content length header
104 f.write(response.content)
105 else:
106 dl = 0
107 total_length = int(total_length)
108 for data in response.iter_content(chunk_size=4096):
109 dl += len(data)
110 f.write(data)
111 done = int(50 * dl / total_length)
112 sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
113 sys.stdout.flush()
114
115
116 def download_file_with_extract(file_link, file_path, extract_path):
117 """Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`."""
118 if not os.path.exists(extract_path):
119 download_file(file_link, file_path)
120 zip_ref = zipfile.ZipFile(file_path, 'r')
121 print("extracting downloaded file...")
122 zip_ref.extractall(extract_path)
123 os.remove(file_path)
124 print("extracted and removed downloaded zip file")
125 print("file already extracted in the path %s" % extract_path)
126
127
128 def verbose_print(new_father_id, new_graph):
129 """Print information about the operation performed on father model to obtain current model and father's id."""
130 cell_size = [24, 49]
131 header = ['Father Model ID', 'Added Operation']
132 line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header))
133 print('\n' + '+' + '-' * len(line) + '+')
134 print('|' + line + '|')
135 print('+' + '-' * len(line) + '+')
136 for i in range(len(new_graph.operation_history)):
137 if i == len(new_graph.operation_history) // 2:
138 r = [new_father_id, new_graph.operation_history[i]]
139 else:
140 r = [' ', new_graph.operation_history[i]]
141 line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r))
142 print('|' + line + '|')
143 print('+' + '-' * len(line) + '+')
144
145
146 def validate_xy(x_train, y_train):
147 """Validate `x_train`'s type and the shape of `x_train`, `y_train`."""
148 try:
149 x_train = x_train.astype('float64')
150 except ValueError:
151 raise ValueError('x_train should only contain numerical data.')
152
153 if len(x_train.shape) < 2:
154 raise ValueError('x_train should at least has 2 dimensions.')
155
156 if x_train.shape[0] != y_train.shape[0]:
157 raise ValueError('x_train and y_train should have the same number of instances.')
158
159
160 def read_csv_file(csv_file_path):
161 """Read the csv file and returns two separate list containing file names and their labels.
162
163 Args:
164 csv_file_path: Path to the CSV file.
165
166 Returns:
167 file_names: List containing files names.
168 file_label: List containing their respective labels.
169 """
170 file_names = []
171 file_labels = []
172 with open(csv_file_path, 'r') as files_path:
173 path_list = csv.DictReader(files_path)
174 fieldnames = path_list.fieldnames
175 for path in path_list:
176 file_names.append(path[fieldnames[0]])
177 file_labels.append(path[fieldnames[1]])
178 return file_names, file_labels
179
180
181 def read_image(img_path):
182 """Read the image contained in the provided path `image_path`."""
183 img = imageio.imread(uri=img_path)
184 return img
185
186
187 def compute_image_resize_params(data):
188 """Compute median height and width of all images in data.
189
190 These values are used to resize the images at later point. Number of channels do not change from the original
191 images. Currently, only 2-D images are supported.
192
193 Args:
194 data: 2-D Image data with shape N x H x W x C.
195
196 Returns:
197 median height: Median height of all images in the data.
198 median width: Median width of all images in the data.
199 """
200 median_height, median_width = numpy.median(numpy.array(list(map(lambda x: x.shape, data))), axis=0)[:2]
201
202 if median_height * median_width > Constant.MAX_IMAGE_SIZE:
203 reduction_factor = numpy.sqrt(median_height * median_width / Constant.MAX_IMAGE_SIZE)
204 median_height = median_height / reduction_factor
205 median_width = median_width / reduction_factor
206
207 return int(median_height), int(median_width)
208
209
210 def resize_image_data(data, height, weight):
211 """Resize images to provided height and width.
212
213 Resize all images in data to size h x w x c, where h is the height, w is the width and c is the number of channels.
214 The number of channels c does not change from data. The function supports only 2-D image data.
215
216 Args:
217 data: 2-D Image data with shape N x H x W x C.
218 h: Image resize height.
219 w: Image resize width.
220
221 Returns:
222 data: Resize data.
223 """
224
225 output_data = []
226 for im in data:
227 if len(im.shape) != 3:
228 return data
229 output_data.append(resize(image=im,
230 output_shape=(height, weight, im.shape[-1]),
231 mode='edge',
232 preserve_range=True))
233
234 return numpy.array(output_data)
235
236
237 def get_system():
238 """Get the current system environment. If the current system is not supported, raise an exception.
239
240 Returns:
241 A string to represent the current OS name.
242 "posix" stands for Linux, Mac or Solaris architecture.
243 "nt" stands for Windows system.
244 """
245 print(os.name)
246 if 'google.colab' in sys.modules:
247 return Constant.SYS_GOOGLE_COLAB
248 if os.name == 'posix':
249 return Constant.SYS_LINUX
250 if os.name == 'nt':
251 return Constant.SYS_WINDOWS
252 raise EnvironmentError('Unsupported environment')
253
[end of autokeras/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/autokeras/utils.py b/autokeras/utils.py
--- a/autokeras/utils.py
+++ b/autokeras/utils.py
@@ -12,7 +12,8 @@
from skimage.transform import resize
import torch
import subprocess
-
+import string
+import random
from autokeras.constant import Constant
@@ -86,8 +87,11 @@
def temp_folder_generator():
"""Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras)."""
+ chars = string.ascii_uppercase + string.digits
+ size = 6
sys_temp = tempfile.gettempdir()
- path = os.path.join(sys_temp, 'autokeras')
+ random_suffix = ''.join(random.choice(chars) for _ in range(size))
+ path = os.path.join(sys_temp, 'autokeras_' + random_suffix)
ensure_dir(path)
return path
|
{"golden_diff": "diff --git a/autokeras/utils.py b/autokeras/utils.py\n--- a/autokeras/utils.py\n+++ b/autokeras/utils.py\n@@ -12,7 +12,8 @@\n from skimage.transform import resize\n import torch\n import subprocess\n-\n+import string\n+import random\n from autokeras.constant import Constant\n \n \n@@ -86,8 +87,11 @@\n \n def temp_folder_generator():\n \"\"\"Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).\"\"\"\n+ chars = string.ascii_uppercase + string.digits\n+ size = 6\n sys_temp = tempfile.gettempdir()\n- path = os.path.join(sys_temp, 'autokeras')\n+ random_suffix = ''.join(random.choice(chars) for _ in range(size))\n+ path = os.path.join(sys_temp, 'autokeras_' + random_suffix)\n ensure_dir(path)\n return path\n", "issue": "Temp folder conflict\n<!---\r\n**If you are reporting a bug:**\r\n* Verify that your issue is not being currently addressed by other issues or pull requests.\r\n* Please note that Auto-Keras is only compatible with **Python 3.6**.\r\n* Tag the issue with the `bug report` tag.\r\n-->\r\n\r\n### Bug Description\r\n<!---\r\n\r\n-->\r\nTemp folder generated will be conflict if one server has multiple users use the package.\r\n\r\n\n", "before_files": [{"content": "import csv\nimport os\nimport pickle\nimport sys\nimport tempfile\nimport zipfile\n\nimport warnings\nimport imageio\nimport numpy\nimport requests\nfrom skimage.transform import resize\nimport torch\nimport subprocess\n\nfrom autokeras.constant import Constant\n\n\nclass NoImprovementError(Exception):\n def __init__(self, message):\n self.message = message\n\n\ndef ensure_dir(directory):\n \"\"\"Create directory if it does not exist.\"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef ensure_file_dir(path):\n \"\"\"Create path if it does not exist.\"\"\"\n ensure_dir(os.path.dirname(path))\n\n\ndef has_file(path):\n \"\"\"Check if the given path exists.\"\"\"\n return os.path.exists(path)\n\n\ndef pickle_from_file(path):\n \"\"\"Load the pickle file from the provided path and returns the object.\"\"\"\n return pickle.load(open(path, 'rb'))\n\n\ndef pickle_to_file(obj, path):\n \"\"\"Save the pickle file to the specified path.\"\"\"\n pickle.dump(obj, open(path, 'wb'))\n\n\ndef get_device():\n \"\"\" If CUDA is available, use CUDA device, else use CPU device.\n\n When choosing from CUDA devices, this function will choose the one with max memory available.\n\n Returns: string device name.\n \"\"\"\n # TODO: could use gputil in the future\n device = 'cpu'\n if torch.cuda.is_available():\n try:\n # smi_out=\n # Free : xxxxxx MiB\n # Free : xxxxxx MiB\n # ....\n smi_out = subprocess.check_output('nvidia-smi -q -d Memory | grep -A4 GPU|grep Free', shell=True)\n if isinstance(smi_out, bytes):\n smi_out = smi_out.decode('utf-8')\n print(smi_out)\n except subprocess.SubprocessError:\n warnings.warn('Cuda device successfully detected. 
However, nvidia-smi cannot be invoked')\n return 'cpu'\n visible_devices = os.getenv('CUDA_VISIBLE_DEVICES', '').split(',')\n if len(visible_devices) == 1 and visible_devices[0] == '':\n visible_devices = []\n visible_devices = [int(x) for x in visible_devices]\n memory_available = [int(x.split()[2]) for x in smi_out.splitlines()]\n for cuda_index, _ in enumerate(memory_available):\n if cuda_index not in visible_devices and visible_devices:\n memory_available[cuda_index] = 0\n\n if memory_available:\n if max(memory_available) != 0:\n device = 'cuda:' + str(memory_available.index(max(memory_available)))\n return device\n\n\ndef temp_folder_generator():\n \"\"\"Create and return a temporary directory with the path name '/temp_dir_name/autokeras' (E:g:- /tmp/autokeras).\"\"\"\n sys_temp = tempfile.gettempdir()\n path = os.path.join(sys_temp, 'autokeras')\n ensure_dir(path)\n return path\n\n\ndef download_file(file_link, file_path):\n \"\"\"Download the file specified in `file_link` and saves it in `file_path`.\"\"\"\n if not os.path.exists(file_path):\n with open(file_path, \"wb\") as f:\n print(\"Downloading %s\" % file_path)\n response = requests.get(file_link, stream=True)\n total_length = response.headers.get('content-length')\n\n if total_length is None: # no content length header\n f.write(response.content)\n else:\n dl = 0\n total_length = int(total_length)\n for data in response.iter_content(chunk_size=4096):\n dl += len(data)\n f.write(data)\n done = int(50 * dl / total_length)\n sys.stdout.write(\"\\r[%s%s]\" % ('=' * done, ' ' * (50 - done)))\n sys.stdout.flush()\n\n\ndef download_file_with_extract(file_link, file_path, extract_path):\n \"\"\"Download the file specified in `file_link`, save to `file_path` and extract to the directory `extract_path`.\"\"\"\n if not os.path.exists(extract_path):\n download_file(file_link, file_path)\n zip_ref = zipfile.ZipFile(file_path, 'r')\n print(\"extracting downloaded file...\")\n zip_ref.extractall(extract_path)\n os.remove(file_path)\n print(\"extracted and removed downloaded zip file\")\n print(\"file already extracted in the path %s\" % extract_path)\n\n\ndef verbose_print(new_father_id, new_graph):\n \"\"\"Print information about the operation performed on father model to obtain current model and father's id.\"\"\"\n cell_size = [24, 49]\n header = ['Father Model ID', 'Added Operation']\n line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(header))\n print('\\n' + '+' + '-' * len(line) + '+')\n print('|' + line + '|')\n print('+' + '-' * len(line) + '+')\n for i in range(len(new_graph.operation_history)):\n if i == len(new_graph.operation_history) // 2:\n r = [new_father_id, new_graph.operation_history[i]]\n else:\n r = [' ', new_graph.operation_history[i]]\n line = '|'.join(str(x).center(cell_size[i]) for i, x in enumerate(r))\n print('|' + line + '|')\n print('+' + '-' * len(line) + '+')\n\n\ndef validate_xy(x_train, y_train):\n \"\"\"Validate `x_train`'s type and the shape of `x_train`, `y_train`.\"\"\"\n try:\n x_train = x_train.astype('float64')\n except ValueError:\n raise ValueError('x_train should only contain numerical data.')\n\n if len(x_train.shape) < 2:\n raise ValueError('x_train should at least has 2 dimensions.')\n\n if x_train.shape[0] != y_train.shape[0]:\n raise ValueError('x_train and y_train should have the same number of instances.')\n\n\ndef read_csv_file(csv_file_path):\n \"\"\"Read the csv file and returns two separate list containing file names and their labels.\n\n Args:\n csv_file_path: Path to the 
CSV file.\n\n Returns:\n file_names: List containing files names.\n file_label: List containing their respective labels.\n \"\"\"\n file_names = []\n file_labels = []\n with open(csv_file_path, 'r') as files_path:\n path_list = csv.DictReader(files_path)\n fieldnames = path_list.fieldnames\n for path in path_list:\n file_names.append(path[fieldnames[0]])\n file_labels.append(path[fieldnames[1]])\n return file_names, file_labels\n\n\ndef read_image(img_path):\n \"\"\"Read the image contained in the provided path `image_path`.\"\"\"\n img = imageio.imread(uri=img_path)\n return img\n\n\ndef compute_image_resize_params(data):\n \"\"\"Compute median height and width of all images in data.\n\n These values are used to resize the images at later point. Number of channels do not change from the original\n images. Currently, only 2-D images are supported.\n\n Args:\n data: 2-D Image data with shape N x H x W x C.\n\n Returns:\n median height: Median height of all images in the data.\n median width: Median width of all images in the data.\n \"\"\"\n median_height, median_width = numpy.median(numpy.array(list(map(lambda x: x.shape, data))), axis=0)[:2]\n\n if median_height * median_width > Constant.MAX_IMAGE_SIZE:\n reduction_factor = numpy.sqrt(median_height * median_width / Constant.MAX_IMAGE_SIZE)\n median_height = median_height / reduction_factor\n median_width = median_width / reduction_factor\n\n return int(median_height), int(median_width)\n\n\ndef resize_image_data(data, height, weight):\n \"\"\"Resize images to provided height and width.\n\n Resize all images in data to size h x w x c, where h is the height, w is the width and c is the number of channels.\n The number of channels c does not change from data. The function supports only 2-D image data.\n\n Args:\n data: 2-D Image data with shape N x H x W x C.\n h: Image resize height.\n w: Image resize width.\n\n Returns:\n data: Resize data.\n \"\"\"\n\n output_data = []\n for im in data:\n if len(im.shape) != 3:\n return data\n output_data.append(resize(image=im,\n output_shape=(height, weight, im.shape[-1]),\n mode='edge',\n preserve_range=True))\n\n return numpy.array(output_data)\n\n\ndef get_system():\n \"\"\"Get the current system environment. If the current system is not supported, raise an exception.\n\n Returns:\n A string to represent the current OS name.\n \"posix\" stands for Linux, Mac or Solaris architecture.\n \"nt\" stands for Windows system.\n \"\"\"\n print(os.name)\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n raise EnvironmentError('Unsupported environment')\n", "path": "autokeras/utils.py"}]}
| 3,282 | 219 |
gh_patches_debug_43147
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-3328
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In-App for Cocoa needs to support Multiple Images
Currently the in_app flag only covers a single image: the application. However for people with multiple frameworks we need to have a second mode where we detect the application images based on the path.
</issue>
<code>
[start of src/sentry/lang/native/plugin.py]
1 from __future__ import absolute_import, print_function
2
3 import logging
4 import posixpath
5
6 from sentry.models import Project, EventError
7 from sentry.plugins import Plugin2
8 from sentry.lang.native.symbolizer import Symbolizer, have_symsynd
9 from sentry.models.dsymfile import SDK_MAPPING
10
11
12 logger = logging.getLogger(__name__)
13
14
15 def append_error(data, err):
16 data.setdefault('errors', []).append(err)
17
18
19 def exception_from_apple_error_or_diagnosis(error, diagnosis=None):
20 error = error or {}
21
22 if error:
23 nsexception = error.get('nsexception')
24 if nsexception:
25 return {
26 'type': nsexception['name'],
27 'value': error['reason'],
28 }
29
30 if diagnosis:
31 return {
32 'type': 'Error',
33 'value': diagnosis
34 }
35
36
37 def inject_apple_backtrace(data, frames, diagnosis=None, error=None,
38 system=None):
39 # TODO:
40 # user report stacktraces from unity
41
42 app_uuid = None
43 if system:
44 app_uuid = system.get('app_uuid')
45 if app_uuid is not None:
46 app_uuid = app_uuid.lower()
47
48 converted_frames = []
49 longest_addr = 0
50 for frame in reversed(frames):
51 fn = frame.get('filename')
52 in_app = False
53
54 if app_uuid is not None:
55 frame_uuid = frame.get('uuid')
56 if frame_uuid == app_uuid:
57 in_app = True
58
59 # We only record the offset if we found a symbol but we did not
60 # find a line number. In that case it's the offset in bytes from
61 # the beginning of the symbol.
62 function = frame['symbol_name'] or '<unknown>'
63 lineno = frame.get('line')
64 offset = None
65 if not lineno:
66 offset = frame['instruction_addr'] - frame['symbol_addr']
67
68 cframe = {
69 'in_app': in_app,
70 'abs_path': fn,
71 'filename': fn and posixpath.basename(fn) or None,
72 # This can come back as `None` from the symbolizer, in which
73 # case we need to fill something else in or we will fail
74 # later fulfill the interface requirements which say that a
75 # function needs to be provided.
76 'function': function,
77 'package': frame['object_name'],
78 'symbol_addr': '%x' % frame['symbol_addr'],
79 'instruction_addr': '%x' % frame['instruction_addr'],
80 'instruction_offset': offset,
81 'lineno': lineno,
82 }
83 converted_frames.append(cframe)
84 longest_addr = max(longest_addr, len(cframe['symbol_addr']),
85 len(cframe['instruction_addr']))
86
87 # Pad out addresses to be of the same length and add prefix
88 for frame in converted_frames:
89 for key in 'symbol_addr', 'instruction_addr':
90 frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')
91
92 stacktrace = {'frames': converted_frames}
93
94 if error or diagnosis:
95 error = error or {}
96 exc = exception_from_apple_error_or_diagnosis(error, diagnosis)
97 if exc is not None:
98 exc['stacktrace'] = stacktrace
99 data['sentry.interfaces.Exception'] = {'values': [exc]}
100 # Since we inject the exception late we need to make sure that
101 # we set the event type to error as it would be set to
102 # 'default' otherwise.
103 data['type'] = 'error'
104 return
105
106 data['sentry.interfaces.Stacktrace'] = stacktrace
107
108
109 def inject_apple_device_data(data, system):
110 container = data.setdefault('device', {})
111 try:
112 container['name'] = SDK_MAPPING[system['system_name']]
113 except LookupError:
114 container['name'] = system.get('system_name') or 'Generic Apple'
115
116 if 'system_version' in system:
117 container['version'] = system['system_version']
118 if 'os_version' in system:
119 container['build'] = system['os_version']
120
121 extra = container.setdefault('data', {})
122 if 'cpu_arch' in system:
123 extra['cpu_arch'] = system['cpu_arch']
124 if 'model' in system:
125 extra['device_model_id'] = system['model']
126 if 'machine' in system:
127 extra['device_model'] = system['machine']
128 if 'kernel_version' in system:
129 extra['kernel_version'] = system['kernel_version']
130
131
132 def preprocess_apple_crash_event(data):
133 crash_report = data.get('sentry.interfaces.AppleCrashReport')
134 if crash_report is None:
135 return
136
137 project = Project.objects.get_from_cache(
138 id=data['project'],
139 )
140
141 crash = crash_report['crash']
142 crashed_thread = None
143 for thread in crash['threads']:
144 if thread['crashed']:
145 crashed_thread = thread
146 if crashed_thread is None:
147 append_error(data, {
148 'type': EventError.NATIVE_NO_CRASHED_THREAD,
149 })
150
151 else:
152 system = crash_report.get('system')
153 try:
154 sym = Symbolizer(project, crash_report['binary_images'],
155 threads=[crashed_thread])
156 with sym:
157 bt = sym.symbolize_backtrace(
158 crashed_thread['backtrace']['contents'], system)
159 inject_apple_backtrace(data, bt, crash.get('diagnosis'),
160 crash.get('error'), system)
161 except Exception as e:
162 logger.exception('Failed to symbolicate')
163 append_error(data, {
164 'type': EventError.NATIVE_INTERNAL_FAILURE,
165 'error': '%s: %s' % (e.__class__.__name__, str(e)),
166 })
167 return
168
169 if system:
170 inject_apple_device_data(data, system)
171
172 return data
173
174
175 class NativePlugin(Plugin2):
176 can_disable = False
177
178 def get_event_preprocessors(self, **kwargs):
179 if not have_symsynd:
180 return []
181 return [preprocess_apple_crash_event]
182
[end of src/sentry/lang/native/plugin.py]
[start of src/sentry/lang/native/symbolizer.py]
1 try:
2 from symsynd.driver import Driver
3 from symsynd.report import ReportSymbolizer
4 from symsynd.macho.arch import get_cpu_name
5 from symsynd.demangle import demangle_symbol
6 have_symsynd = True
7 except ImportError:
8 have_symsynd = False
9
10 from sentry import options
11 from sentry.lang.native.dsymcache import dsymcache
12 from sentry.utils.safe import trim
13 from sentry.models import DSymSymbol
14 from sentry.models.dsymfile import MAX_SYM
15
16
17 def trim_frame(frame):
18 # This matches what's in stacktrace.py
19 frame['symbol_name'] = trim(frame.get('symbol_name'), MAX_SYM)
20 frame['filename'] = trim(frame.get('filename'), 256)
21 return frame
22
23
24 def find_system_symbol(img, instruction_addr, system_info=None):
25 """Finds a system symbol."""
26 return DSymSymbol.objects.lookup_symbol(
27 instruction_addr=instruction_addr,
28 image_addr=img['image_addr'],
29 uuid=img['uuid'],
30 cpu_name=get_cpu_name(img['cpu_type'],
31 img['cpu_subtype']),
32 object_path=img['name'],
33 system_info=system_info
34 )
35
36
37 def make_symbolizer(project, binary_images, threads=None):
38 """Creates a symbolizer for the given project and binary images. If a
39 list of threads is referenced (from an apple crash report) then only
40 images needed by those frames are loaded.
41 """
42 if not have_symsynd:
43 raise RuntimeError('symsynd is unavailable. Install sentry with '
44 'the dsym feature flag.')
45 driver = Driver(options.get('dsym.llvm-symbolizer-path') or None)
46
47 if threads is None:
48 to_load = [x['uuid'] for x in binary_images]
49 else:
50 image_map = {}
51 for image in binary_images:
52 image_map[image['image_addr']] = image['uuid']
53 to_load = set()
54 for thread in threads:
55 for frame in thread['backtrace']['contents']:
56 img_uuid = image_map.get(frame['object_addr'])
57 if img_uuid is not None:
58 to_load.add(img_uuid)
59 to_load = list(to_load)
60
61 dsym_paths, loaded = dsymcache.fetch_dsyms(project, to_load)
62 return ReportSymbolizer(driver, dsym_paths, binary_images)
63
64
65 class Symbolizer(object):
66
67 def __init__(self, project, binary_images, threads=None):
68 self.symsynd_symbolizer = make_symbolizer(project, binary_images,
69 threads=threads)
70 self.images = dict((img['image_addr'], img) for img in binary_images)
71
72 def __enter__(self):
73 return self.symsynd_symbolizer.driver.__enter__()
74
75 def __exit__(self, *args):
76 return self.symsynd_symbolizer.driver.__exit__(*args)
77
78 def symbolize_frame(self, frame, system_info=None):
79 # Step one: try to symbolize with cached dsym files.
80 new_frame = self.symsynd_symbolizer.symbolize_frame(frame)
81 if new_frame is not None:
82 return trim_frame(new_frame)
83
84 # If that does not work, look up system symbols.
85 img = self.images.get(frame['object_addr'])
86 if img is not None:
87 symbol = find_system_symbol(img, frame['instruction_addr'],
88 system_info)
89 if symbol is not None:
90 symbol = demangle_symbol(symbol) or symbol
91 rv = dict(frame, symbol_name=symbol, filename=None,
92 line=0, column=0, uuid=img['uuid'])
93 return trim_frame(rv)
94
95 def symbolize_backtrace(self, backtrace, system_info=None):
96 rv = []
97 for frame in backtrace:
98 new_frame = self.symbolize_frame(frame, system_info)
99 rv.append(new_frame or frame)
100 return rv
101
[end of src/sentry/lang/native/symbolizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/lang/native/plugin.py b/src/sentry/lang/native/plugin.py
--- a/src/sentry/lang/native/plugin.py
+++ b/src/sentry/lang/native/plugin.py
@@ -11,6 +11,15 @@
logger = logging.getLogger(__name__)
+APP_BUNDLE_PATHS = (
+ '/var/containers/Bundle/Application/',
+ '/private/var/containers/Bundle/Application/',
+)
+
+NON_APP_FRAMEWORKS = (
+ '/Frameworks/libswiftCore.dylib',
+)
+
def append_error(data, err):
data.setdefault('errors', []).append(err)
@@ -34,6 +43,19 @@
}
+def is_in_app(frame, app_uuid=None):
+ if app_uuid is not None:
+ frame_uuid = frame.get('uuid')
+ if frame_uuid == app_uuid:
+ return True
+ object_name = frame.get('object_name', '')
+ if not object_name.startswith(APP_BUNDLE_PATHS):
+ return False
+ if object_name.endswith(NON_APP_FRAMEWORKS):
+ return False
+ return True
+
+
def inject_apple_backtrace(data, frames, diagnosis=None, error=None,
system=None):
# TODO:
@@ -49,12 +71,6 @@
longest_addr = 0
for frame in reversed(frames):
fn = frame.get('filename')
- in_app = False
-
- if app_uuid is not None:
- frame_uuid = frame.get('uuid')
- if frame_uuid == app_uuid:
- in_app = True
# We only record the offset if we found a symbol but we did not
# find a line number. In that case it's the offset in bytes from
@@ -66,7 +82,7 @@
offset = frame['instruction_addr'] - frame['symbol_addr']
cframe = {
- 'in_app': in_app,
+ 'in_app': is_in_app(frame, app_uuid),
'abs_path': fn,
'filename': fn and posixpath.basename(fn) or None,
# This can come back as `None` from the symbolizer, in which
diff --git a/src/sentry/lang/native/symbolizer.py b/src/sentry/lang/native/symbolizer.py
--- a/src/sentry/lang/native/symbolizer.py
+++ b/src/sentry/lang/native/symbolizer.py
@@ -75,26 +75,33 @@
def __exit__(self, *args):
return self.symsynd_symbolizer.driver.__exit__(*args)
+ def _process_frame(self, frame, img):
+ rv = trim_frame(frame)
+ if img is not None:
+ rv['object_name'] = img['name']
+ rv['uuid'] = img['uuid']
+ return rv
+
def symbolize_frame(self, frame, system_info=None):
+ img = self.images.get(frame['object_addr'])
+
# Step one: try to symbolize with cached dsym files.
new_frame = self.symsynd_symbolizer.symbolize_frame(frame)
if new_frame is not None:
- return trim_frame(new_frame)
+ return self._process_frame(new_frame, img)
# If that does not work, look up system symbols.
- img = self.images.get(frame['object_addr'])
if img is not None:
symbol = find_system_symbol(img, frame['instruction_addr'],
system_info)
if symbol is not None:
symbol = demangle_symbol(symbol) or symbol
rv = dict(frame, symbol_name=symbol, filename=None,
- line=0, column=0, uuid=img['uuid'])
- return trim_frame(rv)
+ line=0, column=0, uuid=img['uuid'],
+ object_name=img['object_name'])
+ return self._process_frame(rv, img)
+
+ return self._process_frame(frame, img)
def symbolize_backtrace(self, backtrace, system_info=None):
- rv = []
- for frame in backtrace:
- new_frame = self.symbolize_frame(frame, system_info)
- rv.append(new_frame or frame)
- return rv
+ return [self.symbolize_frame(frm, system_info) for frm in backtrace]
|
{"golden_diff": "diff --git a/src/sentry/lang/native/plugin.py b/src/sentry/lang/native/plugin.py\n--- a/src/sentry/lang/native/plugin.py\n+++ b/src/sentry/lang/native/plugin.py\n@@ -11,6 +11,15 @@\n \n logger = logging.getLogger(__name__)\n \n+APP_BUNDLE_PATHS = (\n+ '/var/containers/Bundle/Application/',\n+ '/private/var/containers/Bundle/Application/',\n+)\n+\n+NON_APP_FRAMEWORKS = (\n+ '/Frameworks/libswiftCore.dylib',\n+)\n+\n \n def append_error(data, err):\n data.setdefault('errors', []).append(err)\n@@ -34,6 +43,19 @@\n }\n \n \n+def is_in_app(frame, app_uuid=None):\n+ if app_uuid is not None:\n+ frame_uuid = frame.get('uuid')\n+ if frame_uuid == app_uuid:\n+ return True\n+ object_name = frame.get('object_name', '')\n+ if not object_name.startswith(APP_BUNDLE_PATHS):\n+ return False\n+ if object_name.endswith(NON_APP_FRAMEWORKS):\n+ return False\n+ return True\n+\n+\n def inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n@@ -49,12 +71,6 @@\n longest_addr = 0\n for frame in reversed(frames):\n fn = frame.get('filename')\n- in_app = False\n-\n- if app_uuid is not None:\n- frame_uuid = frame.get('uuid')\n- if frame_uuid == app_uuid:\n- in_app = True\n \n # We only record the offset if we found a symbol but we did not\n # find a line number. In that case it's the offset in bytes from\n@@ -66,7 +82,7 @@\n offset = frame['instruction_addr'] - frame['symbol_addr']\n \n cframe = {\n- 'in_app': in_app,\n+ 'in_app': is_in_app(frame, app_uuid),\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\ndiff --git a/src/sentry/lang/native/symbolizer.py b/src/sentry/lang/native/symbolizer.py\n--- a/src/sentry/lang/native/symbolizer.py\n+++ b/src/sentry/lang/native/symbolizer.py\n@@ -75,26 +75,33 @@\n def __exit__(self, *args):\n return self.symsynd_symbolizer.driver.__exit__(*args)\n \n+ def _process_frame(self, frame, img):\n+ rv = trim_frame(frame)\n+ if img is not None:\n+ rv['object_name'] = img['name']\n+ rv['uuid'] = img['uuid']\n+ return rv\n+\n def symbolize_frame(self, frame, system_info=None):\n+ img = self.images.get(frame['object_addr'])\n+\n # Step one: try to symbolize with cached dsym files.\n new_frame = self.symsynd_symbolizer.symbolize_frame(frame)\n if new_frame is not None:\n- return trim_frame(new_frame)\n+ return self._process_frame(new_frame, img)\n \n # If that does not work, look up system symbols.\n- img = self.images.get(frame['object_addr'])\n if img is not None:\n symbol = find_system_symbol(img, frame['instruction_addr'],\n system_info)\n if symbol is not None:\n symbol = demangle_symbol(symbol) or symbol\n rv = dict(frame, symbol_name=symbol, filename=None,\n- line=0, column=0, uuid=img['uuid'])\n- return trim_frame(rv)\n+ line=0, column=0, uuid=img['uuid'],\n+ object_name=img['object_name'])\n+ return self._process_frame(rv, img)\n+\n+ return self._process_frame(frame, img)\n \n def symbolize_backtrace(self, backtrace, system_info=None):\n- rv = []\n- for frame in backtrace:\n- new_frame = self.symbolize_frame(frame, system_info)\n- rv.append(new_frame or frame)\n- return rv\n+ return [self.symbolize_frame(frm, system_info) for frm in backtrace]\n", "issue": "In-App for Cocoa needs to support Multiple Images\nCurrently the in_app flag only covers a single image: the application. 
However for people with multiple frameworks we need to have a second mode where we detect the application images based on the path.\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport logging\nimport posixpath\n\nfrom sentry.models import Project, EventError\nfrom sentry.plugins import Plugin2\nfrom sentry.lang.native.symbolizer import Symbolizer, have_symsynd\nfrom sentry.models.dsymfile import SDK_MAPPING\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef append_error(data, err):\n data.setdefault('errors', []).append(err)\n\n\ndef exception_from_apple_error_or_diagnosis(error, diagnosis=None):\n error = error or {}\n\n if error:\n nsexception = error.get('nsexception')\n if nsexception:\n return {\n 'type': nsexception['name'],\n 'value': error['reason'],\n }\n\n if diagnosis:\n return {\n 'type': 'Error',\n 'value': diagnosis\n }\n\n\ndef inject_apple_backtrace(data, frames, diagnosis=None, error=None,\n system=None):\n # TODO:\n # user report stacktraces from unity\n\n app_uuid = None\n if system:\n app_uuid = system.get('app_uuid')\n if app_uuid is not None:\n app_uuid = app_uuid.lower()\n\n converted_frames = []\n longest_addr = 0\n for frame in reversed(frames):\n fn = frame.get('filename')\n in_app = False\n\n if app_uuid is not None:\n frame_uuid = frame.get('uuid')\n if frame_uuid == app_uuid:\n in_app = True\n\n # We only record the offset if we found a symbol but we did not\n # find a line number. In that case it's the offset in bytes from\n # the beginning of the symbol.\n function = frame['symbol_name'] or '<unknown>'\n lineno = frame.get('line')\n offset = None\n if not lineno:\n offset = frame['instruction_addr'] - frame['symbol_addr']\n\n cframe = {\n 'in_app': in_app,\n 'abs_path': fn,\n 'filename': fn and posixpath.basename(fn) or None,\n # This can come back as `None` from the symbolizer, in which\n # case we need to fill something else in or we will fail\n # later fulfill the interface requirements which say that a\n # function needs to be provided.\n 'function': function,\n 'package': frame['object_name'],\n 'symbol_addr': '%x' % frame['symbol_addr'],\n 'instruction_addr': '%x' % frame['instruction_addr'],\n 'instruction_offset': offset,\n 'lineno': lineno,\n }\n converted_frames.append(cframe)\n longest_addr = max(longest_addr, len(cframe['symbol_addr']),\n len(cframe['instruction_addr']))\n\n # Pad out addresses to be of the same length and add prefix\n for frame in converted_frames:\n for key in 'symbol_addr', 'instruction_addr':\n frame[key] = '0x' + frame[key][2:].rjust(longest_addr, '0')\n\n stacktrace = {'frames': converted_frames}\n\n if error or diagnosis:\n error = error or {}\n exc = exception_from_apple_error_or_diagnosis(error, diagnosis)\n if exc is not None:\n exc['stacktrace'] = stacktrace\n data['sentry.interfaces.Exception'] = {'values': [exc]}\n # Since we inject the exception late we need to make sure that\n # we set the event type to error as it would be set to\n # 'default' otherwise.\n data['type'] = 'error'\n return\n\n data['sentry.interfaces.Stacktrace'] = stacktrace\n\n\ndef inject_apple_device_data(data, system):\n container = data.setdefault('device', {})\n try:\n container['name'] = SDK_MAPPING[system['system_name']]\n except LookupError:\n container['name'] = system.get('system_name') or 'Generic Apple'\n\n if 'system_version' in system:\n container['version'] = system['system_version']\n if 'os_version' in system:\n container['build'] = system['os_version']\n\n extra = 
container.setdefault('data', {})\n if 'cpu_arch' in system:\n extra['cpu_arch'] = system['cpu_arch']\n if 'model' in system:\n extra['device_model_id'] = system['model']\n if 'machine' in system:\n extra['device_model'] = system['machine']\n if 'kernel_version' in system:\n extra['kernel_version'] = system['kernel_version']\n\n\ndef preprocess_apple_crash_event(data):\n crash_report = data.get('sentry.interfaces.AppleCrashReport')\n if crash_report is None:\n return\n\n project = Project.objects.get_from_cache(\n id=data['project'],\n )\n\n crash = crash_report['crash']\n crashed_thread = None\n for thread in crash['threads']:\n if thread['crashed']:\n crashed_thread = thread\n if crashed_thread is None:\n append_error(data, {\n 'type': EventError.NATIVE_NO_CRASHED_THREAD,\n })\n\n else:\n system = crash_report.get('system')\n try:\n sym = Symbolizer(project, crash_report['binary_images'],\n threads=[crashed_thread])\n with sym:\n bt = sym.symbolize_backtrace(\n crashed_thread['backtrace']['contents'], system)\n inject_apple_backtrace(data, bt, crash.get('diagnosis'),\n crash.get('error'), system)\n except Exception as e:\n logger.exception('Failed to symbolicate')\n append_error(data, {\n 'type': EventError.NATIVE_INTERNAL_FAILURE,\n 'error': '%s: %s' % (e.__class__.__name__, str(e)),\n })\n return\n\n if system:\n inject_apple_device_data(data, system)\n\n return data\n\n\nclass NativePlugin(Plugin2):\n can_disable = False\n\n def get_event_preprocessors(self, **kwargs):\n if not have_symsynd:\n return []\n return [preprocess_apple_crash_event]\n", "path": "src/sentry/lang/native/plugin.py"}, {"content": "try:\n from symsynd.driver import Driver\n from symsynd.report import ReportSymbolizer\n from symsynd.macho.arch import get_cpu_name\n from symsynd.demangle import demangle_symbol\n have_symsynd = True\nexcept ImportError:\n have_symsynd = False\n\nfrom sentry import options\nfrom sentry.lang.native.dsymcache import dsymcache\nfrom sentry.utils.safe import trim\nfrom sentry.models import DSymSymbol\nfrom sentry.models.dsymfile import MAX_SYM\n\n\ndef trim_frame(frame):\n # This matches what's in stacktrace.py\n frame['symbol_name'] = trim(frame.get('symbol_name'), MAX_SYM)\n frame['filename'] = trim(frame.get('filename'), 256)\n return frame\n\n\ndef find_system_symbol(img, instruction_addr, system_info=None):\n \"\"\"Finds a system symbol.\"\"\"\n return DSymSymbol.objects.lookup_symbol(\n instruction_addr=instruction_addr,\n image_addr=img['image_addr'],\n uuid=img['uuid'],\n cpu_name=get_cpu_name(img['cpu_type'],\n img['cpu_subtype']),\n object_path=img['name'],\n system_info=system_info\n )\n\n\ndef make_symbolizer(project, binary_images, threads=None):\n \"\"\"Creates a symbolizer for the given project and binary images. If a\n list of threads is referenced (from an apple crash report) then only\n images needed by those frames are loaded.\n \"\"\"\n if not have_symsynd:\n raise RuntimeError('symsynd is unavailable. 
Install sentry with '\n 'the dsym feature flag.')\n driver = Driver(options.get('dsym.llvm-symbolizer-path') or None)\n\n if threads is None:\n to_load = [x['uuid'] for x in binary_images]\n else:\n image_map = {}\n for image in binary_images:\n image_map[image['image_addr']] = image['uuid']\n to_load = set()\n for thread in threads:\n for frame in thread['backtrace']['contents']:\n img_uuid = image_map.get(frame['object_addr'])\n if img_uuid is not None:\n to_load.add(img_uuid)\n to_load = list(to_load)\n\n dsym_paths, loaded = dsymcache.fetch_dsyms(project, to_load)\n return ReportSymbolizer(driver, dsym_paths, binary_images)\n\n\nclass Symbolizer(object):\n\n def __init__(self, project, binary_images, threads=None):\n self.symsynd_symbolizer = make_symbolizer(project, binary_images,\n threads=threads)\n self.images = dict((img['image_addr'], img) for img in binary_images)\n\n def __enter__(self):\n return self.symsynd_symbolizer.driver.__enter__()\n\n def __exit__(self, *args):\n return self.symsynd_symbolizer.driver.__exit__(*args)\n\n def symbolize_frame(self, frame, system_info=None):\n # Step one: try to symbolize with cached dsym files.\n new_frame = self.symsynd_symbolizer.symbolize_frame(frame)\n if new_frame is not None:\n return trim_frame(new_frame)\n\n # If that does not work, look up system symbols.\n img = self.images.get(frame['object_addr'])\n if img is not None:\n symbol = find_system_symbol(img, frame['instruction_addr'],\n system_info)\n if symbol is not None:\n symbol = demangle_symbol(symbol) or symbol\n rv = dict(frame, symbol_name=symbol, filename=None,\n line=0, column=0, uuid=img['uuid'])\n return trim_frame(rv)\n\n def symbolize_backtrace(self, backtrace, system_info=None):\n rv = []\n for frame in backtrace:\n new_frame = self.symbolize_frame(frame, system_info)\n rv.append(new_frame or frame)\n return rv\n", "path": "src/sentry/lang/native/symbolizer.py"}]}
| 3,396 | 937 |
gh_patches_debug_9034
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-1029
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Staff users should see the public view of results by default
Staff users should by default see the public view of results pages. It shows less data (primarily no comments), which is good, and random people looking over one's shoulder won't ask "omg you see more stuff here why is that"
</issue>
<code>
[start of evap/results/views.py]
1 from collections import OrderedDict, namedtuple
2
3 from django.core.exceptions import PermissionDenied
4 from django.shortcuts import get_object_or_404, render
5 from django.contrib.auth.decorators import login_required
6
7 from evap.evaluation.models import Semester, Degree, Contribution
8 from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY
9
10
11 @login_required
12 def index(request):
13 semesters = Semester.get_all_with_published_courses()
14
15 return render(request, "results_index.html", dict(semesters=semesters))
16
17
18 @login_required
19 def semester_detail(request, semester_id):
20 semester = get_object_or_404(Semester, id=semester_id)
21 if request.user.is_reviewer:
22 courses = list(semester.course_set.filter(state__in=["in_evaluation", "evaluated", "reviewed", "published"]).prefetch_related("degrees"))
23 else:
24 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees"))
25
26 courses = [course for course in courses if course.can_user_see_course(request.user)]
27
28 # Annotate each course object with its grades.
29 for course in courses:
30 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
31
32 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))
33
34 courses_by_degree = OrderedDict()
35 for degree in Degree.objects.all():
36 courses_by_degree[degree] = CourseTuple([], [])
37 for course in courses:
38 if course.is_single_result:
39 for degree in course.degrees.all():
40 section = calculate_results(course)[0]
41 result = section.results[0]
42 courses_by_degree[degree].single_results.append((course, result))
43 else:
44 for degree in course.degrees.all():
45 courses_by_degree[degree].courses.append(course)
46
47 template_data = dict(semester=semester, courses_by_degree=courses_by_degree)
48 return render(request, "results_semester_detail.html", template_data)
49
50
51 @login_required
52 def course_detail(request, semester_id, course_id):
53 semester = get_object_or_404(Semester, id=semester_id)
54 course = get_object_or_404(semester.course_set, id=course_id, semester=semester)
55
56 if not course.can_user_see_results(request.user):
57 raise PermissionDenied
58
59 sections = calculate_results(course)
60
61 public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
62
63 represented_users = list(request.user.represented_users.all())
64 represented_users.append(request.user)
65
66 # filter text answers
67 for section in sections:
68 results = []
69 for result in section.results:
70 if isinstance(result, TextResult):
71 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]
72 if answers:
73 results.append(TextResult(question=result.question, answers=answers))
74 else:
75 results.append(result)
76 section.results[:] = results
77
78 # remove empty sections
79 sections = [section for section in sections if section.results]
80
81 # group by contributor
82 course_sections = []
83 contributor_sections = OrderedDict()
84 for section in sections:
85 if not section.results:
86 continue
87 if section.contributor is None:
88 course_sections.append(section)
89 else:
90 contributor_sections.setdefault(section.contributor,
91 {'total_votes': 0, 'sections': []})['sections'].append(section)
92
93 # Sum up all Sections for this contributor.
94 # If section is not a RatingResult:
95 # Add 1 as we assume it is a TextResult or something similar that should be displayed.
96 contributor_sections[section.contributor]['total_votes'] +=\
97 sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])
98
99 # Show a warning if course is still in evaluation (for reviewer preview).
100 evaluation_warning = course.state != 'published'
101
102 # Results for a course might not be visible because there are not enough answers
103 # but it can still be "published" e.g. to show the comment results to contributors.
104 # Users who can open the results page see a warning message in this case.
105 sufficient_votes_warning = not course.can_publish_grades
106
107 show_grades = request.user.is_reviewer or course.can_publish_grades
108
109 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)
110
111 template_data = dict(
112 course=course,
113 course_sections=course_sections,
114 contributor_sections=contributor_sections,
115 evaluation_warning=evaluation_warning,
116 sufficient_votes_warning=sufficient_votes_warning,
117 show_grades=show_grades,
118 reviewer=request.user.is_reviewer,
119 contributor=course.is_user_contributor_or_delegate(request.user),
120 can_download_grades=request.user.can_download_grades,
121 public_view=public_view)
122 return render(request, "results_course_detail.html", template_data)
123
124
125 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False):
126 if public_view:
127 return False
128 if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:
129 return False
130 if user.is_reviewer:
131 return True
132
133 contributor = text_answer.contribution.contributor
134
135 if text_answer.is_private:
136 return contributor == user
137
138 if text_answer.is_published:
139 if text_answer.contribution.responsible:
140 return contributor == user or user in contributor.delegates.all()
141
142 if contributor in represented_users:
143 return True
144 if text_answer.contribution.course.contributions.filter(
145 contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():
146 return True
147 if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(
148 contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():
149 return True
150
151 return False
152
[end of evap/results/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/results/views.py b/evap/results/views.py
--- a/evap/results/views.py
+++ b/evap/results/views.py
@@ -58,7 +58,10 @@
sections = calculate_results(course)
- public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
+ if request.user.is_staff or request.user.is_reviewer:
+ public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.
+ else:
+ public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.
represented_users = list(request.user.represented_users.all())
represented_users.append(request.user)
|
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -58,7 +58,10 @@\n \n sections = calculate_results(course)\n \n- public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n+ if request.user.is_staff or request.user.is_reviewer:\n+ public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n+ else:\n+ public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n \n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n", "issue": "Staff users should see the public view of results by default\nStaff users should by default see the public view of results pages. It shows less data (primarily no comments), which is good, and random people looking over one's shoulder won't ask \"omg you see more stuff here why is that\"\n", "before_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@login_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@login_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text 
answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
| 2,223 | 181 |
gh_patches_debug_20105
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-768
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
assigning-sources-to-users process could be smoother
I recently tried to add myself as an editor for a test source on staging. I went into the admin area, selected my user account, and tried to add CDN-Test2020 to my sources:
So far so good...:

Oh dear...:

Let me try finding the source in the admin area, and then add myself to the source that way! oh... oh dear...:

Admittedly, there exists a faster way to get to the source page—source-detail -> click "manage source editors", which takes you to the admin area for the source in question, where you can find a filterable list of users—but these two pages of the admin area could certainly be improved by the ability to filter.
</issue>
<code>
[start of django/cantusdb_project/main_app/admin.py]
1 from django.contrib import admin
2 from main_app.models import *
3
4 # these fields should not be editable by all classes
5 EXCLUDE = ("created_by", "last_updated_by", "json_info")
6
7
8 class BaseModelAdmin(admin.ModelAdmin):
9 exclude = EXCLUDE
10
11 # if an object is created in the admin interface, assign the user to the created_by field
12 # else if an object is updated in the admin interface, assign the user to the last_updated_by field
13 def save_model(self, request, obj, form, change):
14 if change:
15 obj.last_updated_by = request.user
16 else:
17 obj.created_by = request.user
18 super().save_model(request, obj, form, change)
19
20
21 class CenturyAdmin(BaseModelAdmin):
22 pass
23
24
25 class ChantAdmin(BaseModelAdmin):
26 list_display = ("incipit", "siglum", "genre")
27 search_fields = ("title", "incipit", "cantus_id")
28 list_filter = ("genre",)
29 exclude = EXCLUDE + (
30 "col1",
31 "col2",
32 "col3",
33 "next_chant",
34 "s_sequence",
35 "is_last_chant_in_feast",
36 )
37
38
39 class FeastAdmin(BaseModelAdmin):
40 pass
41
42
43 class GenreAdmin(BaseModelAdmin):
44 pass
45
46
47 class NotationAdmin(BaseModelAdmin):
48 pass
49
50
51 class OfficeAdmin(BaseModelAdmin):
52 pass
53
54
55 class ProvenanceAdmin(BaseModelAdmin):
56 pass
57
58
59 class RismSiglumAdmin(BaseModelAdmin):
60 pass
61
62
63 class SegmentAdmin(BaseModelAdmin):
64 pass
65
66
67 class SequenceAdmin(BaseModelAdmin):
68 exclude = EXCLUDE + ("c_sequence", "next_chant", "is_last_chant_in_feast")
69
70
71 class SourceAdmin(BaseModelAdmin):
72 # from the Django docs:
73 # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript “filter” interface
74 # that allows searching within the options. The unselected and selected options appear in two boxes side by side.
75 filter_horizontal = (
76 "century",
77 "notation",
78 "current_editors",
79 "inventoried_by",
80 "full_text_entered_by",
81 "melodies_entered_by",
82 "proofreaders",
83 "other_editors",
84 )
85
86
87 admin.site.register(Century, CenturyAdmin)
88 admin.site.register(Chant, ChantAdmin)
89 admin.site.register(Feast, FeastAdmin)
90 admin.site.register(Genre, GenreAdmin)
91 admin.site.register(Notation, NotationAdmin)
92 admin.site.register(Office, OfficeAdmin)
93 admin.site.register(Provenance, ProvenanceAdmin)
94 admin.site.register(RismSiglum, RismSiglumAdmin)
95 admin.site.register(Segment, SegmentAdmin)
96 admin.site.register(Sequence, SequenceAdmin)
97 admin.site.register(Source, SourceAdmin)
98
[end of django/cantusdb_project/main_app/admin.py]
[start of django/cantusdb_project/users/admin.py]
1 from django.contrib import admin
2 from .models import *
3 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
4 from main_app.models import Source
5
6 # Register your models here.
7
8
9 # this will allow us to assign sources to users in the User admin page
10 class SourceInline(admin.TabularInline):
11 model = Source.current_editors.through
12
13
14 class UserAdmin(BaseUserAdmin):
15 readonly_fields = (
16 "date_joined",
17 "last_login",
18 )
19 # fields that are displayed on the user list page of the admin
20 list_display = (
21 "email",
22 "first_name",
23 "last_name",
24 "institution",
25 )
26 # creates a filter on the right side of the page to filter users by group
27 list_filter = ("groups",)
28 fieldsets = (
29 (
30 "Account info",
31 {
32 "fields": (
33 ("email", "password"),
34 "is_active",
35 ("date_joined", "last_login"),
36 )
37 },
38 ),
39 (
40 "Personal info",
41 {
42 "fields": (
43 "full_name",
44 ("first_name", "last_name"),
45 "institution",
46 ("city", "country"),
47 "website",
48 )
49 },
50 ),
51 (
52 "Permissions",
53 {
54 "fields": (
55 "is_staff",
56 "is_superuser",
57 "groups",
58 )
59 },
60 ),
61 )
62 add_fieldsets = (
63 (
64 "Account info",
65 {
66 "fields": (
67 "email",
68 ("password1", "password2"),
69 )
70 },
71 ),
72 (
73 "Personal info",
74 {
75 "fields": (
76 "full_name",
77 ("first_name", "last_name"),
78 "institution",
79 ("city", "country"),
80 "website",
81 )
82 },
83 ),
84 (
85 "Permissions",
86 {
87 "fields": (
88 "is_staff",
89 "is_superuser",
90 "groups",
91 )
92 },
93 ),
94 )
95 search_fields = (
96 "email",
97 "first_name",
98 "last_name",
99 "institution",
100 )
101 # order the list of users by email
102 ordering = ("email",)
103 filter_horizontal = ("groups",)
104 inlines = [SourceInline]
105
106
107 admin.site.register(User, UserAdmin)
108
[end of django/cantusdb_project/users/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py
--- a/django/cantusdb_project/main_app/admin.py
+++ b/django/cantusdb_project/main_app/admin.py
@@ -69,6 +69,11 @@
class SourceAdmin(BaseModelAdmin):
+ # These search fields are also available on the user-source inline relationship in the user admin page
+ search_fields = (
+ "siglum",
+ "title",
+ )
# from the Django docs:
# Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript “filter” interface
# that allows searching within the options. The unselected and selected options appear in two boxes side by side.
diff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py
--- a/django/cantusdb_project/users/admin.py
+++ b/django/cantusdb_project/users/admin.py
@@ -9,6 +9,7 @@
# this will allow us to assign sources to users in the User admin page
class SourceInline(admin.TabularInline):
model = Source.current_editors.through
+ raw_id_fields = ["source"]
class UserAdmin(BaseUserAdmin):
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/admin.py b/django/cantusdb_project/main_app/admin.py\n--- a/django/cantusdb_project/main_app/admin.py\n+++ b/django/cantusdb_project/main_app/admin.py\n@@ -69,6 +69,11 @@\n \n \n class SourceAdmin(BaseModelAdmin):\n+ # These search fields are also available on the user-source inline relationship in the user admin page\n+ search_fields = (\n+ \"siglum\",\n+ \"title\",\n+ )\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d interface\n # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\ndiff --git a/django/cantusdb_project/users/admin.py b/django/cantusdb_project/users/admin.py\n--- a/django/cantusdb_project/users/admin.py\n+++ b/django/cantusdb_project/users/admin.py\n@@ -9,6 +9,7 @@\n # this will allow us to assign sources to users in the User admin page\n class SourceInline(admin.TabularInline):\n model = Source.current_editors.through\n+ raw_id_fields = [\"source\"]\n \n \n class UserAdmin(BaseUserAdmin):\n", "issue": "assigning-sources-to-users process could be smoother\nI recently tried to add myself as an editor for a test source on staging. I went into the admin area, selected my user account, and tried to add CDN-Test2020 to my sources:\r\n\r\nSo far so good...:\r\n\r\n\r\nOh dear...:\r\n\r\n\r\nLet me try finding the source in the admin area, and then add myself to the source that way! oh... oh dear...:\r\n\r\n\r\nAdmittedly, there exists a faster way to get to the source page\u2014source-detail -> click \"manage source editors\", which takes you to the admin area for the source in question, where you can find a filterable list of users\u2014but these two pages of the admin area could certainly be improved by the ability to filter.\n", "before_files": [{"content": "from django.contrib import admin\nfrom main_app.models import *\n\n# these fields should not be editable by all classes\nEXCLUDE = (\"created_by\", \"last_updated_by\", \"json_info\")\n\n\nclass BaseModelAdmin(admin.ModelAdmin):\n exclude = EXCLUDE\n\n # if an object is created in the admin interface, assign the user to the created_by field\n # else if an object is updated in the admin interface, assign the user to the last_updated_by field\n def save_model(self, request, obj, form, change):\n if change:\n obj.last_updated_by = request.user\n else:\n obj.created_by = request.user\n super().save_model(request, obj, form, change)\n\n\nclass CenturyAdmin(BaseModelAdmin):\n pass\n\n\nclass ChantAdmin(BaseModelAdmin):\n list_display = (\"incipit\", \"siglum\", \"genre\")\n search_fields = (\"title\", \"incipit\", \"cantus_id\")\n list_filter = (\"genre\",)\n exclude = EXCLUDE + (\n \"col1\",\n \"col2\",\n \"col3\",\n \"next_chant\",\n \"s_sequence\",\n \"is_last_chant_in_feast\",\n )\n\n\nclass FeastAdmin(BaseModelAdmin):\n pass\n\n\nclass GenreAdmin(BaseModelAdmin):\n pass\n\n\nclass NotationAdmin(BaseModelAdmin):\n pass\n\n\nclass OfficeAdmin(BaseModelAdmin):\n pass\n\n\nclass ProvenanceAdmin(BaseModelAdmin):\n pass\n\n\nclass RismSiglumAdmin(BaseModelAdmin):\n pass\n\n\nclass SegmentAdmin(BaseModelAdmin):\n pass\n\n\nclass SequenceAdmin(BaseModelAdmin):\n exclude = EXCLUDE + (\"c_sequence\", \"next_chant\", \"is_last_chant_in_feast\")\n\n\nclass SourceAdmin(BaseModelAdmin):\n # from the Django docs:\n # Adding a ManyToManyField to this list will instead use a nifty unobtrusive JavaScript \u201cfilter\u201d 
interface\n # that allows searching within the options. The unselected and selected options appear in two boxes side by side.\n filter_horizontal = (\n \"century\",\n \"notation\",\n \"current_editors\",\n \"inventoried_by\",\n \"full_text_entered_by\",\n \"melodies_entered_by\",\n \"proofreaders\",\n \"other_editors\",\n )\n\n\nadmin.site.register(Century, CenturyAdmin)\nadmin.site.register(Chant, ChantAdmin)\nadmin.site.register(Feast, FeastAdmin)\nadmin.site.register(Genre, GenreAdmin)\nadmin.site.register(Notation, NotationAdmin)\nadmin.site.register(Office, OfficeAdmin)\nadmin.site.register(Provenance, ProvenanceAdmin)\nadmin.site.register(RismSiglum, RismSiglumAdmin)\nadmin.site.register(Segment, SegmentAdmin)\nadmin.site.register(Sequence, SequenceAdmin)\nadmin.site.register(Source, SourceAdmin)\n", "path": "django/cantusdb_project/main_app/admin.py"}, {"content": "from django.contrib import admin\nfrom .models import *\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom main_app.models import Source\n\n# Register your models here.\n\n\n# this will allow us to assign sources to users in the User admin page\nclass SourceInline(admin.TabularInline):\n model = Source.current_editors.through\n\n\nclass UserAdmin(BaseUserAdmin):\n readonly_fields = (\n \"date_joined\",\n \"last_login\",\n )\n # fields that are displayed on the user list page of the admin\n list_display = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"institution\",\n )\n # creates a filter on the right side of the page to filter users by group\n list_filter = (\"groups\",)\n fieldsets = (\n (\n \"Account info\",\n {\n \"fields\": (\n (\"email\", \"password\"),\n \"is_active\",\n (\"date_joined\", \"last_login\"),\n )\n },\n ),\n (\n \"Personal info\",\n {\n \"fields\": (\n \"full_name\",\n (\"first_name\", \"last_name\"),\n \"institution\",\n (\"city\", \"country\"),\n \"website\",\n )\n },\n ),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n )\n },\n ),\n )\n add_fieldsets = (\n (\n \"Account info\",\n {\n \"fields\": (\n \"email\",\n (\"password1\", \"password2\"),\n )\n },\n ),\n (\n \"Personal info\",\n {\n \"fields\": (\n \"full_name\",\n (\"first_name\", \"last_name\"),\n \"institution\",\n (\"city\", \"country\"),\n \"website\",\n )\n },\n ),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n )\n },\n ),\n )\n search_fields = (\n \"email\",\n \"first_name\",\n \"last_name\",\n \"institution\",\n )\n # order the list of users by email\n ordering = (\"email\",)\n filter_horizontal = (\"groups\",)\n inlines = [SourceInline]\n\n\nadmin.site.register(User, UserAdmin)\n", "path": "django/cantusdb_project/users/admin.py"}]}
| 2,472 | 288 |
gh_patches_debug_28895
|
rasdani/github-patches
|
git_diff
|
translate__pootle-3780
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Legal pages don't remember your acceptance of the agreemenet
This is likely only a problem on the [LibreOffice dev server](https://translations-test.documentfoundation.org/projects/) but tracking it here so we don't forget.
</issue>
<code>
[start of pootle/apps/staticpages/managers.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.db.models import F, Manager
11
12
13 class PageManager(Manager):
14
15 def live(self, user=None, **kwargs):
16 """Filters active (live) pages.
17
18 :param user: Current active user. If omitted or the user doesn't
19 have administration privileges, only active pages will be
20 returned.
21 """
22 if user is not None and user.is_superuser:
23 return self.get_queryset()
24
25 return self.get_queryset().filter(active=True)
26
27 def pending_user_agreement(self, user, **kwargs):
28 """Filters active pages where the given `user` has pending
29 agreements.
30 """
31 # FIXME: This should be a method exclusive to a LegalPage manager
32 return self.live().exclude(
33 agreement__user=user,
34 modified_on__lt=F('agreement__agreed_on'),
35 ).distinct()
36
[end of pootle/apps/staticpages/managers.py]
[start of pootle/apps/pootle_misc/context_processors.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.conf import settings
11 from django.utils import translation
12
13 from pootle import __version__
14 from pootle_language.models import Language
15 from pootle_project.models import Project
16 from staticpages.models import LegalPage
17
18
19 def _agreement_context(request):
20 """Returns whether the agreement box should be displayed or not."""
21 request_path = request.META['PATH_INFO']
22 nocheck = filter(lambda x: request_path.startswith(x),
23 settings.LEGALPAGE_NOCHECK_PREFIXES)
24
25 if (request.user.is_authenticated() and not nocheck and
26 LegalPage.objects.pending_user_agreement(request.user).exists()):
27 return True
28
29 return False
30
31
32 def pootle_context(request):
33 """Exposes settings to templates."""
34 #FIXME: maybe we should expose relevant settings only?
35 return {
36 'settings': {
37 'TITLE': settings.TITLE,
38 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,
39 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,
40 'SCRIPT_NAME': settings.SCRIPT_NAME,
41 'POOTLE_VERSION': __version__,
42 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,
43 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,
44 'DEBUG': settings.DEBUG,
45 },
46 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,
47 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),
48 'ALL_PROJECTS': Project.objects.cached_dict(request.user),
49 'display_agreement': _agreement_context(request),
50 }
51
[end of pootle/apps/pootle_misc/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py
--- a/pootle/apps/pootle_misc/context_processors.py
+++ b/pootle/apps/pootle_misc/context_processors.py
@@ -23,7 +23,7 @@
settings.LEGALPAGE_NOCHECK_PREFIXES)
if (request.user.is_authenticated() and not nocheck and
- LegalPage.objects.pending_user_agreement(request.user).exists()):
+ LegalPage.objects.has_pending_agreement(request.user)):
return True
return False
diff --git a/pootle/apps/staticpages/managers.py b/pootle/apps/staticpages/managers.py
--- a/pootle/apps/staticpages/managers.py
+++ b/pootle/apps/staticpages/managers.py
@@ -24,12 +24,24 @@
return self.get_queryset().filter(active=True)
+ def has_pending_agreement(self, user):
+ agreements = self.pending_user_agreement(user)
+ return len(list(agreements)) > 0
+
def pending_user_agreement(self, user, **kwargs):
"""Filters active pages where the given `user` has pending
agreements.
"""
# FIXME: This should be a method exclusive to a LegalPage manager
- return self.live().exclude(
- agreement__user=user,
- modified_on__lt=F('agreement__agreed_on'),
- ).distinct()
+ return self.raw('''
+ SELECT DISTINCT staticpages_legalpage.id
+ FROM staticpages_legalpage
+ WHERE (staticpages_legalpage.active = TRUE
+ AND NOT (staticpages_legalpage.id IN
+ (SELECT A.document_id
+ FROM staticpages_legalpage AS LP
+ INNER JOIN staticpages_agreement AS A
+ ON LP.id = A.document_id
+ WHERE A.user_id = %s AND
+ A.agreed_on > LP.modified_on)))
+ ''', [user.id])
|
{"golden_diff": "diff --git a/pootle/apps/pootle_misc/context_processors.py b/pootle/apps/pootle_misc/context_processors.py\n--- a/pootle/apps/pootle_misc/context_processors.py\n+++ b/pootle/apps/pootle_misc/context_processors.py\n@@ -23,7 +23,7 @@\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n \n if (request.user.is_authenticated() and not nocheck and\n- LegalPage.objects.pending_user_agreement(request.user).exists()):\n+ LegalPage.objects.has_pending_agreement(request.user)):\n return True\n \n return False\ndiff --git a/pootle/apps/staticpages/managers.py b/pootle/apps/staticpages/managers.py\n--- a/pootle/apps/staticpages/managers.py\n+++ b/pootle/apps/staticpages/managers.py\n@@ -24,12 +24,24 @@\n \n return self.get_queryset().filter(active=True)\n \n+ def has_pending_agreement(self, user):\n+ agreements = self.pending_user_agreement(user)\n+ return len(list(agreements)) > 0\n+\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n- return self.live().exclude(\n- agreement__user=user,\n- modified_on__lt=F('agreement__agreed_on'),\n- ).distinct()\n+ return self.raw('''\n+ SELECT DISTINCT staticpages_legalpage.id\n+ FROM staticpages_legalpage\n+ WHERE (staticpages_legalpage.active = TRUE\n+ AND NOT (staticpages_legalpage.id IN\n+ (SELECT A.document_id\n+ FROM staticpages_legalpage AS LP\n+ INNER JOIN staticpages_agreement AS A\n+ ON LP.id = A.document_id\n+ WHERE A.user_id = %s AND\n+ A.agreed_on > LP.modified_on)))\n+ ''', [user.id])\n", "issue": "Legal pages don't remember your acceptance of the agreemenet\nThis is likely only a problem on the [LibreOffice dev server](https://translations-test.documentfoundation.org/projects/) but tracking it here so we don't forget.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models import F, Manager\n\n\nclass PageManager(Manager):\n\n def live(self, user=None, **kwargs):\n \"\"\"Filters active (live) pages.\n\n :param user: Current active user. If omitted or the user doesn't\n have administration privileges, only active pages will be\n returned.\n \"\"\"\n if user is not None and user.is_superuser:\n return self.get_queryset()\n\n return self.get_queryset().filter(active=True)\n\n def pending_user_agreement(self, user, **kwargs):\n \"\"\"Filters active pages where the given `user` has pending\n agreements.\n \"\"\"\n # FIXME: This should be a method exclusive to a LegalPage manager\n return self.live().exclude(\n agreement__user=user,\n modified_on__lt=F('agreement__agreed_on'),\n ).distinct()\n", "path": "pootle/apps/staticpages/managers.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.conf import settings\nfrom django.utils import translation\n\nfrom pootle import __version__\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import LegalPage\n\n\ndef _agreement_context(request):\n \"\"\"Returns whether the agreement box should be displayed or not.\"\"\"\n request_path = request.META['PATH_INFO']\n nocheck = filter(lambda x: request_path.startswith(x),\n settings.LEGALPAGE_NOCHECK_PREFIXES)\n\n if (request.user.is_authenticated() and not nocheck and\n LegalPage.objects.pending_user_agreement(request.user).exists()):\n return True\n\n return False\n\n\ndef pootle_context(request):\n \"\"\"Exposes settings to templates.\"\"\"\n #FIXME: maybe we should expose relevant settings only?\n return {\n 'settings': {\n 'TITLE': settings.TITLE,\n 'POOTLE_INSTANCE_ID': settings.POOTLE_INSTANCE_ID,\n 'CAN_CONTACT': settings.CAN_CONTACT and settings.CONTACT_EMAIL,\n 'SCRIPT_NAME': settings.SCRIPT_NAME,\n 'POOTLE_VERSION': __version__,\n 'CACHE_TIMEOUT': settings.CACHE_MIDDLEWARE_SECONDS,\n 'POOTLE_CACHE_TIMEOUT': settings.POOTLE_CACHE_TIMEOUT,\n 'DEBUG': settings.DEBUG,\n },\n 'custom': settings.CUSTOM_TEMPLATE_CONTEXT,\n 'ALL_LANGUAGES': Language.live.cached_dict(translation.get_language()),\n 'ALL_PROJECTS': Project.objects.cached_dict(request.user),\n 'display_agreement': _agreement_context(request),\n }\n", "path": "pootle/apps/pootle_misc/context_processors.py"}]}
| 1,437 | 439 |
gh_patches_debug_33621
|
rasdani/github-patches
|
git_diff
|
dotkom__onlineweb4-1548
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make sure people with `webshop.view_model` can view detail views in dashboard
Users with access to webshop dashboard cannot get a list of products (dashboard -> webshop -> kategorier -> click on a category), nor view the details of an order (dashboard -> webshop -> bestillinger -> click on an order)
</issue>
<code>
[start of apps/webshop/dashboard/views.py]
1 import logging
2
3 from django.contrib.contenttypes.models import ContentType
4 from django.core.urlresolvers import reverse
5 from django.shortcuts import get_object_or_404, redirect
6 from django.views.generic import (CreateView, DeleteView, DetailView, ListView, TemplateView,
7 UpdateView)
8 from taggit.models import TaggedItem
9
10 from apps.dashboard.tools import DashboardPermissionMixin
11 from apps.gallery.models import ResponsiveImage
12 from apps.webshop.models import Category, OrderLine, Product
13
14 logger = logging.getLogger(__name__)
15
16
17 class Overview(DashboardPermissionMixin, TemplateView):
18 template_name = 'webshop/dashboard/index.html'
19 permission_required = 'webshop.view_category'
20
21
22 class Categories(DashboardPermissionMixin, TemplateView):
23 model = Category
24 template_name = 'webshop/dashboard/categories.html'
25 permission_required = 'webshop.change_category'
26
27 def get_context_data(self, *args, **kwargs):
28 context = super(Categories, self).get_context_data(*args, **kwargs)
29 context['categories'] = Category.objects.all().prefetch_related('products')
30 return context
31
32
33 class CategoryView(DashboardPermissionMixin, DetailView):
34 model = Category
35 template_name = 'webshop/dashboard/category.html'
36 permission_required = 'webshop.view_category'
37
38
39 class CategoryCreate(DashboardPermissionMixin, CreateView):
40 model = Category
41 fields = ['name', 'slug']
42 template_name = 'webshop/dashboard/category_update.html'
43 permission_required = 'webshop.add_category'
44
45 def get_object(self, *args, **kwargs):
46 # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195
47 return None
48
49 def get_success_url(self):
50 return reverse('dashboard-webshop:categories')
51
52
53 class CategoryUpdate(DashboardPermissionMixin, UpdateView):
54 model = Category
55 fields = ['name', 'slug']
56 template_name = 'webshop/dashboard/category_update.html'
57 context_object_name = 'category'
58 permission_required = 'webshop.change_product'
59
60 def get_success_url(self):
61 return reverse('dashboard-webshop:category', kwargs={'slug': self.object.slug})
62
63
64 class CategoryDelete(DashboardPermissionMixin, DeleteView):
65 model = Category
66 template_name = 'webshop/dashboard/delete.html'
67 permission_required = 'webshop.delete_category'
68
69 def get_success_url(self):
70 return reverse('dashboard-webshop:categories')
71
72
73 class ProductView(DashboardPermissionMixin, DetailView):
74 model = Product
75 template_name = 'webshop/dashboard/product.html'
76 permission_required = 'webshop.view_product'
77
78
79 class ProductCreate(DashboardPermissionMixin, CreateView):
80 model = Product
81 fields = ['name', 'slug', 'short', 'description', 'price', 'stock']
82 template_name = 'webshop/dashboard/product_update.html'
83 permission_required = 'webshop.add_product'
84
85 def get_context_data(self, *args, **kwargs):
86 context = super(ProductCreate, self).get_context_data(*args, **kwargs)
87 context['category'] = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))
88 return context
89
90 def form_valid(self, form):
91 product = form.save(commit=False)
92 # Setting foreign key
93 category = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))
94 product.category = category
95 return super(ProductCreate, self).form_valid(form)
96
97 def get_object(self, *args, **kwargs):
98 # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195
99 return None
100
101 def get_success_url(self):
102 return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})
103
104
105 class ProductUpdate(DashboardPermissionMixin, UpdateView):
106 model = Product
107 fields = ['name', 'slug', 'short', 'description', 'price', 'stock']
108 template_name = 'webshop/dashboard/product_update.html'
109 context_object_name = 'product'
110 permission_required = 'webshop.change_product'
111
112 def get_context_data(self, *args, **kwargs):
113 context = super(ProductUpdate, self).get_context_data(*args, **kwargs)
114 context['category'] = self.object.category
115 return context
116
117 def get_success_url(self):
118 return reverse('dashboard-webshop:product', kwargs={'slug': self.object.slug})
119
120
121 class ProductDelete(DashboardPermissionMixin, DeleteView):
122 model = Product
123 template_name = 'webshop/dashboard/delete.html'
124 permission_required = 'webshop.delete_product'
125
126 def get_success_url(self):
127 return reverse('dashboard-webshop:category', kwargs={'slug': self.object.category.slug})
128
129
130 class ProductImage(DashboardPermissionMixin, DetailView):
131 model = Product
132 template_name = 'webshop/dashboard/image.html'
133 permission_required = 'webshop.change_product'
134
135 def get_context_data(self, *args, **kwargs):
136 context = super(ProductImage, self).get_context_data(*args, **kwargs)
137 # Filter out potential ResponsiveImage objects that have orphan file references
138 images = ResponsiveImage.objects.all().order_by('-timestamp')[:15]
139 context['images'] = [i for i in images if i.file_status_ok()]
140
141 context['tags'] = sorted(set(tag.tag.name for tag in TaggedItem.objects.filter(
142 content_type=ContentType.objects.get_for_model(ResponsiveImage)
143 ).order_by('tag__name')))
144
145 return context
146
147 def post(self, request, *args, **kwargs):
148 pass
149
150
151 class Orders(DashboardPermissionMixin, ListView):
152 template_name = 'webshop/dashboard/orders.html'
153 permission_required = 'webshop.view_order'
154 queryset = OrderLine.objects.filter(paid=True)
155 context_object_name = 'orders'
156
157
158 class Order(DashboardPermissionMixin, DetailView):
159 model = OrderLine
160 template_name = 'webshop/dashboard/order.html'
161 permission_required = 'webshop.change_order_line'
162 context_object_name = 'order'
163
164
165 class OrderDeliver(DashboardPermissionMixin, DetailView):
166 model = OrderLine
167 permission_required = 'webshop.change_order_line'
168
169 def post(self, *args, **kwargs):
170 super(OrderDeliver, self).get(*args, **kwargs)
171 if not self.object.delivered:
172 self.object.delivered = True
173 self.object.save()
174 return self.get(*args, **kwargs)
175
176 def get(self, *args, **kwargs):
177 super(OrderDeliver, self).get(*args, **kwargs)
178 return redirect('dashboard-webshop:order', pk=self.object.pk)
179
[end of apps/webshop/dashboard/views.py]
[start of apps/dashboard/chunks/dashboard/views.py]
1 from chunks.models import Chunk
2 from django.core.urlresolvers import reverse, reverse_lazy
3 from django.views.generic import CreateView, DeleteView, ListView, UpdateView
4
5 from apps.dashboard.tools import DashboardPermissionMixin
6
7
8 class ChunkList(DashboardPermissionMixin, ListView):
9 model = Chunk
10 queryset = Chunk.objects.all()
11 context_object_name = 'chunks'
12 permission_required = 'chunks.add_chunk'
13
14
15 class ChunkUpdate(DashboardPermissionMixin, UpdateView):
16 model = Chunk
17 context_object_name = 'chunk'
18 fields = ('key', 'description', 'content')
19 permission_required = 'chunks.change_chunk'
20 success_url = reverse_lazy('chunk-dashboard:list')
21
22
23 class ChunkCreate(DashboardPermissionMixin, CreateView):
24 model = Chunk
25 fields = ('key', 'description', 'content')
26 permission_required = 'chunks.add_chunk'
27
28 def get_object(self, queryset=None):
29 # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146,
30 # an issue with regards to guardian's PermissionMixin working with CreateView -
31 # where it requires an object even in a CreateView.
32 return None
33
34 def get_success_url(self):
35 return reverse('chunk-dashboard:update', args=(self.object.id,))
36
37
38 class ChunkDelete(DashboardPermissionMixin, DeleteView):
39 model = Chunk
40 permission_required = ('chunks.delete_chunk',)
41 success_url = reverse_lazy('chunk-dashboard:list')
42
[end of apps/dashboard/chunks/dashboard/views.py]
[start of apps/dashboard/tools.py]
1 # -*- encoding: utf-8 -*-
2
3 from datetime import date
4
5 from django.contrib.auth.models import Group
6 from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
7 from guardian.mixins import PermissionRequiredMixin
8
9 from apps.approval.models import MembershipApproval
10 from apps.gallery.models import UnhandledImage
11 from apps.inventory.models import Batch
12 from apps.posters.models import Poster
13
14
15 def has_access(request):
16 """
17 This helper method does a basic check to see if the logged in user
18 has access to the dashboard.
19
20 We might add additional checks here later.
21 """
22
23 if request.user.is_superuser:
24 return True
25
26 try:
27 committees = Group.objects.get(name='Komiteer')
28 except ObjectDoesNotExist:
29 committees = None
30
31 if committees and committees in request.user.groups.all():
32 return True
33
34 return False
35
36
37 def check_access_or_403(request):
38 """
39 Checks if a user bundled in a request object has access using has_access
40 tool function, if not raise a 403 exception
41 """
42 if not has_access(request):
43 raise PermissionDenied
44
45
46 def get_base_context(request):
47 """
48 This function returns a dictionary with the proper context variables
49 needed for given permission settings. Should be used as the initial
50 context for every dashboard view. For example, it is used for rendering
51 badges in the dashboard menu.
52
53 Add your own if req.user.has_perm statements adding the context objects
54 of that you need.
55 """
56
57 context = {}
58
59 context['user_permissions'] = set(request.user.get_all_permissions())
60
61 # Check if we need approval count to display in template sidebar badge
62 if request.user.has_perm('approval.view_membershipapproval'):
63 context['approval_pending'] = MembershipApproval.objects.filter(
64 processed=False).count()
65
66 # Check if there exists a batch in inventory that has expired
67 if request.user.has_perm('inventory.view_item'):
68 if Batch.objects.filter(expiration_date__lt=date.today()):
69 context['inventory_expired'] = True
70
71 if request.user.has_perm('posters.view_poster'):
72 if Poster.objects.filter(assigned_to=None) or Poster.objects.filter(assigned_to=request.user):
73 context['poster_orders'] = Poster.objects.filter(assigned_to=None).count()
74 context['poster_orders'] += Poster.objects.filter(assigned_to=request.user, finished=False).count()
75
76 # Check if we have any unhandled images pending crop and save
77 if request.user.has_perm('gallery.view_unhandledimage'):
78 context['unhandled_images'] = UnhandledImage.objects.all()
79
80 return context
81
82
83 # Mixin for Class Based Views
84 class DashboardMixin(object):
85 """
86 The DashboardMixin sets up the needed context data, as well as performs
87 generic access checks.
88 """
89
90 def dispatch(self, request, *args, **kwargs):
91 """
92 Hooks into the dispatch cycle, checking whether or not the currently
93 logged in user has access to the dashboard in general.
94 :param request: Django Request object
95 :param args: Positional arguments
96 :param kwargs: Keyword arguments
97 :return: Invocation of superclass dispatch
98 """
99
100 if not has_access(request):
101 raise PermissionDenied
102
103 return super(DashboardMixin, self).dispatch(request, *args, **kwargs)
104
105 def get_context_data(self, **kwargs):
106 """
107 Sets context data on superclass, before populating it further with
108 the context data needed by dashboard.
109 :param kwargs: Keyword arguments
110 :return: A context dictionary
111 """
112
113 context = super(DashboardMixin, self).get_context_data(**kwargs)
114 context.update(get_base_context(self.request))
115
116 return context
117
118
119 class DashboardPermissionMixin(DashboardMixin, PermissionRequiredMixin):
120 """
121 DashboardPermissionMixin combines the DashboardMixin with Django
122 Guardian's permission based mixin, rendering a 403 Unauthorized
123 template if the currently logged in user is lacking appropriate
124 permissions to access a certain view.
125 """
126
127 return_403 = True
128
[end of apps/dashboard/tools.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/apps/dashboard/chunks/dashboard/views.py b/apps/dashboard/chunks/dashboard/views.py
--- a/apps/dashboard/chunks/dashboard/views.py
+++ b/apps/dashboard/chunks/dashboard/views.py
@@ -25,12 +25,6 @@
fields = ('key', 'description', 'content')
permission_required = 'chunks.add_chunk'
- def get_object(self, queryset=None):
- # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146,
- # an issue with regards to guardian's PermissionMixin working with CreateView -
- # where it requires an object even in a CreateView.
- return None
-
def get_success_url(self):
return reverse('chunk-dashboard:update', args=(self.object.id,))
diff --git a/apps/dashboard/tools.py b/apps/dashboard/tools.py
--- a/apps/dashboard/tools.py
+++ b/apps/dashboard/tools.py
@@ -124,4 +124,12 @@
permissions to access a certain view.
"""
+ def get_permission_object(self, *args, **kwargs):
+ """
+ By default PermissionRequiredMixin works with object permissions.
+ By returning None we force guardian to only check if the user
+ has the proper permission.
+ """
+ return None
+
return_403 = True
diff --git a/apps/webshop/dashboard/views.py b/apps/webshop/dashboard/views.py
--- a/apps/webshop/dashboard/views.py
+++ b/apps/webshop/dashboard/views.py
@@ -42,10 +42,6 @@
template_name = 'webshop/dashboard/category_update.html'
permission_required = 'webshop.add_category'
- def get_object(self, *args, **kwargs):
- # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195
- return None
-
def get_success_url(self):
return reverse('dashboard-webshop:categories')
@@ -94,10 +90,6 @@
product.category = category
return super(ProductCreate, self).form_valid(form)
- def get_object(self, *args, **kwargs):
- # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195
- return None
-
def get_success_url(self):
return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})
|
{"golden_diff": "diff --git a/apps/dashboard/chunks/dashboard/views.py b/apps/dashboard/chunks/dashboard/views.py\n--- a/apps/dashboard/chunks/dashboard/views.py\n+++ b/apps/dashboard/chunks/dashboard/views.py\n@@ -25,12 +25,6 @@\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.add_chunk'\n \n- def get_object(self, queryset=None):\n- # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146,\n- # an issue with regards to guardian's PermissionMixin working with CreateView -\n- # where it requires an object even in a CreateView.\n- return None\n-\n def get_success_url(self):\n return reverse('chunk-dashboard:update', args=(self.object.id,))\n \ndiff --git a/apps/dashboard/tools.py b/apps/dashboard/tools.py\n--- a/apps/dashboard/tools.py\n+++ b/apps/dashboard/tools.py\n@@ -124,4 +124,12 @@\n permissions to access a certain view.\n \"\"\"\n \n+ def get_permission_object(self, *args, **kwargs):\n+ \"\"\"\n+ By default PermissionRequiredMixin works with object permissions.\n+ By returning None we force guardian to only check if the user\n+ has the proper permission.\n+ \"\"\"\n+ return None\n+\n return_403 = True\ndiff --git a/apps/webshop/dashboard/views.py b/apps/webshop/dashboard/views.py\n--- a/apps/webshop/dashboard/views.py\n+++ b/apps/webshop/dashboard/views.py\n@@ -42,10 +42,6 @@\n template_name = 'webshop/dashboard/category_update.html'\n permission_required = 'webshop.add_category'\n \n- def get_object(self, *args, **kwargs):\n- # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n- return None\n-\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n \n@@ -94,10 +90,6 @@\n product.category = category\n return super(ProductCreate, self).form_valid(form)\n \n- def get_object(self, *args, **kwargs):\n- # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n- return None\n-\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})\n", "issue": "Make sure people with `webshop.view_model` can view detail views in dashboard\nUsers with access to webshop dashboard cannot get a list of products (dashboard -> webshop -> kategorier -> click on a category), nor view the details of an order (dashboard -> webshop -> bestillinger -> click on an order)\n\n", "before_files": [{"content": "import logging\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.views.generic import (CreateView, DeleteView, DetailView, ListView, TemplateView,\n UpdateView)\nfrom taggit.models import TaggedItem\n\nfrom apps.dashboard.tools import DashboardPermissionMixin\nfrom apps.gallery.models import ResponsiveImage\nfrom apps.webshop.models import Category, OrderLine, Product\n\nlogger = logging.getLogger(__name__)\n\n\nclass Overview(DashboardPermissionMixin, TemplateView):\n template_name = 'webshop/dashboard/index.html'\n permission_required = 'webshop.view_category'\n\n\nclass Categories(DashboardPermissionMixin, TemplateView):\n model = Category\n template_name = 'webshop/dashboard/categories.html'\n permission_required = 'webshop.change_category'\n\n def get_context_data(self, *args, **kwargs):\n context = super(Categories, self).get_context_data(*args, **kwargs)\n context['categories'] = Category.objects.all().prefetch_related('products')\n return context\n\n\nclass 
CategoryView(DashboardPermissionMixin, DetailView):\n model = Category\n template_name = 'webshop/dashboard/category.html'\n permission_required = 'webshop.view_category'\n\n\nclass CategoryCreate(DashboardPermissionMixin, CreateView):\n model = Category\n fields = ['name', 'slug']\n template_name = 'webshop/dashboard/category_update.html'\n permission_required = 'webshop.add_category'\n\n def get_object(self, *args, **kwargs):\n # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n return None\n\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n\n\nclass CategoryUpdate(DashboardPermissionMixin, UpdateView):\n model = Category\n fields = ['name', 'slug']\n template_name = 'webshop/dashboard/category_update.html'\n context_object_name = 'category'\n permission_required = 'webshop.change_product'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.object.slug})\n\n\nclass CategoryDelete(DashboardPermissionMixin, DeleteView):\n model = Category\n template_name = 'webshop/dashboard/delete.html'\n permission_required = 'webshop.delete_category'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:categories')\n\n\nclass ProductView(DashboardPermissionMixin, DetailView):\n model = Product\n template_name = 'webshop/dashboard/product.html'\n permission_required = 'webshop.view_product'\n\n\nclass ProductCreate(DashboardPermissionMixin, CreateView):\n model = Product\n fields = ['name', 'slug', 'short', 'description', 'price', 'stock']\n template_name = 'webshop/dashboard/product_update.html'\n permission_required = 'webshop.add_product'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProductCreate, self).get_context_data(*args, **kwargs)\n context['category'] = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))\n return context\n\n def form_valid(self, form):\n product = form.save(commit=False)\n # Setting foreign key\n category = get_object_or_404(Category, slug=self.kwargs.get('category_slug'))\n product.category = category\n return super(ProductCreate, self).form_valid(form)\n\n def get_object(self, *args, **kwargs):\n # django-guardian hack https://github.com/django-guardian/django-guardian/issues/195\n return None\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.kwargs.get('category_slug')})\n\n\nclass ProductUpdate(DashboardPermissionMixin, UpdateView):\n model = Product\n fields = ['name', 'slug', 'short', 'description', 'price', 'stock']\n template_name = 'webshop/dashboard/product_update.html'\n context_object_name = 'product'\n permission_required = 'webshop.change_product'\n\n def get_context_data(self, *args, **kwargs):\n context = super(ProductUpdate, self).get_context_data(*args, **kwargs)\n context['category'] = self.object.category\n return context\n\n def get_success_url(self):\n return reverse('dashboard-webshop:product', kwargs={'slug': self.object.slug})\n\n\nclass ProductDelete(DashboardPermissionMixin, DeleteView):\n model = Product\n template_name = 'webshop/dashboard/delete.html'\n permission_required = 'webshop.delete_product'\n\n def get_success_url(self):\n return reverse('dashboard-webshop:category', kwargs={'slug': self.object.category.slug})\n\n\nclass ProductImage(DashboardPermissionMixin, DetailView):\n model = Product\n template_name = 'webshop/dashboard/image.html'\n permission_required = 'webshop.change_product'\n\n def get_context_data(self, *args, 
**kwargs):\n context = super(ProductImage, self).get_context_data(*args, **kwargs)\n # Filter out potential ResponsiveImage objects that have orphan file references\n images = ResponsiveImage.objects.all().order_by('-timestamp')[:15]\n context['images'] = [i for i in images if i.file_status_ok()]\n\n context['tags'] = sorted(set(tag.tag.name for tag in TaggedItem.objects.filter(\n content_type=ContentType.objects.get_for_model(ResponsiveImage)\n ).order_by('tag__name')))\n\n return context\n\n def post(self, request, *args, **kwargs):\n pass\n\n\nclass Orders(DashboardPermissionMixin, ListView):\n template_name = 'webshop/dashboard/orders.html'\n permission_required = 'webshop.view_order'\n queryset = OrderLine.objects.filter(paid=True)\n context_object_name = 'orders'\n\n\nclass Order(DashboardPermissionMixin, DetailView):\n model = OrderLine\n template_name = 'webshop/dashboard/order.html'\n permission_required = 'webshop.change_order_line'\n context_object_name = 'order'\n\n\nclass OrderDeliver(DashboardPermissionMixin, DetailView):\n model = OrderLine\n permission_required = 'webshop.change_order_line'\n\n def post(self, *args, **kwargs):\n super(OrderDeliver, self).get(*args, **kwargs)\n if not self.object.delivered:\n self.object.delivered = True\n self.object.save()\n return self.get(*args, **kwargs)\n\n def get(self, *args, **kwargs):\n super(OrderDeliver, self).get(*args, **kwargs)\n return redirect('dashboard-webshop:order', pk=self.object.pk)\n", "path": "apps/webshop/dashboard/views.py"}, {"content": "from chunks.models import Chunk\nfrom django.core.urlresolvers import reverse, reverse_lazy\nfrom django.views.generic import CreateView, DeleteView, ListView, UpdateView\n\nfrom apps.dashboard.tools import DashboardPermissionMixin\n\n\nclass ChunkList(DashboardPermissionMixin, ListView):\n model = Chunk\n queryset = Chunk.objects.all()\n context_object_name = 'chunks'\n permission_required = 'chunks.add_chunk'\n\n\nclass ChunkUpdate(DashboardPermissionMixin, UpdateView):\n model = Chunk\n context_object_name = 'chunk'\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.change_chunk'\n success_url = reverse_lazy('chunk-dashboard:list')\n\n\nclass ChunkCreate(DashboardPermissionMixin, CreateView):\n model = Chunk\n fields = ('key', 'description', 'content')\n permission_required = 'chunks.add_chunk'\n\n def get_object(self, queryset=None):\n # Overridden to work around https://github.com/django-guardian/django-guardian/issues/146,\n # an issue with regards to guardian's PermissionMixin working with CreateView -\n # where it requires an object even in a CreateView.\n return None\n\n def get_success_url(self):\n return reverse('chunk-dashboard:update', args=(self.object.id,))\n\n\nclass ChunkDelete(DashboardPermissionMixin, DeleteView):\n model = Chunk\n permission_required = ('chunks.delete_chunk',)\n success_url = reverse_lazy('chunk-dashboard:list')\n", "path": "apps/dashboard/chunks/dashboard/views.py"}, {"content": "# -*- encoding: utf-8 -*-\n\nfrom datetime import date\n\nfrom django.contrib.auth.models import Group\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nfrom guardian.mixins import PermissionRequiredMixin\n\nfrom apps.approval.models import MembershipApproval\nfrom apps.gallery.models import UnhandledImage\nfrom apps.inventory.models import Batch\nfrom apps.posters.models import Poster\n\n\ndef has_access(request):\n \"\"\"\n This helper method does a basic check to see if the logged in user\n has access to the 
dashboard.\n\n We might add additional checks here later.\n \"\"\"\n\n if request.user.is_superuser:\n return True\n\n try:\n committees = Group.objects.get(name='Komiteer')\n except ObjectDoesNotExist:\n committees = None\n\n if committees and committees in request.user.groups.all():\n return True\n\n return False\n\n\ndef check_access_or_403(request):\n \"\"\"\n Checks if a user bundled in a request object has access using has_access\n tool function, if not raise a 403 exception\n \"\"\"\n if not has_access(request):\n raise PermissionDenied\n\n\ndef get_base_context(request):\n \"\"\"\n This function returns a dictionary with the proper context variables\n needed for given permission settings. Should be used as the initial\n context for every dashboard view. For example, it is used for rendering\n badges in the dashboard menu.\n\n Add your own if req.user.has_perm statements adding the context objects\n of that you need.\n \"\"\"\n\n context = {}\n\n context['user_permissions'] = set(request.user.get_all_permissions())\n\n # Check if we need approval count to display in template sidebar badge\n if request.user.has_perm('approval.view_membershipapproval'):\n context['approval_pending'] = MembershipApproval.objects.filter(\n processed=False).count()\n\n # Check if there exists a batch in inventory that has expired\n if request.user.has_perm('inventory.view_item'):\n if Batch.objects.filter(expiration_date__lt=date.today()):\n context['inventory_expired'] = True\n\n if request.user.has_perm('posters.view_poster'):\n if Poster.objects.filter(assigned_to=None) or Poster.objects.filter(assigned_to=request.user):\n context['poster_orders'] = Poster.objects.filter(assigned_to=None).count()\n context['poster_orders'] += Poster.objects.filter(assigned_to=request.user, finished=False).count()\n\n # Check if we have any unhandled images pending crop and save\n if request.user.has_perm('gallery.view_unhandledimage'):\n context['unhandled_images'] = UnhandledImage.objects.all()\n\n return context\n\n\n# Mixin for Class Based Views\nclass DashboardMixin(object):\n \"\"\"\n The DashboardMixin sets up the needed context data, as well as performs\n generic access checks.\n \"\"\"\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"\n Hooks into the dispatch cycle, checking whether or not the currently\n logged in user has access to the dashboard in general.\n :param request: Django Request object\n :param args: Positional arguments\n :param kwargs: Keyword arguments\n :return: Invocation of superclass dispatch\n \"\"\"\n\n if not has_access(request):\n raise PermissionDenied\n\n return super(DashboardMixin, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Sets context data on superclass, before populating it further with\n the context data needed by dashboard.\n :param kwargs: Keyword arguments\n :return: A context dictionary\n \"\"\"\n\n context = super(DashboardMixin, self).get_context_data(**kwargs)\n context.update(get_base_context(self.request))\n\n return context\n\n\nclass DashboardPermissionMixin(DashboardMixin, PermissionRequiredMixin):\n \"\"\"\n DashboardPermissionMixin combines the DashboardMixin with Django\n Guardian's permission based mixin, rendering a 403 Unauthorized\n template if the currently logged in user is lacking appropriate\n permissions to access a certain view.\n \"\"\"\n\n return_403 = True\n", "path": "apps/dashboard/tools.py"}]}
| 4,026 | 531 |
gh_patches_debug_20412
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1412
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Swarm tests fail if host has multiple network interfaces
```
__________________ ERROR at setup of ServiceTest.test_create ___________________
tests/integration/models_services_test.py:14: in setUpClass
client.swarm.init(listen_addr=helpers.swarm_listen_addr())
docker/models/swarm.py:96: in init
self.client.api.init_swarm(**init_kwargs)
docker/utils/decorators.py:35: in wrapper
return f(self, *args, **kwargs)
docker/api/swarm.py:102: in init_swarm
self._raise_for_status(response)
docker/api/client.py:208: in _raise_for_status
raise create_api_error_from_http_exception(e)
docker/errors.py:30: in create_api_error_from_http_exception
raise cls(e, response=response, explanation=explanation)
E APIError: 500 Server Error: Internal Server Error for url: http://172.17.0.1:12375/v1.24/swarm/init ("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (10.0.2.15 on enp0s3 and 192.168.48.11 on enp0s8)")
```
</issue>
<code>
[start of docker/models/swarm.py]
1 from docker.api import APIClient
2 from docker.errors import APIError
3 from docker.types import SwarmSpec
4 from .resource import Model
5
6
7 class Swarm(Model):
8 """
9 The server's Swarm state. This a singleton that must be reloaded to get
10 the current state of the Swarm.
11 """
12 def __init__(self, *args, **kwargs):
13 super(Swarm, self).__init__(*args, **kwargs)
14 if self.client:
15 try:
16 self.reload()
17 except APIError as e:
18 # FIXME: https://github.com/docker/docker/issues/29192
19 if e.response.status_code not in (406, 503):
20 raise
21
22 @property
23 def version(self):
24 """
25 The version number of the swarm. If this is not the same as the
26 server, the :py:meth:`update` function will not work and you will
27 need to call :py:meth:`reload` before calling it again.
28 """
29 return self.attrs.get('Version').get('Index')
30
31 def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
32 force_new_cluster=False, swarm_spec=None, **kwargs):
33 """
34 Initialize a new swarm on this Engine.
35
36 Args:
37 advertise_addr (str): Externally reachable address advertised to
38 other nodes. This can either be an address/port combination in
39 the form ``192.168.1.1:4567``, or an interface followed by a
40 port number, like ``eth0:4567``. If the port number is omitted,
41 the port number from the listen address is used.
42
43 If not specified, it will be automatically detected when
44 possible.
45 listen_addr (str): Listen address used for inter-manager
46 communication, as well as determining the networking interface
47 used for the VXLAN Tunnel Endpoint (VTEP). This can either be
48 an address/port combination in the form ``192.168.1.1:4567``,
49 or an interface followed by a port number, like ``eth0:4567``.
50 If the port number is omitted, the default swarm listening port
51 is used. Default: ``0.0.0.0:2377``
52 force_new_cluster (bool): Force creating a new Swarm, even if
53 already part of one. Default: False
54 task_history_retention_limit (int): Maximum number of tasks
55 history stored.
56 snapshot_interval (int): Number of logs entries between snapshot.
57 keep_old_snapshots (int): Number of snapshots to keep beyond the
58 current snapshot.
59 log_entries_for_slow_followers (int): Number of log entries to
60 keep around to sync up slow followers after a snapshot is
61 created.
62 heartbeat_tick (int): Amount of ticks (in seconds) between each
63 heartbeat.
64 election_tick (int): Amount of ticks (in seconds) needed without a
65 leader to trigger a new election.
66 dispatcher_heartbeat_period (int): The delay for an agent to send
67 a heartbeat to the dispatcher.
68 node_cert_expiry (int): Automatic expiry for nodes certificates.
69 external_ca (dict): Configuration for forwarding signing requests
70 to an external certificate authority. Use
71 ``docker.types.SwarmExternalCA``.
72 name (string): Swarm's name
73
74 Returns:
75 ``True`` if the request went through.
76
77 Raises:
78 :py:class:`docker.errors.APIError`
79 If the server returns an error.
80
81 Example:
82
83 >>> client.swarm.init(
84 advertise_addr='eth0', listen_addr='0.0.0.0:5000',
85 force_new_cluster=False, snapshot_interval=5000,
86 log_entries_for_slow_followers=1200
87 )
88
89 """
90 init_kwargs = {}
91 for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:
92 if arg in kwargs:
93 init_kwargs[arg] = kwargs[arg]
94 del kwargs[arg]
95 init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
96 self.client.api.init_swarm(**init_kwargs)
97 self.reload()
98
99 def join(self, *args, **kwargs):
100 return self.client.api.join_swarm(*args, **kwargs)
101 join.__doc__ = APIClient.join_swarm.__doc__
102
103 def leave(self, *args, **kwargs):
104 return self.client.api.leave_swarm(*args, **kwargs)
105 leave.__doc__ = APIClient.leave_swarm.__doc__
106
107 def reload(self):
108 """
109 Inspect the swarm on the server and store the response in
110 :py:attr:`attrs`.
111
112 Raises:
113 :py:class:`docker.errors.APIError`
114 If the server returns an error.
115 """
116 self.attrs = self.client.api.inspect_swarm()
117
118 def update(self, rotate_worker_token=False, rotate_manager_token=False,
119 **kwargs):
120 """
121 Update the swarm's configuration.
122
123 It takes the same arguments as :py:meth:`init`, except
124 ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
125 addition, it takes these arguments:
126
127 Args:
128 rotate_worker_token (bool): Rotate the worker join token. Default:
129 ``False``.
130 rotate_manager_token (bool): Rotate the manager join token.
131 Default: ``False``.
132
133 Raises:
134 :py:class:`docker.errors.APIError`
135 If the server returns an error.
136
137 """
138 # this seems to have to be set
139 if kwargs.get('node_cert_expiry') is None:
140 kwargs['node_cert_expiry'] = 7776000000000000
141
142 return self.client.api.update_swarm(
143 version=self.version,
144 swarm_spec=SwarmSpec(**kwargs),
145 rotate_worker_token=rotate_worker_token,
146 rotate_manager_token=rotate_manager_token
147 )
148
[end of docker/models/swarm.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docker/models/swarm.py b/docker/models/swarm.py
--- a/docker/models/swarm.py
+++ b/docker/models/swarm.py
@@ -29,7 +29,7 @@
return self.attrs.get('Version').get('Index')
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
- force_new_cluster=False, swarm_spec=None, **kwargs):
+ force_new_cluster=False, **kwargs):
"""
Initialize a new swarm on this Engine.
@@ -87,11 +87,11 @@
)
"""
- init_kwargs = {}
- for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:
- if arg in kwargs:
- init_kwargs[arg] = kwargs[arg]
- del kwargs[arg]
+ init_kwargs = {
+ 'advertise_addr': advertise_addr,
+ 'listen_addr': listen_addr,
+ 'force_new_cluster': force_new_cluster
+ }
init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)
self.client.api.init_swarm(**init_kwargs)
self.reload()
|
{"golden_diff": "diff --git a/docker/models/swarm.py b/docker/models/swarm.py\n--- a/docker/models/swarm.py\n+++ b/docker/models/swarm.py\n@@ -29,7 +29,7 @@\n return self.attrs.get('Version').get('Index')\n \n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n- force_new_cluster=False, swarm_spec=None, **kwargs):\n+ force_new_cluster=False, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n \n@@ -87,11 +87,11 @@\n )\n \n \"\"\"\n- init_kwargs = {}\n- for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:\n- if arg in kwargs:\n- init_kwargs[arg] = kwargs[arg]\n- del kwargs[arg]\n+ init_kwargs = {\n+ 'advertise_addr': advertise_addr,\n+ 'listen_addr': listen_addr,\n+ 'force_new_cluster': force_new_cluster\n+ }\n init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n", "issue": "Swarm tests fail if host has multiple network interfaces\n```\r\n__________________ ERROR at setup of ServiceTest.test_create ___________________\r\ntests/integration/models_services_test.py:14: in setUpClass\r\n client.swarm.init(listen_addr=helpers.swarm_listen_addr())\r\ndocker/models/swarm.py:96: in init\r\n self.client.api.init_swarm(**init_kwargs)\r\ndocker/utils/decorators.py:35: in wrapper\r\n return f(self, *args, **kwargs)\r\ndocker/api/swarm.py:102: in init_swarm\r\n self._raise_for_status(response)\r\ndocker/api/client.py:208: in _raise_for_status\r\n raise create_api_error_from_http_exception(e)\r\ndocker/errors.py:30: in create_api_error_from_http_exception\r\n raise cls(e, response=response, explanation=explanation)\r\nE APIError: 500 Server Error: Internal Server Error for url: http://172.17.0.1:12375/v1.24/swarm/init (\"could not choose an IP address to advertise since this system has multiple addresses on different interfaces (10.0.2.15 on enp0s3 and 192.168.48.11 on enp0s8)\")\r\n```\r\n\n", "before_files": [{"content": "from docker.api import APIClient\nfrom docker.errors import APIError\nfrom docker.types import SwarmSpec\nfrom .resource import Model\n\n\nclass Swarm(Model):\n \"\"\"\n The server's Swarm state. This a singleton that must be reloaded to get\n the current state of the Swarm.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Swarm, self).__init__(*args, **kwargs)\n if self.client:\n try:\n self.reload()\n except APIError as e:\n # FIXME: https://github.com/docker/docker/issues/29192\n if e.response.status_code not in (406, 503):\n raise\n\n @property\n def version(self):\n \"\"\"\n The version number of the swarm. If this is not the same as the\n server, the :py:meth:`update` function will not work and you will\n need to call :py:meth:`reload` before calling it again.\n \"\"\"\n return self.attrs.get('Version').get('Index')\n\n def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',\n force_new_cluster=False, swarm_spec=None, **kwargs):\n \"\"\"\n Initialize a new swarm on this Engine.\n\n Args:\n advertise_addr (str): Externally reachable address advertised to\n other nodes. This can either be an address/port combination in\n the form ``192.168.1.1:4567``, or an interface followed by a\n port number, like ``eth0:4567``. If the port number is omitted,\n the port number from the listen address is used.\n\n If not specified, it will be automatically detected when\n possible.\n listen_addr (str): Listen address used for inter-manager\n communication, as well as determining the networking interface\n used for the VXLAN Tunnel Endpoint (VTEP). 
This can either be\n an address/port combination in the form ``192.168.1.1:4567``,\n or an interface followed by a port number, like ``eth0:4567``.\n If the port number is omitted, the default swarm listening port\n is used. Default: ``0.0.0.0:2377``\n force_new_cluster (bool): Force creating a new Swarm, even if\n already part of one. Default: False\n task_history_retention_limit (int): Maximum number of tasks\n history stored.\n snapshot_interval (int): Number of logs entries between snapshot.\n keep_old_snapshots (int): Number of snapshots to keep beyond the\n current snapshot.\n log_entries_for_slow_followers (int): Number of log entries to\n keep around to sync up slow followers after a snapshot is\n created.\n heartbeat_tick (int): Amount of ticks (in seconds) between each\n heartbeat.\n election_tick (int): Amount of ticks (in seconds) needed without a\n leader to trigger a new election.\n dispatcher_heartbeat_period (int): The delay for an agent to send\n a heartbeat to the dispatcher.\n node_cert_expiry (int): Automatic expiry for nodes certificates.\n external_ca (dict): Configuration for forwarding signing requests\n to an external certificate authority. Use\n ``docker.types.SwarmExternalCA``.\n name (string): Swarm's name\n\n Returns:\n ``True`` if the request went through.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n\n >>> client.swarm.init(\n advertise_addr='eth0', listen_addr='0.0.0.0:5000',\n force_new_cluster=False, snapshot_interval=5000,\n log_entries_for_slow_followers=1200\n )\n\n \"\"\"\n init_kwargs = {}\n for arg in ['advertise_addr', 'listen_addr', 'force_new_cluster']:\n if arg in kwargs:\n init_kwargs[arg] = kwargs[arg]\n del kwargs[arg]\n init_kwargs['swarm_spec'] = SwarmSpec(**kwargs)\n self.client.api.init_swarm(**init_kwargs)\n self.reload()\n\n def join(self, *args, **kwargs):\n return self.client.api.join_swarm(*args, **kwargs)\n join.__doc__ = APIClient.join_swarm.__doc__\n\n def leave(self, *args, **kwargs):\n return self.client.api.leave_swarm(*args, **kwargs)\n leave.__doc__ = APIClient.leave_swarm.__doc__\n\n def reload(self):\n \"\"\"\n Inspect the swarm on the server and store the response in\n :py:attr:`attrs`.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n self.attrs = self.client.api.inspect_swarm()\n\n def update(self, rotate_worker_token=False, rotate_manager_token=False,\n **kwargs):\n \"\"\"\n Update the swarm's configuration.\n\n It takes the same arguments as :py:meth:`init`, except\n ``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In\n addition, it takes these arguments:\n\n Args:\n rotate_worker_token (bool): Rotate the worker join token. Default:\n ``False``.\n rotate_manager_token (bool): Rotate the manager join token.\n Default: ``False``.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n \"\"\"\n # this seems to have to be set\n if kwargs.get('node_cert_expiry') is None:\n kwargs['node_cert_expiry'] = 7776000000000000\n\n return self.client.api.update_swarm(\n version=self.version,\n swarm_spec=SwarmSpec(**kwargs),\n rotate_worker_token=rotate_worker_token,\n rotate_manager_token=rotate_manager_token\n )\n", "path": "docker/models/swarm.py"}]}
| 2,447 | 253 |
gh_patches_debug_1241
|
rasdani/github-patches
|
git_diff
|
crytic__slither-1108
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Infinite loop in RTLO detector
### What happened?
Slither hangs on this code indefinitely
### Can you share code with us to reproduce this bug?
https://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol
### Version
0.8.2
### Relevant log output
_No response_
</issue>
<code>
[start of slither/detectors/source/rtlo.py]
1 import re
2 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification
3
4
5 class RightToLeftOverride(AbstractDetector):
6 """
7 Detect the usage of a Right-To-Left-Override (U+202E) character
8 """
9
10 ARGUMENT = "rtlo"
11 HELP = "Right-To-Left-Override control character is used"
12 IMPACT = DetectorClassification.HIGH
13 CONFIDENCE = DetectorClassification.HIGH
14
15 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character"
16 WIKI_TITLE = "Right-to-Left-Override character"
17 WIKI_DESCRIPTION = "An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`."
18
19 # region wiki_exploit_scenario
20 WIKI_EXPLOIT_SCENARIO = """
21 ```solidity
22 contract Token
23 {
24
25 address payable o; // owner
26 mapping(address => uint) tokens;
27
28 function withdraw() external returns(uint)
29 {
30 uint amount = tokens[msg.sender];
31 address payable d = msg.sender;
32 tokens[msg.sender] = 0;
33 _withdraw(/*owner/*noitanitsed*/ d, o/*
34 /*value */, amount);
35 }
36
37 function _withdraw(address payable fee_receiver, address payable destination, uint value) internal
38 {
39 fee_receiver.transfer(1);
40 destination.transfer(value);
41 }
42 }
43 ```
44
45 `Token` uses the right-to-left-override character when calling `_withdraw`. As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.
46
47 """
48 # endregion wiki_exploit_scenario
49
50 WIKI_RECOMMENDATION = "Special control characters must not be allowed."
51
52 RTLO_CHARACTER_ENCODED = "\u202e".encode("utf-8")
53 STANDARD_JSON = False
54
55 def _detect(self):
56 results = []
57 pattern = re.compile(".*\u202e.*".encode("utf-8"))
58
59 for filename, source in self.slither.source_code.items():
60 # Attempt to find all RTLO characters in this source file.
61 original_source_encoded = source.encode("utf-8")
62 start_index = 0
63
64 # Keep searching all file contents for the character.
65 while True:
66 source_encoded = original_source_encoded[start_index:]
67 result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)
68
69 # If we couldn't find the character in the remainder of source, stop.
70 if result_index == -1:
71 break
72
73 # We found another instance of the character, define our output
74 idx = start_index + result_index
75
76 relative = self.slither.crytic_compile.filename_lookup(filename).relative
77 info = f"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\n"
78
79 # We have a patch, so pattern.find will return at least one result
80
81 info += f"\t- {pattern.findall(source_encoded)[0]}\n"
82 res = self.generate_result(info)
83 res.add_other(
84 "rtlo-character",
85 (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),
86 self.compilation_unit,
87 )
88 results.append(res)
89
90 # Advance the start index for the next iteration
91 start_index = result_index + 1
92
93 return results
94
[end of slither/detectors/source/rtlo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py
--- a/slither/detectors/source/rtlo.py
+++ b/slither/detectors/source/rtlo.py
@@ -88,6 +88,6 @@
results.append(res)
# Advance the start index for the next iteration
- start_index = result_index + 1
+ start_index = idx + 1
return results
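
The one-line change matters because `result_index` is relative to the current slice while `idx` is the absolute byte offset; with more than one RTLO character the relative update can jump backwards and the loop never advances past them. A standalone sketch of both behaviours (an illustration of the loop, not Slither's actual detector code):

```python
RTLO = "\u202e".encode("utf-8")

def find_offsets(data: bytes, use_absolute: bool, max_iters: int = 10):
    """Collect byte offsets of RTLO; the relative update mimics the original bug."""
    offsets, start_index = [], 0
    for _ in range(max_iters):            # cap iterations so the buggy variant still returns
        rel = data[start_index:].find(RTLO)
        if rel == -1:
            return offsets
        idx = start_index + rel            # absolute offset in the original bytes
        offsets.append(idx)
        start_index = (idx + 1) if use_absolute else (rel + 1)
    return offsets                         # cap hit: the buggy update kept cycling

sample = "x\u202ey\u202ez".encode("utf-8")          # RTLO characters at byte offsets 1 and 5
print(find_offsets(sample, use_absolute=True))      # [1, 5] and the loop terminates
print(find_offsets(sample, use_absolute=False))     # [1, 5, 5, 5, ...] never gets past offset 5
```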
|
{"golden_diff": "diff --git a/slither/detectors/source/rtlo.py b/slither/detectors/source/rtlo.py\n--- a/slither/detectors/source/rtlo.py\n+++ b/slither/detectors/source/rtlo.py\n@@ -88,6 +88,6 @@\n results.append(res)\n \n # Advance the start index for the next iteration\n- start_index = result_index + 1\n+ start_index = idx + 1\n \n return results\n", "issue": "[Bug]: Infinite loop in RTLO detector\n### What happened?\n\nSlither hangs on this code indefinitely\n\n### Can you share code with us to reproduce this bug?\n\nhttps://github.com/ethereum/solidity/blob/develop/test/libsolidity/syntaxTests/comments/multiline_unicode_direction_override_5.sol\n\n### Version\n\n0.8.2\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "import re\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass RightToLeftOverride(AbstractDetector):\n \"\"\"\n Detect the usage of a Right-To-Left-Override (U+202E) character\n \"\"\"\n\n ARGUMENT = \"rtlo\"\n HELP = \"Right-To-Left-Override control character is used\"\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#right-to-left-override-character\"\n WIKI_TITLE = \"Right-to-Left-Override character\"\n WIKI_DESCRIPTION = \"An attacker can manipulate the logic of the contract by using a right-to-left-override character (`U+202E)`.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\ncontract Token\n{\n\n address payable o; // owner\n mapping(address => uint) tokens;\n\n function withdraw() external returns(uint)\n {\n uint amount = tokens[msg.sender];\n address payable d = msg.sender;\n tokens[msg.sender] = 0;\n _withdraw(/*owner\u202e/*noitanitsed*/ d, o/*\u202d\n\t\t /*value */, amount);\n }\n\n function _withdraw(address payable fee_receiver, address payable destination, uint value) internal\n {\n\t\tfee_receiver.transfer(1);\n\t\tdestination.transfer(value);\n }\n}\n```\n\n`Token` uses the right-to-left-override character when calling `_withdraw`. 
As a result, the fee is incorrectly sent to `msg.sender`, and the token balance is sent to the owner.\n\n\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Special control characters must not be allowed.\"\n\n RTLO_CHARACTER_ENCODED = \"\\u202e\".encode(\"utf-8\")\n STANDARD_JSON = False\n\n def _detect(self):\n results = []\n pattern = re.compile(\".*\\u202e.*\".encode(\"utf-8\"))\n\n for filename, source in self.slither.source_code.items():\n # Attempt to find all RTLO characters in this source file.\n original_source_encoded = source.encode(\"utf-8\")\n start_index = 0\n\n # Keep searching all file contents for the character.\n while True:\n source_encoded = original_source_encoded[start_index:]\n result_index = source_encoded.find(self.RTLO_CHARACTER_ENCODED)\n\n # If we couldn't find the character in the remainder of source, stop.\n if result_index == -1:\n break\n\n # We found another instance of the character, define our output\n idx = start_index + result_index\n\n relative = self.slither.crytic_compile.filename_lookup(filename).relative\n info = f\"{relative} contains a unicode right-to-left-override character at byte offset {idx}:\\n\"\n\n # We have a patch, so pattern.find will return at least one result\n\n info += f\"\\t- {pattern.findall(source_encoded)[0]}\\n\"\n res = self.generate_result(info)\n res.add_other(\n \"rtlo-character\",\n (filename, idx, len(self.RTLO_CHARACTER_ENCODED)),\n self.compilation_unit,\n )\n results.append(res)\n\n # Advance the start index for the next iteration\n start_index = result_index + 1\n\n return results\n", "path": "slither/detectors/source/rtlo.py"}]}
| 1,551 | 106 |
gh_patches_debug_5715
|
rasdani/github-patches
|
git_diff
|
huggingface__diffusers-1686
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
It is not possible to disable telemetry
### Describe the bug
Although the DISABLE_TELEMETRY and HF_HUB_OFFLINE env variables exist, setting them doesn't actually seem to disable telemetry.
https://github.com/huggingface/diffusers/blob/main/src/diffusers/hub_utils.py#L87
`pass` is used (so the function never returns early), and /api/telemetry is prepended to the huggingface endpoint.
### Reproduction
- set DISABLE_TELEMETRY to True
- run Stable Diffusion via `from_pretrained`
- see `/api/telemetry/diffusers_from_pretrained` in the requests made to huggingface
### Logs
_No response_
### System Info
main branch
</issue>
<code>
[start of src/diffusers/hub_utils.py]
1 # coding=utf-8
2 # Copyright 2022 The HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16
17 import os
18 import sys
19 from pathlib import Path
20 from typing import Dict, Optional, Union
21 from uuid import uuid4
22
23 import requests
24 from huggingface_hub import HfFolder, whoami
25
26 from . import __version__
27 from .utils import ENV_VARS_TRUE_VALUES, HUGGINGFACE_CO_RESOLVE_ENDPOINT, logging
28 from .utils.import_utils import (
29 _flax_version,
30 _jax_version,
31 _onnxruntime_version,
32 _torch_version,
33 is_flax_available,
34 is_modelcards_available,
35 is_onnx_available,
36 is_torch_available,
37 )
38
39
40 if is_modelcards_available():
41 from modelcards import CardData, ModelCard
42
43
44 logger = logging.get_logger(__name__)
45
46
47 MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "utils" / "model_card_template.md"
48 SESSION_ID = uuid4().hex
49 HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
50 DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
51 HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
52
53
54 def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
55 """
56 Formats a user-agent string with basic info about a request.
57 """
58 ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
59 if DISABLE_TELEMETRY:
60 return ua + "; telemetry/off"
61 if is_torch_available():
62 ua += f"; torch/{_torch_version}"
63 if is_flax_available():
64 ua += f"; jax/{_jax_version}"
65 ua += f"; flax/{_flax_version}"
66 if is_onnx_available():
67 ua += f"; onnxruntime/{_onnxruntime_version}"
68 # CI will set this value to True
69 if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
70 ua += "; is_ci/true"
71 if isinstance(user_agent, dict):
72 ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
73 elif isinstance(user_agent, str):
74 ua += "; " + user_agent
75 return ua
76
77
78 def send_telemetry(data: Dict, name: str):
79 """
80 Sends logs to the Hub telemetry endpoint.
81
82 Args:
83 data: the fields to track, e.g. {"example_name": "dreambooth"}
84 name: a unique name to differentiate the telemetry logs, e.g. "diffusers_examples" or "diffusers_notebooks"
85 """
86 if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
87 pass
88
89 headers = {"user-agent": http_user_agent(data)}
90 endpoint = HUGGINGFACE_CO_TELEMETRY + name
91 try:
92 r = requests.head(endpoint, headers=headers)
93 r.raise_for_status()
94 except Exception:
95 # We don't want to error in case of connection errors of any kind.
96 pass
97
98
99 def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
100 if token is None:
101 token = HfFolder.get_token()
102 if organization is None:
103 username = whoami(token)["name"]
104 return f"{username}/{model_id}"
105 else:
106 return f"{organization}/{model_id}"
107
108
109 def create_model_card(args, model_name):
110 if not is_modelcards_available:
111 raise ValueError(
112 "Please make sure to have `modelcards` installed when using the `create_model_card` function. You can"
113 " install the package with `pip install modelcards`."
114 )
115
116 if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
117 return
118
119 hub_token = args.hub_token if hasattr(args, "hub_token") else None
120 repo_name = get_full_repo_name(model_name, token=hub_token)
121
122 model_card = ModelCard.from_template(
123 card_data=CardData( # Card metadata object that will be converted to YAML block
124 language="en",
125 license="apache-2.0",
126 library_name="diffusers",
127 tags=[],
128 datasets=args.dataset_name,
129 metrics=[],
130 ),
131 template_path=MODEL_CARD_TEMPLATE_PATH,
132 model_name=model_name,
133 repo_name=repo_name,
134 dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
135 learning_rate=args.learning_rate,
136 train_batch_size=args.train_batch_size,
137 eval_batch_size=args.eval_batch_size,
138 gradient_accumulation_steps=args.gradient_accumulation_steps
139 if hasattr(args, "gradient_accumulation_steps")
140 else None,
141 adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
142 adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
143 adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
144 adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
145 lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
146 lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
147 ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
148 ema_power=args.ema_power if hasattr(args, "ema_power") else None,
149 ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
150 mixed_precision=args.mixed_precision,
151 )
152
153 card_path = os.path.join(args.output_dir, "README.md")
154 model_card.save(card_path)
155
[end of src/diffusers/hub_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/diffusers/hub_utils.py b/src/diffusers/hub_utils.py
--- a/src/diffusers/hub_utils.py
+++ b/src/diffusers/hub_utils.py
@@ -84,7 +84,7 @@
name: a unique name to differentiate the telemetry logs, e.g. "diffusers_examples" or "diffusers_notebooks"
"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
- pass
+ return
headers = {"user-agent": http_user_agent(data)}
endpoint = HUGGINGFACE_CO_TELEMETRY + name
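
The fix works because `pass` is a no-op that lets execution fall through to the `requests.head()` call below the guard, whereas `return` leaves `send_telemetry` before any request is built. A minimal sketch of the difference, with a list standing in for the real HTTP call:

```python
def send_telemetry_sketch(disabled: bool):
    sent = []
    if disabled:
        return sent                               # the fix: exit before building the request
        # with `pass` here instead, execution would fall through to the next line
    sent.append("HEAD /api/telemetry/<name>")     # stand-in for requests.head(endpoint, ...)
    return sent

assert send_telemetry_sketch(True) == []                               # telemetry disabled
assert send_telemetry_sketch(False) == ["HEAD /api/telemetry/<name>"]  # telemetry enabled
```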
|
{"golden_diff": "diff --git a/src/diffusers/hub_utils.py b/src/diffusers/hub_utils.py\n--- a/src/diffusers/hub_utils.py\n+++ b/src/diffusers/hub_utils.py\n@@ -84,7 +84,7 @@\n name: a unique name to differentiate the telemetry logs, e.g. \"diffusers_examples\" or \"diffusers_notebooks\"\n \"\"\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n- pass\n+ return\n \n headers = {\"user-agent\": http_user_agent(data)}\n endpoint = HUGGINGFACE_CO_TELEMETRY + name\n", "issue": "It is not possible to disable telemetry\n### Describe the bug\n\nAlthough the DISABLE_TELEMETRY and HF_HUB_OFFLINE env variables exist, setting them doesn't actually seem to disable telemetry.\r\n\r\nhttps://github.com/huggingface/diffusers/blob/main/src/diffusers/hub_utils.py#L87\r\n\r\nPass is used, and /api/telemetry is prepended to the huggingface endpoint.\n\n### Reproduction\n\n- set DISABLE_TELEMETRY to True\r\n- run stablediffusion from pretrained\r\n- see `/api/telemetry/diffusers_from_pretrained` in the requests made to huggingface\n\n### Logs\n\n_No response_\n\n### System Info\n\nmain branch\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2022 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\nfrom uuid import uuid4\n\nimport requests\nfrom huggingface_hub import HfFolder, whoami\n\nfrom . 
import __version__\nfrom .utils import ENV_VARS_TRUE_VALUES, HUGGINGFACE_CO_RESOLVE_ENDPOINT, logging\nfrom .utils.import_utils import (\n _flax_version,\n _jax_version,\n _onnxruntime_version,\n _torch_version,\n is_flax_available,\n is_modelcards_available,\n is_onnx_available,\n is_torch_available,\n)\n\n\nif is_modelcards_available():\n from modelcards import CardData, ModelCard\n\n\nlogger = logging.get_logger(__name__)\n\n\nMODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / \"utils\" / \"model_card_template.md\"\nSESSION_ID = uuid4().hex\nHF_HUB_OFFLINE = os.getenv(\"HF_HUB_OFFLINE\", \"\").upper() in ENV_VARS_TRUE_VALUES\nDISABLE_TELEMETRY = os.getenv(\"DISABLE_TELEMETRY\", \"\").upper() in ENV_VARS_TRUE_VALUES\nHUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + \"/api/telemetry/\"\n\n\ndef http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n \"\"\"\n Formats a user-agent string with basic info about a request.\n \"\"\"\n ua = f\"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if DISABLE_TELEMETRY:\n return ua + \"; telemetry/off\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_flax_available():\n ua += f\"; jax/{_jax_version}\"\n ua += f\"; flax/{_flax_version}\"\n if is_onnx_available():\n ua += f\"; onnxruntime/{_onnxruntime_version}\"\n # CI will set this value to True\n if os.environ.get(\"DIFFUSERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua\n\n\ndef send_telemetry(data: Dict, name: str):\n \"\"\"\n Sends logs to the Hub telemetry endpoint.\n\n Args:\n data: the fields to track, e.g. {\"example_name\": \"dreambooth\"}\n name: a unique name to differentiate the telemetry logs, e.g. \"diffusers_examples\" or \"diffusers_notebooks\"\n \"\"\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n pass\n\n headers = {\"user-agent\": http_user_agent(data)}\n endpoint = HUGGINGFACE_CO_TELEMETRY + name\n try:\n r = requests.head(endpoint, headers=headers)\n r.raise_for_status()\n except Exception:\n # We don't want to error in case of connection errors of any kind.\n pass\n\n\ndef get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):\n if token is None:\n token = HfFolder.get_token()\n if organization is None:\n username = whoami(token)[\"name\"]\n return f\"{username}/{model_id}\"\n else:\n return f\"{organization}/{model_id}\"\n\n\ndef create_model_card(args, model_name):\n if not is_modelcards_available:\n raise ValueError(\n \"Please make sure to have `modelcards` installed when using the `create_model_card` function. 
You can\"\n \" install the package with `pip install modelcards`.\"\n )\n\n if hasattr(args, \"local_rank\") and args.local_rank not in [-1, 0]:\n return\n\n hub_token = args.hub_token if hasattr(args, \"hub_token\") else None\n repo_name = get_full_repo_name(model_name, token=hub_token)\n\n model_card = ModelCard.from_template(\n card_data=CardData( # Card metadata object that will be converted to YAML block\n language=\"en\",\n license=\"apache-2.0\",\n library_name=\"diffusers\",\n tags=[],\n datasets=args.dataset_name,\n metrics=[],\n ),\n template_path=MODEL_CARD_TEMPLATE_PATH,\n model_name=model_name,\n repo_name=repo_name,\n dataset_name=args.dataset_name if hasattr(args, \"dataset_name\") else None,\n learning_rate=args.learning_rate,\n train_batch_size=args.train_batch_size,\n eval_batch_size=args.eval_batch_size,\n gradient_accumulation_steps=args.gradient_accumulation_steps\n if hasattr(args, \"gradient_accumulation_steps\")\n else None,\n adam_beta1=args.adam_beta1 if hasattr(args, \"adam_beta1\") else None,\n adam_beta2=args.adam_beta2 if hasattr(args, \"adam_beta2\") else None,\n adam_weight_decay=args.adam_weight_decay if hasattr(args, \"adam_weight_decay\") else None,\n adam_epsilon=args.adam_epsilon if hasattr(args, \"adam_epsilon\") else None,\n lr_scheduler=args.lr_scheduler if hasattr(args, \"lr_scheduler\") else None,\n lr_warmup_steps=args.lr_warmup_steps if hasattr(args, \"lr_warmup_steps\") else None,\n ema_inv_gamma=args.ema_inv_gamma if hasattr(args, \"ema_inv_gamma\") else None,\n ema_power=args.ema_power if hasattr(args, \"ema_power\") else None,\n ema_max_decay=args.ema_max_decay if hasattr(args, \"ema_max_decay\") else None,\n mixed_precision=args.mixed_precision,\n )\n\n card_path = os.path.join(args.output_dir, \"README.md\")\n model_card.save(card_path)\n", "path": "src/diffusers/hub_utils.py"}]}
| 2,438 | 134 |
gh_patches_debug_28367
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-8909
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-3279] Remove usage of dbt.config in dbt/adapters
</issue>
<code>
[start of core/dbt/adapters/base/plugin.py]
1 from typing import List, Optional, Type
2
3 from dbt.adapters.base import Credentials
4 from dbt.exceptions import CompilationError
5 from dbt.adapters.protocol import AdapterProtocol
6
7
8 def project_name_from_path(include_path: str) -> str:
9 # avoid an import cycle
10 from dbt.config.project import PartialProject
11
12 partial = PartialProject.from_project_root(include_path)
13 if partial.project_name is None:
14 raise CompilationError(f"Invalid project at {include_path}: name not set!")
15 return partial.project_name
16
17
18 class AdapterPlugin:
19 """Defines the basic requirements for a dbt adapter plugin.
20
21 :param include_path: The path to this adapter plugin's root
22 :param dependencies: A list of adapter names that this adapter depends
23 upon.
24 """
25
26 def __init__(
27 self,
28 adapter: Type[AdapterProtocol],
29 credentials: Type[Credentials],
30 include_path: str,
31 dependencies: Optional[List[str]] = None,
32 ) -> None:
33
34 self.adapter: Type[AdapterProtocol] = adapter
35 self.credentials: Type[Credentials] = credentials
36 self.include_path: str = include_path
37 self.project_name: str = project_name_from_path(include_path)
38 self.dependencies: List[str]
39 if dependencies is None:
40 self.dependencies = []
41 else:
42 self.dependencies = dependencies
43
[end of core/dbt/adapters/base/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py
--- a/core/dbt/adapters/base/plugin.py
+++ b/core/dbt/adapters/base/plugin.py
@@ -1,20 +1,10 @@
from typing import List, Optional, Type
+from pathlib import Path
from dbt.adapters.base import Credentials
-from dbt.exceptions import CompilationError
from dbt.adapters.protocol import AdapterProtocol
-def project_name_from_path(include_path: str) -> str:
- # avoid an import cycle
- from dbt.config.project import PartialProject
-
- partial = PartialProject.from_project_root(include_path)
- if partial.project_name is None:
- raise CompilationError(f"Invalid project at {include_path}: name not set!")
- return partial.project_name
-
-
class AdapterPlugin:
"""Defines the basic requirements for a dbt adapter plugin.
@@ -29,12 +19,13 @@
credentials: Type[Credentials],
include_path: str,
dependencies: Optional[List[str]] = None,
+ project_name: Optional[str] = None,
) -> None:
self.adapter: Type[AdapterProtocol] = adapter
self.credentials: Type[Credentials] = credentials
self.include_path: str = include_path
- self.project_name: str = project_name_from_path(include_path)
+ self.project_name: str = project_name or f"dbt_{Path(include_path).name}"
self.dependencies: List[str]
if dependencies is None:
self.dependencies = []
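
The replacement derives a default project name from the adapter's include path instead of parsing the bundled dbt_project.yml, which is what removes the import cycle into dbt.config. A quick illustration (the install path is hypothetical):

```python
from pathlib import Path

include_path = "/usr/lib/python3/site-packages/dbt/include/postgres"  # hypothetical adapter path
print(f"dbt_{Path(include_path).name}")                               # dbt_postgres
```

Adapters that need a different name can now pass `project_name` explicitly to `AdapterPlugin`.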
|
{"golden_diff": "diff --git a/core/dbt/adapters/base/plugin.py b/core/dbt/adapters/base/plugin.py\n--- a/core/dbt/adapters/base/plugin.py\n+++ b/core/dbt/adapters/base/plugin.py\n@@ -1,20 +1,10 @@\n from typing import List, Optional, Type\n+from pathlib import Path\n \n from dbt.adapters.base import Credentials\n-from dbt.exceptions import CompilationError\n from dbt.adapters.protocol import AdapterProtocol\n \n \n-def project_name_from_path(include_path: str) -> str:\n- # avoid an import cycle\n- from dbt.config.project import PartialProject\n-\n- partial = PartialProject.from_project_root(include_path)\n- if partial.project_name is None:\n- raise CompilationError(f\"Invalid project at {include_path}: name not set!\")\n- return partial.project_name\n-\n-\n class AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n \n@@ -29,12 +19,13 @@\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n+ project_name: Optional[str] = None,\n ) -> None:\n \n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] = credentials\n self.include_path: str = include_path\n- self.project_name: str = project_name_from_path(include_path)\n+ self.project_name: str = project_name or f\"dbt_{Path(include_path).name}\"\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n", "issue": "[CT-3279] Remove usage of dbt.config in dbt/adapters\n\n", "before_files": [{"content": "from typing import List, Optional, Type\n\nfrom dbt.adapters.base import Credentials\nfrom dbt.exceptions import CompilationError\nfrom dbt.adapters.protocol import AdapterProtocol\n\n\ndef project_name_from_path(include_path: str) -> str:\n # avoid an import cycle\n from dbt.config.project import PartialProject\n\n partial = PartialProject.from_project_root(include_path)\n if partial.project_name is None:\n raise CompilationError(f\"Invalid project at {include_path}: name not set!\")\n return partial.project_name\n\n\nclass AdapterPlugin:\n \"\"\"Defines the basic requirements for a dbt adapter plugin.\n\n :param include_path: The path to this adapter plugin's root\n :param dependencies: A list of adapter names that this adapter depends\n upon.\n \"\"\"\n\n def __init__(\n self,\n adapter: Type[AdapterProtocol],\n credentials: Type[Credentials],\n include_path: str,\n dependencies: Optional[List[str]] = None,\n ) -> None:\n\n self.adapter: Type[AdapterProtocol] = adapter\n self.credentials: Type[Credentials] = credentials\n self.include_path: str = include_path\n self.project_name: str = project_name_from_path(include_path)\n self.dependencies: List[str]\n if dependencies is None:\n self.dependencies = []\n else:\n self.dependencies = dependencies\n", "path": "core/dbt/adapters/base/plugin.py"}]}
| 923 | 338 |
gh_patches_debug_27213
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-4561
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't use secret interpolation in change_hook_dialects?
Trying to do something like
```
c['www'] = dict(
port=8010,
change_hook_dialects=dict(
gitlab={
'secret': util.Secret('my-webhook-token')
},
),
plugins=dict(waterfall_view={}, console_view={}, grid_view={}))
```
fails; the webhook complains 'HTTP 400 Invalid secret'.
Adding the print
```
--- a/master/buildbot/www/hooks/gitlab.py
+++ b/master/buildbot/www/hooks/gitlab.py
@@ -164,6 +164,7 @@ class GitLabHandler(BaseHookHandler):
received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
received_secret = bytes2unicode(received_secret)
if received_secret != expected_secret:
+ log.msg("Wanted secret '%s' but got '%s'" % (expected_secret, received_secret))
raise ValueError("Invalid secret")
try:
content = request.content.read()
```
shows
```
...[_GenericHTTPChannelProtocol,0,10.10.169.29] Wanted secret 'Secret(my-webhook-token)' but got 'thesecret'
```
(Evidently master/buildbot/www/hooks/gitlab.py needs to render expected_secret?
https://github.com/buildbot/buildbot/issues/3627 has some tips on how to add that, but
they are not clear enough for me to follow at first try.)
</issue>
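For reference, `util.Secret(...)` is a renderable placeholder rather than a plain string, so the handler has to render it against the master before comparing it with the request header. A rough sketch of that pattern, assuming the hook handler has access to `self.master` (the helper name is illustrative):

```python
from twisted.internet import defer
from buildbot.process.properties import Properties

@defer.inlineCallbacks
def rendered_secret(handler):
    # A plain string renders to itself; util.Secret(...) is resolved through the
    # master's configured secret providers.
    props = Properties()
    props.master = handler.master
    value = yield props.render(handler.options.get('secret'))
    return value
```

The fix shown in the diff further down applies exactly this pattern inside `getChanges`, which therefore becomes an `inlineCallbacks` method.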
<code>
[start of master/buildbot/www/hooks/gitlab.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16
17 import json
18 import re
19
20 from dateutil.parser import parse as dateparse
21
22 from twisted.python import log
23
24 from buildbot.util import bytes2unicode
25 from buildbot.www.hooks.base import BaseHookHandler
26
27 _HEADER_EVENT = b'X-Gitlab-Event'
28 _HEADER_GITLAB_TOKEN = b'X-Gitlab-Token'
29
30
31 class GitLabHandler(BaseHookHandler):
32
33 def _process_change(self, payload, user, repo, repo_url, event,
34 codebase=None):
35 """
36 Consumes the JSON as a python object and actually starts the build.
37
38 :arguments:
39 payload
40 Python Object that represents the JSON sent by GitLab Service
41 Hook.
42 """
43 changes = []
44 refname = payload['ref']
45 # project name from http headers is empty for me, so get it from repository/name
46 project = payload['repository']['name']
47
48 # We only care about regular heads or tags
49 match = re.match(r"^refs/(heads|tags)/(.+)$", refname)
50 if not match:
51 log.msg("Ignoring refname `%s': Not a branch" % refname)
52 return changes
53
54 branch = match.group(2)
55 if payload.get('deleted'):
56 log.msg("Branch `%s' deleted, ignoring" % branch)
57 return changes
58
59 for commit in payload['commits']:
60 if not commit.get('distinct', True):
61 log.msg('Commit `%s` is a non-distinct commit, ignoring...' %
62 (commit['id'],))
63 continue
64
65 files = []
66 for kind in ('added', 'modified', 'removed'):
67 files.extend(commit.get(kind, []))
68
69 when_timestamp = dateparse(commit['timestamp'])
70
71 log.msg("New revision: %s" % commit['id'][:8])
72
73 change = {
74 'author': '%s <%s>' % (commit['author']['name'],
75 commit['author']['email']),
76 'files': files,
77 'comments': commit['message'],
78 'revision': commit['id'],
79 'when_timestamp': when_timestamp,
80 'branch': branch,
81 'revlink': commit['url'],
82 'repository': repo_url,
83 'project': project,
84 'category': event,
85 'properties': {
86 'event': event,
87 },
88 }
89
90 if codebase is not None:
91 change['codebase'] = codebase
92
93 changes.append(change)
94
95 return changes
96
97 def _process_merge_request_change(self, payload, event, codebase=None):
98 """
99 Consumes the merge_request JSON as a python object and turn it into a buildbot change.
100
101 :arguments:
102 payload
103 Python Object that represents the JSON sent by GitLab Service
104 Hook.
105 """
106 attrs = payload['object_attributes']
107 commit = attrs['last_commit']
108 when_timestamp = dateparse(commit['timestamp'])
109 # @todo provide and document a way to choose between http and ssh url
110 repo_url = attrs['target']['git_http_url']
111 # project name from http headers is empty for me, so get it from object_attributes/target/name
112 project = attrs['target']['name']
113
114 # Filter out uninteresting events
115 state = attrs['state']
116 if re.match('^(closed|merged|approved)$', state):
117 log.msg("GitLab MR#{}: Ignoring because state is {}".format(attrs['iid'], state))
118 return []
119 action = attrs['action']
120 if not re.match('^(open|reopen)$', action) and not (action == "update" and "oldrev" in attrs):
121 log.msg("GitLab MR#{}: Ignoring because action {} was not open or "
122 "reopen or an update that added code".format(attrs['iid'],
123 action))
124 return []
125
126 changes = [{
127 'author': '%s <%s>' % (commit['author']['name'],
128 commit['author']['email']),
129 'files': [], # @todo use rest API
130 'comments': "MR#{}: {}\n\n{}".format(attrs['iid'], attrs['title'], attrs['description']),
131 'revision': commit['id'],
132 'when_timestamp': when_timestamp,
133 'branch': attrs['target_branch'],
134 'repository': repo_url,
135 'project': project,
136 'category': event,
137 'revlink': attrs['url'],
138 'properties': {
139 'source_branch': attrs['source_branch'],
140 'source_project_id': attrs['source_project_id'],
141 'source_repository': attrs['source']['git_http_url'],
142 'source_git_ssh_url': attrs['source']['git_ssh_url'],
143 'target_branch': attrs['target_branch'],
144 'target_project_id': attrs['target_project_id'],
145 'target_repository': attrs['target']['git_http_url'],
146 'target_git_ssh_url': attrs['target']['git_ssh_url'],
147 'event': event,
148 },
149 }]
150 if codebase is not None:
151 changes[0]['codebase'] = codebase
152 return changes
153
154 def getChanges(self, request):
155 """
156 Reponds only to POST events and starts the build process
157
158 :arguments:
159 request
160 the http request object
161 """
162 expected_secret = isinstance(self.options, dict) and self.options.get('secret')
163 if expected_secret:
164 received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
165 received_secret = bytes2unicode(received_secret)
166 if received_secret != expected_secret:
167 raise ValueError("Invalid secret")
168 try:
169 content = request.content.read()
170 payload = json.loads(bytes2unicode(content))
171 except Exception as e:
172 raise ValueError("Error loading JSON: " + str(e))
173 event_type = request.getHeader(_HEADER_EVENT)
174 event_type = bytes2unicode(event_type)
175 # newer version of gitlab have a object_kind parameter,
176 # which allows not to use the http header
177 event_type = payload.get('object_kind', event_type)
178 codebase = request.args.get(b'codebase', [None])[0]
179 codebase = bytes2unicode(codebase)
180 if event_type in ("push", "tag_push", "Push Hook"):
181 user = payload['user_name']
182 repo = payload['repository']['name']
183 repo_url = payload['repository']['url']
184 changes = self._process_change(
185 payload, user, repo, repo_url, event_type, codebase=codebase)
186 elif event_type == 'merge_request':
187 changes = self._process_merge_request_change(
188 payload, event_type, codebase=codebase)
189 else:
190 changes = []
191 if changes:
192 log.msg("Received {} changes from {} gitlab event".format(
193 len(changes), event_type))
194 return (changes, 'git')
195
196
197 gitlab = GitLabHandler
198
[end of master/buildbot/www/hooks/gitlab.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/www/hooks/gitlab.py b/master/buildbot/www/hooks/gitlab.py
--- a/master/buildbot/www/hooks/gitlab.py
+++ b/master/buildbot/www/hooks/gitlab.py
@@ -19,8 +19,10 @@
from dateutil.parser import parse as dateparse
+from twisted.internet.defer import inlineCallbacks
from twisted.python import log
+from buildbot.process.properties import Properties
from buildbot.util import bytes2unicode
from buildbot.www.hooks.base import BaseHookHandler
@@ -151,6 +153,7 @@
changes[0]['codebase'] = codebase
return changes
+ @inlineCallbacks
def getChanges(self, request):
"""
Reponds only to POST events and starts the build process
@@ -163,7 +166,12 @@
if expected_secret:
received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)
received_secret = bytes2unicode(received_secret)
- if received_secret != expected_secret:
+
+ p = Properties()
+ p.master = self.master
+ expected_secret_value = yield p.render(expected_secret)
+
+ if received_secret != expected_secret_value:
raise ValueError("Invalid secret")
try:
content = request.content.read()
|
{"golden_diff": "diff --git a/master/buildbot/www/hooks/gitlab.py b/master/buildbot/www/hooks/gitlab.py\n--- a/master/buildbot/www/hooks/gitlab.py\n+++ b/master/buildbot/www/hooks/gitlab.py\n@@ -19,8 +19,10 @@\n \n from dateutil.parser import parse as dateparse\n \n+from twisted.internet.defer import inlineCallbacks\n from twisted.python import log\n \n+from buildbot.process.properties import Properties\n from buildbot.util import bytes2unicode\n from buildbot.www.hooks.base import BaseHookHandler\n \n@@ -151,6 +153,7 @@\n changes[0]['codebase'] = codebase\n return changes\n \n+ @inlineCallbacks\n def getChanges(self, request):\n \"\"\"\n Reponds only to POST events and starts the build process\n@@ -163,7 +166,12 @@\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n received_secret = bytes2unicode(received_secret)\n- if received_secret != expected_secret:\n+\n+ p = Properties()\n+ p.master = self.master\n+ expected_secret_value = yield p.render(expected_secret)\n+\n+ if received_secret != expected_secret_value:\n raise ValueError(\"Invalid secret\")\n try:\n content = request.content.read()\n", "issue": "Can't use secret interpolation in change_hook_dialects?\nTrying to do something like\r\n```\r\nc['www'] = dict(\r\n port=8010,\r\n change_hook_dialects=dict(\r\n gitlab={\r\n 'secret': util.Secret('my-webhook-token')\r\n },\r\n ),\r\n plugins=dict(waterfall_view={}, console_view={}, grid_view={}))\r\n```\r\nfails; the webhook complains 'HTTP 400 Invalid secret'.\r\nAdding the print\r\n\r\n```\r\n--- a/master/buildbot/www/hooks/gitlab.py\r\n+++ b/master/buildbot/www/hooks/gitlab.py\r\n@@ -164,6 +164,7 @@ class GitLabHandler(BaseHookHandler):\r\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\r\n received_secret = bytes2unicode(received_secret)\r\n if received_secret != expected_secret:\r\n+ log.msg(\"Wanted secret '%s' but got '%s'\" % (expected_secret, received_secret))\r\n raise ValueError(\"Invalid secret\")\r\n try:\r\n content = request.content.read()\r\n```\r\nshows\r\n```\r\n...[_GenericHTTPChannelProtocol,0,10.10.169.29] Wanted secret 'Secret(my-webhook-token)' but got 'thesecret'\r\n```\r\n\r\n(Evidently master/buildbot/www/hooks/gitlab.py needs to render expected_secret?\r\nhttps://github.com/buildbot/buildbot/issues/3627 has some tips on how to add that, but\r\nnot clear enough for me to follow at first try.)\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\n\nimport json\nimport re\n\nfrom dateutil.parser import parse as dateparse\n\nfrom twisted.python import log\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.www.hooks.base import BaseHookHandler\n\n_HEADER_EVENT = b'X-Gitlab-Event'\n_HEADER_GITLAB_TOKEN = b'X-Gitlab-Token'\n\n\nclass GitLabHandler(BaseHookHandler):\n\n def _process_change(self, payload, user, repo, repo_url, event,\n codebase=None):\n \"\"\"\n Consumes the JSON as a python object and actually starts the build.\n\n :arguments:\n payload\n Python Object that represents the JSON sent by GitLab Service\n Hook.\n \"\"\"\n changes = []\n refname = payload['ref']\n # project name from http headers is empty for me, so get it from repository/name\n project = payload['repository']['name']\n\n # We only care about regular heads or tags\n match = re.match(r\"^refs/(heads|tags)/(.+)$\", refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' %\n (commit['id'],))\n continue\n\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n\n when_timestamp = dateparse(commit['timestamp'])\n\n log.msg(\"New revision: %s\" % commit['id'][:8])\n\n change = {\n 'author': '%s <%s>' % (commit['author']['name'],\n commit['author']['email']),\n 'files': files,\n 'comments': commit['message'],\n 'revision': commit['id'],\n 'when_timestamp': when_timestamp,\n 'branch': branch,\n 'revlink': commit['url'],\n 'repository': repo_url,\n 'project': project,\n 'category': event,\n 'properties': {\n 'event': event,\n },\n }\n\n if codebase is not None:\n change['codebase'] = codebase\n\n changes.append(change)\n\n return changes\n\n def _process_merge_request_change(self, payload, event, codebase=None):\n \"\"\"\n Consumes the merge_request JSON as a python object and turn it into a buildbot change.\n\n :arguments:\n payload\n Python Object that represents the JSON sent by GitLab Service\n Hook.\n \"\"\"\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n # @todo provide and document a way to choose between http and ssh url\n repo_url = attrs['target']['git_http_url']\n # project name from http headers is empty for me, so get it from object_attributes/target/name\n project = attrs['target']['name']\n\n # Filter out uninteresting events\n state = attrs['state']\n if re.match('^(closed|merged|approved)$', state):\n log.msg(\"GitLab MR#{}: Ignoring because state is {}\".format(attrs['iid'], state))\n return []\n action = attrs['action']\n if not re.match('^(open|reopen)$', action) and not (action == \"update\" and \"oldrev\" in attrs):\n log.msg(\"GitLab MR#{}: Ignoring because action {} was not open or \"\n \"reopen or an update that added code\".format(attrs['iid'],\n action))\n return []\n\n changes = [{\n 'author': '%s <%s>' % (commit['author']['name'],\n commit['author']['email']),\n 'files': [], # @todo use rest API\n 
'comments': \"MR#{}: {}\\n\\n{}\".format(attrs['iid'], attrs['title'], attrs['description']),\n 'revision': commit['id'],\n 'when_timestamp': when_timestamp,\n 'branch': attrs['target_branch'],\n 'repository': repo_url,\n 'project': project,\n 'category': event,\n 'revlink': attrs['url'],\n 'properties': {\n 'source_branch': attrs['source_branch'],\n 'source_project_id': attrs['source_project_id'],\n 'source_repository': attrs['source']['git_http_url'],\n 'source_git_ssh_url': attrs['source']['git_ssh_url'],\n 'target_branch': attrs['target_branch'],\n 'target_project_id': attrs['target_project_id'],\n 'target_repository': attrs['target']['git_http_url'],\n 'target_git_ssh_url': attrs['target']['git_ssh_url'],\n 'event': event,\n },\n }]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n\n def getChanges(self, request):\n \"\"\"\n Reponds only to POST events and starts the build process\n\n :arguments:\n request\n the http request object\n \"\"\"\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n received_secret = bytes2unicode(received_secret)\n if received_secret != expected_secret:\n raise ValueError(\"Invalid secret\")\n try:\n content = request.content.read()\n payload = json.loads(bytes2unicode(content))\n except Exception as e:\n raise ValueError(\"Error loading JSON: \" + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2unicode(event_type)\n # newer version of gitlab have a object_kind parameter,\n # which allows not to use the http header\n event_type = payload.get('object_kind', event_type)\n codebase = request.args.get(b'codebase', [None])[0]\n codebase = bytes2unicode(codebase)\n if event_type in (\"push\", \"tag_push\", \"Push Hook\"):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(\n payload, user, repo, repo_url, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(\n payload, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg(\"Received {} changes from {} gitlab event\".format(\n len(changes), event_type))\n return (changes, 'git')\n\n\ngitlab = GitLabHandler\n", "path": "master/buildbot/www/hooks/gitlab.py"}]}
| 2,965 | 281 |
gh_patches_debug_10793
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-724
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate dependency on scipy for pyro-ppl package
Once distributions are migrated upstream and we do not have a core dependency on scipy, we should remove this from our requirements in `setup.py` (add it to test dependencies), so that users do not need to install a separate 16MB file when they do a pip install of `pyro-ppl`.
</issue>
<code>
[start of setup.py]
1 from __future__ import absolute_import, division, print_function
2
3 import sys
4
5 from setuptools import find_packages, setup
6
7 # Find pyro version.
8 for line in open('pyro/__init__.py'):
9 if line.startswith('__version__ = '):
10 version = line.strip().split()[2][1:-1]
11
12 # Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl
13 # When releasing on pypi, make sure pandoc is on your system:
14 # $ brew install pandoc # OS X
15 # $ sudo apt-get install pandoc # Ubuntu Linux
16 try:
17 import pypandoc
18 long_description = pypandoc.convert('README.md', 'rst')
19 except (IOError, ImportError, OSError) as e:
20 sys.stderr.write('Failed to convert README.md to rst:\n {}\n'.format(e))
21 sys.stderr.flush()
22 long_description = open('README.md').read()
23
24 # Remove badges since they will always be obsolete.
25 blacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']
26 long_description = '\n'.join(
27 [line for line in long_description.split('\n') if not any(patt in line for patt in blacklist)])
28
29 setup(
30 name='pyro-ppl',
31 version=version,
32 description='A Python library for probabilistic modeling and inference',
33 long_description=long_description,
34 packages=find_packages(exclude=('tests*',)),
35 url='http://pyro.ai',
36 author='Uber AI Labs',
37 author_email='[email protected]',
38 install_requires=[
39 'numpy>=1.7',
40 'scipy>=0.19.0',
41 'cloudpickle>=0.3.1',
42 'graphviz>=0.8',
43 'networkx>=2.0.0',
44 'observations>=0.1.4',
45 'torch',
46 'six>=1.10.0',
47 ],
48 extras_require={
49 'notebooks': ['jupyter>=1.0.0'],
50 'visualization': [
51 'matplotlib>=1.3',
52 'visdom>=0.1.4',
53 'pillow',
54 ],
55 'test': [
56 'pytest',
57 'pytest-cov',
58 'nbval',
59 # examples/tutorials
60 'matplotlib',
61 'visdom',
62 'torchvision',
63 ],
64 'profile': ['prettytable'],
65 'dev': [
66 'torchvision',
67 'flake8',
68 'yapf',
69 'isort',
70 'pytest',
71 'pytest-xdist',
72 'nbval',
73 'nbstripout',
74 'pypandoc',
75 'sphinx',
76 'sphinx_rtd_theme',
77 ],
78 },
79 tests_require=['flake8', 'pytest'],
80 keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',
81 license='MIT License',
82 classifiers=[
83 'Intended Audience :: Developers',
84 'Intended Audience :: Education',
85 'Intended Audience :: Science/Research',
86 'Operating System :: POSIX :: Linux',
87 'Operating System :: MacOS :: MacOS X',
88 'Programming Language :: Python :: 2.7',
89 'Programming Language :: Python :: 3.4',
90 ],
91 # yapf
92 )
93
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,6 @@
author_email='[email protected]',
install_requires=[
'numpy>=1.7',
- 'scipy>=0.19.0',
'cloudpickle>=0.3.1',
'graphviz>=0.8',
'networkx>=2.0.0',
@@ -56,6 +55,7 @@
'pytest',
'pytest-cov',
'nbval',
+ 'scipy>=0.19.0',
# examples/tutorials
'matplotlib',
'visdom',
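
With scipy moved into the `test` extra, a plain `pip install pyro-ppl` no longer pulls in the separate 16MB dependency; only environments that install the extra get it. A quick sanity check, assuming none of the remaining core modules import scipy at import time:

```python
import importlib.util

# In a fresh environment installed without the `test` extra:
assert importlib.util.find_spec("scipy") is None   # scipy is absent
import pyro                                        # the core package should still import
print(pyro.__version__)
```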
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,6 @@\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n- 'scipy>=0.19.0',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n@@ -56,6 +55,7 @@\n 'pytest',\n 'pytest-cov',\n 'nbval',\n+ 'scipy>=0.19.0',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n", "issue": "Deprecate dependency on scipy for pyro-ppl package\nOnce distributions are migrated upstream and we do not have a core dependency on scipy, we should remove this from our requirements in `setup.py` (add it to test dependencies), so that users do not need to install a separate 16MB file when they do a pip install of `pyro-ppl`.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport sys\n\nfrom setuptools import find_packages, setup\n\n# Find pyro version.\nfor line in open('pyro/__init__.py'):\n if line.startswith('__version__ = '):\n version = line.strip().split()[2][1:-1]\n\n# Convert README.md to rst for display at https://pypi.python.org/pypi/pyro-ppl\n# When releasing on pypi, make sure pandoc is on your system:\n# $ brew install pandoc # OS X\n# $ sudo apt-get install pandoc # Ubuntu Linux\ntry:\n import pypandoc\n long_description = pypandoc.convert('README.md', 'rst')\nexcept (IOError, ImportError, OSError) as e:\n sys.stderr.write('Failed to convert README.md to rst:\\n {}\\n'.format(e))\n sys.stderr.flush()\n long_description = open('README.md').read()\n\n# Remove badges since they will always be obsolete.\nblacklist = ['Build Status', 'Latest Version', 'travis-ci.org', 'pypi.python.org']\nlong_description = '\\n'.join(\n [line for line in long_description.split('\\n') if not any(patt in line for patt in blacklist)])\n\nsetup(\n name='pyro-ppl',\n version=version,\n description='A Python library for probabilistic modeling and inference',\n long_description=long_description,\n packages=find_packages(exclude=('tests*',)),\n url='http://pyro.ai',\n author='Uber AI Labs',\n author_email='[email protected]',\n install_requires=[\n 'numpy>=1.7',\n 'scipy>=0.19.0',\n 'cloudpickle>=0.3.1',\n 'graphviz>=0.8',\n 'networkx>=2.0.0',\n 'observations>=0.1.4',\n 'torch',\n 'six>=1.10.0',\n ],\n extras_require={\n 'notebooks': ['jupyter>=1.0.0'],\n 'visualization': [\n 'matplotlib>=1.3',\n 'visdom>=0.1.4',\n 'pillow',\n ],\n 'test': [\n 'pytest',\n 'pytest-cov',\n 'nbval',\n # examples/tutorials\n 'matplotlib',\n 'visdom',\n 'torchvision',\n ],\n 'profile': ['prettytable'],\n 'dev': [\n 'torchvision',\n 'flake8',\n 'yapf',\n 'isort',\n 'pytest',\n 'pytest-xdist',\n 'nbval',\n 'nbstripout',\n 'pypandoc',\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n },\n tests_require=['flake8', 'pytest'],\n keywords='machine learning statistics probabilistic programming bayesian modeling pytorch',\n license='MIT License',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n ],\n # yapf\n)\n", "path": "setup.py"}]}
| 1,489 | 153 |
gh_patches_debug_14605
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2444
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate (and remove) POD_ADDRESS_RANGE
As discussed in #1209, POD_ADDRESS_RANGE should be removed and SUBNET should be used instead.
Tasks:
- remove all occurrences of POD_ADDRESS_RANGE from containers
- update the docs
- write a changelog (breaking change!)
</issue>
<code>
[start of core/admin/mailu/internal/views/dovecot.py]
1 from mailu import models
2 from mailu.internal import internal
3 from flask import current_app as app
4
5 import flask
6 import socket
7 import os
8
9 @internal.route("/dovecot/passdb/<path:user_email>")
10 def dovecot_passdb_dict(user_email):
11 user = models.User.query.get(user_email) or flask.abort(404)
12 allow_nets = []
13 allow_nets.append(app.config["SUBNET"])
14 if app.config["SUBNET6"]:
15 allow_nets.append(app.config["SUBNET6"])
16 if app.config["POD_ADDRESS_RANGE"]:
17 allow_nets.append(app.config["POD_ADDRESS_RANGE"])
18 return flask.jsonify({
19 "password": None,
20 "nopassword": "Y",
21 "allow_nets": ",".join(allow_nets)
22 })
23
24
25 @internal.route("/dovecot/userdb/<path:user_email>")
26 def dovecot_userdb_dict(user_email):
27 user = models.User.query.get(user_email) or flask.abort(404)
28 return flask.jsonify({
29 "quota_rule": "*:bytes={}".format(user.quota_bytes)
30 })
31
32
33 @internal.route("/dovecot/quota/<ns>/<path:user_email>", methods=["POST"])
34 def dovecot_quota(ns, user_email):
35 user = models.User.query.get(user_email) or flask.abort(404)
36 if ns == "storage":
37 user.quota_bytes_used = flask.request.get_json()
38 models.db.session.commit()
39 return flask.jsonify(None)
40
41
42 @internal.route("/dovecot/sieve/name/<script>/<path:user_email>")
43 def dovecot_sieve_name(script, user_email):
44 return flask.jsonify(script)
45
46
47 @internal.route("/dovecot/sieve/data/default/<path:user_email>")
48 def dovecot_sieve_data(user_email):
49 user = models.User.query.get(user_email) or flask.abort(404)
50 return flask.jsonify(flask.render_template("default.sieve", user=user))
51
[end of core/admin/mailu/internal/views/dovecot.py]
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 from socrate import system
5 import ipaddress
6
7 DEFAULT_CONFIG = {
8 # Specific to the admin UI
9 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
10 'BABEL_DEFAULT_LOCALE': 'en',
11 'BABEL_DEFAULT_TIMEZONE': 'UTC',
12 'BOOTSTRAP_SERVE_LOCAL': True,
13 'RATELIMIT_STORAGE_URL': '',
14 'QUOTA_STORAGE_URL': '',
15 'DEBUG': False,
16 'DOMAIN_REGISTRATION': False,
17 'TEMPLATES_AUTO_RELOAD': True,
18 'MEMORY_SESSIONS': False,
19 # Database settings
20 'DB_FLAVOR': None,
21 'DB_USER': 'mailu',
22 'DB_PW': None,
23 'DB_HOST': 'database',
24 'DB_NAME': 'mailu',
25 'SQLITE_DATABASE_FILE':'data/main.db',
26 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
27 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
28 # Statistics management
29 'INSTANCE_ID_PATH': '/data/instance',
30 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
31 # Common configuration variables
32 'SECRET_KEY': 'changeMe',
33 'DOMAIN': 'mailu.io',
34 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
35 'POSTMASTER': 'postmaster',
36 'WILDCARD_SENDERS': '',
37 'TLS_FLAVOR': 'cert',
38 'INBOUND_TLS_ENFORCE': False,
39 'DEFER_ON_TLS_ERROR': True,
40 'AUTH_RATELIMIT_IP': '60/hour',
41 'AUTH_RATELIMIT_IP_V4_MASK': 24,
42 'AUTH_RATELIMIT_IP_V6_MASK': 56,
43 'AUTH_RATELIMIT_USER': '100/day',
44 'AUTH_RATELIMIT_EXEMPTION': '',
45 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
46 'DISABLE_STATISTICS': False,
47 # Mail settings
48 'DMARC_RUA': None,
49 'DMARC_RUF': None,
50 'WELCOME': False,
51 'WELCOME_SUBJECT': 'Dummy welcome topic',
52 'WELCOME_BODY': 'Dummy welcome body',
53 'DKIM_SELECTOR': 'dkim',
54 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
55 'DEFAULT_QUOTA': 1000000000,
56 'MESSAGE_RATELIMIT': '200/day',
57 'MESSAGE_RATELIMIT_EXEMPTION': '',
58 'RECIPIENT_DELIMITER': '',
59 # Web settings
60 'SITENAME': 'Mailu',
61 'WEBSITE': 'https://mailu.io',
62 'ADMIN' : 'none',
63 'WEB_ADMIN': '/admin',
64 'WEB_WEBMAIL': '/webmail',
65 'WEBMAIL': 'none',
66 'RECAPTCHA_PUBLIC_KEY': '',
67 'RECAPTCHA_PRIVATE_KEY': '',
68 'LOGO_URL': None,
69 'LOGO_BACKGROUND': None,
70 # Advanced settings
71 'LOG_LEVEL': 'WARNING',
72 'SESSION_KEY_BITS': 128,
73 'SESSION_TIMEOUT': 3600,
74 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
75 'SESSION_COOKIE_SECURE': True,
76 'CREDENTIAL_ROUNDS': 12,
77 'TZ': 'Etc/UTC',
78 # Host settings
79 'HOST_IMAP': 'imap',
80 'HOST_LMTP': 'imap:2525',
81 'HOST_POP3': 'imap',
82 'HOST_SMTP': 'smtp',
83 'HOST_AUTHSMTP': 'smtp',
84 'HOST_ADMIN': 'admin',
85 'HOST_WEBMAIL': 'webmail',
86 'HOST_WEBDAV': 'webdav:5232',
87 'HOST_REDIS': 'redis',
88 'HOST_FRONT': 'front',
89 'SUBNET': '192.168.203.0/24',
90 'SUBNET6': None,
91 'POD_ADDRESS_RANGE': None
92 }
93
94 class ConfigManager:
95 """ Naive configuration manager that uses environment only
96 """
97
98 DB_TEMPLATES = {
99 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
100 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
101 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
102 }
103
104 def __init__(self):
105 self.config = dict()
106
107 def get_host_address(self, name):
108 # if MYSERVICE_ADDRESS is defined, use this
109 if f'{name}_ADDRESS' in os.environ:
110 return os.environ.get(f'{name}_ADDRESS')
111 # otherwise use the host name and resolve it
112 return system.resolve_address(self.config[f'HOST_{name}'])
113
114 def resolve_hosts(self):
115 for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:
116 self.config[f'{key}_ADDRESS'] = self.get_host_address(key)
117 if self.config['WEBMAIL'] != 'none':
118 self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')
119
120 def __get_env(self, key, value):
121 key_file = key + "_FILE"
122 if key_file in os.environ:
123 with open(os.environ.get(key_file)) as file:
124 value_from_file = file.read()
125 return value_from_file.strip()
126 else:
127 return os.environ.get(key, value)
128
129 def __coerce_value(self, value):
130 if isinstance(value, str) and value.lower() in ('true','yes'):
131 return True
132 elif isinstance(value, str) and value.lower() in ('false', 'no'):
133 return False
134 return value
135
136 def init_app(self, app):
137 # get current app config
138 self.config.update(app.config)
139 # get environment variables
140 self.config.update({
141 key: self.__coerce_value(self.__get_env(key, value))
142 for key, value in DEFAULT_CONFIG.items()
143 })
144 self.resolve_hosts()
145
146 # automatically set the sqlalchemy string
147 if self.config['DB_FLAVOR']:
148 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
149 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
150
151 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
152 self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/1'
153 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
154 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
155 self.config['SESSION_COOKIE_HTTPONLY'] = True
156 self.config['SESSION_PERMANENT'] = True
157 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
158 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
159 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
160 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
161 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
162 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
163 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
164 self.config['HOSTNAMES'] = ','.join(hostnames)
165 self.config['HOSTNAME'] = hostnames[0]
166
167 # update the app config
168 app.config.update(self.config)
169
170
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -87,8 +87,7 @@
'HOST_REDIS': 'redis',
'HOST_FRONT': 'front',
'SUBNET': '192.168.203.0/24',
- 'SUBNET6': None,
- 'POD_ADDRESS_RANGE': None
+ 'SUBNET6': None
}
class ConfigManager:
diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py
--- a/core/admin/mailu/internal/views/dovecot.py
+++ b/core/admin/mailu/internal/views/dovecot.py
@@ -13,8 +13,6 @@
allow_nets.append(app.config["SUBNET"])
if app.config["SUBNET6"]:
allow_nets.append(app.config["SUBNET6"])
- if app.config["POD_ADDRESS_RANGE"]:
- allow_nets.append(app.config["POD_ADDRESS_RANGE"])
return flask.jsonify({
"password": None,
"nopassword": "Y",
|
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -87,8 +87,7 @@\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n- 'SUBNET6': None,\n- 'POD_ADDRESS_RANGE': None\n+ 'SUBNET6': None\n }\n \n class ConfigManager:\ndiff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py\n--- a/core/admin/mailu/internal/views/dovecot.py\n+++ b/core/admin/mailu/internal/views/dovecot.py\n@@ -13,8 +13,6 @@\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n- if app.config[\"POD_ADDRESS_RANGE\"]:\n- allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n", "issue": "Deprecate (and remove) POD_ADDRESS_RANGE\nAs discussed in #1209, POD_ADDRESS_RANGE should be removed and SUBNET should be used instead.\r\nTasks:\r\n- remove all occurences of POD_ADDRESS_RANGE from containers\r\n- update the docs\r\n- write a changelog (breaking change!)\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\n\[email protected](\"/dovecot/passdb/<path:user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n if app.config[\"POD_ADDRESS_RANGE\"]:\n allow_nets.append(app.config[\"POD_ADDRESS_RANGE\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\n\[email protected](\"/dovecot/userdb/<path:user_email>\")\ndef dovecot_userdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": \"*:bytes={}\".format(user.quota_bytes)\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<path:user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<path:user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<path:user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}, {"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 
'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if f'{name}_ADDRESS' in os.environ:\n return os.environ.get(f'{name}_ADDRESS')\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config[f'HOST_{name}'])\n\n def resolve_hosts(self):\n for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:\n self.config[f'{key}_ADDRESS'] = self.get_host_address(key)\n if self.config['WEBMAIL'] != 'none':\n self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, 
value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/1'\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n\n # update the app config\n app.config.update(self.config)\n\n", "path": "core/admin/mailu/configuration.py"}]}
| 3,266 | 263 |
gh_patches_debug_14989
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1984
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Server Error when downloading dataset
This dataset, recently added by @JavierTeran , has been throwing a server error when I click on "download": https://data.hdx.rwlabs.org/dataset/victimas-de-minas/resource/0f39852c-e4bd-4c41-b5a1-88f38564955f
</issue>
<code>
[start of ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py]
1 import ckan.controllers.storage as storage
2 import os
3 import re
4 import urllib
5 import uuid
6 from datetime import datetime
7 from cgi import FieldStorage
8
9 from ofs import get_impl
10 from pylons import request, response
11 from pylons.controllers.util import abort, redirect_to
12 from pylons import config
13 from paste.fileapp import FileApp
14 from paste.deploy.converters import asbool
15
16 from ckan.lib.base import BaseController, c, request, render, config, h, abort
17 from ckan.lib.jsonp import jsonpify
18 import ckan.model as model
19 import ckan.logic as logic
20
21 try:
22 from cStringIO import StringIO
23 except ImportError:
24 from StringIO import StringIO
25 try:
26 import json
27 except:
28 import simplejson as json
29
30 from logging import getLogger
31 log = getLogger(__name__)
32
33
34 BUCKET = config.get('ckan.storage.bucket', 'default')
35 key_prefix = config.get('ckan.storage.key_prefix', 'file/')
36
37 _eq_re = re.compile(r"^(.*)(=[0-9]*)$")
38
39
40 def generate_response(http_status, unicode_body, no_cache=True, other_headers=None):
41 r = request.environ['pylons.pylons'].response
42 if no_cache:
43 r.headers['Pragma'] = 'no-cache'
44 r.headers['Cache-Control'] = 'no-cache'
45
46 if other_headers:
47 for key, value in other_headers.iteritems():
48 r.headers[key] = value
49
50 r.unicode_body = unicode_body
51 r.status = http_status
52 return r
53
54
55 class FileDownloadController(storage.StorageController):
56 _ofs_impl = None
57
58 @property
59 def ofs(self):
60 if not FileDownloadController._ofs_impl:
61 FileDownloadController._ofs_impl = get_ofs()
62 return FileDownloadController._ofs_impl
63
64 def _download_file(self, res, label):
65 # We need this as a resource object to check access so create a dummy
66 # obj and trick CKAN
67 resource = model.Resource()
68
69 for k in res.keys():
70 setattr(resource, k, res[k])
71
72 context = {'model': model, 'session': model.Session,
73 'user': c.user or c.author, 'for_view': True,
74 'auth_user_obj': c.userobj, 'resource': resource}
75 data_dict = {'id': resource.id}
76 try:
77 logic.check_access('resource_show', context, data_dict)
78 except logic.NotAuthorized:
79 redirect_url = h.url_for(controller='user', action='login',
80 came_from=resource.url)
81 r = generate_response(303, u'Not authorized to read file ' + resource.id,
82 other_headers={'Location': redirect_url, 'X-CKAN-Error': '403 Access Denied'})
83 return r
84
85 exists = self.ofs.exists(BUCKET, label)
86 if not exists:
87 # handle erroneous trailing slash by redirecting to url w/o slash
88 if label.endswith('/'):
89 label = label[:-1]
90 # This may be best being cached_url until we have moved it into
91 # permanent storage
92 file_url = h.url_for('storage_file', label=label)
93 h.redirect_to(file_url)
94 else:
95 # abort(404)
96 r = generate_response(404, u'File not found')
97 return r
98
99 file_url = self.ofs.get_url(BUCKET, label)
100 if file_url.startswith("file://") or file_url.endswith('xlsx'):
101 metadata = self.ofs.get_metadata(BUCKET, label)
102 filepath = file_url[len("file://"):]
103 headers = {
104 # 'Content-Disposition':'attachment; filename="%s"' % label,
105 'Pragma': 'no-cache',
106 'Cache-Control': 'no-cache',
107 'Content-Type': metadata.get('_format', 'text/plain')}
108 if resource.name:
109 res_name = resource.name.replace('"', '_')
110 headers[
111 'Content-Disposition'] = 'inline; filename="{}"'.format(res_name)
112 fapp = FileApp(filepath, headers=None, **headers)
113 return fapp(request.environ, self.start_response)
114 else:
115 h.redirect_to(file_url.encode('ascii', 'ignore'))
116
117 def file(self, label):
118 from sqlalchemy.engine import create_engine
119 # from label find resource id
120 url = config.get('ckan.site_url', '') + \
121 '/storage/f/' + urllib.quote(label)
122 engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)
123 connection = engine.connect()
124 query = connection.execute(
125 """SELECT * from resource where url= %s""", (url,))
126 res = query.fetchone()
127 if not res:
128 # raise logic.NotFound
129 r = generate_response(404, u'File not found')
130 return r
131
132 return self._download_file(res, label)
133
134 def perma_file(self, id, resource_id):
135 from sqlalchemy.engine import create_engine
136 # from label find resource id
137 engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)
138 connection = engine.connect()
139 query = connection.execute(
140 """SELECT * from resource where id= %s""", (resource_id,))
141 res = query.fetchone()
142 if not res:
143 # raise logic.NotFound
144 r = generate_response(404, u'File not found')
145 return r
146 else:
147 label = self._get_label_from_resource(res)
148
149 return self._download_file(res, label)
150
151 def _get_label_from_resource(self, resource):
152 prefix = '/storage/f/'
153 url = resource.url
154 index = url.find(prefix)
155 label = url[index + len(prefix):]
156 uq_label = urllib.unquote(label)
157 return uq_label
158
159
160 def create_pairtree_marker(folder):
161 """ Creates the pairtree marker for tests if it doesn't exist """
162 if not folder[:-1] == '/':
163 folder = folder + '/'
164
165 directory = os.path.dirname(folder)
166 if not os.path.exists(directory):
167 os.makedirs(directory)
168
169 target = os.path.join(directory, 'pairtree_version0_1')
170 if os.path.exists(target):
171 return
172
173 open(target, 'wb').close()
174
175
176 def get_ofs():
177 """Return a configured instance of the appropriate OFS driver.
178 """
179 storage_backend = config['ofs.impl']
180 kw = {}
181 for k, v in config.items():
182 if not k.startswith('ofs.') or k == 'ofs.impl':
183 continue
184 kw[k[4:]] = v
185
186 # Make sure we have created the marker file to avoid pairtree issues
187 if storage_backend == 'pairtree' and 'storage_dir' in kw:
188 create_pairtree_marker(kw['storage_dir'])
189
190 ofs = get_impl(storage_backend)(**kw)
191 return ofs
192
[end of ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py
--- a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py
+++ b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py
@@ -107,8 +107,9 @@
'Content-Type': metadata.get('_format', 'text/plain')}
if resource.name:
res_name = resource.name.replace('"', '_')
+ res_name_encoded = res_name.encode('utf-8', 'ignore')
headers[
- 'Content-Disposition'] = 'inline; filename="{}"'.format(res_name)
+ 'Content-Disposition'] = 'inline; filename="{}"'.format(res_name_encoded)
fapp = FileApp(filepath, headers=None, **headers)
return fapp(request.environ, self.start_response)
else:
|
{"golden_diff": "diff --git a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py\n--- a/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py\n+++ b/ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py\n@@ -107,8 +107,9 @@\n 'Content-Type': metadata.get('_format', 'text/plain')}\n if resource.name:\n res_name = resource.name.replace('\"', '_')\n+ res_name_encoded = res_name.encode('utf-8', 'ignore')\n headers[\n- 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name)\n+ 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name_encoded)\n fapp = FileApp(filepath, headers=None, **headers)\n return fapp(request.environ, self.start_response)\n else:\n", "issue": "Server Error when downloading dataset\nThis dataset, recently added by @JavierTeran , has been throwing a server error when I click on \"download\": https://data.hdx.rwlabs.org/dataset/victimas-de-minas/resource/0f39852c-e4bd-4c41-b5a1-88f38564955f\n\n", "before_files": [{"content": "import ckan.controllers.storage as storage\nimport os\nimport re\nimport urllib\nimport uuid\nfrom datetime import datetime\nfrom cgi import FieldStorage\n\nfrom ofs import get_impl\nfrom pylons import request, response\nfrom pylons.controllers.util import abort, redirect_to\nfrom pylons import config\nfrom paste.fileapp import FileApp\nfrom paste.deploy.converters import asbool\n\nfrom ckan.lib.base import BaseController, c, request, render, config, h, abort\nfrom ckan.lib.jsonp import jsonpify\nimport ckan.model as model\nimport ckan.logic as logic\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\ntry:\n import json\nexcept:\n import simplejson as json\n\nfrom logging import getLogger\nlog = getLogger(__name__)\n\n\nBUCKET = config.get('ckan.storage.bucket', 'default')\nkey_prefix = config.get('ckan.storage.key_prefix', 'file/')\n\n_eq_re = re.compile(r\"^(.*)(=[0-9]*)$\")\n\n\ndef generate_response(http_status, unicode_body, no_cache=True, other_headers=None):\n r = request.environ['pylons.pylons'].response\n if no_cache:\n r.headers['Pragma'] = 'no-cache'\n r.headers['Cache-Control'] = 'no-cache'\n\n if other_headers:\n for key, value in other_headers.iteritems():\n r.headers[key] = value\n\n r.unicode_body = unicode_body\n r.status = http_status\n return r\n\n\nclass FileDownloadController(storage.StorageController):\n _ofs_impl = None\n\n @property\n def ofs(self):\n if not FileDownloadController._ofs_impl:\n FileDownloadController._ofs_impl = get_ofs()\n return FileDownloadController._ofs_impl\n\n def _download_file(self, res, label):\n # We need this as a resource object to check access so create a dummy\n # obj and trick CKAN\n resource = model.Resource()\n\n for k in res.keys():\n setattr(resource, k, res[k])\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj, 'resource': resource}\n data_dict = {'id': resource.id}\n try:\n logic.check_access('resource_show', context, data_dict)\n except logic.NotAuthorized:\n redirect_url = h.url_for(controller='user', action='login',\n came_from=resource.url)\n r = generate_response(303, u'Not authorized to read file ' + resource.id,\n other_headers={'Location': redirect_url, 'X-CKAN-Error': '403 Access Denied'})\n return r\n\n exists = self.ofs.exists(BUCKET, label)\n if not exists:\n # handle erroneous trailing slash by 
redirecting to url w/o slash\n if label.endswith('/'):\n label = label[:-1]\n # This may be best being cached_url until we have moved it into\n # permanent storage\n file_url = h.url_for('storage_file', label=label)\n h.redirect_to(file_url)\n else:\n # abort(404)\n r = generate_response(404, u'File not found')\n return r\n\n file_url = self.ofs.get_url(BUCKET, label)\n if file_url.startswith(\"file://\") or file_url.endswith('xlsx'):\n metadata = self.ofs.get_metadata(BUCKET, label)\n filepath = file_url[len(\"file://\"):]\n headers = {\n # 'Content-Disposition':'attachment; filename=\"%s\"' % label,\n 'Pragma': 'no-cache',\n 'Cache-Control': 'no-cache',\n 'Content-Type': metadata.get('_format', 'text/plain')}\n if resource.name:\n res_name = resource.name.replace('\"', '_')\n headers[\n 'Content-Disposition'] = 'inline; filename=\"{}\"'.format(res_name)\n fapp = FileApp(filepath, headers=None, **headers)\n return fapp(request.environ, self.start_response)\n else:\n h.redirect_to(file_url.encode('ascii', 'ignore'))\n\n def file(self, label):\n from sqlalchemy.engine import create_engine\n # from label find resource id\n url = config.get('ckan.site_url', '') + \\\n '/storage/f/' + urllib.quote(label)\n engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)\n connection = engine.connect()\n query = connection.execute(\n \"\"\"SELECT * from resource where url= %s\"\"\", (url,))\n res = query.fetchone()\n if not res:\n # raise logic.NotFound\n r = generate_response(404, u'File not found')\n return r\n\n return self._download_file(res, label)\n\n def perma_file(self, id, resource_id):\n from sqlalchemy.engine import create_engine\n # from label find resource id\n engine = create_engine(config.get('sqlalchemy.url', ''), echo=True)\n connection = engine.connect()\n query = connection.execute(\n \"\"\"SELECT * from resource where id= %s\"\"\", (resource_id,))\n res = query.fetchone()\n if not res:\n # raise logic.NotFound\n r = generate_response(404, u'File not found')\n return r\n else:\n label = self._get_label_from_resource(res)\n\n return self._download_file(res, label)\n\n def _get_label_from_resource(self, resource):\n prefix = '/storage/f/'\n url = resource.url\n index = url.find(prefix)\n label = url[index + len(prefix):]\n uq_label = urllib.unquote(label)\n return uq_label\n\n\ndef create_pairtree_marker(folder):\n \"\"\" Creates the pairtree marker for tests if it doesn't exist \"\"\"\n if not folder[:-1] == '/':\n folder = folder + '/'\n\n directory = os.path.dirname(folder)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n target = os.path.join(directory, 'pairtree_version0_1')\n if os.path.exists(target):\n return\n\n open(target, 'wb').close()\n\n\ndef get_ofs():\n \"\"\"Return a configured instance of the appropriate OFS driver.\n \"\"\"\n storage_backend = config['ofs.impl']\n kw = {}\n for k, v in config.items():\n if not k.startswith('ofs.') or k == 'ofs.impl':\n continue\n kw[k[4:]] = v\n\n # Make sure we have created the marker file to avoid pairtree issues\n if storage_backend == 'pairtree' and 'storage_dir' in kw:\n create_pairtree_marker(kw['storage_dir'])\n\n ofs = get_impl(storage_backend)(**kw)\n return ofs\n", "path": "ckanext-hdx_package/ckanext/hdx_package/controllers/storage_controller.py"}]}
| 2,579 | 215 |
gh_patches_debug_27848
|
rasdani/github-patches
|
git_diff
|
ultrabug__py3status-1876
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
error basic auth url encoding
I get the following error when attempting to use basic auth url encoding.
```
$ curl https://user:[email protected]/test.json
{"some":" data"}
$ cat config
order += "getjson test"
getjson test {
url = "https://user:[email protected]/test.json"
format = "{some}"
cache_timeout = 300
}
$ py3status --config config --log-file log-file --wm sway
{"version": 1, "click_events": true, "stop_signal": 20}
[[]
,[{"full_text": "getjson test", "color": "#FF0000", "instance": "test", "name": "getjson"}]
^C
$ cat log-file
2019-12-15 13:00:36 INFO ========
2019-12-15 13:00:36 INFO Starting py3status version 3.22 python 3.8.0
2019-12-15 13:00:36 INFO window manager: sway
2019-12-15 13:00:36 INFO config file: config
2019-12-15 13:00:36 INFO modules include paths: []
2019-12-15 13:00:36 INFO loading module "getjson test" from py3status.modules.getjson
2019-12-15 13:00:36 INFO starting module getjson test
2019-12-15 13:00:36 WARNING Instance `getjson test`, user method `getjson` failed (InvalidURL) request.py line 72.
2019-12-15 13:00:36 INFO Traceback
InvalidURL: nonnumeric port: '[email protected]'
File "/usr/lib/python3.8/site-packages/py3status/module.py", line 946, in run
response = method()
File "/usr/lib/python3.8/site-packages/py3status/modules/getjson.py", line 85, in getjson
json_data = self.py3.request(self.url).json()
File "/usr/lib/python3.8/site-packages/py3status/py3.py", line 1318, in request
return get_http_response()
File "/usr/lib/python3.8/site-packages/py3status/py3.py", line 1306, in get_http_response
return HttpResponse(
File "/usr/lib/python3.8/site-packages/py3status/request.py", line 72, in __init__
self._response = urlopen(request, data=data, timeout=timeout)
File "/usr/lib/python3.8/urllib/request.py", line 222, in urlopen
return opener.open(url, data, timeout)
File "/usr/lib/python3.8/urllib/request.py", line 525, in open
response = self._open(req, data)
File "/usr/lib/python3.8/urllib/request.py", line 542, in _open
result = self._call_chain(self.handle_open, protocol, protocol +
File "/usr/lib/python3.8/urllib/request.py", line 502, in _call_chain
result = func(*args)
File "/usr/lib/python3.8/urllib/request.py", line 1360, in https_open
return self.do_open(http.client.HTTPSConnection, req,
File "/usr/lib/python3.8/urllib/request.py", line 1286, in do_open
h = http_class(host, timeout=req.timeout, **http_conn_args)
File "/usr/lib/python3.8/http/client.py", line 1357, in __init__
super(HTTPSConnection, self).__init__(host, port, timeout,
File "/usr/lib/python3.8/http/client.py", line 829, in __init__
(self.host, self.port) = self._get_hostport(host, port)
File "/usr/lib/python3.8/http/client.py", line 870, in _get_hostport
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
```
</issue>
<code>
[start of py3status/modules/getjson.py]
1 # -*- coding: utf-8 -*-
2 """
3 Display JSON data fetched from a URL.
4
5 This module gets the given `url` configuration parameter and assumes the
6 response is a JSON object. The keys of the JSON object are used as the format
7 placeholders. The format placeholders are replaced by the value. Objects that
8 are nested can be accessed by using the `delimiter` configuration parameter
9 in between.
10
11 Configuration parameters:
12 cache_timeout: refresh interval for this module (default 30)
13 delimiter: the delimiter between parent and child objects (default '-')
14 format: display format for this module (default None)
15 url: specify URL to fetch JSON from (default None)
16
17 Format placeholders:
18 Placeholders will be replaced by the JSON keys.
19
20 Placeholders for objects with sub-objects are flattened using 'delimiter'
21 in between (eg. {'parent': {'child': 'value'}} will use placeholder
22 {parent-child}).
23
24 Placeholders for list elements have 'delimiter' followed by the index
25 (eg. {'parent': ['this', 'that']) will use placeholders {parent-0}
26 for 'this' and {parent-1} for 'that'.
27
28 Examples:
29 ```
30 # straightforward key replacement
31 url = 'http://ip-api.com/json'
32 format = '{lat}, {lon}'
33
34 # access child objects
35 url = 'https://api.icndb.com/jokes/random'
36 format = '{value-joke}'
37
38 # access title from 0th element of articles list
39 url = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'
40 format = '{articles-0-title}'
41
42 # access if top-level object is a list
43 url = 'https://jsonplaceholder.typicode.com/posts/1/comments'
44 format = '{0-name}'
45 ```
46
47 @author vicyap
48
49 SAMPLE OUTPUT
50 {'full_text': 'Github: Everything operating normally'}
51 """
52
53 STRING_ERROR = "missing url"
54
55
56 class Py3status:
57 """
58 """
59
60 # available configuration parameters
61 cache_timeout = 30
62 delimiter = "-"
63 format = None
64 url = None
65
66 class Meta:
67 deprecated = {
68 "rename": [
69 {
70 "param": "timeout",
71 "new": "request_timeout",
72 "msg": "obsolete parameter use `request_timeout`",
73 }
74 ]
75 }
76
77 def post_config_hook(self):
78 if not self.url:
79 raise Exception(STRING_ERROR)
80
81 def getjson(self):
82 """
83 """
84 try:
85 json_data = self.py3.request(self.url).json()
86 json_data = self.py3.flatten_dict(json_data, self.delimiter, True)
87 except self.py3.RequestException:
88 json_data = None
89
90 if json_data:
91 full_text = self.py3.safe_format(self.format, json_data)
92 else:
93 full_text = ""
94
95 return {
96 "cached_until": self.py3.time_in(self.cache_timeout),
97 "full_text": full_text,
98 }
99
100
101 if __name__ == "__main__":
102 """
103 Run module in test mode.
104 """
105 from py3status.module_test import module_test
106
107 module_test(Py3status)
108
[end of py3status/modules/getjson.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py3status/modules/getjson.py b/py3status/modules/getjson.py
--- a/py3status/modules/getjson.py
+++ b/py3status/modules/getjson.py
@@ -12,7 +12,9 @@
cache_timeout: refresh interval for this module (default 30)
delimiter: the delimiter between parent and child objects (default '-')
format: display format for this module (default None)
+ password: basic auth password information (default None)
url: specify URL to fetch JSON from (default None)
+ username: basic auth user information (default None)
Format placeholders:
Placeholders will be replaced by the JSON keys.
@@ -61,7 +63,9 @@
cache_timeout = 30
delimiter = "-"
format = None
+ password = None
url = None
+ username = None
class Meta:
deprecated = {
@@ -82,7 +86,11 @@
"""
"""
try:
- json_data = self.py3.request(self.url).json()
+ if self.username is not None and self.password is not None:
+ auth = (self.username, self.password)
+ else:
+ auth = None
+ json_data = self.py3.request(self.url, auth=auth).json()
json_data = self.py3.flatten_dict(json_data, self.delimiter, True)
except self.py3.RequestException:
json_data = None
|
{"golden_diff": "diff --git a/py3status/modules/getjson.py b/py3status/modules/getjson.py\n--- a/py3status/modules/getjson.py\n+++ b/py3status/modules/getjson.py\n@@ -12,7 +12,9 @@\n cache_timeout: refresh interval for this module (default 30)\n delimiter: the delimiter between parent and child objects (default '-')\n format: display format for this module (default None)\n+ password: basic auth password information (default None)\n url: specify URL to fetch JSON from (default None)\n+ username: basic auth user information (default None)\n \n Format placeholders:\n Placeholders will be replaced by the JSON keys.\n@@ -61,7 +63,9 @@\n cache_timeout = 30\n delimiter = \"-\"\n format = None\n+ password = None\n url = None\n+ username = None\n \n class Meta:\n deprecated = {\n@@ -82,7 +86,11 @@\n \"\"\"\n \"\"\"\n try:\n- json_data = self.py3.request(self.url).json()\n+ if self.username is not None and self.password is not None:\n+ auth = (self.username, self.password)\n+ else:\n+ auth = None\n+ json_data = self.py3.request(self.url, auth=auth).json()\n json_data = self.py3.flatten_dict(json_data, self.delimiter, True)\n except self.py3.RequestException:\n json_data = None\n", "issue": "error basic auth url encoding\nI get the following error when attempting to use basic auth url encoding.\r\n\r\n```\r\n$ curl https://user:[email protected]/test.json \r\n{\"some\":\" data\"}\r\n\r\n$ cat config \r\norder += \"getjson test\"\r\n\r\ngetjson test {\r\n url = \"https://user:[email protected]/test.json\"\r\n format = \"{some}\"\r\n cache_timeout = 300\r\n}\r\n\r\n$ py3status --config config --log-file log-file --wm sway\r\n{\"version\": 1, \"click_events\": true, \"stop_signal\": 20}\r\n[[]\r\n,[{\"full_text\": \"getjson test\", \"color\": \"#FF0000\", \"instance\": \"test\", \"name\": \"getjson\"}]\r\n^C\r\n\r\n$ cat log-file\r\n2019-12-15 13:00:36 INFO ========\r\n2019-12-15 13:00:36 INFO Starting py3status version 3.22 python 3.8.0\r\n2019-12-15 13:00:36 INFO window manager: sway\r\n2019-12-15 13:00:36 INFO config file: config\r\n2019-12-15 13:00:36 INFO modules include paths: []\r\n2019-12-15 13:00:36 INFO loading module \"getjson test\" from py3status.modules.getjson\r\n2019-12-15 13:00:36 INFO starting module getjson test\r\n2019-12-15 13:00:36 WARNING Instance `getjson test`, user method `getjson` failed (InvalidURL) request.py line 72.\r\n2019-12-15 13:00:36 INFO Traceback\r\nInvalidURL: nonnumeric port: '[email protected]'\r\n File \"/usr/lib/python3.8/site-packages/py3status/module.py\", line 946, in run\r\n response = method()\r\n File \"/usr/lib/python3.8/site-packages/py3status/modules/getjson.py\", line 85, in getjson\r\n json_data = self.py3.request(self.url).json()\r\n File \"/usr/lib/python3.8/site-packages/py3status/py3.py\", line 1318, in request\r\n return get_http_response()\r\n File \"/usr/lib/python3.8/site-packages/py3status/py3.py\", line 1306, in get_http_response\r\n return HttpResponse(\r\n File \"/usr/lib/python3.8/site-packages/py3status/request.py\", line 72, in __init__\r\n self._response = urlopen(request, data=data, timeout=timeout)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 222, in urlopen\r\n return opener.open(url, data, timeout)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 525, in open\r\n response = self._open(req, data)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 542, in _open\r\n result = self._call_chain(self.handle_open, protocol, protocol +\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 502, in _call_chain\r\n 
result = func(*args)\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 1360, in https_open\r\n return self.do_open(http.client.HTTPSConnection, req,\r\n File \"/usr/lib/python3.8/urllib/request.py\", line 1286, in do_open\r\n h = http_class(host, timeout=req.timeout, **http_conn_args)\r\n File \"/usr/lib/python3.8/http/client.py\", line 1357, in __init__\r\n super(HTTPSConnection, self).__init__(host, port, timeout,\r\n File \"/usr/lib/python3.8/http/client.py\", line 829, in __init__\r\n (self.host, self.port) = self._get_hostport(host, port)\r\n File \"/usr/lib/python3.8/http/client.py\", line 870, in _get_hostport\r\n raise InvalidURL(\"nonnumeric port: '%s'\" % host[i+1:])\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay JSON data fetched from a URL.\n\nThis module gets the given `url` configuration parameter and assumes the\nresponse is a JSON object. The keys of the JSON object are used as the format\nplaceholders. The format placeholders are replaced by the value. Objects that\nare nested can be accessed by using the `delimiter` configuration parameter\nin between.\n\nConfiguration parameters:\n cache_timeout: refresh interval for this module (default 30)\n delimiter: the delimiter between parent and child objects (default '-')\n format: display format for this module (default None)\n url: specify URL to fetch JSON from (default None)\n\nFormat placeholders:\n Placeholders will be replaced by the JSON keys.\n\n Placeholders for objects with sub-objects are flattened using 'delimiter'\n in between (eg. {'parent': {'child': 'value'}} will use placeholder\n {parent-child}).\n\n Placeholders for list elements have 'delimiter' followed by the index\n (eg. {'parent': ['this', 'that']) will use placeholders {parent-0}\n for 'this' and {parent-1} for 'that'.\n\nExamples:\n```\n# straightforward key replacement\nurl = 'http://ip-api.com/json'\nformat = '{lat}, {lon}'\n\n# access child objects\nurl = 'https://api.icndb.com/jokes/random'\nformat = '{value-joke}'\n\n# access title from 0th element of articles list\nurl = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey={KEY}'\nformat = '{articles-0-title}'\n\n# access if top-level object is a list\nurl = 'https://jsonplaceholder.typicode.com/posts/1/comments'\nformat = '{0-name}'\n```\n\n@author vicyap\n\nSAMPLE OUTPUT\n{'full_text': 'Github: Everything operating normally'}\n\"\"\"\n\nSTRING_ERROR = \"missing url\"\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n\n # available configuration parameters\n cache_timeout = 30\n delimiter = \"-\"\n format = None\n url = None\n\n class Meta:\n deprecated = {\n \"rename\": [\n {\n \"param\": \"timeout\",\n \"new\": \"request_timeout\",\n \"msg\": \"obsolete parameter use `request_timeout`\",\n }\n ]\n }\n\n def post_config_hook(self):\n if not self.url:\n raise Exception(STRING_ERROR)\n\n def getjson(self):\n \"\"\"\n \"\"\"\n try:\n json_data = self.py3.request(self.url).json()\n json_data = self.py3.flatten_dict(json_data, self.delimiter, True)\n except self.py3.RequestException:\n json_data = None\n\n if json_data:\n full_text = self.py3.safe_format(self.format, json_data)\n else:\n full_text = \"\"\n\n return {\n \"cached_until\": self.py3.time_in(self.cache_timeout),\n \"full_text\": full_text,\n }\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n\n module_test(Py3status)\n", "path": "py3status/modules/getjson.py"}]}
| 2,405 | 318 |
gh_patches_debug_9779
|
rasdani/github-patches
|
git_diff
|
kedro-org__kedro-1057
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update pyarrow requirement to support newer versions
## Description
The current pyarrow requirements don't allow pyarrow > 4.0.0, which is old (current version = 6.0.0).
</issue>
<code>
[start of setup.py]
1 import re
2 from codecs import open
3 from glob import glob
4 from itertools import chain
5 from os import path
6
7 from setuptools import find_packages, setup
8
9 name = "kedro"
10 here = path.abspath(path.dirname(__file__))
11
12
13 PANDAS = "pandas>=0.24"
14 SPARK = "pyspark>=2.2, <4.0"
15 HDFS = "hdfs>=2.5.8, <3.0"
16 S3FS = "s3fs>=0.3.0, <0.5"
17
18 # get package version
19 with open(path.join(here, name, "__init__.py"), encoding="utf-8") as f:
20 result = re.search(r'__version__ = ["\']([^"\']+)', f.read())
21
22 if not result:
23 raise ValueError("Can't find the version in kedro/__init__.py")
24
25 version = result.group(1)
26
27 # get the dependencies and installs
28 with open("requirements.txt", encoding="utf-8") as f:
29 requires = [x.strip() for x in f if x.strip()]
30
31 # get test dependencies and installs
32 with open("test_requirements.txt", encoding="utf-8") as f:
33 test_requires = [x.strip() for x in f if x.strip() and not x.startswith("-r")]
34
35
36 # Get the long description from the README file
37 with open(path.join(here, "README.md"), encoding="utf-8") as f:
38 readme = f.read()
39
40 doc_html_files = [
41 name.replace("kedro/", "", 1)
42 for name in glob("kedro/framework/html/**/*", recursive=True)
43 ]
44
45 template_files = []
46 for pattern in ["**/*", "**/.*", "**/.*/**", "**/.*/.**"]:
47 template_files.extend(
48 [
49 name.replace("kedro/", "", 1)
50 for name in glob("kedro/templates/" + pattern, recursive=True)
51 ]
52 )
53
54
55 def _collect_requirements(requires):
56 return sorted(set(chain.from_iterable(requires.values())))
57
58
59 api_require = {"api.APIDataSet": ["requests~=2.20"]}
60 biosequence_require = {"biosequence.BioSequenceDataSet": ["biopython~=1.73"]}
61 dask_require = {
62 "dask.ParquetDataSet": [
63 "dask>=2021.10.0, <2022.01; python_version > '3.6'",
64 "dask[complete]~=2.6; python_version == '3.6'",
65 ]
66 }
67 geopandas_require = {
68 "geopandas.GeoJSONDataSet": ["geopandas>=0.6.0, <1.0", "pyproj>=2.2.0, <3.0"]
69 }
70 matplotlib_require = {"matplotlib.MatplotlibWriter": ["matplotlib>=3.0.3, <4.0"]}
71 holoviews_require = {"holoviews.HoloviewsWriter": ["holoviews~=1.13.0"]}
72 networkx_require = {"networkx.NetworkXDataSet": ["networkx~=2.4"]}
73 pandas_require = {
74 "pandas.CSVDataSet": [PANDAS],
75 "pandas.ExcelDataSet": [PANDAS, "xlrd~=1.0", "xlsxwriter~=1.0"],
76 "pandas.AppendableExcelDataSet": [PANDAS, "openpyxl>=3.0.3, <4.0"],
77 "pandas.FeatherDataSet": [PANDAS],
78 "pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
79 "pandas.HDFDataSet": [PANDAS, "tables~=3.6"],
80 "pandas.JSONDataSet": [PANDAS],
81 "pandas.ParquetDataSet": [PANDAS, "pyarrow>=0.12.0, <4.0"],
82 "pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
83 "pandas.SQLQueryDataSet": [PANDAS, "SQLAlchemy~=1.2"],
84 "pandas.GenericDataSet": [PANDAS],
85 }
86 pillow_require = {"pillow.ImageDataSet": ["Pillow~=8.0"]}
87 plotly_require = {
88 "plotly.PlotlyDataSet": [PANDAS, "plotly>=4.8.0, <6.0"],
89 "plotly.JSONDataSet": ["plotly>=4.8.0, <6.0"],
90 }
91 spark_require = {
92 "spark.SparkDataSet": [SPARK, HDFS, S3FS],
93 "spark.SparkHiveDataSet": [SPARK, HDFS, S3FS],
94 "spark.SparkJDBCDataSet": [SPARK, HDFS, S3FS],
95 }
96 tensorflow_required = {
97 "tensorflow.TensorflowModelDataset": [
98 # currently only TensorFlow V2 supported for saving and loading.
99 # V1 requires HDF5 and serializes differently
100 "tensorflow~=2.0"
101 ]
102 }
103 yaml_require = {"yaml.YAMLDataSet": [PANDAS, "PyYAML>=4.2, <6.0"]}
104
105 extras_require = {
106 "api": _collect_requirements(api_require),
107 "biosequence": _collect_requirements(biosequence_require),
108 "dask": _collect_requirements(dask_require),
109 "docs": [
110 "docutils==0.16",
111 "sphinx~=3.4.3",
112 "sphinx_rtd_theme==0.4.1",
113 "nbsphinx==0.8.1",
114 "nbstripout~=0.4",
115 "recommonmark==0.7.1",
116 "sphinx-autodoc-typehints==1.11.1",
117 "sphinx_copybutton==0.3.1",
118 "ipykernel>=5.3, <7.0",
119 ],
120 "geopandas": _collect_requirements(geopandas_require),
121 "ipython": ["ipython~=7.10"],
122 "matplotlib": _collect_requirements(matplotlib_require),
123 "holoviews": _collect_requirements(holoviews_require),
124 "networkx": _collect_requirements(networkx_require),
125 "notebook_templates": ["nbconvert>=5.3.1, <6.0", "nbformat~=4.4"],
126 "pandas": _collect_requirements(pandas_require),
127 "pillow": _collect_requirements(pillow_require),
128 "plotly": _collect_requirements(plotly_require),
129 "profilers": ["memory_profiler>=0.50.0, <1.0"],
130 "spark": _collect_requirements(spark_require),
131 "tensorflow": _collect_requirements(tensorflow_required),
132 "yaml": _collect_requirements(yaml_require),
133 **api_require,
134 **biosequence_require,
135 **dask_require,
136 **geopandas_require,
137 **matplotlib_require,
138 **holoviews_require,
139 **networkx_require,
140 **pandas_require,
141 **pillow_require,
142 **plotly_require,
143 **spark_require,
144 **tensorflow_required,
145 **yaml_require,
146 }
147
148 extras_require["all"] = _collect_requirements(extras_require)
149
150 setup(
151 name=name,
152 version=version,
153 description="Kedro helps you build production-ready data and analytics pipelines",
154 license="Apache Software License (Apache 2.0)",
155 long_description=readme,
156 long_description_content_type="text/markdown",
157 url="https://github.com/quantumblacklabs/kedro",
158 python_requires=">=3.6, <3.9",
159 packages=find_packages(exclude=["docs*", "tests*", "tools*", "features*"]),
160 include_package_data=True,
161 tests_require=test_requires,
162 install_requires=requires,
163 author="QuantumBlack Labs",
164 entry_points={"console_scripts": ["kedro = kedro.framework.cli:main"]},
165 package_data={
166 name: ["py.typed", "test_requirements.txt"] + template_files + doc_html_files
167 },
168 zip_safe=False,
169 keywords="pipelines, machine learning, data pipelines, data science, data engineering",
170 classifiers=[
171 "Development Status :: 4 - Beta",
172 "Programming Language :: Python :: 3.6",
173 "Programming Language :: Python :: 3.7",
174 "Programming Language :: Python :: 3.8",
175 ],
176 extras_require=extras_require,
177 )
178
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -78,7 +78,7 @@
"pandas.GBQTableDataSet": [PANDAS, "pandas-gbq>=0.12.0, <1.0"],
"pandas.HDFDataSet": [PANDAS, "tables~=3.6"],
"pandas.JSONDataSet": [PANDAS],
- "pandas.ParquetDataSet": [PANDAS, "pyarrow>=0.12.0, <4.0"],
+ "pandas.ParquetDataSet": [PANDAS, "pyarrow>=1.0, <7.0"],
"pandas.SQLTableDataSet": [PANDAS, "SQLAlchemy~=1.2"],
"pandas.SQLQueryDataSet": [PANDAS, "SQLAlchemy~=1.2"],
"pandas.GenericDataSet": [PANDAS],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,7 +78,7 @@\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6\"],\n \"pandas.JSONDataSet\": [PANDAS],\n- \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=0.12.0, <4.0\"],\n+ \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=1.0, <7.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n", "issue": "Update pyarrow requirement to support newer versions\n## Description\r\nThe current pyarrow requirements don't allow pyarrow > 4.0.0, which is old (current version = 6.0.0).\r\n\n", "before_files": [{"content": "import re\nfrom codecs import open\nfrom glob import glob\nfrom itertools import chain\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nname = \"kedro\"\nhere = path.abspath(path.dirname(__file__))\n\n\nPANDAS = \"pandas>=0.24\"\nSPARK = \"pyspark>=2.2, <4.0\"\nHDFS = \"hdfs>=2.5.8, <3.0\"\nS3FS = \"s3fs>=0.3.0, <0.5\"\n\n# get package version\nwith open(path.join(here, name, \"__init__.py\"), encoding=\"utf-8\") as f:\n result = re.search(r'__version__ = [\"\\']([^\"\\']+)', f.read())\n\n if not result:\n raise ValueError(\"Can't find the version in kedro/__init__.py\")\n\n version = result.group(1)\n\n# get the dependencies and installs\nwith open(\"requirements.txt\", encoding=\"utf-8\") as f:\n requires = [x.strip() for x in f if x.strip()]\n\n# get test dependencies and installs\nwith open(\"test_requirements.txt\", encoding=\"utf-8\") as f:\n test_requires = [x.strip() for x in f if x.strip() and not x.startswith(\"-r\")]\n\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n readme = f.read()\n\ndoc_html_files = [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/framework/html/**/*\", recursive=True)\n]\n\ntemplate_files = []\nfor pattern in [\"**/*\", \"**/.*\", \"**/.*/**\", \"**/.*/.**\"]:\n template_files.extend(\n [\n name.replace(\"kedro/\", \"\", 1)\n for name in glob(\"kedro/templates/\" + pattern, recursive=True)\n ]\n )\n\n\ndef _collect_requirements(requires):\n return sorted(set(chain.from_iterable(requires.values())))\n\n\napi_require = {\"api.APIDataSet\": [\"requests~=2.20\"]}\nbiosequence_require = {\"biosequence.BioSequenceDataSet\": [\"biopython~=1.73\"]}\ndask_require = {\n \"dask.ParquetDataSet\": [\n \"dask>=2021.10.0, <2022.01; python_version > '3.6'\",\n \"dask[complete]~=2.6; python_version == '3.6'\",\n ]\n}\ngeopandas_require = {\n \"geopandas.GeoJSONDataSet\": [\"geopandas>=0.6.0, <1.0\", \"pyproj>=2.2.0, <3.0\"]\n}\nmatplotlib_require = {\"matplotlib.MatplotlibWriter\": [\"matplotlib>=3.0.3, <4.0\"]}\nholoviews_require = {\"holoviews.HoloviewsWriter\": [\"holoviews~=1.13.0\"]}\nnetworkx_require = {\"networkx.NetworkXDataSet\": [\"networkx~=2.4\"]}\npandas_require = {\n \"pandas.CSVDataSet\": [PANDAS],\n \"pandas.ExcelDataSet\": [PANDAS, \"xlrd~=1.0\", \"xlsxwriter~=1.0\"],\n \"pandas.AppendableExcelDataSet\": [PANDAS, \"openpyxl>=3.0.3, <4.0\"],\n \"pandas.FeatherDataSet\": [PANDAS],\n \"pandas.GBQTableDataSet\": [PANDAS, \"pandas-gbq>=0.12.0, <1.0\"],\n \"pandas.HDFDataSet\": [PANDAS, \"tables~=3.6\"],\n \"pandas.JSONDataSet\": [PANDAS],\n \"pandas.ParquetDataSet\": [PANDAS, \"pyarrow>=0.12.0, <4.0\"],\n \"pandas.SQLTableDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n 
\"pandas.SQLQueryDataSet\": [PANDAS, \"SQLAlchemy~=1.2\"],\n \"pandas.GenericDataSet\": [PANDAS],\n}\npillow_require = {\"pillow.ImageDataSet\": [\"Pillow~=8.0\"]}\nplotly_require = {\n \"plotly.PlotlyDataSet\": [PANDAS, \"plotly>=4.8.0, <6.0\"],\n \"plotly.JSONDataSet\": [\"plotly>=4.8.0, <6.0\"],\n}\nspark_require = {\n \"spark.SparkDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkHiveDataSet\": [SPARK, HDFS, S3FS],\n \"spark.SparkJDBCDataSet\": [SPARK, HDFS, S3FS],\n}\ntensorflow_required = {\n \"tensorflow.TensorflowModelDataset\": [\n # currently only TensorFlow V2 supported for saving and loading.\n # V1 requires HDF5 and serializes differently\n \"tensorflow~=2.0\"\n ]\n}\nyaml_require = {\"yaml.YAMLDataSet\": [PANDAS, \"PyYAML>=4.2, <6.0\"]}\n\nextras_require = {\n \"api\": _collect_requirements(api_require),\n \"biosequence\": _collect_requirements(biosequence_require),\n \"dask\": _collect_requirements(dask_require),\n \"docs\": [\n \"docutils==0.16\",\n \"sphinx~=3.4.3\",\n \"sphinx_rtd_theme==0.4.1\",\n \"nbsphinx==0.8.1\",\n \"nbstripout~=0.4\",\n \"recommonmark==0.7.1\",\n \"sphinx-autodoc-typehints==1.11.1\",\n \"sphinx_copybutton==0.3.1\",\n \"ipykernel>=5.3, <7.0\",\n ],\n \"geopandas\": _collect_requirements(geopandas_require),\n \"ipython\": [\"ipython~=7.10\"],\n \"matplotlib\": _collect_requirements(matplotlib_require),\n \"holoviews\": _collect_requirements(holoviews_require),\n \"networkx\": _collect_requirements(networkx_require),\n \"notebook_templates\": [\"nbconvert>=5.3.1, <6.0\", \"nbformat~=4.4\"],\n \"pandas\": _collect_requirements(pandas_require),\n \"pillow\": _collect_requirements(pillow_require),\n \"plotly\": _collect_requirements(plotly_require),\n \"profilers\": [\"memory_profiler>=0.50.0, <1.0\"],\n \"spark\": _collect_requirements(spark_require),\n \"tensorflow\": _collect_requirements(tensorflow_required),\n \"yaml\": _collect_requirements(yaml_require),\n **api_require,\n **biosequence_require,\n **dask_require,\n **geopandas_require,\n **matplotlib_require,\n **holoviews_require,\n **networkx_require,\n **pandas_require,\n **pillow_require,\n **plotly_require,\n **spark_require,\n **tensorflow_required,\n **yaml_require,\n}\n\nextras_require[\"all\"] = _collect_requirements(extras_require)\n\nsetup(\n name=name,\n version=version,\n description=\"Kedro helps you build production-ready data and analytics pipelines\",\n license=\"Apache Software License (Apache 2.0)\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/quantumblacklabs/kedro\",\n python_requires=\">=3.6, <3.9\",\n packages=find_packages(exclude=[\"docs*\", \"tests*\", \"tools*\", \"features*\"]),\n include_package_data=True,\n tests_require=test_requires,\n install_requires=requires,\n author=\"QuantumBlack Labs\",\n entry_points={\"console_scripts\": [\"kedro = kedro.framework.cli:main\"]},\n package_data={\n name: [\"py.typed\", \"test_requirements.txt\"] + template_files + doc_html_files\n },\n zip_safe=False,\n keywords=\"pipelines, machine learning, data pipelines, data science, data engineering\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
| 2,829 | 209 |
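For readers skimming the setup.py extras in the row above: the `_collect_requirements` helper that builds the `all` extra simply flattens a mapping of requirement lists into one sorted, de-duplicated list. A minimal sketch, using a toy extras mapping rather than the real kedro pins:

```
from itertools import chain

def _collect_requirements(requires):
    # flatten every requirement list, de-duplicate, and sort for a stable result
    return sorted(set(chain.from_iterable(requires.values())))

# toy extras mapping for illustration only -- not the real kedro pin set
extras = {
    "pandas.CSVDataSet": ["pandas>=0.24"],
    "pandas.ParquetDataSet": ["pandas>=0.24", "pyarrow>=1.0, <7.0"],
}
print(_collect_requirements(extras))   # ['pandas>=0.24', 'pyarrow>=1.0, <7.0']
```

This is why bumping a single pin such as pyarrow propagates automatically into the aggregated extras.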
gh_patches_debug_13188
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-5738
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/zero/gemini/gemini_mgr.py]
1 import functools
2 from time import time
3 from typing import Dict, Iterable, List, Optional, Tuple
4
5 import torch
6 import torch.distributed as dist
7
8 from .chunk import Chunk, ChunkManager
9 from .memory_tracer import ChunkMemStatsCollector, MemStats
10 from .placement_policy import PlacementPolicy, PlacementPolicyFactory
11
12
13 class GeminiManager:
14 """
15 Stateful Tensor Manager, inspired from PatrickStar
16
17 PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management
18 https://arxiv.org/abs/2108.05818
19
20 Args:
21 placement_policy (str): Which device to place *held* tensors. It can be 'static' and 'auto'.
22 If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well.
23 Note that 'auto' policy can only work well when no other processes use CUDA during your training.
24 chunk_manager (ChunkManager): A ``ChunkManager`` instance.
25 memstats (MemStats, optional): a mem stats collected by a runtime mem tracer. if None then GeminiManager will collect it during a warmup iteration.
26 """
27
28 def __init__(
29 self,
30 placement_policy: str,
31 chunk_manager: ChunkManager,
32 memstats: Optional[MemStats] = None,
33 **placement_kwargs,
34 ) -> None:
35 assert placement_policy in PlacementPolicyFactory.get_policy_names()
36 self.policy_name = placement_policy
37 policy_cls = PlacementPolicyFactory.create(placement_policy)
38 self._chunk_manager = chunk_manager
39
40 self._premade_memstats_ = memstats is not None
41 self._memstats = memstats
42 self._mem_stats_collector = (
43 ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None
44 )
45 self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs)
46 self._compute_list: List[Tuple[Chunk, ...]] = []
47 self._compute_idx: int = -1
48 self._async_works: Dict[Chunk, dist.Work] = {}
49
50 self._h2d_volume = 0
51 self._d2h_volume = 0
52 self._layout_time = 0
53 self._evict_time = 0
54 self._warmup = True
55 self._comp_cuda_demand_time = 0
56
57 def reset_attributes(self):
58 self._compute_idx = -1
59 self._h2d_volume = 0
60 self._d2h_volume = 0
61 self._layout_time = 0
62 self._evict_time = 0
63 self._comp_cuda_demand_time = 0
64
65 @property
66 def need_warmup(self) -> bool:
67 return self.policy_name in ("auto", "const")
68
69 def is_warmup(self):
70 return self._warmup
71
72 def memstats(self):
73 """memstats
74
75 get the memory statistics during training.
76 The stats could be collected by a runtime memory tracer, or collected by the GeminiManager.
77 Note, for the latter, you can not access the memstats before warmup iteration finishes.
78 """
79 if self._premade_memstats_:
80 return self._memstats
81 else:
82 assert not self._warmup, "Gemini Manager has memstats after warm up! Now is during warmup."
83 return self._mem_stats_collector._memstats
84
85 def pre_iter(self, *args):
86 if self._mem_stats_collector and self._warmup:
87 self._mem_stats_collector.start_collection()
88
89 def post_iter(self):
90 """This function must be called when each iteration finishes"""
91 if self._mem_stats_collector and self._warmup:
92 self._mem_stats_collector.finish_collection()
93 self._warmup = False
94 self.reset_attributes()
95
96 def adjust_layout(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:
97 """Adjust the layout of stateful tensors according to the information provided
98 by mem_stats_collector, which should belongs to a Sharded Model.
99 """
100 # find stateful tensor in state COMPUTE
101 start = time()
102 self._record_warmup_chunks_order(chunks, record_anyway=record_anyway)
103 cuda_demand, can_evict_chunks = self._get_layout_info(self._compute_idx, self._warmup, chunks)
104 # don't evict chunks that are asynchronously fetched
105 can_evict_chunks = [chunk for chunk in can_evict_chunks if chunk not in self._async_works]
106 self._layout_time += time() - start
107
108 vol, evict_time = self._placement_policy.evict_tensors(
109 can_evict_chunks=can_evict_chunks,
110 cuda_demand=cuda_demand,
111 warmup=self._warmup,
112 compute_list=self._compute_list,
113 compute_idx=self._compute_idx,
114 )
115
116 self._d2h_volume += vol
117 self._evict_time += evict_time
118 # move COMPUTE tensors to CUDA
119 self._h2d_volume += cuda_demand
120
121 def wait_chunks(self, chunks: Iterable[Chunk]) -> Tuple[Chunk]:
122 non_prefetched_chunks = []
123 for chunk in chunks:
124 if chunk in self._async_works:
125 self._async_works[chunk].wait()
126 del self._async_works[chunk]
127 else:
128 non_prefetched_chunks.append(chunk)
129 return tuple(non_prefetched_chunks)
130
131 def add_work(self, chunk: Chunk, work: dist.Work):
132 assert work is not None
133 assert chunk not in self._async_works
134 self._async_works[chunk] = work
135
136 @functools.lru_cache(maxsize=None)
137 def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]):
138 start = time()
139 cuda_demand = 0
140 for chunk in chunks:
141 if chunk.device_type == "cuda" or chunk.device_type == "npu":
142 if chunk.is_gathered:
143 pass
144 else:
145 cuda_demand += chunk.chunk_mem - chunk.shard_mem
146 elif chunk.device_type == "cpu":
147 cuda_demand += chunk.chunk_mem
148 else:
149 raise RuntimeError
150 self._comp_cuda_demand_time += time() - start
151
152 can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks()
153 return cuda_demand, can_evict_chunks
154
155 def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:
156 self._compute_idx += 1
157 if self._warmup and (self._placement_policy.need_mem_stats or record_anyway):
158 self._compute_list.append(chunks)
159
160 def sample_overall_data(self):
161 if self._mem_stats_collector:
162 self._mem_stats_collector.sample_overall_data()
163
164 def record_model_data_volume(self):
165 if self._mem_stats_collector:
166 self._mem_stats_collector.record_model_data_volume()
167
168 @property
169 def chunk_manager(self):
170 return self._chunk_manager
171
172 @property
173 def cuda_margin_mem(self) -> Optional[float]:
174 if self._mem_stats_collector:
175 return self._mem_stats_collector.cuda_margin_mem
176 return None
177
178 @property
179 def compute_list(self) -> List[Tuple[Chunk, ...]]:
180 return self._compute_list
181
182 @property
183 def compute_idx(self) -> int:
184 return self._compute_idx
185
186 @property
187 def async_works(self) -> Dict[Chunk, dist.Work]:
188 return self._async_works
189
190 @property
191 def placement_policy(self) -> PlacementPolicy:
192 return self._placement_policy
193
194 @property
195 def is_cuda_margin_mem_avail(self) -> bool:
196 return self._placement_policy.need_mem_stats
197
198 def setup_grads_device(
199 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]
200 ) -> None:
201 self._placement_policy.setup_grads_device(params, grads_device_map)
202
[end of colossalai/zero/gemini/gemini_mgr.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/colossalai/zero/gemini/gemini_mgr.py b/colossalai/zero/gemini/gemini_mgr.py
--- a/colossalai/zero/gemini/gemini_mgr.py
+++ b/colossalai/zero/gemini/gemini_mgr.py
@@ -42,7 +42,9 @@
self._mem_stats_collector = (
ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None
)
- self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs)
+ self._placement_policy = policy_cls(
+ chunk_manager=chunk_manager, mem_stats_collector=self._mem_stats_collector, **placement_kwargs
+ )
self._compute_list: List[Tuple[Chunk, ...]] = []
self._compute_idx: int = -1
self._async_works: Dict[Chunk, dist.Work] = {}
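Read in isolation, the change above is a calling-convention fix: the placement policy is now constructed by keyword, without the manager instance as a leading positional argument. A stripped-down sketch, assuming a constructor shaped roughly like the one below (the real PlacementPolicy signature is not part of this listing):

```
class ToyPlacementPolicy:
    # hypothetical constructor shape, for illustration only
    def __init__(self, chunk_manager, mem_stats_collector=None, **kwargs):
        self.chunk_manager = chunk_manager
        self.mem_stats_collector = mem_stats_collector

chunk_manager, collector = object(), object()

# keyword binding, mirroring the patch: no stray leading argument, and the
# call stays correct even if the parameter order changes later
policy = ToyPlacementPolicy(chunk_manager=chunk_manager, mem_stats_collector=collector)
```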
|
{"golden_diff": "diff --git a/colossalai/zero/gemini/gemini_mgr.py b/colossalai/zero/gemini/gemini_mgr.py\n--- a/colossalai/zero/gemini/gemini_mgr.py\n+++ b/colossalai/zero/gemini/gemini_mgr.py\n@@ -42,7 +42,9 @@\n self._mem_stats_collector = (\n ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None\n )\n- self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs)\n+ self._placement_policy = policy_cls(\n+ chunk_manager=chunk_manager, mem_stats_collector=self._mem_stats_collector, **placement_kwargs\n+ )\n self._compute_list: List[Tuple[Chunk, ...]] = []\n self._compute_idx: int = -1\n self._async_works: Dict[Chunk, dist.Work] = {}\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import functools\nfrom time import time\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nimport torch\nimport torch.distributed as dist\n\nfrom .chunk import Chunk, ChunkManager\nfrom .memory_tracer import ChunkMemStatsCollector, MemStats\nfrom .placement_policy import PlacementPolicy, PlacementPolicyFactory\n\n\nclass GeminiManager:\n \"\"\"\n Stateful Tensor Manager, inspired from PatrickStar\n\n PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management\n https://arxiv.org/abs/2108.05818\n\n Args:\n placement_policy (str): Which device to place *held* tensors. It can be 'static' and 'auto'.\n If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well.\n Note that 'auto' policy can only work well when no other processes use CUDA during your training.\n chunk_manager (ChunkManager): A ``ChunkManager`` instance.\n memstats (MemStats, optional): a mem stats collected by a runtime mem tracer. 
if None then GeminiManager will collect it during a warmup iteration.\n \"\"\"\n\n def __init__(\n self,\n placement_policy: str,\n chunk_manager: ChunkManager,\n memstats: Optional[MemStats] = None,\n **placement_kwargs,\n ) -> None:\n assert placement_policy in PlacementPolicyFactory.get_policy_names()\n self.policy_name = placement_policy\n policy_cls = PlacementPolicyFactory.create(placement_policy)\n self._chunk_manager = chunk_manager\n\n self._premade_memstats_ = memstats is not None\n self._memstats = memstats\n self._mem_stats_collector = (\n ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None\n )\n self._placement_policy = policy_cls(self, chunk_manager, self._mem_stats_collector, **placement_kwargs)\n self._compute_list: List[Tuple[Chunk, ...]] = []\n self._compute_idx: int = -1\n self._async_works: Dict[Chunk, dist.Work] = {}\n\n self._h2d_volume = 0\n self._d2h_volume = 0\n self._layout_time = 0\n self._evict_time = 0\n self._warmup = True\n self._comp_cuda_demand_time = 0\n\n def reset_attributes(self):\n self._compute_idx = -1\n self._h2d_volume = 0\n self._d2h_volume = 0\n self._layout_time = 0\n self._evict_time = 0\n self._comp_cuda_demand_time = 0\n\n @property\n def need_warmup(self) -> bool:\n return self.policy_name in (\"auto\", \"const\")\n\n def is_warmup(self):\n return self._warmup\n\n def memstats(self):\n \"\"\"memstats\n\n get the memory statistics during training.\n The stats could be collected by a runtime memory tracer, or collected by the GeminiManager.\n Note, for the latter, you can not access the memstats before warmup iteration finishes.\n \"\"\"\n if self._premade_memstats_:\n return self._memstats\n else:\n assert not self._warmup, \"Gemini Manager has memstats after warm up! 
Now is during warmup.\"\n return self._mem_stats_collector._memstats\n\n def pre_iter(self, *args):\n if self._mem_stats_collector and self._warmup:\n self._mem_stats_collector.start_collection()\n\n def post_iter(self):\n \"\"\"This function must be called when each iteration finishes\"\"\"\n if self._mem_stats_collector and self._warmup:\n self._mem_stats_collector.finish_collection()\n self._warmup = False\n self.reset_attributes()\n\n def adjust_layout(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:\n \"\"\"Adjust the layout of stateful tensors according to the information provided\n by mem_stats_collector, which should belongs to a Sharded Model.\n \"\"\"\n # find stateful tensor in state COMPUTE\n start = time()\n self._record_warmup_chunks_order(chunks, record_anyway=record_anyway)\n cuda_demand, can_evict_chunks = self._get_layout_info(self._compute_idx, self._warmup, chunks)\n # don't evict chunks that are asynchronously fetched\n can_evict_chunks = [chunk for chunk in can_evict_chunks if chunk not in self._async_works]\n self._layout_time += time() - start\n\n vol, evict_time = self._placement_policy.evict_tensors(\n can_evict_chunks=can_evict_chunks,\n cuda_demand=cuda_demand,\n warmup=self._warmup,\n compute_list=self._compute_list,\n compute_idx=self._compute_idx,\n )\n\n self._d2h_volume += vol\n self._evict_time += evict_time\n # move COMPUTE tensors to CUDA\n self._h2d_volume += cuda_demand\n\n def wait_chunks(self, chunks: Iterable[Chunk]) -> Tuple[Chunk]:\n non_prefetched_chunks = []\n for chunk in chunks:\n if chunk in self._async_works:\n self._async_works[chunk].wait()\n del self._async_works[chunk]\n else:\n non_prefetched_chunks.append(chunk)\n return tuple(non_prefetched_chunks)\n\n def add_work(self, chunk: Chunk, work: dist.Work):\n assert work is not None\n assert chunk not in self._async_works\n self._async_works[chunk] = work\n\n @functools.lru_cache(maxsize=None)\n def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]):\n start = time()\n cuda_demand = 0\n for chunk in chunks:\n if chunk.device_type == \"cuda\" or chunk.device_type == \"npu\":\n if chunk.is_gathered:\n pass\n else:\n cuda_demand += chunk.chunk_mem - chunk.shard_mem\n elif chunk.device_type == \"cpu\":\n cuda_demand += chunk.chunk_mem\n else:\n raise RuntimeError\n self._comp_cuda_demand_time += time() - start\n\n can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks()\n return cuda_demand, can_evict_chunks\n\n def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None:\n self._compute_idx += 1\n if self._warmup and (self._placement_policy.need_mem_stats or record_anyway):\n self._compute_list.append(chunks)\n\n def sample_overall_data(self):\n if self._mem_stats_collector:\n self._mem_stats_collector.sample_overall_data()\n\n def record_model_data_volume(self):\n if self._mem_stats_collector:\n self._mem_stats_collector.record_model_data_volume()\n\n @property\n def chunk_manager(self):\n return self._chunk_manager\n\n @property\n def cuda_margin_mem(self) -> Optional[float]:\n if self._mem_stats_collector:\n return self._mem_stats_collector.cuda_margin_mem\n return None\n\n @property\n def compute_list(self) -> List[Tuple[Chunk, ...]]:\n return self._compute_list\n\n @property\n def compute_idx(self) -> int:\n return self._compute_idx\n\n @property\n def async_works(self) -> Dict[Chunk, dist.Work]:\n return self._async_works\n\n @property\n def placement_policy(self) -> PlacementPolicy:\n 
return self._placement_policy\n\n @property\n def is_cuda_margin_mem_avail(self) -> bool:\n return self._placement_policy.need_mem_stats\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n self._placement_policy.setup_grads_device(params, grads_device_map)\n", "path": "colossalai/zero/gemini/gemini_mgr.py"}]}
| 2,845 | 218 |
gh_patches_debug_27703
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1395
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pre-commit run --all-files only runs on what is staged still
When I run .git_hooks/pre-commit run --all-files, pre-commit still only runs on what is staged. In fact, passing any options to pre-commit doesn't seem to have any effect on what is executed.
```
git_hooks/pre-commit run --all-files
Trim Trailing Whitespace.............................(no files to check)Skipped
Fix End of Files.....................................(no files to check)Skipped
Check Yaml...........................................(no files to check)Skipped
Check for added large files..........................(no files to check)Skipped
Check JSON...........................................(no files to check)Skipped
Check Xml............................................(no files to check)Skipped
Check for merge conflicts............................(no files to check)Skipped
Pretty format JSON...................................(no files to check)Skipped
```
```
.git_hooks/pre-commit flake8
Trim Trailing Whitespace.............................(no files to check)Skipped
Fix End of Files.....................................(no files to check)Skipped
Check Yaml...........................................(no files to check)Skipped
Check for added large files..........................(no files to check)Skipped
Check JSON...........................................(no files to check)Skipped
Check Xml............................................(no files to check)Skipped
Check for merge conflicts............................(no files to check)Skipped
Pretty format JSON...................................(no files to check)Skipped
```
Wondering if my install is somehow incorrect?
Thx,
Phil
</issue>
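The listing below suggests why the extra command-line words are ignored: for the `pre-commit` hook type, `_run_ns` calls `_ns(hook_type, color)` without ever consulting `args`, so `all_files` keeps its default. A simplified sketch of that namespace construction (condensed from the module below, not a verbatim copy):

```
import argparse

def _ns(hook_type, color, all_files=False):
    # simplified from pre_commit.commands.hook_impl._ns shown below
    return argparse.Namespace(
        color=color,
        hook_stage=hook_type.replace('pre-', ''),
        all_files=all_files,   # stays at the default; argv is never consulted
        files=(),
        hook=None,
    )

# anything trailing the hook script ('run', '--all-files', 'flake8', ...) never reaches here
ns = _ns('pre-commit', color=False)
print(ns.all_files, ns.files)   # False ()
```

Running `pre-commit run --all-files` directly, rather than through the generated `.git/hooks/pre-commit` script, is the intended way to check the whole tree.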
<code>
[start of pre_commit/commands/hook_impl.py]
1 import argparse
2 import os.path
3 import subprocess
4 import sys
5 from typing import Optional
6 from typing import Sequence
7 from typing import Tuple
8
9 from pre_commit.commands.run import run
10 from pre_commit.envcontext import envcontext
11 from pre_commit.parse_shebang import normalize_cmd
12 from pre_commit.store import Store
13
14 Z40 = '0' * 40
15
16
17 def _run_legacy(
18 hook_type: str,
19 hook_dir: str,
20 args: Sequence[str],
21 ) -> Tuple[int, bytes]:
22 if os.environ.get('PRE_COMMIT_RUNNING_LEGACY'):
23 raise SystemExit(
24 f"bug: pre-commit's script is installed in migration mode\n"
25 f'run `pre-commit install -f --hook-type {hook_type}` to fix '
26 f'this\n\n'
27 f'Please report this bug at '
28 f'https://github.com/pre-commit/pre-commit/issues',
29 )
30
31 if hook_type == 'pre-push':
32 stdin = sys.stdin.buffer.read()
33 else:
34 stdin = b''
35
36 # not running in legacy mode
37 legacy_hook = os.path.join(hook_dir, f'{hook_type}.legacy')
38 if not os.access(legacy_hook, os.X_OK):
39 return 0, stdin
40
41 with envcontext((('PRE_COMMIT_RUNNING_LEGACY', '1'),)):
42 cmd = normalize_cmd((legacy_hook, *args))
43 return subprocess.run(cmd, input=stdin).returncode, stdin
44
45
46 def _validate_config(
47 retv: int,
48 config: str,
49 skip_on_missing_config: bool,
50 ) -> None:
51 if not os.path.isfile(config):
52 if skip_on_missing_config or os.getenv('PRE_COMMIT_ALLOW_NO_CONFIG'):
53 print(f'`{config}` config file not found. Skipping `pre-commit`.')
54 raise SystemExit(retv)
55 else:
56 print(
57 f'No {config} file was found\n'
58 f'- To temporarily silence this, run '
59 f'`PRE_COMMIT_ALLOW_NO_CONFIG=1 git ...`\n'
60 f'- To permanently silence this, install pre-commit with the '
61 f'--allow-missing-config option\n'
62 f'- To uninstall pre-commit run `pre-commit uninstall`',
63 )
64 raise SystemExit(1)
65
66
67 def _ns(
68 hook_type: str,
69 color: bool,
70 *,
71 all_files: bool = False,
72 from_ref: Optional[str] = None,
73 to_ref: Optional[str] = None,
74 remote_name: Optional[str] = None,
75 remote_url: Optional[str] = None,
76 commit_msg_filename: Optional[str] = None,
77 checkout_type: Optional[str] = None,
78 ) -> argparse.Namespace:
79 return argparse.Namespace(
80 color=color,
81 hook_stage=hook_type.replace('pre-', ''),
82 from_ref=from_ref,
83 to_ref=to_ref,
84 remote_name=remote_name,
85 remote_url=remote_url,
86 commit_msg_filename=commit_msg_filename,
87 all_files=all_files,
88 checkout_type=checkout_type,
89 files=(),
90 hook=None,
91 verbose=False,
92 show_diff_on_failure=False,
93 )
94
95
96 def _rev_exists(rev: str) -> bool:
97 return not subprocess.call(('git', 'rev-list', '--quiet', rev))
98
99
100 def _pre_push_ns(
101 color: bool,
102 args: Sequence[str],
103 stdin: bytes,
104 ) -> Optional[argparse.Namespace]:
105 remote_name = args[0]
106 remote_url = args[1]
107
108 for line in stdin.decode().splitlines():
109 _, local_sha, _, remote_sha = line.split()
110 if local_sha == Z40:
111 continue
112 elif remote_sha != Z40 and _rev_exists(remote_sha):
113 return _ns(
114 'pre-push', color,
115 from_ref=remote_sha, to_ref=local_sha,
116 remote_name=remote_name, remote_url=remote_url,
117 )
118 else:
119 # ancestors not found in remote
120 ancestors = subprocess.check_output((
121 'git', 'rev-list', local_sha, '--topo-order', '--reverse',
122 '--not', f'--remotes={remote_name}',
123 )).decode().strip()
124 if not ancestors:
125 continue
126 else:
127 first_ancestor = ancestors.splitlines()[0]
128 cmd = ('git', 'rev-list', '--max-parents=0', local_sha)
129 roots = set(subprocess.check_output(cmd).decode().splitlines())
130 if first_ancestor in roots:
131 # pushing the whole tree including root commit
132 return _ns(
133 'pre-push', color,
134 all_files=True,
135 remote_name=remote_name, remote_url=remote_url,
136 )
137 else:
138 rev_cmd = ('git', 'rev-parse', f'{first_ancestor}^')
139 source = subprocess.check_output(rev_cmd).decode().strip()
140 return _ns(
141 'pre-push', color,
142 from_ref=source, to_ref=local_sha,
143 remote_name=remote_name, remote_url=remote_url,
144 )
145
146 # nothing to push
147 return None
148
149
150 def _run_ns(
151 hook_type: str,
152 color: bool,
153 args: Sequence[str],
154 stdin: bytes,
155 ) -> Optional[argparse.Namespace]:
156 if hook_type == 'pre-push':
157 return _pre_push_ns(color, args, stdin)
158 elif hook_type in {'prepare-commit-msg', 'commit-msg'}:
159 return _ns(hook_type, color, commit_msg_filename=args[0])
160 elif hook_type in {'pre-merge-commit', 'pre-commit'}:
161 return _ns(hook_type, color)
162 elif hook_type == 'post-checkout':
163 return _ns(
164 hook_type, color,
165 from_ref=args[0], to_ref=args[1], checkout_type=args[2],
166 )
167 else:
168 raise AssertionError(f'unexpected hook type: {hook_type}')
169
170
171 def hook_impl(
172 store: Store,
173 *,
174 config: str,
175 color: bool,
176 hook_type: str,
177 hook_dir: str,
178 skip_on_missing_config: bool,
179 args: Sequence[str],
180 ) -> int:
181 retv, stdin = _run_legacy(hook_type, hook_dir, args)
182 _validate_config(retv, config, skip_on_missing_config)
183 ns = _run_ns(hook_type, color, args, stdin)
184 if ns is None:
185 return retv
186 else:
187 return retv | run(config, store, ns)
188
[end of pre_commit/commands/hook_impl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/commands/hook_impl.py b/pre_commit/commands/hook_impl.py
--- a/pre_commit/commands/hook_impl.py
+++ b/pre_commit/commands/hook_impl.py
@@ -147,15 +147,44 @@
return None
+_EXPECTED_ARG_LENGTH_BY_HOOK = {
+ 'commit-msg': 1,
+ 'post-checkout': 3,
+ 'pre-commit': 0,
+ 'pre-merge-commit': 0,
+ 'pre-push': 2,
+}
+
+
+def _check_args_length(hook_type: str, args: Sequence[str]) -> None:
+ if hook_type == 'prepare-commit-msg':
+ if len(args) < 1 or len(args) > 3:
+ raise SystemExit(
+ f'hook-impl for {hook_type} expected 1, 2, or 3 arguments '
+ f'but got {len(args)}: {args}',
+ )
+ elif hook_type in _EXPECTED_ARG_LENGTH_BY_HOOK:
+ expected = _EXPECTED_ARG_LENGTH_BY_HOOK[hook_type]
+ if len(args) != expected:
+ arguments_s = 'argument' if expected == 1 else 'arguments'
+ raise SystemExit(
+ f'hook-impl for {hook_type} expected {expected} {arguments_s} '
+ f'but got {len(args)}: {args}',
+ )
+ else:
+ raise AssertionError(f'unexpected hook type: {hook_type}')
+
+
def _run_ns(
hook_type: str,
color: bool,
args: Sequence[str],
stdin: bytes,
) -> Optional[argparse.Namespace]:
+ _check_args_length(hook_type, args)
if hook_type == 'pre-push':
return _pre_push_ns(color, args, stdin)
- elif hook_type in {'prepare-commit-msg', 'commit-msg'}:
+ elif hook_type in {'commit-msg', 'prepare-commit-msg'}:
return _ns(hook_type, color, commit_msg_filename=args[0])
elif hook_type in {'pre-merge-commit', 'pre-commit'}:
return _ns(hook_type, color)
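The effect of the added validation is that stray arguments now fail loudly instead of being silently dropped. A small sketch of the behaviour it introduces (restated here for illustration, not imported from the patch):

```
# expected argument counts, copied from the patch above
_EXPECTED = {'commit-msg': 1, 'post-checkout': 3, 'pre-commit': 0,
             'pre-merge-commit': 0, 'pre-push': 2}

def check_args_length(hook_type, args):
    # simplified restatement of the added _check_args_length
    expected = _EXPECTED[hook_type]
    if len(args) != expected:
        raise SystemExit(
            f'hook-impl for {hook_type} expected {expected} arguments '
            f'but got {len(args)}: {args}'
        )

check_args_length('pre-commit', ())   # ok: this hook takes no extra arguments
try:
    check_args_length('pre-commit', ('run', '--all-files'))
except SystemExit as exc:
    print(exc)   # hook-impl for pre-commit expected 0 arguments but got 2: ('run', '--all-files')
```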
|
{"golden_diff": "diff --git a/pre_commit/commands/hook_impl.py b/pre_commit/commands/hook_impl.py\n--- a/pre_commit/commands/hook_impl.py\n+++ b/pre_commit/commands/hook_impl.py\n@@ -147,15 +147,44 @@\n return None\n \n \n+_EXPECTED_ARG_LENGTH_BY_HOOK = {\n+ 'commit-msg': 1,\n+ 'post-checkout': 3,\n+ 'pre-commit': 0,\n+ 'pre-merge-commit': 0,\n+ 'pre-push': 2,\n+}\n+\n+\n+def _check_args_length(hook_type: str, args: Sequence[str]) -> None:\n+ if hook_type == 'prepare-commit-msg':\n+ if len(args) < 1 or len(args) > 3:\n+ raise SystemExit(\n+ f'hook-impl for {hook_type} expected 1, 2, or 3 arguments '\n+ f'but got {len(args)}: {args}',\n+ )\n+ elif hook_type in _EXPECTED_ARG_LENGTH_BY_HOOK:\n+ expected = _EXPECTED_ARG_LENGTH_BY_HOOK[hook_type]\n+ if len(args) != expected:\n+ arguments_s = 'argument' if expected == 1 else 'arguments'\n+ raise SystemExit(\n+ f'hook-impl for {hook_type} expected {expected} {arguments_s} '\n+ f'but got {len(args)}: {args}',\n+ )\n+ else:\n+ raise AssertionError(f'unexpected hook type: {hook_type}')\n+\n+\n def _run_ns(\n hook_type: str,\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n ) -> Optional[argparse.Namespace]:\n+ _check_args_length(hook_type, args)\n if hook_type == 'pre-push':\n return _pre_push_ns(color, args, stdin)\n- elif hook_type in {'prepare-commit-msg', 'commit-msg'}:\n+ elif hook_type in {'commit-msg', 'prepare-commit-msg'}:\n return _ns(hook_type, color, commit_msg_filename=args[0])\n elif hook_type in {'pre-merge-commit', 'pre-commit'}:\n return _ns(hook_type, color)\n", "issue": "pre-commt run --all-files only runs on what is staged still\nWhen I run .git_hooks/pre-commit run --all-files, pre-commit still only runs on what is staged. Infact, passing any options to pre-commit doesn't seem to have any affect on what is executed.\r\n\r\n```\r\ngit_hooks/pre-commit run --all-files\r\nTrim Trailing Whitespace.............................(no files to check)Skipped\r\nFix End of Files.....................................(no files to check)Skipped\r\nCheck Yaml...........................................(no files to check)Skipped\r\nCheck for added large files..........................(no files to check)Skipped\r\nCheck JSON...........................................(no files to check)Skipped\r\nCheck Xml............................................(no files to check)Skipped\r\nCheck for merge conflicts............................(no files to check)Skipped\r\nPretty format JSON...................................(no files to check)Skipped\r\n```\r\n\r\n\r\n```\r\n.git_hooks/pre-commit flake8\r\nTrim Trailing Whitespace.............................(no files to check)Skipped\r\nFix End of Files.....................................(no files to check)Skipped\r\nCheck Yaml...........................................(no files to check)Skipped\r\nCheck for added large files..........................(no files to check)Skipped\r\nCheck JSON...........................................(no files to check)Skipped\r\nCheck Xml............................................(no files to check)Skipped\r\nCheck for merge conflicts............................(no files to check)Skipped\r\nPretty format JSON...................................(no files to check)Skipped\r\n```\r\n\r\nWondering if my install is somehow incorrect?\r\n\r\nThx,\r\nPhil\n", "before_files": [{"content": "import argparse\nimport os.path\nimport subprocess\nimport sys\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nfrom pre_commit.commands.run import 
run\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.parse_shebang import normalize_cmd\nfrom pre_commit.store import Store\n\nZ40 = '0' * 40\n\n\ndef _run_legacy(\n hook_type: str,\n hook_dir: str,\n args: Sequence[str],\n) -> Tuple[int, bytes]:\n if os.environ.get('PRE_COMMIT_RUNNING_LEGACY'):\n raise SystemExit(\n f\"bug: pre-commit's script is installed in migration mode\\n\"\n f'run `pre-commit install -f --hook-type {hook_type}` to fix '\n f'this\\n\\n'\n f'Please report this bug at '\n f'https://github.com/pre-commit/pre-commit/issues',\n )\n\n if hook_type == 'pre-push':\n stdin = sys.stdin.buffer.read()\n else:\n stdin = b''\n\n # not running in legacy mode\n legacy_hook = os.path.join(hook_dir, f'{hook_type}.legacy')\n if not os.access(legacy_hook, os.X_OK):\n return 0, stdin\n\n with envcontext((('PRE_COMMIT_RUNNING_LEGACY', '1'),)):\n cmd = normalize_cmd((legacy_hook, *args))\n return subprocess.run(cmd, input=stdin).returncode, stdin\n\n\ndef _validate_config(\n retv: int,\n config: str,\n skip_on_missing_config: bool,\n) -> None:\n if not os.path.isfile(config):\n if skip_on_missing_config or os.getenv('PRE_COMMIT_ALLOW_NO_CONFIG'):\n print(f'`{config}` config file not found. Skipping `pre-commit`.')\n raise SystemExit(retv)\n else:\n print(\n f'No {config} file was found\\n'\n f'- To temporarily silence this, run '\n f'`PRE_COMMIT_ALLOW_NO_CONFIG=1 git ...`\\n'\n f'- To permanently silence this, install pre-commit with the '\n f'--allow-missing-config option\\n'\n f'- To uninstall pre-commit run `pre-commit uninstall`',\n )\n raise SystemExit(1)\n\n\ndef _ns(\n hook_type: str,\n color: bool,\n *,\n all_files: bool = False,\n from_ref: Optional[str] = None,\n to_ref: Optional[str] = None,\n remote_name: Optional[str] = None,\n remote_url: Optional[str] = None,\n commit_msg_filename: Optional[str] = None,\n checkout_type: Optional[str] = None,\n) -> argparse.Namespace:\n return argparse.Namespace(\n color=color,\n hook_stage=hook_type.replace('pre-', ''),\n from_ref=from_ref,\n to_ref=to_ref,\n remote_name=remote_name,\n remote_url=remote_url,\n commit_msg_filename=commit_msg_filename,\n all_files=all_files,\n checkout_type=checkout_type,\n files=(),\n hook=None,\n verbose=False,\n show_diff_on_failure=False,\n )\n\n\ndef _rev_exists(rev: str) -> bool:\n return not subprocess.call(('git', 'rev-list', '--quiet', rev))\n\n\ndef _pre_push_ns(\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n) -> Optional[argparse.Namespace]:\n remote_name = args[0]\n remote_url = args[1]\n\n for line in stdin.decode().splitlines():\n _, local_sha, _, remote_sha = line.split()\n if local_sha == Z40:\n continue\n elif remote_sha != Z40 and _rev_exists(remote_sha):\n return _ns(\n 'pre-push', color,\n from_ref=remote_sha, to_ref=local_sha,\n remote_name=remote_name, remote_url=remote_url,\n )\n else:\n # ancestors not found in remote\n ancestors = subprocess.check_output((\n 'git', 'rev-list', local_sha, '--topo-order', '--reverse',\n '--not', f'--remotes={remote_name}',\n )).decode().strip()\n if not ancestors:\n continue\n else:\n first_ancestor = ancestors.splitlines()[0]\n cmd = ('git', 'rev-list', '--max-parents=0', local_sha)\n roots = set(subprocess.check_output(cmd).decode().splitlines())\n if first_ancestor in roots:\n # pushing the whole tree including root commit\n return _ns(\n 'pre-push', color,\n all_files=True,\n remote_name=remote_name, remote_url=remote_url,\n )\n else:\n rev_cmd = ('git', 'rev-parse', f'{first_ancestor}^')\n source = 
subprocess.check_output(rev_cmd).decode().strip()\n return _ns(\n 'pre-push', color,\n from_ref=source, to_ref=local_sha,\n remote_name=remote_name, remote_url=remote_url,\n )\n\n # nothing to push\n return None\n\n\ndef _run_ns(\n hook_type: str,\n color: bool,\n args: Sequence[str],\n stdin: bytes,\n) -> Optional[argparse.Namespace]:\n if hook_type == 'pre-push':\n return _pre_push_ns(color, args, stdin)\n elif hook_type in {'prepare-commit-msg', 'commit-msg'}:\n return _ns(hook_type, color, commit_msg_filename=args[0])\n elif hook_type in {'pre-merge-commit', 'pre-commit'}:\n return _ns(hook_type, color)\n elif hook_type == 'post-checkout':\n return _ns(\n hook_type, color,\n from_ref=args[0], to_ref=args[1], checkout_type=args[2],\n )\n else:\n raise AssertionError(f'unexpected hook type: {hook_type}')\n\n\ndef hook_impl(\n store: Store,\n *,\n config: str,\n color: bool,\n hook_type: str,\n hook_dir: str,\n skip_on_missing_config: bool,\n args: Sequence[str],\n) -> int:\n retv, stdin = _run_legacy(hook_type, hook_dir, args)\n _validate_config(retv, config, skip_on_missing_config)\n ns = _run_ns(hook_type, color, args, stdin)\n if ns is None:\n return retv\n else:\n return retv | run(config, store, ns)\n", "path": "pre_commit/commands/hook_impl.py"}]}
| 2,712 | 479 |
gh_patches_debug_17814
|
rasdani/github-patches
|
git_diff
|
Qiskit__qiskit-7328
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Parameter needs examples in the documentation
### What is the expected enhancement?
Many users land in https://qiskit.org/documentation/stubs/qiskit.circuit.Parameter.html when searching for how to parametrize circuits. @Cryoris did a good job of explaining them in https://quantumcomputing.stackexchange.com/questions/13558/how-to-add-several-parameters-in-qiskit-circuit. I think the Parameter docstring needs a short example.
</issue>
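For context, the snippet the issue is asking for can be as small as the sketch below; the parameter name and angle are arbitrary, and `bind_parameters` is the binding API of this Qiskit generation:

```
from qiskit.circuit import QuantumCircuit, Parameter

theta = Parameter("theta")    # free symbol, no value yet
qc = QuantumCircuit(1)
qc.rx(theta, 0)               # the circuit is defined against the symbol

bound = qc.bind_parameters({theta: 3.14})   # supply the value later
print(bound.draw())
```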
<code>
[start of qiskit/circuit/parameter.py]
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2017, 2019.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12 """
13 Parameter Class for variable parameters.
14 """
15
16 from uuid import uuid4
17
18 from .parameterexpression import ParameterExpression
19
20 try:
21 import symengine
22
23 HAS_SYMENGINE = True
24 except ImportError:
25 HAS_SYMENGINE = False
26
27
28 class Parameter(ParameterExpression):
29 """Parameter Class for variable parameters."""
30
31 def __new__(cls, name, uuid=None): # pylint: disable=unused-argument
32 # Parameter relies on self._uuid being set prior to other attributes
33 # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash
34 # or __eq__ functions.
35 obj = object.__new__(cls)
36
37 if uuid is None:
38 obj._uuid = uuid4()
39 else:
40 obj._uuid = uuid
41
42 obj._hash = hash(obj._uuid)
43 return obj
44
45 def __getnewargs__(self):
46 # Unpickling won't in general call __init__ but will always call
47 # __new__. Specify arguments to be passed to __new__ when unpickling.
48
49 return (self.name, self._uuid)
50
51 def __init__(self, name: str):
52 """Create a new named :class:`Parameter`.
53
54 Args:
55 name: name of the ``Parameter``, used for visual representation. This can
56 be any unicode string, e.g. "ϕ".
57 """
58 self._name = name
59 if not HAS_SYMENGINE:
60 from sympy import Symbol
61
62 symbol = Symbol(name)
63 else:
64 symbol = symengine.Symbol(name)
65 super().__init__(symbol_map={self: symbol}, expr=symbol)
66
67 def subs(self, parameter_map: dict):
68 """Substitute self with the corresponding parameter in ``parameter_map``."""
69 return parameter_map[self]
70
71 @property
72 def name(self):
73 """Returns the name of the :class:`Parameter`."""
74 return self._name
75
76 def __str__(self):
77 return self.name
78
79 def __copy__(self):
80 return self
81
82 def __deepcopy__(self, memo=None):
83 return self
84
85 def __repr__(self):
86 return f"{self.__class__.__name__}({self.name})"
87
88 def __eq__(self, other):
89 if isinstance(other, Parameter):
90 return self._uuid == other._uuid
91 elif isinstance(other, ParameterExpression):
92 return super().__eq__(other)
93 else:
94 return False
95
96 def __hash__(self):
97 return self._hash
98
99 def __getstate__(self):
100 return {"name": self._name}
101
102 def __setstate__(self, state):
103 self._name = state["name"]
104 if not HAS_SYMENGINE:
105 from sympy import Symbol
106
107 symbol = Symbol(self._name)
108 else:
109 symbol = symengine.Symbol(self._name)
110 super().__init__(symbol_map={self: symbol}, expr=symbol)
111
[end of qiskit/circuit/parameter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py
--- a/qiskit/circuit/parameter.py
+++ b/qiskit/circuit/parameter.py
@@ -26,7 +26,32 @@
class Parameter(ParameterExpression):
- """Parameter Class for variable parameters."""
+ """Parameter Class for variable parameters.
+
+ A parameter is a variable value that is not required to be fixed
+ at circuit definition.
+
+ Examples:
+
+ Construct a variable-rotation X gate using circuit parameters.
+
+ .. jupyter-execute::
+
+ from qiskit.circuit import QuantumCircuit, Parameter
+
+ # create the parameter
+ phi = Parameter('phi')
+ qc = QuantumCircuit(1)
+
+ # parameterize the rotation
+ qc.rx(phi, 0)
+ qc.draw()
+
+ # bind the parameters after circuit to create a bound circuit
+ bc = qc.bind_parameters({phi: 3.14})
+ bc.measure_all()
+ bc.draw()
+ """
def __new__(cls, name, uuid=None): # pylint: disable=unused-argument
# Parameter relies on self._uuid being set prior to other attributes
|
{"golden_diff": "diff --git a/qiskit/circuit/parameter.py b/qiskit/circuit/parameter.py\n--- a/qiskit/circuit/parameter.py\n+++ b/qiskit/circuit/parameter.py\n@@ -26,7 +26,32 @@\n \n \n class Parameter(ParameterExpression):\n- \"\"\"Parameter Class for variable parameters.\"\"\"\n+ \"\"\"Parameter Class for variable parameters.\n+\n+ A parameter is a variable value that is not required to be fixed\n+ at circuit definition.\n+\n+ Examples:\n+\n+ Construct a variable-rotation X gate using circuit parameters.\n+\n+ .. jupyter-execute::\n+\n+ from qiskit.circuit import QuantumCircuit, Parameter\n+\n+ # create the parameter\n+ phi = Parameter('phi')\n+ qc = QuantumCircuit(1)\n+\n+ # parameterize the rotation\n+ qc.rx(phi, 0)\n+ qc.draw()\n+\n+ # bind the parameters after circuit to create a bound circuit\n+ bc = qc.bind_parameters({phi: 3.14})\n+ bc.measure_all()\n+ bc.draw()\n+ \"\"\"\n \n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n", "issue": "Parameters needs examples in the documentation\n### What is the expected enhancement?\n\nMany users land in https://qiskit.org/documentation/stubs/qiskit.circuit.Parameter.html when search on how to parametrize circuits. @Cryoris did a good jobs at explaining them in https://quantumcomputing.stackexchange.com/questions/13558/how-to-add-several-parameters-in-qiskit-circuit . I think the Parameter docstring needs a short example.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nParameter Class for variable parameters.\n\"\"\"\n\nfrom uuid import uuid4\n\nfrom .parameterexpression import ParameterExpression\n\ntry:\n import symengine\n\n HAS_SYMENGINE = True\nexcept ImportError:\n HAS_SYMENGINE = False\n\n\nclass Parameter(ParameterExpression):\n \"\"\"Parameter Class for variable parameters.\"\"\"\n\n def __new__(cls, name, uuid=None): # pylint: disable=unused-argument\n # Parameter relies on self._uuid being set prior to other attributes\n # (e.g. symbol_map) which may depend on self._uuid for Parameter's hash\n # or __eq__ functions.\n obj = object.__new__(cls)\n\n if uuid is None:\n obj._uuid = uuid4()\n else:\n obj._uuid = uuid\n\n obj._hash = hash(obj._uuid)\n return obj\n\n def __getnewargs__(self):\n # Unpickling won't in general call __init__ but will always call\n # __new__. Specify arguments to be passed to __new__ when unpickling.\n\n return (self.name, self._uuid)\n\n def __init__(self, name: str):\n \"\"\"Create a new named :class:`Parameter`.\n\n Args:\n name: name of the ``Parameter``, used for visual representation. This can\n be any unicode string, e.g. 
\"\u03d5\".\n \"\"\"\n self._name = name\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(name)\n else:\n symbol = symengine.Symbol(name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n\n def subs(self, parameter_map: dict):\n \"\"\"Substitute self with the corresponding parameter in ``parameter_map``.\"\"\"\n return parameter_map[self]\n\n @property\n def name(self):\n \"\"\"Returns the name of the :class:`Parameter`.\"\"\"\n return self._name\n\n def __str__(self):\n return self.name\n\n def __copy__(self):\n return self\n\n def __deepcopy__(self, memo=None):\n return self\n\n def __repr__(self):\n return f\"{self.__class__.__name__}({self.name})\"\n\n def __eq__(self, other):\n if isinstance(other, Parameter):\n return self._uuid == other._uuid\n elif isinstance(other, ParameterExpression):\n return super().__eq__(other)\n else:\n return False\n\n def __hash__(self):\n return self._hash\n\n def __getstate__(self):\n return {\"name\": self._name}\n\n def __setstate__(self, state):\n self._name = state[\"name\"]\n if not HAS_SYMENGINE:\n from sympy import Symbol\n\n symbol = Symbol(self._name)\n else:\n symbol = symengine.Symbol(self._name)\n super().__init__(symbol_map={self: symbol}, expr=symbol)\n", "path": "qiskit/circuit/parameter.py"}]}
| 1,617 | 280 |
gh_patches_debug_39684
|
rasdani/github-patches
|
git_diff
|
certbot__certbot-3230
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Nginx parser apparently can't parse "map"
I was looking at issues on the nginxparser project and found this one:
https://github.com/fatiherikli/nginxparser/issues/19
It appears to me that we can't parse configurations containing this. I got something like
`ParseException: Expected stringEnd (at char 100), (line:5, col:1)`
when trying to `nginxparser.loads()` a file that included that map directive.
cc @pde
</issue>
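A reproduction of the reporter's scenario might look like the sketch below; the config text is illustrative, and the quoted `ParseException` is what the issue says `loads()` raised on such input at the time:

```
from certbot_nginx import nginxparser

CONF = """\
http {
    map $http_upgrade $connection_upgrade {
        default upgrade;
        ''      close;
    }
}
"""

# Per the issue, feeding a config like this to loads() produced
# "ParseException: Expected stringEnd" at the time of the report.
tree = nginxparser.loads(CONF)
print(tree)
```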
<code>
[start of certbot-nginx/certbot_nginx/nginxparser.py]
1 """Very low-level nginx config parser based on pyparsing."""
2 # Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)
3 import copy
4 import logging
5 import string
6
7 from pyparsing import (
8 Literal, White, Word, alphanums, CharsNotIn, Combine, Forward, Group,
9 Optional, OneOrMore, Regex, ZeroOrMore)
10 from pyparsing import stringEnd
11 from pyparsing import restOfLine
12
13 logger = logging.getLogger(__name__)
14
15 class RawNginxParser(object):
16 # pylint: disable=expression-not-assigned
17 """A class that parses nginx configuration with pyparsing."""
18
19 # constants
20 space = Optional(White())
21 nonspace = Regex(r"\S+")
22 left_bracket = Literal("{").suppress()
23 right_bracket = space.leaveWhitespace() + Literal("}").suppress()
24 semicolon = Literal(";").suppress()
25 key = Word(alphanums + "_/+-.")
26 dollar_var = Combine(Literal('$') + nonspace)
27 condition = Regex(r"\(.+\)")
28 # Matches anything that is not a special character AND any chars in single
29 # or double quotes
30 value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+")
31 location = CharsNotIn("{};," + string.whitespace)
32 # modifier for location uri [ = | ~ | ~* | ^~ ]
33 modifier = Literal("=") | Literal("~*") | Literal("~") | Literal("^~")
34
35 # rules
36 comment = space + Literal('#') + restOfLine()
37
38 assignment = space + key + Optional(space + value, default=None) + semicolon
39 location_statement = space + Optional(modifier) + Optional(space + location + space)
40 if_statement = space + Literal("if") + space + condition + space
41 map_statement = space + Literal("map") + space + nonspace + space + dollar_var + space
42 block = Forward()
43
44 block << Group(
45 # key could for instance be "server" or "http", or "location" (in which case
46 # location_statement needs to have a non-empty location)
47 (Group(space + key + location_statement) ^ Group(if_statement) ^
48 Group(map_statement)).leaveWhitespace() +
49 left_bracket +
50 Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() +
51 right_bracket)
52
53 script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd
54 script.parseWithTabs()
55
56 def __init__(self, source):
57 self.source = source
58
59 def parse(self):
60 """Returns the parsed tree."""
61 return self.script.parseString(self.source)
62
63 def as_list(self):
64 """Returns the parsed tree as a list."""
65 return self.parse().asList()
66
67 class RawNginxDumper(object):
68 # pylint: disable=too-few-public-methods
69 """A class that dumps nginx configuration from the provided tree."""
70 def __init__(self, blocks):
71 self.blocks = blocks
72
73 def __iter__(self, blocks=None):
74 """Iterates the dumped nginx content."""
75 blocks = blocks or self.blocks
76 for b0 in blocks:
77 if isinstance(b0, str):
78 yield b0
79 continue
80 b = copy.deepcopy(b0)
81 if spacey(b[0]):
82 yield b.pop(0) # indentation
83 if not b:
84 continue
85 key, values = b.pop(0), b.pop(0)
86
87 if isinstance(key, list):
88 yield "".join(key) + '{'
89 for parameter in values:
90 for line in self.__iter__([parameter]): # negate "for b0 in blocks"
91 yield line
92 yield '}'
93 else:
94 if isinstance(key, str) and key.strip() == '#': # comment
95 yield key + values
96 else: # assignment
97 gap = ""
98 # Sometimes the parser has stuck some gap whitespace in here;
99 # if so rotate it into gap
100 if values and spacey(values):
101 gap = values
102 values = b.pop(0)
103 yield key + gap + values + ';'
104
105 def __str__(self):
106 """Return the parsed block as a string."""
107 return ''.join(self)
108
109
110 # Shortcut functions to respect Python's serialization interface
111 # (like pyyaml, pickle or json)
112
113 def loads(source):
114 """Parses from a string.
115
116     :param str source: The string to parse
117 :returns: The parsed tree
118 :rtype: list
119
120 """
121 return UnspacedList(RawNginxParser(source).as_list())
122
123
124 def load(_file):
125 """Parses from a file.
126
127 :param file _file: The file to parse
128 :returns: The parsed tree
129 :rtype: list
130
131 """
132 return loads(_file.read())
133
134
135 def dumps(blocks):
136 """Dump to a string.
137
138 :param UnspacedList block: The parsed tree
139 :param int indentation: The number of spaces to indent
140 :rtype: str
141
142 """
143 return str(RawNginxDumper(blocks.spaced))
144
145
146 def dump(blocks, _file):
147 """Dump to a file.
148
149 :param UnspacedList block: The parsed tree
150 :param file _file: The file to dump to
151 :param int indentation: The number of spaces to indent
152 :rtype: NoneType
153
154 """
155 return _file.write(dumps(blocks))
156
157
158 spacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''
159
160 class UnspacedList(list):
161 """Wrap a list [of lists], making any whitespace entries magically invisible"""
162
163 def __init__(self, list_source):
164 # ensure our argument is not a generator, and duplicate any sublists
165 self.spaced = copy.deepcopy(list(list_source))
166 self.dirty = False
167
168 # Turn self into a version of the source list that has spaces removed
169 # and all sub-lists also UnspacedList()ed
170 list.__init__(self, list_source)
171 for i, entry in reversed(list(enumerate(self))):
172 if isinstance(entry, list):
173 sublist = UnspacedList(entry)
174 list.__setitem__(self, i, sublist)
175 self.spaced[i] = sublist.spaced
176 elif spacey(entry):
177 # don't delete comments
178 if "#" not in self[:i]:
179 list.__delitem__(self, i)
180
181 def _coerce(self, inbound):
182 """
183 Coerce some inbound object to be appropriately usable in this object
184
185 :param inbound: string or None or list or UnspacedList
186 :returns: (coerced UnspacedList or string or None, spaced equivalent)
187 :rtype: tuple
188
189 """
190 if not isinstance(inbound, list): # str or None
191 return (inbound, inbound)
192 else:
193 if not hasattr(inbound, "spaced"):
194 inbound = UnspacedList(inbound)
195 return (inbound, inbound.spaced)
196
197
198 def insert(self, i, x):
199 item, spaced_item = self._coerce(x)
200 self.spaced.insert(self._spaced_position(i), spaced_item)
201 list.insert(self, i, item)
202 self.dirty = True
203
204 def append(self, x):
205 item, spaced_item = self._coerce(x)
206 self.spaced.append(spaced_item)
207 list.append(self, item)
208 self.dirty = True
209
210 def extend(self, x):
211 item, spaced_item = self._coerce(x)
212 self.spaced.extend(spaced_item)
213 list.extend(self, item)
214 self.dirty = True
215
216 def __add__(self, other):
217 l = copy.deepcopy(self)
218 l.extend(other)
219 l.dirty = True
220 return l
221
222 def pop(self, _i=None):
223 raise NotImplementedError("UnspacedList.pop() not yet implemented")
224 def remove(self, _):
225 raise NotImplementedError("UnspacedList.remove() not yet implemented")
226 def reverse(self):
227 raise NotImplementedError("UnspacedList.reverse() not yet implemented")
228 def sort(self, _cmp=None, _key=None, _Rev=None):
229 raise NotImplementedError("UnspacedList.sort() not yet implemented")
230 def __setslice__(self, _i, _j, _newslice):
231 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
232
233 def __setitem__(self, i, value):
234 if isinstance(i, slice):
235 raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
236 item, spaced_item = self._coerce(value)
237 self.spaced.__setitem__(self._spaced_position(i), spaced_item)
238 list.__setitem__(self, i, item)
239 self.dirty = True
240
241 def __delitem__(self, i):
242 self.spaced.__delitem__(self._spaced_position(i))
243 list.__delitem__(self, i)
244 self.dirty = True
245
246 def __deepcopy__(self, memo):
247 l = UnspacedList(self[:])
248 l.spaced = copy.deepcopy(self.spaced, memo=memo)
249 l.dirty = self.dirty
250 return l
251
252 def is_dirty(self):
253 """Recurse through the parse tree to figure out if any sublists are dirty"""
254 if self.dirty:
255 return True
256 return any((isinstance(x, list) and x.is_dirty() for x in self))
257
258 def _spaced_position(self, idx):
259 "Convert from indexes in the unspaced list to positions in the spaced one"
260 pos = spaces = 0
261 # Normalize indexes like list[-1] etc, and save the result
262 if idx < 0:
263 idx = len(self) + idx
264 if not 0 <= idx < len(self):
265 raise IndexError("list index out of range")
266 idx0 = idx
267 # Count the number of spaces in the spaced list before idx in the unspaced one
268 while idx != -1:
269 if spacey(self.spaced[pos]):
270 spaces += 1
271 else:
272 idx -= 1
273 pos += 1
274 return idx0 + spaces
275
[end of certbot-nginx/certbot_nginx/nginxparser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/certbot-nginx/certbot_nginx/nginxparser.py b/certbot-nginx/certbot_nginx/nginxparser.py
--- a/certbot-nginx/certbot_nginx/nginxparser.py
+++ b/certbot-nginx/certbot_nginx/nginxparser.py
@@ -23,10 +23,12 @@
right_bracket = space.leaveWhitespace() + Literal("}").suppress()
semicolon = Literal(";").suppress()
key = Word(alphanums + "_/+-.")
- dollar_var = Combine(Literal('$') + nonspace)
+ dollar_var = Combine(Literal('$') + Regex(r"[^\{\};,\s]+"))
condition = Regex(r"\(.+\)")
# Matches anything that is not a special character AND any chars in single
# or double quotes
+ # All of these COULD be upgraded to something like
+ # https://stackoverflow.com/a/16130746
value = Regex(r"((\".*\")?(\'.*\')?[^\{\};,]?)+")
location = CharsNotIn("{};," + string.whitespace)
# modifier for location uri [ = | ~ | ~* | ^~ ]
@@ -38,19 +40,35 @@
assignment = space + key + Optional(space + value, default=None) + semicolon
location_statement = space + Optional(modifier) + Optional(space + location + space)
if_statement = space + Literal("if") + space + condition + space
+
map_statement = space + Literal("map") + space + nonspace + space + dollar_var + space
- block = Forward()
+ # This is NOT an accurate way to parse nginx map entries; it's almost
+ # certianly too permissive and may be wrong in other ways, but it should
+ # preserve things correctly in mmmmost or all cases.
+ #
+ # - I can neither prove nor disprove that it is corect wrt all escaped
+ # semicolon situations
+ # Addresses https://github.com/fatiherikli/nginxparser/issues/19
+ map_pattern = Regex(r'".*"') | Regex(r"'.*'") | nonspace
+ map_entry = space + map_pattern + space + value + space + semicolon
+ map_block = Group(
+ # key could for instance be "server" or "http", or "location" (in which case
+ # location_statement needs to have a non-empty location)
+ Group(map_statement).leaveWhitespace() +
+ left_bracket +
+ Group(ZeroOrMore(Group(comment | map_entry)) + space).leaveWhitespace() +
+ right_bracket)
+ block = Forward()
block << Group(
# key could for instance be "server" or "http", or "location" (in which case
# location_statement needs to have a non-empty location)
- (Group(space + key + location_statement) ^ Group(if_statement) ^
- Group(map_statement)).leaveWhitespace() +
+ (Group(space + key + location_statement) ^ Group(if_statement)).leaveWhitespace() +
left_bracket +
- Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() +
- right_bracket)
+ Group(ZeroOrMore(Group(comment | assignment) | block | map_block) + space).leaveWhitespace()
+ + right_bracket)
- script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd
+ script = OneOrMore(Group(comment | assignment) ^ block ^ map_block) + space + stringEnd
script.parseWithTabs()
def __init__(self, source):
|
{"golden_diff": "diff --git a/certbot-nginx/certbot_nginx/nginxparser.py b/certbot-nginx/certbot_nginx/nginxparser.py\n--- a/certbot-nginx/certbot_nginx/nginxparser.py\n+++ b/certbot-nginx/certbot_nginx/nginxparser.py\n@@ -23,10 +23,12 @@\n right_bracket = space.leaveWhitespace() + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n key = Word(alphanums + \"_/+-.\")\n- dollar_var = Combine(Literal('$') + nonspace)\n+ dollar_var = Combine(Literal('$') + Regex(r\"[^\\{\\};,\\s]+\"))\n condition = Regex(r\"\\(.+\\)\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n+ # All of these COULD be upgraded to something like\n+ # https://stackoverflow.com/a/16130746\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n@@ -38,19 +40,35 @@\n assignment = space + key + Optional(space + value, default=None) + semicolon\n location_statement = space + Optional(modifier) + Optional(space + location + space)\n if_statement = space + Literal(\"if\") + space + condition + space\n+\n map_statement = space + Literal(\"map\") + space + nonspace + space + dollar_var + space\n- block = Forward()\n+ # This is NOT an accurate way to parse nginx map entries; it's almost\n+ # certianly too permissive and may be wrong in other ways, but it should\n+ # preserve things correctly in mmmmost or all cases.\n+ #\n+ # - I can neither prove nor disprove that it is corect wrt all escaped\n+ # semicolon situations\n+ # Addresses https://github.com/fatiherikli/nginxparser/issues/19\n+ map_pattern = Regex(r'\".*\"') | Regex(r\"'.*'\") | nonspace\n+ map_entry = space + map_pattern + space + value + space + semicolon\n+ map_block = Group(\n+ # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n+ # location_statement needs to have a non-empty location)\n+ Group(map_statement).leaveWhitespace() +\n+ left_bracket +\n+ Group(ZeroOrMore(Group(comment | map_entry)) + space).leaveWhitespace() +\n+ right_bracket)\n \n+ block = Forward()\n block << Group(\n # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n # location_statement needs to have a non-empty location)\n- (Group(space + key + location_statement) ^ Group(if_statement) ^\n- Group(map_statement)).leaveWhitespace() +\n+ (Group(space + key + location_statement) ^ Group(if_statement)).leaveWhitespace() +\n left_bracket +\n- Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() +\n- right_bracket)\n+ Group(ZeroOrMore(Group(comment | assignment) | block | map_block) + space).leaveWhitespace()\n+ + right_bracket)\n \n- script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd\n+ script = OneOrMore(Group(comment | assignment) ^ block ^ map_block) + space + stringEnd\n script.parseWithTabs()\n \n def __init__(self, source):\n", "issue": "Nginx parser apparently can't parse \"map\"\nI was looking at issues on the nginxparser project and found this one:\n\nhttps://github.com/fatiherikli/nginxparser/issues/19\n\nIt appears to me that we can't parse configurations containing this. 
I got something like\n\n`ParseException: Expected stringEnd (at char 100), (line:5, col:1)`\n\nwhen trying to `nginxparser.loads()` a file that included that map directive.\n\ncc @pde \n\n", "before_files": [{"content": "\"\"\"Very low-level nginx config parser based on pyparsing.\"\"\"\n# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)\nimport copy\nimport logging\nimport string\n\nfrom pyparsing import (\n Literal, White, Word, alphanums, CharsNotIn, Combine, Forward, Group,\n Optional, OneOrMore, Regex, ZeroOrMore)\nfrom pyparsing import stringEnd\nfrom pyparsing import restOfLine\n\nlogger = logging.getLogger(__name__)\n\nclass RawNginxParser(object):\n # pylint: disable=expression-not-assigned\n \"\"\"A class that parses nginx configuration with pyparsing.\"\"\"\n\n # constants\n space = Optional(White())\n nonspace = Regex(r\"\\S+\")\n left_bracket = Literal(\"{\").suppress()\n right_bracket = space.leaveWhitespace() + Literal(\"}\").suppress()\n semicolon = Literal(\";\").suppress()\n key = Word(alphanums + \"_/+-.\")\n dollar_var = Combine(Literal('$') + nonspace)\n condition = Regex(r\"\\(.+\\)\")\n # Matches anything that is not a special character AND any chars in single\n # or double quotes\n value = Regex(r\"((\\\".*\\\")?(\\'.*\\')?[^\\{\\};,]?)+\")\n location = CharsNotIn(\"{};,\" + string.whitespace)\n # modifier for location uri [ = | ~ | ~* | ^~ ]\n modifier = Literal(\"=\") | Literal(\"~*\") | Literal(\"~\") | Literal(\"^~\")\n\n # rules\n comment = space + Literal('#') + restOfLine()\n\n assignment = space + key + Optional(space + value, default=None) + semicolon\n location_statement = space + Optional(modifier) + Optional(space + location + space)\n if_statement = space + Literal(\"if\") + space + condition + space\n map_statement = space + Literal(\"map\") + space + nonspace + space + dollar_var + space\n block = Forward()\n\n block << Group(\n # key could for instance be \"server\" or \"http\", or \"location\" (in which case\n # location_statement needs to have a non-empty location)\n (Group(space + key + location_statement) ^ Group(if_statement) ^\n Group(map_statement)).leaveWhitespace() +\n left_bracket +\n Group(ZeroOrMore(Group(comment | assignment) | block) + space).leaveWhitespace() +\n right_bracket)\n\n script = OneOrMore(Group(comment | assignment) ^ block) + space + stringEnd\n script.parseWithTabs()\n\n def __init__(self, source):\n self.source = source\n\n def parse(self):\n \"\"\"Returns the parsed tree.\"\"\"\n return self.script.parseString(self.source)\n\n def as_list(self):\n \"\"\"Returns the parsed tree as a list.\"\"\"\n return self.parse().asList()\n\nclass RawNginxDumper(object):\n # pylint: disable=too-few-public-methods\n \"\"\"A class that dumps nginx configuration from the provided tree.\"\"\"\n def __init__(self, blocks):\n self.blocks = blocks\n\n def __iter__(self, blocks=None):\n \"\"\"Iterates the dumped nginx content.\"\"\"\n blocks = blocks or self.blocks\n for b0 in blocks:\n if isinstance(b0, str):\n yield b0\n continue\n b = copy.deepcopy(b0)\n if spacey(b[0]):\n yield b.pop(0) # indentation\n if not b:\n continue\n key, values = b.pop(0), b.pop(0)\n\n if isinstance(key, list):\n yield \"\".join(key) + '{'\n for parameter in values:\n for line in self.__iter__([parameter]): # negate \"for b0 in blocks\"\n yield line\n yield '}'\n else:\n if isinstance(key, str) and key.strip() == '#': # comment\n yield key + values\n else: # assignment\n gap = \"\"\n # Sometimes the parser has stuck some gap whitespace in 
here;\n # if so rotate it into gap\n if values and spacey(values):\n gap = values\n values = b.pop(0)\n yield key + gap + values + ';'\n\n def __str__(self):\n \"\"\"Return the parsed block as a string.\"\"\"\n return ''.join(self)\n\n\n# Shortcut functions to respect Python's serialization interface\n# (like pyyaml, picker or json)\n\ndef loads(source):\n \"\"\"Parses from a string.\n\n :param str souce: The string to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return UnspacedList(RawNginxParser(source).as_list())\n\n\ndef load(_file):\n \"\"\"Parses from a file.\n\n :param file _file: The file to parse\n :returns: The parsed tree\n :rtype: list\n\n \"\"\"\n return loads(_file.read())\n\n\ndef dumps(blocks):\n \"\"\"Dump to a string.\n\n :param UnspacedList block: The parsed tree\n :param int indentation: The number of spaces to indent\n :rtype: str\n\n \"\"\"\n return str(RawNginxDumper(blocks.spaced))\n\n\ndef dump(blocks, _file):\n \"\"\"Dump to a file.\n\n :param UnspacedList block: The parsed tree\n :param file _file: The file to dump to\n :param int indentation: The number of spaces to indent\n :rtype: NoneType\n\n \"\"\"\n return _file.write(dumps(blocks))\n\n\nspacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''\n\nclass UnspacedList(list):\n \"\"\"Wrap a list [of lists], making any whitespace entries magically invisible\"\"\"\n\n def __init__(self, list_source):\n # ensure our argument is not a generator, and duplicate any sublists\n self.spaced = copy.deepcopy(list(list_source))\n self.dirty = False\n\n # Turn self into a version of the source list that has spaces removed\n # and all sub-lists also UnspacedList()ed\n list.__init__(self, list_source)\n for i, entry in reversed(list(enumerate(self))):\n if isinstance(entry, list):\n sublist = UnspacedList(entry)\n list.__setitem__(self, i, sublist)\n self.spaced[i] = sublist.spaced\n elif spacey(entry):\n # don't delete comments\n if \"#\" not in self[:i]:\n list.__delitem__(self, i)\n\n def _coerce(self, inbound):\n \"\"\"\n Coerce some inbound object to be appropriately usable in this object\n\n :param inbound: string or None or list or UnspacedList\n :returns: (coerced UnspacedList or string or None, spaced equivalent)\n :rtype: tuple\n\n \"\"\"\n if not isinstance(inbound, list): # str or None\n return (inbound, inbound)\n else:\n if not hasattr(inbound, \"spaced\"):\n inbound = UnspacedList(inbound)\n return (inbound, inbound.spaced)\n\n\n def insert(self, i, x):\n item, spaced_item = self._coerce(x)\n self.spaced.insert(self._spaced_position(i), spaced_item)\n list.insert(self, i, item)\n self.dirty = True\n\n def append(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.append(spaced_item)\n list.append(self, item)\n self.dirty = True\n\n def extend(self, x):\n item, spaced_item = self._coerce(x)\n self.spaced.extend(spaced_item)\n list.extend(self, item)\n self.dirty = True\n\n def __add__(self, other):\n l = copy.deepcopy(self)\n l.extend(other)\n l.dirty = True\n return l\n\n def pop(self, _i=None):\n raise NotImplementedError(\"UnspacedList.pop() not yet implemented\")\n def remove(self, _):\n raise NotImplementedError(\"UnspacedList.remove() not yet implemented\")\n def reverse(self):\n raise NotImplementedError(\"UnspacedList.reverse() not yet implemented\")\n def sort(self, _cmp=None, _key=None, _Rev=None):\n raise NotImplementedError(\"UnspacedList.sort() not yet implemented\")\n def __setslice__(self, _i, _j, _newslice):\n raise NotImplementedError(\"Slice operations on 
UnspacedLists not yet implemented\")\n\n def __setitem__(self, i, value):\n if isinstance(i, slice):\n raise NotImplementedError(\"Slice operations on UnspacedLists not yet implemented\")\n item, spaced_item = self._coerce(value)\n self.spaced.__setitem__(self._spaced_position(i), spaced_item)\n list.__setitem__(self, i, item)\n self.dirty = True\n\n def __delitem__(self, i):\n self.spaced.__delitem__(self._spaced_position(i))\n list.__delitem__(self, i)\n self.dirty = True\n\n def __deepcopy__(self, memo):\n l = UnspacedList(self[:])\n l.spaced = copy.deepcopy(self.spaced, memo=memo)\n l.dirty = self.dirty\n return l\n\n def is_dirty(self):\n \"\"\"Recurse through the parse tree to figure out if any sublists are dirty\"\"\"\n if self.dirty:\n return True\n return any((isinstance(x, list) and x.is_dirty() for x in self))\n\n def _spaced_position(self, idx):\n \"Convert from indexes in the unspaced list to positions in the spaced one\"\n pos = spaces = 0\n # Normalize indexes like list[-1] etc, and save the result\n if idx < 0:\n idx = len(self) + idx\n if not 0 <= idx < len(self):\n raise IndexError(\"list index out of range\")\n idx0 = idx\n # Count the number of spaces in the spaced list before idx in the unspaced one\n while idx != -1:\n if spacey(self.spaced[pos]):\n spaces += 1\n else:\n idx -= 1\n pos += 1\n return idx0 + spaces\n", "path": "certbot-nginx/certbot_nginx/nginxparser.py"}]}
| 3,607 | 820 |
gh_patches_debug_14331
|
rasdani/github-patches
|
git_diff
|
facebookresearch__fairscale-795
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The Argument broadcast_fp16 is not working on OSS
## 🐛 Bug
The argument `broadcast_fp16` is not working on `OSS`.
## Command
## To Reproduce
Steps to reproduce the behavior:
1. Add one print line in fairscale's source code [here](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/optim/oss.py#L603)
```
for device in self.buckets.keys():
for dst_rank, bucket in self.buckets[device].items():
print(bucket.buffer.dtype, bucket.buffer.shape)
work_handles.append(
```
2. Run a test script as below:
```
import sys
import pdb
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
from fairscale.optim.oss import OSS
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
CUDA_DEVICE = [0, 1]
HIDDEN_SIZE = 8
class ToyModel(torch.nn.Module):
def __init__(self):
super(ToyModel, self).__init__()
self.net1 = torch.nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(HIDDEN_SIZE, 5)
def forward(self, x):
return self.net2(self.relu(self.net1(x)))
def main(rank, world_size):
dist.init_process_group(
backend='nccl',
init_method="tcp://localhost:29501",
rank=rank,
world_size=world_size)
device = CUDA_DEVICE[rank]
torch.manual_seed(rank + 1007)
torch.cuda.set_device(device)
torch.cuda.manual_seed(rank + 1007)
print('Rank: %d/%d Device: %s' % (rank, world_size, device))
model = ToyModel().to(device)
loss_fn = torch.nn.MSELoss()
# ddp_model = DDP(model, device_ids=[device])
# optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.1)
optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=0.1, broadcast_fp16=True)
ddp_model = ShardedDDP(model, optimizer)
inputs = torch.randn(2, HIDDEN_SIZE).to(device)
print('inputs', rank, inputs)
optimizer.zero_grad()
outputs = ddp_model(inputs)
labels = torch.randn(2, 5).to(device)
loss_fn(outputs, labels).backward()
optimizer.step()
dist.destroy_process_group()
if __name__ == "__main__":
WORLD_SIZE = len(CUDA_DEVICE)
mp.spawn(
main,
args=(WORLD_SIZE,),
nprocs=WORLD_SIZE,
join=True,
)
```
And the output is:
```
...
torch.float32 torch.Size([64])
torch.float32 torch.Size([53])
torch.float32 torch.Size([64])
torch.float32 torch.Size([53])
```
Note that we have already set the argument `broadcast_fp16` to true, but fairscale still broadcasts float32 tensors.
## Reason
The Tensor.to [(code here)](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) method is not an in-place operation.
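To illustrate the point (editor's sketch, not part of the original report): `torch.Tensor.to` returns a converted copy and leaves the receiver untouched, so the result has to be assigned back for the dtype change to take effect:
```python
import torch

t = torch.zeros(4, dtype=torch.float32)
t.to(torch.float16)      # returns a new tensor; t itself keeps its dtype
print(t.dtype)           # torch.float32
t = t.to(torch.float16)  # rebinding the result is what actually changes t
print(t.dtype)           # torch.float16
```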
## Possible Solution
Change [fairscale/nn/misc/param_bucket.py:35](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) to
```
self.buffer = self.buffer.to(device, dtype, non_blocking)
```
</issue>
<code>
[start of fairscale/nn/misc/param_bucket.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the BSD license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from typing import Any, Callable, List, Optional, Union
7
8 import torch
9
10
11 class Bucket:
12 """
13 Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors
14 """
15
16 def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:
17 self._params: List[torch.Tensor] = []
18 self._param_ids: List[int] = []
19 self._fill = 0
20
21 # The actual flat tensor
22 self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device)
23
24 def to( # type: ignore
25 self,
26 device: Optional[Union[int, torch.device]],
27 dtype: Optional[torch.dtype] = None,
28 non_blocking: bool = False,
29 keep_param_alignment: bool = True,
30 ) -> "ParamBucket":
31 """
32 Move the underlying buffer
33 """
34 assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it"
35 self.buffer.to(device, dtype, non_blocking)
36
37
38 class ParamBucket(Bucket):
39 """
40 Helper class to simplify the handling of parameter buckets
41 """
42
43 def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:
44 super().__init__(size, dtype, device)
45
46 def to( # type: ignore
47 self,
48 device: Optional[Union[int, torch.device]],
49 dtype: Optional[torch.dtype] = None,
50 non_blocking: bool = False,
51 keep_param_alignment: bool = True,
52 ) -> "ParamBucket":
53 """
54 Move the underlying buffer
55 """
56 super().to(device, dtype, non_blocking)
57
58 if keep_param_alignment:
59 self._reattach_params()
60
61 @torch.no_grad()
62 def add_param(self, param: torch.Tensor) -> None:
63 """
64 Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer
65 """
66
67 assert id(param) not in self._param_ids, "The same param cannot be checked in twice"
68
69 self._add_param_as_view(param)
70 self._params.append(param)
71 self._param_ids.append(id(param))
72
73 @torch.no_grad()
74 def _add_param_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:
75 assert self.buffer is not None
76 assert (
77 param.dtype == self.buffer.dtype
78 ), f"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}"
79 assert (
80 param.device == self.buffer.device
81 ), f"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}"
82
83 fill_next = self._fill + param.numel()
84 assert fill_next <= self.buffer.numel()
85
86 # Copy the current param value
87 if keep_existing_value:
88 self.buffer[self._fill : fill_next].copy_(param.data.flatten())
89 param.data = self.buffer[self._fill : fill_next].view_as(param.data)
90 self._fill = fill_next
91
92 @torch.no_grad()
93 def _reattach_params(self) -> None:
94 """
95 Given the parameters which have been registered previously, rebuild the whole bucket
96 """
97 assert len(self._params) > 0
98
99 self._fill = 0
100 for p in self._params:
101 self._add_param_as_view(p, keep_existing_value=False)
102
103
104 class GradBucket(Bucket):
105 """
106 Helper class to simplify the handling of gradient buckets
107 """
108
109 def __init__(self, size: int, dtype: torch.dtype, device: torch.device, destination: int) -> None:
110 super().__init__(size, dtype, device)
111
112 self._max_size = size
113 self._is_collapsed = False
114
115 self.params_checked_in = 0
116 self.destination = destination
117 self.sent = True
118 self.callback: Optional[Callable[[Any], None]] = None
119
120 def reset_checked_in(self) -> None:
121 """ Reset the counter of the parameter grads which have been checked in
122 """
123 self.params_checked_in = 0
124 self.sent = False
125
126 @property
127 def all_checked_in(self) -> bool:
128 """ Have all the expected gradient check-in happened ?"""
129 return len(self._params) == self.params_checked_in
130
131 def can_add_grad_view(self, param: torch.Tensor) -> bool:
132 """ Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ?
133 """
134 return self._fill + param.numel() < self._max_size and id(param) not in self._param_ids
135
136 def to( # type: ignore
137 self,
138 device: Optional[Union[int, torch.device]],
139 dtype: Optional[torch.dtype] = None,
140 non_blocking: bool = False,
141 keep_param_alignment: bool = True,
142 ) -> "GradBucket":
143 """
144 Move the underlying buffer
145 """
146 if self._is_collapsed:
147 self.rebuild()
148
149 super().to(device, dtype, non_blocking)
150
151 if keep_param_alignment:
152 self._reattach_grads()
153
154 def zero(self) -> None:
155 """
156 Set all the grads to zero
157 """
158 self.buffer.fill_(0.0)
159
160 @torch.no_grad()
161 def add_grad(self, param: torch.Tensor) -> None:
162 """
163 Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer
164 """
165
166 assert id(param) not in self._param_ids, "The same gradients cannot be checked in twice"
167
168 if param.grad is None:
169 param.grad = torch.zeros_like(param)
170
171 self._add_grad_as_view(param)
172 self._params.append(param)
173 self._param_ids.append(id(param))
174
175 @torch.no_grad()
176 def collapse(self) -> None:
177 """
178 Release the buffer from memory. The bucket will need to be rebuilt before use
179 """
180 if not self._is_collapsed:
181 for p in self._params:
182 assert p.grad is not None
183 p.grad.detach_()
184 p.grad = None
185
186 self.buffer = torch.zeros(0, dtype=self.buffer.dtype, device=self.buffer.device)
187 self._fill = 0
188 self.params_checked_in = 0
189 self._is_collapsed = True
190
191 @torch.no_grad()
192 def rebuild(self) -> None:
193 """
194 Given the parameter gradients which have been registered previously, rebuild the whole bucket
195 """
196 assert len(self._params) > 0
197
198 if self._is_collapsed:
199 self.buffer = torch.zeros(self._max_size, dtype=self._params[0].dtype, device=self._params[0].device)
200
201 for p in self._params:
202 self._add_grad_as_view(p)
203
204 self._is_collapsed = False
205
206 @torch.no_grad()
207 def shrink(self) -> None:
208 """
209 Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory
210 """
211 assert self.buffer.numel() > 0, "Cannot shrink a collapsed bucket, please rebuild"
212
213 self.buffer = self.buffer.resize_(self._fill).clone()
214 self._fill = 0
215 for p in self._params:
216 self._add_grad_as_view(p)
217
218 self._max_size = self._fill
219
220 @torch.no_grad()
221 def _reattach_grads(self) -> None:
222 """
223 Given the parameters gradients which have been registered previously, rebuild the whole bucket
224 """
225 assert len(self._params) > 0
226
227 self._fill = 0
228 for p in self._params:
229 self._add_grad_as_view(p, keep_existing_value=False)
230
231 @torch.no_grad()
232 def _add_grad_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:
233 assert self.buffer.numel() > 0, "Cannot add a gradient to a collapsed bucket, please rebuild"
234 assert param.dtype == self.buffer.dtype
235 assert param.device == self.buffer.device
236
237 fill_next = self._fill + param.numel()
238 assert fill_next <= self.buffer.numel()
239
240 # Copy the current grad value, if any
241 if param.grad is not None:
242 # keep param.grad in place
243 if keep_existing_value:
244 self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten())
245 param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data)
246 else:
247 param.grad = self.buffer[self._fill : fill_next].view_as(param.data)
248 self._fill = fill_next
249
[end of fairscale/nn/misc/param_bucket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/fairscale/nn/misc/param_bucket.py b/fairscale/nn/misc/param_bucket.py
--- a/fairscale/nn/misc/param_bucket.py
+++ b/fairscale/nn/misc/param_bucket.py
@@ -32,7 +32,7 @@
Move the underlying buffer
"""
assert self.buffer is not None, "Cannot move a collapsed bucket, please rebuild it"
- self.buffer.to(device, dtype, non_blocking)
+ self.buffer = self.buffer.to(device, dtype, non_blocking)
class ParamBucket(Bucket):
@@ -98,6 +98,8 @@
self._fill = 0
for p in self._params:
+ if p.dtype != self.buffer.dtype:
+ p.data = p.data.to(self.buffer.dtype)
self._add_param_as_view(p, keep_existing_value=False)
|
{"golden_diff": "diff --git a/fairscale/nn/misc/param_bucket.py b/fairscale/nn/misc/param_bucket.py\n--- a/fairscale/nn/misc/param_bucket.py\n+++ b/fairscale/nn/misc/param_bucket.py\n@@ -32,7 +32,7 @@\n Move the underlying buffer\n \"\"\"\n assert self.buffer is not None, \"Cannot move a collapsed bucket, please rebuild it\"\n- self.buffer.to(device, dtype, non_blocking)\n+ self.buffer = self.buffer.to(device, dtype, non_blocking)\n \n \n class ParamBucket(Bucket):\n@@ -98,6 +98,8 @@\n \n self._fill = 0\n for p in self._params:\n+ if p.dtype != self.buffer.dtype:\n+ p.data = p.data.to(self.buffer.dtype)\n self._add_param_as_view(p, keep_existing_value=False)\n", "issue": "The Argument broadcast_fp16 is not working on OSS\n## \ud83d\udc1b Bug\r\n\r\nThe Argument `broadcast_fp16` is not working on `OSS`.\r\n\r\n## Command\r\n\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Add one print line in fairscale's source code [here](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/optim/oss.py#L603)\r\n```\r\n for device in self.buckets.keys():\r\n for dst_rank, bucket in self.buckets[device].items():\r\n print(bucket.buffer.dtype, bucket.buffer.shape)\r\n work_handles.append(\r\n```\r\n2. Run a test script as below:\r\n```\r\nimport sys\r\nimport pdb\r\nimport torch\r\nimport torch.distributed as dist\r\nimport torch.multiprocessing as mp\r\nfrom torch.nn.parallel import DistributedDataParallel as DDP\r\n\r\nfrom fairscale.optim.oss import OSS\r\nfrom fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\r\n\r\nCUDA_DEVICE = [0, 1]\r\nHIDDEN_SIZE = 8\r\n\r\nclass ToyModel(torch.nn.Module):\r\n def __init__(self):\r\n super(ToyModel, self).__init__()\r\n self.net1 = torch.nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)\r\n self.relu = torch.nn.ReLU()\r\n self.net2 = torch.nn.Linear(HIDDEN_SIZE, 5)\r\n\r\n def forward(self, x):\r\n return self.net2(self.relu(self.net1(x)))\r\n\r\n\r\ndef main(rank, world_size):\r\n\r\n dist.init_process_group(\r\n backend='nccl',\r\n init_method=\"tcp://localhost:29501\",\r\n rank=rank,\r\n world_size=world_size)\r\n device = CUDA_DEVICE[rank]\r\n torch.manual_seed(rank + 1007)\r\n torch.cuda.set_device(device)\r\n torch.cuda.manual_seed(rank + 1007)\r\n print('Rank: %d/%d Device: %s' % (rank, world_size, device))\r\n\r\n\r\n model = ToyModel().to(device)\r\n loss_fn = torch.nn.MSELoss()\r\n # ddp_model = DDP(model, device_ids=[device])\r\n # optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.1)\r\n optimizer = OSS(params=model.parameters(), optim=torch.optim.SGD, lr=0.1, broadcast_fp16=True)\r\n ddp_model = ShardedDDP(model, optimizer)\r\n\r\n inputs = torch.randn(2, HIDDEN_SIZE).to(device)\r\n print('inputs', rank, inputs)\r\n\r\n optimizer.zero_grad()\r\n outputs = ddp_model(inputs)\r\n labels = torch.randn(2, 5).to(device)\r\n loss_fn(outputs, labels).backward()\r\n optimizer.step()\r\n \r\n\r\n dist.destroy_process_group()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n WORLD_SIZE = len(CUDA_DEVICE)\r\n mp.spawn(\r\n main,\r\n args=(WORLD_SIZE,),\r\n nprocs=WORLD_SIZE,\r\n join=True,\r\n )\r\n\r\n```\r\n\r\nAnd the output is:\r\n```\r\n...\r\ntorch.float32 torch.Size([64])\r\ntorch.float32 torch.Size([53])\r\ntorch.float32 torch.Size([64])\r\ntorch.float32 torch.Size([53])\r\n```\r\n\r\nNote that we have already set the argument `broadcast_fp16` to be true, but fairscale still boardcasts float32 tensor.\r\n\r\n## Reason\r\n\r\nThe Tensor.to [(code 
here)](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) method is not in-place operation.\r\n\r\n## Posible Solution\r\nChange [fairscale/nn/misc/param_bucket.py:35](https://github.com/facebookresearch/fairscale/blob/31e3645371e1462c0fb119a7a54642e327d0d4bf/fairscale/nn/misc/param_bucket.py#L35) to \r\n```\r\nself.buffer = self.buffer.to(device, dtype, non_blocking)\r\n```\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom typing import Any, Callable, List, Optional, Union\n\nimport torch\n\n\nclass Bucket:\n \"\"\"\n Helper class to simplify the handling of buckets, which unify the underlying storage of multiple tensors\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:\n self._params: List[torch.Tensor] = []\n self._param_ids: List[int] = []\n self._fill = 0\n\n # The actual flat tensor\n self.buffer: torch.Tensor = torch.zeros(size, dtype=dtype, device=device)\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"ParamBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n assert self.buffer is not None, \"Cannot move a collapsed bucket, please rebuild it\"\n self.buffer.to(device, dtype, non_blocking)\n\n\nclass ParamBucket(Bucket):\n \"\"\"\n Helper class to simplify the handling of parameter buckets\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device) -> None:\n super().__init__(size, dtype, device)\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"ParamBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n super().to(device, dtype, non_blocking)\n\n if keep_param_alignment:\n self._reattach_params()\n\n @torch.no_grad()\n def add_param(self, param: torch.Tensor) -> None:\n \"\"\"\n Add a new parameter gradient to the bucket. 
Param.grad becomes a view of this bucket buffer\n \"\"\"\n\n assert id(param) not in self._param_ids, \"The same param cannot be checked in twice\"\n\n self._add_param_as_view(param)\n self._params.append(param)\n self._param_ids.append(id(param))\n\n @torch.no_grad()\n def _add_param_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:\n assert self.buffer is not None\n assert (\n param.dtype == self.buffer.dtype\n ), f\"Different types for the bucket and the param, cannot proceed: {param.dtype} - {self.buffer.dtype}\"\n assert (\n param.device == self.buffer.device\n ), f\"Different devices for the bucket and the param, cannot proceed: {param.device} - {self.buffer.device}\"\n\n fill_next = self._fill + param.numel()\n assert fill_next <= self.buffer.numel()\n\n # Copy the current param value\n if keep_existing_value:\n self.buffer[self._fill : fill_next].copy_(param.data.flatten())\n param.data = self.buffer[self._fill : fill_next].view_as(param.data)\n self._fill = fill_next\n\n @torch.no_grad()\n def _reattach_params(self) -> None:\n \"\"\"\n Given the parameters which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n self._fill = 0\n for p in self._params:\n self._add_param_as_view(p, keep_existing_value=False)\n\n\nclass GradBucket(Bucket):\n \"\"\"\n Helper class to simplify the handling of gradient buckets\n \"\"\"\n\n def __init__(self, size: int, dtype: torch.dtype, device: torch.device, destination: int) -> None:\n super().__init__(size, dtype, device)\n\n self._max_size = size\n self._is_collapsed = False\n\n self.params_checked_in = 0\n self.destination = destination\n self.sent = True\n self.callback: Optional[Callable[[Any], None]] = None\n\n def reset_checked_in(self) -> None:\n \"\"\" Reset the counter of the parameter grads which have been checked in\n \"\"\"\n self.params_checked_in = 0\n self.sent = False\n\n @property\n def all_checked_in(self) -> bool:\n \"\"\" Have all the expected gradient check-in happened ?\"\"\"\n return len(self._params) == self.params_checked_in\n\n def can_add_grad_view(self, param: torch.Tensor) -> bool:\n \"\"\" Is there enough room in the bucket to add this parameter gradient, and is this param not already checked in ?\n \"\"\"\n return self._fill + param.numel() < self._max_size and id(param) not in self._param_ids\n\n def to( # type: ignore\n self,\n device: Optional[Union[int, torch.device]],\n dtype: Optional[torch.dtype] = None,\n non_blocking: bool = False,\n keep_param_alignment: bool = True,\n ) -> \"GradBucket\":\n \"\"\"\n Move the underlying buffer\n \"\"\"\n if self._is_collapsed:\n self.rebuild()\n\n super().to(device, dtype, non_blocking)\n\n if keep_param_alignment:\n self._reattach_grads()\n\n def zero(self) -> None:\n \"\"\"\n Set all the grads to zero\n \"\"\"\n self.buffer.fill_(0.0)\n\n @torch.no_grad()\n def add_grad(self, param: torch.Tensor) -> None:\n \"\"\"\n Add a new parameter gradient to the bucket. Param.grad becomes a view of this bucket buffer\n \"\"\"\n\n assert id(param) not in self._param_ids, \"The same gradients cannot be checked in twice\"\n\n if param.grad is None:\n param.grad = torch.zeros_like(param)\n\n self._add_grad_as_view(param)\n self._params.append(param)\n self._param_ids.append(id(param))\n\n @torch.no_grad()\n def collapse(self) -> None:\n \"\"\"\n Release the buffer from memory. 
The bucket will need to be rebuilt before use\n \"\"\"\n if not self._is_collapsed:\n for p in self._params:\n assert p.grad is not None\n p.grad.detach_()\n p.grad = None\n\n self.buffer = torch.zeros(0, dtype=self.buffer.dtype, device=self.buffer.device)\n self._fill = 0\n self.params_checked_in = 0\n self._is_collapsed = True\n\n @torch.no_grad()\n def rebuild(self) -> None:\n \"\"\"\n Given the parameter gradients which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n if self._is_collapsed:\n self.buffer = torch.zeros(self._max_size, dtype=self._params[0].dtype, device=self._params[0].device)\n\n for p in self._params:\n self._add_grad_as_view(p)\n\n self._is_collapsed = False\n\n @torch.no_grad()\n def shrink(self) -> None:\n \"\"\"\n Shrink the buffer to the size of the parameter gradients currently checked in, release the extra memory\n \"\"\"\n assert self.buffer.numel() > 0, \"Cannot shrink a collapsed bucket, please rebuild\"\n\n self.buffer = self.buffer.resize_(self._fill).clone()\n self._fill = 0\n for p in self._params:\n self._add_grad_as_view(p)\n\n self._max_size = self._fill\n\n @torch.no_grad()\n def _reattach_grads(self) -> None:\n \"\"\"\n Given the parameters gradients which have been registered previously, rebuild the whole bucket\n \"\"\"\n assert len(self._params) > 0\n\n self._fill = 0\n for p in self._params:\n self._add_grad_as_view(p, keep_existing_value=False)\n\n @torch.no_grad()\n def _add_grad_as_view(self, param: torch.Tensor, keep_existing_value: bool = True) -> None:\n assert self.buffer.numel() > 0, \"Cannot add a gradient to a collapsed bucket, please rebuild\"\n assert param.dtype == self.buffer.dtype\n assert param.device == self.buffer.device\n\n fill_next = self._fill + param.numel()\n assert fill_next <= self.buffer.numel()\n\n # Copy the current grad value, if any\n if param.grad is not None:\n # keep param.grad in place\n if keep_existing_value:\n self.buffer[self._fill : fill_next].copy_(param.grad.data.flatten())\n param.grad.data = self.buffer[self._fill : fill_next].view_as(param.data)\n else:\n param.grad = self.buffer[self._fill : fill_next].view_as(param.data)\n self._fill = fill_next\n", "path": "fairscale/nn/misc/param_bucket.py"}]}
| 4,050 | 190 |
gh_patches_debug_21047
|
rasdani/github-patches
|
git_diff
|
dask__distributed-3387
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
When --dashboard-prefix is used, the root location is always redirected to /status without adding the prefix
The problem arises when using --dashboard-prefix: opening http://DASHBOARD[:PORT]/PREFIX/ redirects to http://DASHBOARD[:PORT]/status instead of http://DASHBOARD[:PORT]/PREFIX/status.
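A minimal sketch of one possible fix (an editor's illustration; the project's actual change may differ): build the redirect target from the configured prefix instead of hard-coding `/status`, e.g. with `urllib.parse.urljoin`:
```python
from urllib.parse import urljoin

prefix = "/PREFIX"  # hypothetical value passed via --dashboard-prefix
redirect_url = urljoin(prefix.rstrip("/") + "/", "status")
print(redirect_url)  # /PREFIX/status rather than the hard-coded /status
```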
</issue>
<code>
[start of distributed/dashboard/core.py]
1 from distutils.version import LooseVersion
2 import os
3 import warnings
4
5 import bokeh
6 from bokeh.server.server import Server
7 from tornado import web
8
9
10 if LooseVersion(bokeh.__version__) < LooseVersion("0.13.0"):
11 warnings.warn(
12 "\nDask needs bokeh >= 0.13.0 for the dashboard."
13 "\nContinuing without the dashboard."
14 )
15 raise ImportError("Dask needs bokeh >= 0.13.0")
16
17
18 class BokehServer(object):
19 server_kwargs = {}
20
21 def listen(self, addr):
22 if self.server:
23 return
24 if isinstance(addr, tuple):
25 ip, port = addr
26 else:
27 port = addr
28 ip = None
29 for i in range(5):
30 try:
31 server_kwargs = dict(
32 port=port,
33 address=ip,
34 check_unused_sessions_milliseconds=500,
35 allow_websocket_origin=["*"],
36 use_index=False,
37 extra_patterns=[(r"/", web.RedirectHandler, {"url": "/status"})],
38 )
39 server_kwargs.update(self.server_kwargs)
40 self.server = Server(self.apps, **server_kwargs)
41 self.server.start()
42
43 handlers = [
44 (
45 self.prefix + r"/statics/(.*)",
46 web.StaticFileHandler,
47 {"path": os.path.join(os.path.dirname(__file__), "static")},
48 )
49 ]
50
51 self.server._tornado.add_handlers(r".*", handlers)
52
53 return
54 except (SystemExit, EnvironmentError) as exc:
55 if port != 0:
56 if "already in use" in str(
57 exc
58 ) or "Only one usage of" in str( # Unix/Mac
59 exc
60 ): # Windows
61 msg = (
62 "Port %d is already in use. "
63 "\nPerhaps you already have a cluster running?"
64 "\nHosting the diagnostics dashboard on a random port instead."
65 % port
66 )
67 else:
68 msg = (
69 "Failed to start diagnostics server on port %d. " % port
70 + str(exc)
71 )
72 warnings.warn("\n" + msg)
73 port = 0
74 if i == 4:
75 raise
76
77 @property
78 def port(self):
79 return (
80 self.server.port
81 or list(self.server._http._sockets.values())[0].getsockname()[1]
82 )
83
84 def stop(self):
85 for context in self.server._tornado._applications.values():
86 context.run_unload_hook()
87
88 self.server._tornado._stats_job.stop()
89 self.server._tornado._cleanup_job.stop()
90 if self.server._tornado._ping_job is not None:
91 self.server._tornado._ping_job.stop()
92
93 # https://github.com/bokeh/bokeh/issues/5494
94 if LooseVersion(bokeh.__version__) >= "0.12.4":
95 self.server.stop()
96
[end of distributed/dashboard/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py
--- a/distributed/dashboard/core.py
+++ b/distributed/dashboard/core.py
@@ -5,6 +5,7 @@
import bokeh
from bokeh.server.server import Server
from tornado import web
+from urllib.parse import urljoin
if LooseVersion(bokeh.__version__) < LooseVersion("0.13.0"):
@@ -34,7 +35,13 @@
check_unused_sessions_milliseconds=500,
allow_websocket_origin=["*"],
use_index=False,
- extra_patterns=[(r"/", web.RedirectHandler, {"url": "/status"})],
+ extra_patterns=[
+ (
+ r"/",
+ web.RedirectHandler,
+ {"url": urljoin(self.prefix.rstrip("/") + "/", r"status")},
+ )
+ ],
)
server_kwargs.update(self.server_kwargs)
self.server = Server(self.apps, **server_kwargs)
|
{"golden_diff": "diff --git a/distributed/dashboard/core.py b/distributed/dashboard/core.py\n--- a/distributed/dashboard/core.py\n+++ b/distributed/dashboard/core.py\n@@ -5,6 +5,7 @@\n import bokeh\n from bokeh.server.server import Server\n from tornado import web\n+from urllib.parse import urljoin\n \n \n if LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n@@ -34,7 +35,13 @@\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n- extra_patterns=[(r\"/\", web.RedirectHandler, {\"url\": \"/status\"})],\n+ extra_patterns=[\n+ (\n+ r\"/\",\n+ web.RedirectHandler,\n+ {\"url\": urljoin(self.prefix.rstrip(\"/\") + \"/\", r\"status\")},\n+ )\n+ ],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n", "issue": "When --dashboard-prefix is used root location is always redirected to /status without adding the prefix\nThe problem arises when using --dashboard-prefix, using http://DASHBOARD[:PORT]/PREFIX/ redirects to http://DASHBOARD[:PORT]/status instead of http://DASHBOARD[:PORT]/PREFIX/status.\n", "before_files": [{"content": "from distutils.version import LooseVersion\nimport os\nimport warnings\n\nimport bokeh\nfrom bokeh.server.server import Server\nfrom tornado import web\n\n\nif LooseVersion(bokeh.__version__) < LooseVersion(\"0.13.0\"):\n warnings.warn(\n \"\\nDask needs bokeh >= 0.13.0 for the dashboard.\"\n \"\\nContinuing without the dashboard.\"\n )\n raise ImportError(\"Dask needs bokeh >= 0.13.0\")\n\n\nclass BokehServer(object):\n server_kwargs = {}\n\n def listen(self, addr):\n if self.server:\n return\n if isinstance(addr, tuple):\n ip, port = addr\n else:\n port = addr\n ip = None\n for i in range(5):\n try:\n server_kwargs = dict(\n port=port,\n address=ip,\n check_unused_sessions_milliseconds=500,\n allow_websocket_origin=[\"*\"],\n use_index=False,\n extra_patterns=[(r\"/\", web.RedirectHandler, {\"url\": \"/status\"})],\n )\n server_kwargs.update(self.server_kwargs)\n self.server = Server(self.apps, **server_kwargs)\n self.server.start()\n\n handlers = [\n (\n self.prefix + r\"/statics/(.*)\",\n web.StaticFileHandler,\n {\"path\": os.path.join(os.path.dirname(__file__), \"static\")},\n )\n ]\n\n self.server._tornado.add_handlers(r\".*\", handlers)\n\n return\n except (SystemExit, EnvironmentError) as exc:\n if port != 0:\n if \"already in use\" in str(\n exc\n ) or \"Only one usage of\" in str( # Unix/Mac\n exc\n ): # Windows\n msg = (\n \"Port %d is already in use. \"\n \"\\nPerhaps you already have a cluster running?\"\n \"\\nHosting the diagnostics dashboard on a random port instead.\"\n % port\n )\n else:\n msg = (\n \"Failed to start diagnostics server on port %d. \" % port\n + str(exc)\n )\n warnings.warn(\"\\n\" + msg)\n port = 0\n if i == 4:\n raise\n\n @property\n def port(self):\n return (\n self.server.port\n or list(self.server._http._sockets.values())[0].getsockname()[1]\n )\n\n def stop(self):\n for context in self.server._tornado._applications.values():\n context.run_unload_hook()\n\n self.server._tornado._stats_job.stop()\n self.server._tornado._cleanup_job.stop()\n if self.server._tornado._ping_job is not None:\n self.server._tornado._ping_job.stop()\n\n # https://github.com/bokeh/bokeh/issues/5494\n if LooseVersion(bokeh.__version__) >= \"0.12.4\":\n self.server.stop()\n", "path": "distributed/dashboard/core.py"}]}
| 1,414 | 209 |
gh_patches_debug_40460
|
rasdani/github-patches
|
git_diff
|
vacanza__python-holidays-371
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong workday info for country HR
Today (Oct. 8, 2020) my alarm clock automation did not go off because my workday sensor gave the wrong info (no workday). This day used to be a holiday in Croatia, but it no longer is.
binary_sensor:
- platform: workday
country: HR
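
A quick sanity check against the library itself (editor's sketch, assuming the `holidays` package is installed): Independence Day was removed from the public-holiday list starting in 2020, so an updated calendar should report Oct. 8 as an ordinary workday from 2020 on:
```python
from datetime import date
import holidays

hr = holidays.HR()
print(date(2019, 10, 8) in hr)  # True  - still a public holiday in 2019
print(date(2020, 10, 8) in hr)  # False - expected to be a workday from 2020 on
```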

</issue>
<code>
[start of holidays/countries/croatia.py]
1 # -*- coding: utf-8 -*-
2
3 # python-holidays
4 # ---------------
5 # A fast, efficient Python library for generating country, province and state
6 # specific sets of holidays on the fly. It aims to make determining whether a
7 # specific date is a holiday as fast and flexible as possible.
8 #
9 # Author: ryanss <[email protected]> (c) 2014-2017
10 # dr-prodigy <[email protected]> (c) 2017-2020
11 # Website: https://github.com/dr-prodigy/python-holidays
12 # License: MIT (see LICENSE file)
13
14 from datetime import date
15
16 from dateutil.easter import easter
17 from dateutil.relativedelta import relativedelta as rd
18
19 from holidays.constants import JAN, MAY, JUN, AUG, OCT, \
20 NOV, DEC
21 from holidays.holiday_base import HolidayBase
22
23
24 class Croatia(HolidayBase):
25
26 # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia
27
28 def __init__(self, **kwargs):
29 self.country = "HR"
30 HolidayBase.__init__(self, **kwargs)
31
32 def _populate(self, year):
33 # New years
34 self[date(year, JAN, 1)] = "Nova Godina"
35 # Epiphany
36 self[date(year, JAN, 6)] = "Sveta tri kralja"
37 easter_date = easter(year)
38
39 # Easter
40 self[easter_date] = "Uskrs"
41 # Easter Monday
42 self[easter_date + rd(days=1)] = "Uskršnji ponedjeljak"
43
44 # Corpus Christi
45 self[easter_date + rd(days=60)] = "Tijelovo"
46
47 # International Workers' Day
48 self[date(year, MAY, 1)] = "Međunarodni praznik rada"
49
50 if year >= 2020:
51 # Statehood day
52 self[date(year, MAY, 30)] = "Dan državnosti"
53
54 # Anti-fascist struggle day
55 self[date(year, JUN, 22)] = "Dan antifašističke borbe"
56
57 if year < 2020:
58 # Statehood day
59 self[date(year, JUN, 25)] = "Dan državnosti"
60
61 # Victory and Homeland Thanksgiving Day
62 self[date(year, AUG, 5)] = "Dan pobjede i domovinske zahvalnosti"
63
64 # Assumption of Mary
65 self[date(year, AUG, 15)] = "Velika Gospa"
66
67 if year < 2020:
68 # Independence Day
69 self[date(year, OCT, 8)] = "Dan neovisnosti"
70
71 # All Saints' Day
72 self[date(year, NOV, 1)] = "Dan svih svetih"
73
74 if year >= 2020:
75 # Memorial day
76 self[date(year, NOV, 18)] =\
77 "Dan sjećanja na žrtve Domovinskog rata"
78
79 # Christmas day
80 self[date(year, DEC, 25)] = "Božić"
81
82 # St. Stephen's day
83 self[date(year, DEC, 26)] = "Sveti Stjepan"
84
85
86 class HR(Croatia):
87 pass
88
89
90 class HRV(Croatia):
91 pass
92
[end of holidays/countries/croatia.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/holidays/countries/croatia.py b/holidays/countries/croatia.py
--- a/holidays/countries/croatia.py
+++ b/holidays/countries/croatia.py
@@ -11,18 +11,18 @@
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
-from datetime import date
+from datetime import date, timedelta
from dateutil.easter import easter
-from dateutil.relativedelta import relativedelta as rd
-from holidays.constants import JAN, MAY, JUN, AUG, OCT, \
- NOV, DEC
+from holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC
from holidays.holiday_base import HolidayBase
class Croatia(HolidayBase):
+ # Updated with act 022-03 / 19-01 / 219 of 14 November 2019
+ # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html
# https://en.wikipedia.org/wiki/Public_holidays_in_Croatia
def __init__(self, **kwargs):
@@ -32,6 +32,7 @@
def _populate(self, year):
# New years
self[date(year, JAN, 1)] = "Nova Godina"
+
# Epiphany
self[date(year, JAN, 6)] = "Sveta tri kralja"
easter_date = easter(year)
@@ -39,23 +40,23 @@
# Easter
self[easter_date] = "Uskrs"
# Easter Monday
- self[easter_date + rd(days=1)] = "Uskršnji ponedjeljak"
+ self[easter_date + timedelta(days=1)] = "Uskrsni ponedjeljak"
# Corpus Christi
- self[easter_date + rd(days=60)] = "Tijelovo"
+ self[easter_date + timedelta(days=60)] = "Tijelovo"
# International Workers' Day
self[date(year, MAY, 1)] = "Međunarodni praznik rada"
+ # Statehood day (new)
if year >= 2020:
- # Statehood day
self[date(year, MAY, 30)] = "Dan državnosti"
# Anti-fascist struggle day
self[date(year, JUN, 22)] = "Dan antifašističke borbe"
+ # Statehood day (old)
if year < 2020:
- # Statehood day
self[date(year, JUN, 25)] = "Dan državnosti"
# Victory and Homeland Thanksgiving Day
@@ -64,17 +65,16 @@
# Assumption of Mary
self[date(year, AUG, 15)] = "Velika Gospa"
+ # Independence Day (old)
if year < 2020:
- # Independence Day
self[date(year, OCT, 8)] = "Dan neovisnosti"
# All Saints' Day
- self[date(year, NOV, 1)] = "Dan svih svetih"
+ self[date(year, NOV, 1)] = "Svi sveti"
if year >= 2020:
# Memorial day
- self[date(year, NOV, 18)] =\
- "Dan sjećanja na žrtve Domovinskog rata"
+ self[date(year, NOV, 18)] = "Dan sjećanja"
# Christmas day
self[date(year, DEC, 25)] = "Božić"
|
{"golden_diff": "diff --git a/holidays/countries/croatia.py b/holidays/countries/croatia.py\n--- a/holidays/countries/croatia.py\n+++ b/holidays/countries/croatia.py\n@@ -11,18 +11,18 @@\n # Website: https://github.com/dr-prodigy/python-holidays\n # License: MIT (see LICENSE file)\n \n-from datetime import date\n+from datetime import date, timedelta\n \n from dateutil.easter import easter\n-from dateutil.relativedelta import relativedelta as rd\n \n-from holidays.constants import JAN, MAY, JUN, AUG, OCT, \\\n- NOV, DEC\n+from holidays.constants import JAN, MAY, JUN, AUG, OCT, NOV, DEC\n from holidays.holiday_base import HolidayBase\n \n \n class Croatia(HolidayBase):\n \n+ # Updated with act 022-03 / 19-01 / 219 of 14 November 2019\n+ # https://narodne-novine.nn.hr/clanci/sluzbeni/2019_11_110_2212.html\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n \n def __init__(self, **kwargs):\n@@ -32,6 +32,7 @@\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n+\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n@@ -39,23 +40,23 @@\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n- self[easter_date + rd(days=1)] = \"Uskr\u0161nji ponedjeljak\"\n+ self[easter_date + timedelta(days=1)] = \"Uskrsni ponedjeljak\"\n \n # Corpus Christi\n- self[easter_date + rd(days=60)] = \"Tijelovo\"\n+ self[easter_date + timedelta(days=60)] = \"Tijelovo\"\n \n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n \n+ # Statehood day (new)\n if year >= 2020:\n- # Statehood day\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n \n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n \n+ # Statehood day (old)\n if year < 2020:\n- # Statehood day\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n \n # Victory and Homeland Thanksgiving Day\n@@ -64,17 +65,16 @@\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n \n+ # Independence Day (old)\n if year < 2020:\n- # Independence Day\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n \n # All Saints' Day\n- self[date(year, NOV, 1)] = \"Dan svih svetih\"\n+ self[date(year, NOV, 1)] = \"Svi sveti\"\n \n if year >= 2020:\n # Memorial day\n- self[date(year, NOV, 18)] =\\\n- \"Dan sje\u0107anja na \u017ertve Domovinskog rata\"\n+ self[date(year, NOV, 18)] = \"Dan sje\u0107anja\"\n \n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n", "issue": "Wrong workday info for country HR\nToday (Oct. 8, 2020) my alarmclock automation did not go off, because my workday sensor gave the wrong info (no workday). This day used to be a holiday in Croatia, but is not anymore.\r\n\r\nbinary_sensor:\r\n - platform: workday\r\n country: HR\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# python-holidays\n# ---------------\n# A fast, efficient Python library for generating country, province and state\n# specific sets of holidays on the fly. 
It aims to make determining whether a\n# specific date is a holiday as fast and flexible as possible.\n#\n# Author: ryanss <[email protected]> (c) 2014-2017\n# dr-prodigy <[email protected]> (c) 2017-2020\n# Website: https://github.com/dr-prodigy/python-holidays\n# License: MIT (see LICENSE file)\n\nfrom datetime import date\n\nfrom dateutil.easter import easter\nfrom dateutil.relativedelta import relativedelta as rd\n\nfrom holidays.constants import JAN, MAY, JUN, AUG, OCT, \\\n NOV, DEC\nfrom holidays.holiday_base import HolidayBase\n\n\nclass Croatia(HolidayBase):\n\n # https://en.wikipedia.org/wiki/Public_holidays_in_Croatia\n\n def __init__(self, **kwargs):\n self.country = \"HR\"\n HolidayBase.__init__(self, **kwargs)\n\n def _populate(self, year):\n # New years\n self[date(year, JAN, 1)] = \"Nova Godina\"\n # Epiphany\n self[date(year, JAN, 6)] = \"Sveta tri kralja\"\n easter_date = easter(year)\n\n # Easter\n self[easter_date] = \"Uskrs\"\n # Easter Monday\n self[easter_date + rd(days=1)] = \"Uskr\u0161nji ponedjeljak\"\n\n # Corpus Christi\n self[easter_date + rd(days=60)] = \"Tijelovo\"\n\n # International Workers' Day\n self[date(year, MAY, 1)] = \"Me\u0111unarodni praznik rada\"\n\n if year >= 2020:\n # Statehood day\n self[date(year, MAY, 30)] = \"Dan dr\u017eavnosti\"\n\n # Anti-fascist struggle day\n self[date(year, JUN, 22)] = \"Dan antifa\u0161isti\u010dke borbe\"\n\n if year < 2020:\n # Statehood day\n self[date(year, JUN, 25)] = \"Dan dr\u017eavnosti\"\n\n # Victory and Homeland Thanksgiving Day\n self[date(year, AUG, 5)] = \"Dan pobjede i domovinske zahvalnosti\"\n\n # Assumption of Mary\n self[date(year, AUG, 15)] = \"Velika Gospa\"\n\n if year < 2020:\n # Independence Day\n self[date(year, OCT, 8)] = \"Dan neovisnosti\"\n\n # All Saints' Day\n self[date(year, NOV, 1)] = \"Dan svih svetih\"\n\n if year >= 2020:\n # Memorial day\n self[date(year, NOV, 18)] =\\\n \"Dan sje\u0107anja na \u017ertve Domovinskog rata\"\n\n # Christmas day\n self[date(year, DEC, 25)] = \"Bo\u017ei\u0107\"\n\n # St. Stephen's day\n self[date(year, DEC, 26)] = \"Sveti Stjepan\"\n\n\nclass HR(Croatia):\n pass\n\n\nclass HRV(Croatia):\n pass\n", "path": "holidays/countries/croatia.py"}]}
| 1,650 | 883 |
gh_patches_debug_7385
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-138
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix linting issue preventing us from updating to flake8 4.7.1
https://github.com/mozilla/bugbug/pull/135 is failing because:
> ./bugbug/model.py:102:21: E117 over-indented
We should fix it, and then we can merge https://github.com/mozilla/bugbug/pull/135.
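
For reference, E117 fires when a statement is indented deeper than its enclosing block requires. A generic illustration (not the bugbug sources themselves):

```python
items = [1, 2, 3]

# flake8 E117 (over-indented): the loop body sits one level deeper than needed
for item in items:
        print(item)

# Fixed: dedent the body to a single level
for item in items:
    print(item)
```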
</issue>
<code>
[start of bugbug/model.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import numpy as np
7 import shap
8 from imblearn.metrics import classification_report_imbalanced
9 from imblearn.under_sampling import RandomUnderSampler
10 from sklearn import metrics
11 from sklearn.externals import joblib
12 from sklearn.feature_extraction.text import TfidfVectorizer
13 from sklearn.model_selection import cross_validate
14 from sklearn.model_selection import train_test_split
15
16 from bugbug import bugzilla
17 from bugbug.nlp import SpacyVectorizer
18
19
20 class Model():
21 def __init__(self, lemmatization=False):
22 if lemmatization:
23 self.text_vectorizer = SpacyVectorizer
24 else:
25 self.text_vectorizer = TfidfVectorizer
26
27 self.undersampling_enabled = True
28 self.cross_validation_enabled = True
29
30 def get_feature_names(self):
31 return []
32
33 def get_important_features(self, cutoff, shap_values):
34 # Calculate the values that represent the fraction of the model output variability attributable
35 # to each feature across the whole dataset.
36 shap_sums = shap_values.sum(0)
37 abs_shap_sums = np.abs(shap_values).sum(0)
38 rel_shap_sums = abs_shap_sums / abs_shap_sums.sum()
39
40 cut_off_value = cutoff * np.amax(rel_shap_sums)
41
42 # Get indices of features that pass the cut off value
43 top_feature_indices = np.where(rel_shap_sums >= cut_off_value)[0]
44 # Get the importance values of the top features from their indices
45 top_features = np.take(rel_shap_sums, top_feature_indices)
46 # Gets the sign of the importance from shap_sums as boolean
47 is_positive = (np.take(shap_sums, top_feature_indices)) >= 0
48 # Stack the importance, indices and shap_sums in a 2D array
49 top_features = np.column_stack((top_features, top_feature_indices, is_positive))
50 # Sort the array (in decreasing order of importance values)
51 top_features = top_features[top_features[:, 0].argsort()][::-1]
52
53 return top_features
54
55 def train(self, importance_cutoff=0.15):
56 classes = self.get_labels()
57 class_names = sorted(list(set(classes.values())), reverse=True)
58
59 # Get bugs, filtering out those for which we have no labels.
60 def bugs():
61 return (bug for bug in bugzilla.get_bugs() if bug['id'] in classes)
62
63 # Calculate labels.
64 y = np.array([classes[bug['id']] for bug in bugs()])
65
66 # Extract features from the bugs.
67 X = self.extraction_pipeline.fit_transform(bugs())
68
69 print(f'X: {X.shape}, y: {y.shape}')
70
71 # Split dataset in training and test.
72 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)
73
74 if self.undersampling_enabled:
75 # Under-sample the majority classes, as the datasets are imbalanced.
76 X_train, y_train = RandomUnderSampler(random_state=0).fit_sample(X_train, y_train)
77
78 print(f'X_train: {X_train.shape}, y_train: {y_train.shape}')
79 print(f'X_test: {X_test.shape}, y_test: {y_test.shape}')
80
81 # Use k-fold cross validation to evaluate results.
82 if self.cross_validation_enabled:
83 scorings = ['accuracy', 'precision', 'recall']
84 scores = cross_validate(self.clf, X_train, y_train, scoring=scorings, cv=5)
85 print('Cross Validation scores:')
86 for scoring in scorings:
87 score = scores[f'test_{scoring}']
88 print(f'{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})')
89
90 # Evaluate results on the test set.
91 self.clf.fit(X_train, y_train)
92
93 feature_names = self.get_feature_names()
94 if len(feature_names):
95 explainer = shap.TreeExplainer(self.clf)
96 shap_values = explainer.shap_values(X_train)
97
98 important_features = self.get_important_features(importance_cutoff, shap_values)
99
100 print(f'\nTop {len(important_features)} Features :')
101 for i, [importance, index, is_positive] in enumerate(important_features):
102 print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})')
103
104 y_pred = self.clf.predict(X_test)
105
106 print(f'No confidence threshold - {len(y_test)} classified')
107 print(metrics.confusion_matrix(y_test, y_pred, labels=class_names))
108 print(classification_report_imbalanced(y_test, y_pred, labels=class_names))
109
110 # Evaluate results on the test set for some confidence thresholds.
111 for confidence_threshold in [0.6, 0.7, 0.8, 0.9]:
112 y_pred_probas = self.clf.predict_proba(X_test)
113
114 y_test_filter = []
115 y_pred_filter = []
116 for i in range(0, len(y_test)):
117 argmax = np.argmax(y_pred_probas[i])
118 if y_pred_probas[i][argmax] < confidence_threshold:
119 continue
120
121 y_test_filter.append(y_test[i])
122 y_pred_filter.append(argmax)
123
124 y_pred_filter = self.clf._le.inverse_transform(y_pred_filter)
125
126 print(f'\nConfidence threshold > {confidence_threshold} - {len(y_test_filter)} classified')
127 print(metrics.confusion_matrix(y_test_filter, y_pred_filter, labels=class_names))
128 print(classification_report_imbalanced(y_test_filter, y_pred_filter, labels=class_names))
129
130 joblib.dump(self, self.__class__.__name__.lower())
131
132 @staticmethod
133 def load(model_file_name):
134 return joblib.load(model_file_name)
135
136 def overwrite_classes(self, bugs, classes, probabilities):
137 return classes
138
139 def classify(self, bugs, probabilities=False, importances=False, importance_cutoff=0.15):
140 assert bugs is not None
141 assert self.extraction_pipeline is not None and self.clf is not None, 'The module needs to be initialized first'
142
143 if not isinstance(bugs, list):
144 bugs = [bugs]
145
146 assert isinstance(bugs[0], dict)
147
148 X = self.extraction_pipeline.transform(bugs)
149 if probabilities:
150 classes = self.clf.predict_proba(X)
151 else:
152 classes = self.clf.predict(X)
153
154 classes = self.overwrite_classes(bugs, classes, probabilities)
155
156 if importances:
157 explainer = shap.TreeExplainer(self.clf)
158 shap_values = explainer.shap_values(X)
159
160 importances = self.get_important_features(importance_cutoff, shap_values)
161
162 return classes, importances
163
164 return classes
165
[end of bugbug/model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bugbug/model.py b/bugbug/model.py
--- a/bugbug/model.py
+++ b/bugbug/model.py
@@ -99,7 +99,7 @@
print(f'\nTop {len(important_features)} Features :')
for i, [importance, index, is_positive] in enumerate(important_features):
- print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})')
+ print(f'{i + 1}. \'{feature_names[int(index)]}\' ({"+" if (is_positive) else "-"}{importance})')
y_pred = self.clf.predict(X_test)
|
{"golden_diff": "diff --git a/bugbug/model.py b/bugbug/model.py\n--- a/bugbug/model.py\n+++ b/bugbug/model.py\n@@ -99,7 +99,7 @@\n \n print(f'\\nTop {len(important_features)} Features :')\n for i, [importance, index, is_positive] in enumerate(important_features):\n- print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n+ print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n \n y_pred = self.clf.predict(X_test)\n", "issue": "Fix linting issue preventing us from updating to flake8 4.7.1\nhttps://github.com/mozilla/bugbug/pull/135 is failing because:\r\n> ./bugbug/model.py:102:21: E117 over-indented\r\n\r\nWe should fix it, and then we can merge https://github.com/mozilla/bugbug/pull/135.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport numpy as np\nimport shap\nfrom imblearn.metrics import classification_report_imbalanced\nfrom imblearn.under_sampling import RandomUnderSampler\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import cross_validate\nfrom sklearn.model_selection import train_test_split\n\nfrom bugbug import bugzilla\nfrom bugbug.nlp import SpacyVectorizer\n\n\nclass Model():\n def __init__(self, lemmatization=False):\n if lemmatization:\n self.text_vectorizer = SpacyVectorizer\n else:\n self.text_vectorizer = TfidfVectorizer\n\n self.undersampling_enabled = True\n self.cross_validation_enabled = True\n\n def get_feature_names(self):\n return []\n\n def get_important_features(self, cutoff, shap_values):\n # Calculate the values that represent the fraction of the model output variability attributable\n # to each feature across the whole dataset.\n shap_sums = shap_values.sum(0)\n abs_shap_sums = np.abs(shap_values).sum(0)\n rel_shap_sums = abs_shap_sums / abs_shap_sums.sum()\n\n cut_off_value = cutoff * np.amax(rel_shap_sums)\n\n # Get indices of features that pass the cut off value\n top_feature_indices = np.where(rel_shap_sums >= cut_off_value)[0]\n # Get the importance values of the top features from their indices\n top_features = np.take(rel_shap_sums, top_feature_indices)\n # Gets the sign of the importance from shap_sums as boolean\n is_positive = (np.take(shap_sums, top_feature_indices)) >= 0\n # Stack the importance, indices and shap_sums in a 2D array\n top_features = np.column_stack((top_features, top_feature_indices, is_positive))\n # Sort the array (in decreasing order of importance values)\n top_features = top_features[top_features[:, 0].argsort()][::-1]\n\n return top_features\n\n def train(self, importance_cutoff=0.15):\n classes = self.get_labels()\n class_names = sorted(list(set(classes.values())), reverse=True)\n\n # Get bugs, filtering out those for which we have no labels.\n def bugs():\n return (bug for bug in bugzilla.get_bugs() if bug['id'] in classes)\n\n # Calculate labels.\n y = np.array([classes[bug['id']] for bug in bugs()])\n\n # Extract features from the bugs.\n X = self.extraction_pipeline.fit_transform(bugs())\n\n print(f'X: {X.shape}, y: {y.shape}')\n\n # Split dataset in training and test.\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0)\n\n if self.undersampling_enabled:\n # 
Under-sample the majority classes, as the datasets are imbalanced.\n X_train, y_train = RandomUnderSampler(random_state=0).fit_sample(X_train, y_train)\n\n print(f'X_train: {X_train.shape}, y_train: {y_train.shape}')\n print(f'X_test: {X_test.shape}, y_test: {y_test.shape}')\n\n # Use k-fold cross validation to evaluate results.\n if self.cross_validation_enabled:\n scorings = ['accuracy', 'precision', 'recall']\n scores = cross_validate(self.clf, X_train, y_train, scoring=scorings, cv=5)\n print('Cross Validation scores:')\n for scoring in scorings:\n score = scores[f'test_{scoring}']\n print(f'{scoring.capitalize()}: f{score.mean()} (+/- {score.std() * 2})')\n\n # Evaluate results on the test set.\n self.clf.fit(X_train, y_train)\n\n feature_names = self.get_feature_names()\n if len(feature_names):\n explainer = shap.TreeExplainer(self.clf)\n shap_values = explainer.shap_values(X_train)\n\n important_features = self.get_important_features(importance_cutoff, shap_values)\n\n print(f'\\nTop {len(important_features)} Features :')\n for i, [importance, index, is_positive] in enumerate(important_features):\n print(f'{i + 1}. \\'{feature_names[int(index)]}\\' ({\"+\" if (is_positive) else \"-\"}{importance})')\n\n y_pred = self.clf.predict(X_test)\n\n print(f'No confidence threshold - {len(y_test)} classified')\n print(metrics.confusion_matrix(y_test, y_pred, labels=class_names))\n print(classification_report_imbalanced(y_test, y_pred, labels=class_names))\n\n # Evaluate results on the test set for some confidence thresholds.\n for confidence_threshold in [0.6, 0.7, 0.8, 0.9]:\n y_pred_probas = self.clf.predict_proba(X_test)\n\n y_test_filter = []\n y_pred_filter = []\n for i in range(0, len(y_test)):\n argmax = np.argmax(y_pred_probas[i])\n if y_pred_probas[i][argmax] < confidence_threshold:\n continue\n\n y_test_filter.append(y_test[i])\n y_pred_filter.append(argmax)\n\n y_pred_filter = self.clf._le.inverse_transform(y_pred_filter)\n\n print(f'\\nConfidence threshold > {confidence_threshold} - {len(y_test_filter)} classified')\n print(metrics.confusion_matrix(y_test_filter, y_pred_filter, labels=class_names))\n print(classification_report_imbalanced(y_test_filter, y_pred_filter, labels=class_names))\n\n joblib.dump(self, self.__class__.__name__.lower())\n\n @staticmethod\n def load(model_file_name):\n return joblib.load(model_file_name)\n\n def overwrite_classes(self, bugs, classes, probabilities):\n return classes\n\n def classify(self, bugs, probabilities=False, importances=False, importance_cutoff=0.15):\n assert bugs is not None\n assert self.extraction_pipeline is not None and self.clf is not None, 'The module needs to be initialized first'\n\n if not isinstance(bugs, list):\n bugs = [bugs]\n\n assert isinstance(bugs[0], dict)\n\n X = self.extraction_pipeline.transform(bugs)\n if probabilities:\n classes = self.clf.predict_proba(X)\n else:\n classes = self.clf.predict(X)\n\n classes = self.overwrite_classes(bugs, classes, probabilities)\n\n if importances:\n explainer = shap.TreeExplainer(self.clf)\n shap_values = explainer.shap_values(X)\n\n importances = self.get_important_features(importance_cutoff, shap_values)\n\n return classes, importances\n\n return classes\n", "path": "bugbug/model.py"}]}
| 2,549 | 160 |
gh_patches_debug_39903
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-5514
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop Python 3.6 support
[It went end-of-life on December 2021](https://endoflife.date/python).
</issue>
<code>
[start of scrapy/utils/py36.py]
1 import warnings
2
3 from scrapy.exceptions import ScrapyDeprecationWarning
4 from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401
5
6
7 warnings.warn(
8 "Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.",
9 category=ScrapyDeprecationWarning,
10 stacklevel=2,
11 )
12
[end of scrapy/utils/py36.py]
[start of setup.py]
1 from os.path import dirname, join
2 from pkg_resources import parse_version
3 from setuptools import setup, find_packages, __version__ as setuptools_version
4
5
6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:
7 version = f.read().decode('ascii').strip()
8
9
10 def has_environment_marker_platform_impl_support():
11 """Code extracted from 'pytest/setup.py'
12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31
13
14 The first known release to support environment marker with range operators
15 it is 18.5, see:
16 https://setuptools.readthedocs.io/en/latest/history.html#id235
17 """
18 return parse_version(setuptools_version) >= parse_version('18.5')
19
20
21 install_requires = [
22 'Twisted>=17.9.0',
23 'cryptography>=2.0',
24 'cssselect>=0.9.1',
25 'itemloaders>=1.0.1',
26 'parsel>=1.5.0',
27 'pyOpenSSL>=16.2.0',
28 'queuelib>=1.4.2',
29 'service_identity>=16.0.0',
30 'w3lib>=1.17.0',
31 'zope.interface>=4.1.3',
32 'protego>=0.1.15',
33 'itemadapter>=0.1.0',
34 'setuptools',
35 'tldextract',
36 ]
37 extras_require = {}
38 cpython_dependencies = [
39 'lxml>=3.5.0',
40 'PyDispatcher>=2.0.5',
41 ]
42 if has_environment_marker_platform_impl_support():
43 extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
44 extras_require[':platform_python_implementation == "PyPy"'] = [
45 # Earlier lxml versions are affected by
46 # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
47 # which was fixed in Cython 0.26, released on 2017-06-19, and used to
48 # generate the C headers of lxml release tarballs published since then, the
49 # first of which was:
50 'lxml>=4.0.0',
51 'PyPyDispatcher>=2.1.0',
52 ]
53 else:
54 install_requires.extend(cpython_dependencies)
55
56
57 setup(
58 name='Scrapy',
59 version=version,
60 url='https://scrapy.org',
61 project_urls={
62 'Documentation': 'https://docs.scrapy.org/',
63 'Source': 'https://github.com/scrapy/scrapy',
64 'Tracker': 'https://github.com/scrapy/scrapy/issues',
65 },
66 description='A high-level Web Crawling and Web Scraping framework',
67 long_description=open('README.rst').read(),
68 author='Scrapy developers',
69 maintainer='Pablo Hoffman',
70 maintainer_email='[email protected]',
71 license='BSD',
72 packages=find_packages(exclude=('tests', 'tests.*')),
73 include_package_data=True,
74 zip_safe=False,
75 entry_points={
76 'console_scripts': ['scrapy = scrapy.cmdline:execute']
77 },
78 classifiers=[
79 'Framework :: Scrapy',
80 'Development Status :: 5 - Production/Stable',
81 'Environment :: Console',
82 'Intended Audience :: Developers',
83 'License :: OSI Approved :: BSD License',
84 'Operating System :: OS Independent',
85 'Programming Language :: Python',
86 'Programming Language :: Python :: 3',
87 'Programming Language :: Python :: 3.6',
88 'Programming Language :: Python :: 3.7',
89 'Programming Language :: Python :: 3.8',
90 'Programming Language :: Python :: 3.9',
91 'Programming Language :: Python :: 3.10',
92 'Programming Language :: Python :: Implementation :: CPython',
93 'Programming Language :: Python :: Implementation :: PyPy',
94 'Topic :: Internet :: WWW/HTTP',
95 'Topic :: Software Development :: Libraries :: Application Frameworks',
96 'Topic :: Software Development :: Libraries :: Python Modules',
97 ],
98 python_requires='>=3.6',
99 install_requires=install_requires,
100 extras_require=extras_require,
101 )
102
[end of setup.py]
[start of scrapy/__init__.py]
1 """
2 Scrapy - a web crawling and web scraping framework written for Python
3 """
4
5 import pkgutil
6 import sys
7 import warnings
8
9 from twisted import version as _txv
10
11 # Declare top-level shortcuts
12 from scrapy.spiders import Spider
13 from scrapy.http import Request, FormRequest
14 from scrapy.selector import Selector
15 from scrapy.item import Item, Field
16
17
18 __all__ = [
19 '__version__', 'version_info', 'twisted_version', 'Spider',
20 'Request', 'FormRequest', 'Selector', 'Item', 'Field',
21 ]
22
23
24 # Scrapy and Twisted versions
25 __version__ = (pkgutil.get_data(__package__, "VERSION") or b"").decode("ascii").strip()
26 version_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))
27 twisted_version = (_txv.major, _txv.minor, _txv.micro)
28
29
30 # Check minimum required Python version
31 if sys.version_info < (3, 6):
32 print(f"Scrapy {__version__} requires Python 3.6+")
33 sys.exit(1)
34
35
36 # Ignore noisy twisted deprecation warnings
37 warnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')
38
39
40 del pkgutil
41 del sys
42 del warnings
43
[end of scrapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/__init__.py b/scrapy/__init__.py
--- a/scrapy/__init__.py
+++ b/scrapy/__init__.py
@@ -28,8 +28,8 @@
# Check minimum required Python version
-if sys.version_info < (3, 6):
- print(f"Scrapy {__version__} requires Python 3.6+")
+if sys.version_info < (3, 7):
+ print(f"Scrapy {__version__} requires Python 3.7+")
sys.exit(1)
diff --git a/scrapy/utils/py36.py b/scrapy/utils/py36.py
deleted file mode 100644
--- a/scrapy/utils/py36.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import warnings
-
-from scrapy.exceptions import ScrapyDeprecationWarning
-from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401
-
-
-warnings.warn(
- "Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.",
- category=ScrapyDeprecationWarning,
- stacklevel=2,
-)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,35 +19,29 @@
install_requires = [
- 'Twisted>=17.9.0',
- 'cryptography>=2.0',
+ 'Twisted>=18.9.0',
+ 'cryptography>=2.8',
'cssselect>=0.9.1',
'itemloaders>=1.0.1',
'parsel>=1.5.0',
- 'pyOpenSSL>=16.2.0',
+ 'pyOpenSSL>=19.1.0',
'queuelib>=1.4.2',
'service_identity>=16.0.0',
'w3lib>=1.17.0',
- 'zope.interface>=4.1.3',
+ 'zope.interface>=5.1.0',
'protego>=0.1.15',
'itemadapter>=0.1.0',
'setuptools',
'tldextract',
+ 'lxml>=4.3.0',
]
extras_require = {}
cpython_dependencies = [
- 'lxml>=3.5.0',
'PyDispatcher>=2.0.5',
]
if has_environment_marker_platform_impl_support():
extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies
extras_require[':platform_python_implementation == "PyPy"'] = [
- # Earlier lxml versions are affected by
- # https://foss.heptapod.net/pypy/pypy/-/issues/2498,
- # which was fixed in Cython 0.26, released on 2017-06-19, and used to
- # generate the C headers of lxml release tarballs published since then, the
- # first of which was:
- 'lxml>=4.0.0',
'PyPyDispatcher>=2.1.0',
]
else:
@@ -84,7 +78,6 @@
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
@@ -95,7 +88,7 @@
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
- python_requires='>=3.6',
+ python_requires='>=3.7',
install_requires=install_requires,
extras_require=extras_require,
)
|
{"golden_diff": "diff --git a/scrapy/__init__.py b/scrapy/__init__.py\n--- a/scrapy/__init__.py\n+++ b/scrapy/__init__.py\n@@ -28,8 +28,8 @@\n \n \n # Check minimum required Python version\n-if sys.version_info < (3, 6):\n- print(f\"Scrapy {__version__} requires Python 3.6+\")\n+if sys.version_info < (3, 7):\n+ print(f\"Scrapy {__version__} requires Python 3.7+\")\n sys.exit(1)\n \n \ndiff --git a/scrapy/utils/py36.py b/scrapy/utils/py36.py\ndeleted file mode 100644\n--- a/scrapy/utils/py36.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-import warnings\n-\n-from scrapy.exceptions import ScrapyDeprecationWarning\n-from scrapy.utils.asyncgen import collect_asyncgen # noqa: F401\n-\n-\n-warnings.warn(\n- \"Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.\",\n- category=ScrapyDeprecationWarning,\n- stacklevel=2,\n-)\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,35 +19,29 @@\n \n \n install_requires = [\n- 'Twisted>=17.9.0',\n- 'cryptography>=2.0',\n+ 'Twisted>=18.9.0',\n+ 'cryptography>=2.8',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n- 'pyOpenSSL>=16.2.0',\n+ 'pyOpenSSL>=19.1.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n- 'zope.interface>=4.1.3',\n+ 'zope.interface>=5.1.0',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n+ 'lxml>=4.3.0',\n ]\n extras_require = {}\n cpython_dependencies = [\n- 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n ]\n if has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n- # Earlier lxml versions are affected by\n- # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n- # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n- # generate the C headers of lxml release tarballs published since then, the\n- # first of which was:\n- 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\n else:\n@@ -84,7 +78,6 @@\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n@@ -95,7 +88,7 @@\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n- python_requires='>=3.6',\n+ python_requires='>=3.7',\n install_requires=install_requires,\n extras_require=extras_require,\n )\n", "issue": "Drop Python 3.6 support\n[It went end-of-life on December 2021](https://endoflife.date/python).\n", "before_files": [{"content": "import warnings\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.utils.asyncgen import collect_asyncgen # noqa: F401\n\n\nwarnings.warn(\n \"Module `scrapy.utils.py36` is deprecated, please import from `scrapy.utils.asyncgen` instead.\",\n category=ScrapyDeprecationWarning,\n stacklevel=2,\n)\n", "path": "scrapy/utils/py36.py"}, {"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n 
https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n 'setuptools',\n 'tldextract',\n]\nextras_require = {}\ncpython_dependencies = [\n 'lxml>=3.5.0',\n 'PyDispatcher>=2.0.5',\n]\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.extend(cpython_dependencies)\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}, {"content": "\"\"\"\nScrapy - a web crawling and web scraping framework written for Python\n\"\"\"\n\nimport pkgutil\nimport sys\nimport warnings\n\nfrom twisted import version as _txv\n\n# Declare top-level shortcuts\nfrom scrapy.spiders import Spider\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\nfrom scrapy.item import Item, Field\n\n\n__all__ = [\n '__version__', 'version_info', 'twisted_version', 'Spider',\n 'Request', 'FormRequest', 'Selector', 'Item', 'Field',\n]\n\n\n# Scrapy and Twisted versions\n__version__ = (pkgutil.get_data(__package__, \"VERSION\") or 
b\"\").decode(\"ascii\").strip()\nversion_info = tuple(int(v) if v.isdigit() else v for v in __version__.split('.'))\ntwisted_version = (_txv.major, _txv.minor, _txv.micro)\n\n\n# Check minimum required Python version\nif sys.version_info < (3, 6):\n print(f\"Scrapy {__version__} requires Python 3.6+\")\n sys.exit(1)\n\n\n# Ignore noisy twisted deprecation warnings\nwarnings.filterwarnings('ignore', category=DeprecationWarning, module='twisted')\n\n\ndel pkgutil\ndel sys\ndel warnings\n", "path": "scrapy/__init__.py"}]}
| 2,168 | 887 |
gh_patches_debug_351
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-3083
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RuntimeError: release unlocked lock
* Version: 6.8.0
* Python: 3.11.1
* OS: linux
* `pip freeze` output
```
pip freeze 4167ms
aiofiles==23.1.0
aiohttp==3.8.5
aiosignal==1.3.1
alembic==1.11.3
async-timeout==4.0.2
asyncpg==0.28.0
attrs==23.1.0
base58==2.1.1
bitarray==2.7.5
certifi==2023.5.7
charset-normalizer==3.1.0
cytoolz==0.12.1
ecs-logging==2.1.0
eth-abi==4.1.0
eth-account==0.9.0
eth-hash==0.5.2
eth-keyfile==0.6.1
eth-keys==0.4.0
eth-rlp==0.3.0
eth-typing==3.4.0
eth-utils==2.2.0
frozenlist==1.3.3
grpcio==1.57.0
grpcio-tools==1.57.0
hexbytes==0.3.1
html5tagger==1.3.0
httptools==0.5.0
idna==3.4
jsonschema==4.17.3
lru-dict==1.2.0
Mako==1.2.4
MarkupSafe==2.1.2
multidict==6.0.4
numpy==1.25.2
parsimonious==0.9.0
prometheus-client==0.17.1
protobuf==4.23.0
pycryptodome==3.18.0
pydantic==1.10.12
pyrsistent==0.19.3
pyunormalize==15.0.0
PyYAML==6.0
redis==5.0.0
regex==2023.6.3
requests==2.31.0
rlp==3.0.0
sanic==23.6.0
sanic-ext==23.6.0
sanic-routing==23.6.0
SQLAlchemy==2.0.20
toolz==0.12.0
tracerite==1.1.0
typing_extensions==4.5.0
ujson==5.7.0
urllib3==2.0.2
uvloop==0.17.0
web3==6.8.0
websockets==11.0.3
yarl==1.9.2
```
### What was wrong?
* The code which produced the error
```py
provider = AsyncHTTPProvider(request.app.config.get("ETHEREUM_MAINNET_URL"))
w3 = AsyncWeb3(provider)
contract = w3.eth.contract(
address=MAINNET_TOKEN_ADDRESS_DETECTION,
abi=single_call_balance_checker_abi,
)
address_keys = list(TOKEN_METADATA_MAP.keys())
(native_balance, balance_values) = await asyncio.gather(
w3.eth.get_balance(to_checksum_address(address)),
contract.functions.balances(
[to_checksum_address(address)],
address_keys,
).call(),
)
```
* The full output of the error
```py
File "handle_request", line 97, in handle_request
File "/app/data_service/ethereum/views/balances.py", line 54, in get_balances
(native_balance, balance_values) = await asyncio.gather(
^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/eth/async_eth.py", line 435, in get_balance
return await self._get_balance(account, block_identifier)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/module.py", line 114, in caller
result = await async_w3.manager.coro_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/manager.py", line 264, in coro_request
response = await self._coro_make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/manager.py", line 199, in _coro_make_request
return await request_func(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/gas_price_strategy.py", line 126, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/names.py", line 139, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/attrdict.py", line 69, in middleware
response = await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/formatting.py", line 165, in middleware
response = await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/buffered_gas_estimate.py", line 58, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/middleware/exception_retry_request.py", line 151, in middleware
return await make_request(method, params)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/providers/async_rpc.py", line 91, in make_request
raw_response = await async_make_post_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 239, in async_make_post_request
response = await async_get_response_from_post_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 231, in async_get_response_from_post_request
session = await async_cache_and_return_session(endpoint_uri)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py", line 141, in async_cache_and_return_session
async with async_lock(_async_session_pool, _async_session_cache_lock):
File "/usr/local/lib/python3.11/contextlib.py", line 204, in __aenter__
return await anext(self.gen)
^^^^^^^^^^^^^^^^^^^^^
File "/app/.venv/lib/python3.11/site-packages/web3/_utils/async_caching.py", line 21, in async_lock
lock.release()
```
* What type of node you were connecting to.
AsyncHTTPProvider (Infura)
### How can it be fixed?
I tried to reproduce this error locally, but it only occurs in production.
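
One plausible mechanism (an assumption on the editor's part, not something the traceback proves): if the request is cancelled or times out while the executor is still acquiring the lock, the async context manager's `finally` clause runs without the lock ever having been taken, and releasing an unheld `threading.Lock` raises exactly this error. A standalone sketch that deliberately reproduces the failure mode:

```python
import threading

lock = threading.Lock()
try:
    pass  # imagine the awaited acquire was cancelled before it completed
finally:
    lock.release()  # RuntimeError: release unlocked lock
```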
</issue>
<code>
[start of web3/_utils/async_caching.py]
1 import asyncio
2 from concurrent.futures import (
3 ThreadPoolExecutor,
4 )
5 import contextlib
6 import threading
7 from typing import (
8 AsyncGenerator,
9 )
10
11
12 @contextlib.asynccontextmanager
13 async def async_lock(
14 thread_pool: ThreadPoolExecutor, lock: threading.Lock
15 ) -> AsyncGenerator[None, None]:
16 loop = asyncio.get_event_loop()
17 try:
18 await loop.run_in_executor(thread_pool, lock.acquire)
19 yield
20 finally:
21 lock.release()
22
[end of web3/_utils/async_caching.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/web3/_utils/async_caching.py b/web3/_utils/async_caching.py
--- a/web3/_utils/async_caching.py
+++ b/web3/_utils/async_caching.py
@@ -18,4 +18,5 @@
await loop.run_in_executor(thread_pool, lock.acquire)
yield
finally:
- lock.release()
+ if lock.locked():
+ lock.release()
|
{"golden_diff": "diff --git a/web3/_utils/async_caching.py b/web3/_utils/async_caching.py\n--- a/web3/_utils/async_caching.py\n+++ b/web3/_utils/async_caching.py\n@@ -18,4 +18,5 @@\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n- lock.release()\n+ if lock.locked():\n+ lock.release()\n", "issue": "RuntimeError: release unlocked lock\n* Version: 6.8.0\r\n* Python: 3.11.1\r\n* OS: linux\r\n* `pip freeze` output\r\n\r\n```\r\n\ue0b1 pip freeze 4167ms\r\naiofiles==23.1.0\r\naiohttp==3.8.5\r\naiosignal==1.3.1\r\nalembic==1.11.3\r\nasync-timeout==4.0.2\r\nasyncpg==0.28.0\r\nattrs==23.1.0\r\nbase58==2.1.1\r\nbitarray==2.7.5\r\ncertifi==2023.5.7\r\ncharset-normalizer==3.1.0\r\ncytoolz==0.12.1\r\necs-logging==2.1.0\r\neth-abi==4.1.0\r\neth-account==0.9.0\r\neth-hash==0.5.2\r\neth-keyfile==0.6.1\r\neth-keys==0.4.0\r\neth-rlp==0.3.0\r\neth-typing==3.4.0\r\neth-utils==2.2.0\r\nfrozenlist==1.3.3\r\ngrpcio==1.57.0\r\ngrpcio-tools==1.57.0\r\nhexbytes==0.3.1\r\nhtml5tagger==1.3.0\r\nhttptools==0.5.0\r\nidna==3.4\r\njsonschema==4.17.3\r\nlru-dict==1.2.0\r\nMako==1.2.4\r\nMarkupSafe==2.1.2\r\nmultidict==6.0.4\r\nnumpy==1.25.2\r\nparsimonious==0.9.0\r\nprometheus-client==0.17.1\r\nprotobuf==4.23.0\r\npycryptodome==3.18.0\r\npydantic==1.10.12\r\npyrsistent==0.19.3\r\npyunormalize==15.0.0\r\nPyYAML==6.0\r\nredis==5.0.0\r\nregex==2023.6.3\r\nrequests==2.31.0\r\nrlp==3.0.0\r\nsanic==23.6.0\r\nsanic-ext==23.6.0\r\nsanic-routing==23.6.0\r\nSQLAlchemy==2.0.20\r\ntoolz==0.12.0\r\ntracerite==1.1.0\r\ntyping_extensions==4.5.0\r\nujson==5.7.0\r\nurllib3==2.0.2\r\nuvloop==0.17.0\r\nweb3==6.8.0\r\nwebsockets==11.0.3\r\nyarl==1.9.2\r\n```\r\n\r\n\r\n### What was wrong?\r\n\r\n\r\n* The code which produced the error\r\n```py\r\n provider = AsyncHTTPProvider(request.app.config.get(\"ETHEREUM_MAINNET_URL\"))\r\n w3 = AsyncWeb3(provider)\r\n\r\n contract = w3.eth.contract(\r\n address=MAINNET_TOKEN_ADDRESS_DETECTION,\r\n abi=single_call_balance_checker_abi,\r\n )\r\n address_keys = list(TOKEN_METADATA_MAP.keys())\r\n\r\n (native_balance, balance_values) = await asyncio.gather(\r\n w3.eth.get_balance(to_checksum_address(address)),\r\n contract.functions.balances(\r\n [to_checksum_address(address)],\r\n address_keys,\r\n ).call(),\r\n )\r\n```\r\n* The full output of the error\r\n```py\r\nFile \"handle_request\", line 97, in handle_request\r\n File \"/app/data_service/ethereum/views/balances.py\", line 54, in get_balances\r\n (native_balance, balance_values) = await asyncio.gather(\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/eth/async_eth.py\", line 435, in get_balance\r\n return await self._get_balance(account, block_identifier)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/module.py\", line 114, in caller\r\n result = await async_w3.manager.coro_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/manager.py\", line 264, in coro_request\r\n response = await self._coro_make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/manager.py\", line 199, in _coro_make_request\r\n return await request_func(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/gas_price_strategy.py\", line 126, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File 
\"/app/.venv/lib/python3.11/site-packages/web3/middleware/names.py\", line 139, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/attrdict.py\", line 69, in middleware\r\n response = await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/formatting.py\", line 165, in middleware\r\n response = await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/buffered_gas_estimate.py\", line 58, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/middleware/exception_retry_request.py\", line 151, in middleware\r\n return await make_request(method, params)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/providers/async_rpc.py\", line 91, in make_request\r\n raw_response = await async_make_post_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py\", line 239, in async_make_post_request\r\n response = await async_get_response_from_post_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py\", line 231, in async_get_response_from_post_request\r\n session = await async_cache_and_return_session(endpoint_uri)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/request.py\", line 141, in async_cache_and_return_session\r\n async with async_lock(_async_session_pool, _async_session_cache_lock):\r\n File \"/usr/local/lib/python3.11/contextlib.py\", line 204, in __aenter__\r\n return await anext(self.gen)\r\n ^^^^^^^^^^^^^^^^^^^^^\r\n File \"/app/.venv/lib/python3.11/site-packages/web3/_utils/async_caching.py\", line 21, in async_lock\r\n lock.release()\r\n```\r\n* What type of node you were connecting to.\r\nAsyncHTTPProvider (Infura)\r\n\r\n\r\n### How can it be fixed?\r\n\r\n\r\nI tried to reproduce this error locally, but it only occurs in production. \n", "before_files": [{"content": "import asyncio\nfrom concurrent.futures import (\n ThreadPoolExecutor,\n)\nimport contextlib\nimport threading\nfrom typing import (\n AsyncGenerator,\n)\n\n\[email protected]\nasync def async_lock(\n thread_pool: ThreadPoolExecutor, lock: threading.Lock\n) -> AsyncGenerator[None, None]:\n loop = asyncio.get_event_loop()\n try:\n await loop.run_in_executor(thread_pool, lock.acquire)\n yield\n finally:\n lock.release()\n", "path": "web3/_utils/async_caching.py"}]}
| 2,435 | 95 |
gh_patches_debug_10492
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-2532
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Kubernetes Deployment Info shown on Heroku
Do not try to show Kubernetes deployment info when not hosted on Kubernetes
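
A sketch of the kind of guard implied (hypothetical helper, not the project's actual code; the view already computes an `on_kubernetes` flag that the template should receive rather than a hard-coded value):

```python
def deployment_context(on_kubernetes: bool, pods_info=None) -> dict:
    """Only expose pod details when actually running on Kubernetes."""
    context = {"on_kubernetes": on_kubernetes}
    if on_kubernetes:
        context["pods_info"] = pods_info
    return context

print(deployment_context(False))  # {'on_kubernetes': False} -> template skips the pods section
```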
</issue>
<code>
[start of app/views/admin/super_admin/super_admin.py]
1 import os
2
3 from flask_admin import expose
4
5 from app.views.admin.super_admin.super_admin_base import SuperAdminBaseView
6 from ....helpers.data_getter import DataGetter
7 from app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count
8 from app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER
9 from app.helpers.kubernetes import KubernetesApi
10
11
12 class SuperAdminView(SuperAdminBaseView):
13
14 @expose('/')
15 def index_view(self):
16 events = DataGetter.get_all_events()[:5]
17 number_live_events = get_count(DataGetter.get_all_live_events())
18 number_draft_events = get_count(DataGetter.get_all_draft_events())
19 number_past_events = get_count(DataGetter.get_all_past_events())
20 super_admins = DataGetter.get_all_super_admins()
21 admins = DataGetter.get_all_admins()
22 registered_users = DataGetter.get_all_registered_users()
23 # TODO Fix function and correct this
24 organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))
25 co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))
26 track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))
27 attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))
28 accepted_sessions = DataGetter.get_all_accepted_sessions()
29 rejected_sessions = DataGetter.get_all_rejected_sessions()
30 draft_sessions = DataGetter.get_all_draft_sessions()
31 email_times = DataGetter.get_email_by_times()
32
33 commit_info = None
34 version = None
35 on_kubernetes = False
36 pods_info = None
37
38 if KubernetesApi.is_on_kubernetes():
39 on_kubernetes = True
40 kubernetes_api = KubernetesApi()
41 pods_info = kubernetes_api.get_pods()['items']
42 version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')
43 commit_info = os.getenv('BRANCH', 'development')
44 else:
45 version = get_latest_heroku_release()
46 commit_info = None
47 commit_number = None
48 if version:
49 commit_number = version['description'].split(' ')[1]
50 commit_info = get_commit_info(commit_number)
51
52 return self.render('/gentelella/admin/super_admin/widgets/index.html',
53 events=events,
54 version=version,
55 commit_info=commit_info,
56 on_kubernetes=True,
57 pods_info=pods_info,
58 number_live_events=number_live_events,
59 number_draft_events=number_draft_events,
60 number_past_events=number_past_events,
61 super_admins=super_admins,
62 admins=admins,
63 registered_users=registered_users,
64 organizers=organizers,
65 co_organizers=co_organizers,
66 track_organizers=track_organizers,
67 attendees=attendees,
68 accepted_sessions=accepted_sessions,
69 rejected_sessions=rejected_sessions,
70 draft_sessions=draft_sessions,
71 email_times=email_times)
72
[end of app/views/admin/super_admin/super_admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/views/admin/super_admin/super_admin.py b/app/views/admin/super_admin/super_admin.py
--- a/app/views/admin/super_admin/super_admin.py
+++ b/app/views/admin/super_admin/super_admin.py
@@ -53,7 +53,7 @@
events=events,
version=version,
commit_info=commit_info,
- on_kubernetes=True,
+ on_kubernetes=on_kubernetes,
pods_info=pods_info,
number_live_events=number_live_events,
number_draft_events=number_draft_events,
|
{"golden_diff": "diff --git a/app/views/admin/super_admin/super_admin.py b/app/views/admin/super_admin/super_admin.py\n--- a/app/views/admin/super_admin/super_admin.py\n+++ b/app/views/admin/super_admin/super_admin.py\n@@ -53,7 +53,7 @@\n events=events,\n version=version,\n commit_info=commit_info,\n- on_kubernetes=True,\n+ on_kubernetes=on_kubernetes,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n", "issue": "Bug: Kubernetes Deployment Info shown on heroku\nDo not try to show Kubernetes deployment info when not hosted on Kubernetes\n", "before_files": [{"content": "import os\n\nfrom flask_admin import expose\n\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView\nfrom ....helpers.data_getter import DataGetter\nfrom app.helpers.helpers import get_latest_heroku_release, get_commit_info, get_count\nfrom app.models.user import ATTENDEE,TRACK_ORGANIZER, COORGANIZER, ORGANIZER\nfrom app.helpers.kubernetes import KubernetesApi\n\n\nclass SuperAdminView(SuperAdminBaseView):\n\n @expose('/')\n def index_view(self):\n events = DataGetter.get_all_events()[:5]\n number_live_events = get_count(DataGetter.get_all_live_events())\n number_draft_events = get_count(DataGetter.get_all_draft_events())\n number_past_events = get_count(DataGetter.get_all_past_events())\n super_admins = DataGetter.get_all_super_admins()\n admins = DataGetter.get_all_admins()\n registered_users = DataGetter.get_all_registered_users()\n # TODO Fix function and correct this\n organizers = get_count(DataGetter.get_all_user_roles(ORGANIZER))\n co_organizers = get_count(DataGetter.get_all_user_roles(COORGANIZER))\n track_organizers = get_count(DataGetter.get_all_user_roles(TRACK_ORGANIZER))\n attendees = get_count(DataGetter.get_all_user_roles(ATTENDEE))\n accepted_sessions = DataGetter.get_all_accepted_sessions()\n rejected_sessions = DataGetter.get_all_rejected_sessions()\n draft_sessions = DataGetter.get_all_draft_sessions()\n email_times = DataGetter.get_email_by_times()\n\n commit_info = None\n version = None\n on_kubernetes = False\n pods_info = None\n\n if KubernetesApi.is_on_kubernetes():\n on_kubernetes = True\n kubernetes_api = KubernetesApi()\n pods_info = kubernetes_api.get_pods()['items']\n version = os.getenv('REPOSITORY', 'https://github.com/fossasia/open-event-orga-server.git')\n commit_info = os.getenv('BRANCH', 'development')\n else:\n version = get_latest_heroku_release()\n commit_info = None\n commit_number = None\n if version:\n commit_number = version['description'].split(' ')[1]\n commit_info = get_commit_info(commit_number)\n\n return self.render('/gentelella/admin/super_admin/widgets/index.html',\n events=events,\n version=version,\n commit_info=commit_info,\n on_kubernetes=True,\n pods_info=pods_info,\n number_live_events=number_live_events,\n number_draft_events=number_draft_events,\n number_past_events=number_past_events,\n super_admins=super_admins,\n admins=admins,\n registered_users=registered_users,\n organizers=organizers,\n co_organizers=co_organizers,\n track_organizers=track_organizers,\n attendees=attendees,\n accepted_sessions=accepted_sessions,\n rejected_sessions=rejected_sessions,\n draft_sessions=draft_sessions,\n email_times=email_times)\n", "path": "app/views/admin/super_admin/super_admin.py"}]}
| 1,340 | 123 |
gh_patches_debug_9975
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-852
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
# -*- coding: utf-8 -*- is not consistently used in all Python source files
- Cookiecutter version:
Cookiecutter 1.4.0
- Template project url:
none
- Python version:
3.5
- Operating System:
OS X
### Description:
# -_\- coding: utf-8 -_\- is not consistently used in all Python source files
### What I've run:
Nothing, it's an observation of an inconsistency in the project.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 import os
4 import io
5 import sys
6
7 from setuptools import setup
8
9 version = "1.4.0"
10
11 if sys.argv[-1] == 'publish':
12 os.system('python setup.py sdist upload')
13 os.system('python setup.py bdist_wheel upload')
14 sys.exit()
15
16 if sys.argv[-1] == 'tag':
17 os.system("git tag -a %s -m 'version %s'" % (version, version))
18 os.system("git push --tags")
19 sys.exit()
20
21 with io.open('README.rst', 'r', encoding='utf-8') as readme_file:
22 readme = readme_file.read()
23
24 with io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:
25 history = history_file.read().replace('.. :changelog:', '')
26
27 requirements = [
28 'future>=0.15.2',
29 'binaryornot>=0.2.0',
30 'jinja2>=2.7',
31 'click>=5.0',
32 'whichcraft>=0.4.0',
33 'poyo>=0.1.0',
34 'jinja2-time>=0.1.0'
35 ]
36
37 long_description = readme + '\n\n' + history
38
39 if sys.argv[-1] == 'readme':
40 print(long_description)
41 sys.exit()
42
43
44 setup(
45 name='cookiecutter',
46 version=version,
47 description=('A command-line utility that creates projects from project '
48 'templates, e.g. creating a Python package project from a '
49 'Python package project template.'),
50 long_description=long_description,
51 author='Audrey Roy',
52 author_email='[email protected]',
53 url='https://github.com/audreyr/cookiecutter',
54 packages=[
55 'cookiecutter',
56 ],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={
59 'console_scripts': [
60 'cookiecutter = cookiecutter.__main__:main',
61 ]
62 },
63 include_package_data=True,
64 install_requires=requirements,
65 license='BSD',
66 zip_safe=False,
67 classifiers=[
68 'Development Status :: 5 - Production/Stable',
69 'Environment :: Console',
70 'Intended Audience :: Developers',
71 'Natural Language :: English',
72 'License :: OSI Approved :: BSD License',
73 'Programming Language :: Python',
74 'Programming Language :: Python :: 2',
75 'Programming Language :: Python :: 2.7',
76 'Programming Language :: Python :: 3',
77 'Programming Language :: Python :: 3.3',
78 'Programming Language :: Python :: 3.4',
79 'Programming Language :: Python :: 3.5',
80 'Programming Language :: Python :: Implementation :: CPython',
81 'Programming Language :: Python :: Implementation :: PyPy',
82 'Topic :: Software Development',
83 ],
84 keywords=(
85 'cookiecutter, Python, projects, project templates, Jinja2, '
86 'skeleton, scaffolding, project directory, setup.py, package, '
87 'packaging'
88 ),
89 )
90
[end of setup.py]
[start of cookiecutter/__main__.py]
1 """Allow cookiecutter to be executable through `python -m cookiecutter`."""
2 from __future__ import absolute_import
3
4 from .cli import main
5
6
7 if __name__ == "__main__": # pragma: no cover
8 main(prog_name="cookiecutter")
9
[end of cookiecutter/__main__.py]
[start of __main__.py]
1 """Allow cookiecutter to be executable from a checkout or zip file."""
2 import runpy
3
4
5 if __name__ == "__main__":
6 runpy.run_module("cookiecutter", run_name="__main__")
7
[end of __main__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/__main__.py b/__main__.py
--- a/__main__.py
+++ b/__main__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
"""Allow cookiecutter to be executable from a checkout or zip file."""
import runpy
diff --git a/cookiecutter/__main__.py b/cookiecutter/__main__.py
--- a/cookiecutter/__main__.py
+++ b/cookiecutter/__main__.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
"""Allow cookiecutter to be executable through `python -m cookiecutter`."""
from __future__ import absolute_import
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# -*- coding: utf-8 -*-
import os
import io
|
{"golden_diff": "diff --git a/__main__.py b/__main__.py\n--- a/__main__.py\n+++ b/__main__.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n \"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\n import runpy\n \ndiff --git a/cookiecutter/__main__.py b/cookiecutter/__main__.py\n--- a/cookiecutter/__main__.py\n+++ b/cookiecutter/__main__.py\n@@ -1,3 +1,4 @@\n+# -*- coding: utf-8 -*-\n \"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\n from __future__ import absolute_import\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,4 +1,5 @@\n #!/usr/bin/env python\n+# -*- coding: utf-8 -*-\n \n import os\n import io\n", "issue": "# -*- coding: utf-8 -*- is not consistently used in all Python source files\n- Cookiecutter version:\n Cookiecutter 1.4.0\n- Template project url:\n none\n- Python version:\n 3.5\n- Operating System:\n OS X\n ### Description:\n # -_\\- coding: utf-8 -_\\- is not consistently used in all Python source files\n### What I've run:\n\nNothing, it's an observation of an inconsistency in the project.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport io\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.4.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith io.open('README.rst', 'r', encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\nwith io.open('HISTORY.rst', 'r', encoding='utf-8') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.4.0',\n 'poyo>=0.1.0',\n 'jinja2-time>=0.1.0'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. 
creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.__main__:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}, {"content": "\"\"\"Allow cookiecutter to be executable through `python -m cookiecutter`.\"\"\"\nfrom __future__ import absolute_import\n\nfrom .cli import main\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main(prog_name=\"cookiecutter\")\n", "path": "cookiecutter/__main__.py"}, {"content": "\"\"\"Allow cookiecutter to be executable from a checkout or zip file.\"\"\"\nimport runpy\n\n\nif __name__ == \"__main__\":\n runpy.run_module(\"cookiecutter\", run_name=\"__main__\")\n", "path": "__main__.py"}]}
| 1,629 | 205 |
gh_patches_debug_6498
|
rasdani/github-patches
|
git_diff
|
abey79__vpype-683
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Examples in `lreverse`'help are wrong
As title says ☝🏻
</issue>
<code>
[start of vpype_cli/layerops.py]
1 from __future__ import annotations
2
3 import random
4
5 import click
6
7 import vpype as vp
8
9 from .cli import cli
10 from .decorators import global_processor
11 from .types import LayerType, multiple_to_layer_ids, single_to_layer_id
12
13 __all__ = ("lcopy", "lmove", "ldelete", "lreverse", "lswap")
14
15
16 @cli.command(group="Layers")
17 @click.argument("sources", type=LayerType(accept_multiple=True))
18 @click.argument("dest", type=LayerType(accept_new=True))
19 @click.option(
20 "-p",
21 "--prob",
22 type=click.FloatRange(0.0, 1.0),
23 help="Path copy probability (default: 1.0).",
24 )
25 @click.option("-m", "--no-prop", is_flag=True, help="Do not copy metadata.")
26 @global_processor
27 def lcopy(document, sources, dest, prob: float | None, no_prop: bool):
28 """Copy the content of one or more layer(s) to another layer.
29
30 SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,
31 or a coma-separated, whitespace-free list of layer IDs.
32
33 DEST can be a layer ID or the string 'new', in which case a new layer with the
34 lowest available ID will be created.
35
36 If a layer is both in the source and destination, its content is not duplicated.
37
38 The `--prob` option controls the probability with which each path is copied. With a value
39 lower than 1.0, some paths will not be copied to DEST, which may be used to achieve random
40 coloring effects.
41
42 If a single source layer is specified and the `--prob` option is not used, the properties
43 of the source layer are copied to the destination layer, overwriting any existing
44 properties with the same name. This behaviour can be disabled with the `--no-prop` option.
45
46 Examples:
47 Copy layer 1 to a new layer:
48
49 vpype [...] lcopy 1 new [...] # duplicate layer 1
50
51 Make a new layer with a merged copy of layer 1 and 2:
52
53 vpype [...] lcopy 1,2 new [...] # make new layer with merged copy of layer 1 and 2
54
55 Add a merged copy of all layers to layer 1. If layer 1 previously had content, this \
56 content is not duplicated:
57
58 vpype [...] lcopy all 1 [...]
59 """
60
61 src_lids = multiple_to_layer_ids(sources, document)
62 dest_lid = single_to_layer_id(dest, document)
63
64 if dest_lid in src_lids:
65 src_lids.remove(dest_lid)
66
67 lc = vp.LineCollection()
68 for lid in src_lids:
69 if prob is not None:
70 for line in document[lid]:
71 if random.random() < prob:
72 lc.append(line)
73 else:
74 lc.extend(document[lid])
75
76 if len(lc) > 0:
77 document.add(lc, dest_lid)
78
79 if len(src_lids) == 1 and prob is None and not no_prop:
80 document.layers[dest_lid].metadata.update(document.layers[src_lids[0]].metadata)
81
82 return document
83
84
85 @cli.command(group="Layers")
86 @click.argument("sources", type=LayerType(accept_multiple=True))
87 @click.argument("dest", type=LayerType(accept_new=True))
88 @click.option(
89 "-p",
90 "--prob",
91 type=click.FloatRange(0.0, 1.0),
92 help="Path move probability (default: 1.0).",
93 )
94 @click.option("-m", "--no-prop", is_flag=True, help="Do not move metadata.")
95 @global_processor
96 def lmove(document, sources, dest, prob: float | None, no_prop: bool):
97 """Move the content of one or more layer(s) to another layer.
98
99 SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,
100 or a coma-separated, whitespace-free list of layer IDs.
101
102 DEST can be a layer ID or the string 'new', in which case a new layer with the
103 lowest available ID will be created.
104
105 Layer(s) left empty after the move are then discarded and may thus be reused by subsequent
106 commands using 'new' as destination layer.
107
108 The `--prob` option controls the probability with which each path is moved. With a value
109 lower than 1.0, some paths will not be moved to DEST, which may be used to achieve random
110 coloring effects.
111
112 If a layer is both in the source and destination, its content is not duplicated.
113
114 If a single source layer is specified and the `--prob` option is not used, the properties
115 of the source layer are moved to the destination layer, overwriting any existing
116 properties with the same name. This behaviour can be disabled with the `--no-prop` option.
117
118 Examples:
119 Merge layer 1 and 2 to layer 1 (the content of layer 1 is not duplicated):
120
121 vpype [...] lmove 1,2 1 [...] # merge layer 1 and 2 to layer 1
122 """
123
124 src_lids = multiple_to_layer_ids(sources, document)
125 dest_lid = single_to_layer_id(dest, document)
126
127 if dest_lid in document.layers:
128 dest_lc = document.layers[dest_lid].clone()
129 else:
130 dest_lc = vp.LineCollection()
131
132 move_metadata = len(src_lids) == 1 and prob is None and not no_prop
133 source_metadata = document.layers[src_lids[0]].metadata if move_metadata else {}
134
135 for lid in src_lids:
136 if prob is not None:
137 # split lines with provided probability
138 remaining_lines = vp.LineCollection()
139 moving_lines = vp.LineCollection()
140 for line in document.layers[lid]:
141 if random.random() < prob:
142 moving_lines.append(line)
143 else:
144 remaining_lines.append(line)
145
146 if len(remaining_lines) > 0:
147 document.replace(remaining_lines, lid)
148 else:
149 document.pop(lid)
150
151 if len(moving_lines) > 0:
152 dest_lc.extend(moving_lines)
153 else:
154 dest_lc.extend(document.pop(lid))
155 if move_metadata:
156 dest_lc.metadata.update(source_metadata)
157
158 if len(dest_lc) > 0:
159 document.add(dest_lc, dest_lid, with_metadata=True)
160 return document
161
162
163 @cli.command(group="Layers")
164 @click.argument("layers", type=LayerType(accept_multiple=True))
165 @click.option(
166 "-k", "--keep", is_flag=True, help="Specified layers must be kept instead of deleted."
167 )
168 @click.option(
169 "-p",
170 "--prob",
171 type=click.FloatRange(0.0, 1.0),
172 help="Path deletion probability (default: 1.0).",
173 )
174 @global_processor
175 def ldelete(document: vp.Document, layers, keep: bool, prob: float | None) -> vp.Document:
176 """Delete one or more layers.
177
178 LAYERS can be a single layer ID, the string 'all' (to delete all layers), or a
179 coma-separated, whitespace-free list of layer IDs.
180
181 If the `--keep` option is used, the specified layers are kept and, instead, all other
182 layers deleted.
183
184 The `--prob` option controls the probability with which each path is deleted. With a value
185 lower than 1.0, some paths will not be deleted.
186 """
187
188 lids = set(multiple_to_layer_ids(layers, document))
189
190 if keep:
191 lids = document.layers.keys() - lids
192
193 for lid in lids:
194 if prob is not None:
195 lc = document.layers[lid].clone()
196 for line in document[lid]:
197 if not random.random() < prob:
198 lc.append(line)
199
200 if len(lc) == 0:
201 document.pop(lid)
202 else:
203 document[lid] = lc
204 else:
205 document.pop(lid)
206
207 return document
208
209
210 @cli.command(group="Layers")
211 @click.argument("first", type=LayerType(accept_multiple=False, accept_new=False))
212 @click.argument("second", type=LayerType(accept_multiple=False, accept_new=False))
213 @click.option(
214 "-p",
215 "--prob",
216 type=click.FloatRange(0.0, 1.0),
217 help="Path deletion probability (default: 1.0).",
218 )
219 @click.option("-m", "--no-prop", is_flag=True, help="Do not move metadata.")
220 @global_processor
221 def lswap(
222 document: vp.Document, first: int, second: int, prob: float | None, no_prop: bool
223 ) -> vp.Document:
224 """Swap the content between two layers
225
226 This command swaps the content of layers FIRST and SECOND. Both FIRST and SECOND must be
227 existing layer ids.
228
229 The `--prob` option controls the probability with which each path are swapped. With a value
230 lower than 1.0, some paths will remain in their original layer.
231
232 If the `--prob` option is not used, the layer properties are swapped between layers as
233 well. This behaviour can be disabled with the `--no-prop` option.
234 """
235
236 first_lid = single_to_layer_id(first, document, must_exist=True)
237 second_lid = single_to_layer_id(second, document, must_exist=True)
238
239 if prob is None:
240 document.swap_content(first_lid, second_lid)
241 if not no_prop:
242 document.layers[first_lid].metadata, document.layers[second_lid].metadata = (
243 document.layers[second_lid].metadata,
244 document.layers[first_lid].metadata,
245 )
246 else:
247 new_first = vp.LineCollection()
248 new_second = vp.LineCollection()
249
250 for line in document.layers[first_lid]:
251 (new_second if random.random() < prob else new_first).append(line)
252 for line in document.layers[second_lid]:
253 (new_first if random.random() < prob else new_second).append(line)
254
255 document.replace(new_first, first_lid)
256 document.replace(new_second, second_lid)
257
258 return document
259
260
261 @cli.command(group="Layers")
262 @click.argument("layers", type=LayerType(accept_multiple=True, accept_new=False))
263 @global_processor
264 def lreverse(document: vp.Document, layers) -> vp.Document:
265 """Reverse the path order within one or more layers.
266
267 This command reverses the order in which paths are ordered within layer(s) LAYERS. LAYERS
268 may be a single layer ID, multiple layer IDs (coma-separated without whitespace) or `all`
269 (to refer to every exising layers).
270
271 Examples:
272 Delete layer one:
273
274 $ vpype [...] ldelete 1 [...]
275
276 Delete layers 1 and 2:
277
278 $ vpype [...] ldelete 1,2 [...]
279
280 Delete all layers:
281
282 $ vpype [...] ldelete all [...]
283 """
284
285 lids = set(multiple_to_layer_ids(layers, document))
286
287 for layer_id in lids:
288 document.layers[layer_id].reverse()
289
290 return document
291
[end of vpype_cli/layerops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vpype_cli/layerops.py b/vpype_cli/layerops.py
--- a/vpype_cli/layerops.py
+++ b/vpype_cli/layerops.py
@@ -269,17 +269,9 @@
(to refer to every exising layers).
Examples:
- Delete layer one:
+ Reverse path order in layer 1:
- $ vpype [...] ldelete 1 [...]
-
- Delete layers 1 and 2:
-
- $ vpype [...] ldelete 1,2 [...]
-
- Delete all layers:
-
- $ vpype [...] ldelete all [...]
+ $ vpype [...] lreverse 1 [...]
"""
lids = set(multiple_to_layer_ids(layers, document))
|
{"golden_diff": "diff --git a/vpype_cli/layerops.py b/vpype_cli/layerops.py\n--- a/vpype_cli/layerops.py\n+++ b/vpype_cli/layerops.py\n@@ -269,17 +269,9 @@\n (to refer to every exising layers).\n \n Examples:\n- Delete layer one:\n+ Reverse path order in layer 1:\n \n- $ vpype [...] ldelete 1 [...]\n-\n- Delete layers 1 and 2:\n-\n- $ vpype [...] ldelete 1,2 [...]\n-\n- Delete all layers:\n-\n- $ vpype [...] ldelete all [...]\n+ $ vpype [...] lreverse 1 [...]\n \"\"\"\n \n lids = set(multiple_to_layer_ids(layers, document))\n", "issue": "Examples in `lreverse`'help are wrong\nAs title says \u261d\ud83c\udffb \n", "before_files": [{"content": "from __future__ import annotations\n\nimport random\n\nimport click\n\nimport vpype as vp\n\nfrom .cli import cli\nfrom .decorators import global_processor\nfrom .types import LayerType, multiple_to_layer_ids, single_to_layer_id\n\n__all__ = (\"lcopy\", \"lmove\", \"ldelete\", \"lreverse\", \"lswap\")\n\n\[email protected](group=\"Layers\")\[email protected](\"sources\", type=LayerType(accept_multiple=True))\[email protected](\"dest\", type=LayerType(accept_new=True))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path copy probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not copy metadata.\")\n@global_processor\ndef lcopy(document, sources, dest, prob: float | None, no_prop: bool):\n \"\"\"Copy the content of one or more layer(s) to another layer.\n\n SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,\n or a coma-separated, whitespace-free list of layer IDs.\n\n DEST can be a layer ID or the string 'new', in which case a new layer with the\n lowest available ID will be created.\n\n If a layer is both in the source and destination, its content is not duplicated.\n\n The `--prob` option controls the probability with which each path is copied. With a value\n lower than 1.0, some paths will not be copied to DEST, which may be used to achieve random\n coloring effects.\n\n If a single source layer is specified and the `--prob` option is not used, the properties\n of the source layer are copied to the destination layer, overwriting any existing\n properties with the same name. This behaviour can be disabled with the `--no-prop` option.\n\n Examples:\n Copy layer 1 to a new layer:\n\n vpype [...] lcopy 1 new [...] # duplicate layer 1\n\n Make a new layer with a merged copy of layer 1 and 2:\n\n vpype [...] lcopy 1,2 new [...] # make new layer with merged copy of layer 1 and 2\n\n Add a merged copy of all layers to layer 1. If layer 1 previously had content, this \\\ncontent is not duplicated:\n\n vpype [...] 
lcopy all 1 [...]\n \"\"\"\n\n src_lids = multiple_to_layer_ids(sources, document)\n dest_lid = single_to_layer_id(dest, document)\n\n if dest_lid in src_lids:\n src_lids.remove(dest_lid)\n\n lc = vp.LineCollection()\n for lid in src_lids:\n if prob is not None:\n for line in document[lid]:\n if random.random() < prob:\n lc.append(line)\n else:\n lc.extend(document[lid])\n\n if len(lc) > 0:\n document.add(lc, dest_lid)\n\n if len(src_lids) == 1 and prob is None and not no_prop:\n document.layers[dest_lid].metadata.update(document.layers[src_lids[0]].metadata)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"sources\", type=LayerType(accept_multiple=True))\[email protected](\"dest\", type=LayerType(accept_new=True))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path move probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not move metadata.\")\n@global_processor\ndef lmove(document, sources, dest, prob: float | None, no_prop: bool):\n \"\"\"Move the content of one or more layer(s) to another layer.\n\n SOURCES can be a single layer ID, the string 'all' (to copy all non-empty layers,\n or a coma-separated, whitespace-free list of layer IDs.\n\n DEST can be a layer ID or the string 'new', in which case a new layer with the\n lowest available ID will be created.\n\n Layer(s) left empty after the move are then discarded and may thus be reused by subsequent\n commands using 'new' as destination layer.\n\n The `--prob` option controls the probability with which each path is moved. With a value\n lower than 1.0, some paths will not be moved to DEST, which may be used to achieve random\n coloring effects.\n\n If a layer is both in the source and destination, its content is not duplicated.\n\n If a single source layer is specified and the `--prob` option is not used, the properties\n of the source layer are moved to the destination layer, overwriting any existing\n properties with the same name. This behaviour can be disabled with the `--no-prop` option.\n\n Examples:\n Merge layer 1 and 2 to layer 1 (the content of layer 1 is not duplicated):\n\n vpype [...] lmove 1,2 1 [...] 
# merge layer 1 and 2 to layer 1\n \"\"\"\n\n src_lids = multiple_to_layer_ids(sources, document)\n dest_lid = single_to_layer_id(dest, document)\n\n if dest_lid in document.layers:\n dest_lc = document.layers[dest_lid].clone()\n else:\n dest_lc = vp.LineCollection()\n\n move_metadata = len(src_lids) == 1 and prob is None and not no_prop\n source_metadata = document.layers[src_lids[0]].metadata if move_metadata else {}\n\n for lid in src_lids:\n if prob is not None:\n # split lines with provided probability\n remaining_lines = vp.LineCollection()\n moving_lines = vp.LineCollection()\n for line in document.layers[lid]:\n if random.random() < prob:\n moving_lines.append(line)\n else:\n remaining_lines.append(line)\n\n if len(remaining_lines) > 0:\n document.replace(remaining_lines, lid)\n else:\n document.pop(lid)\n\n if len(moving_lines) > 0:\n dest_lc.extend(moving_lines)\n else:\n dest_lc.extend(document.pop(lid))\n if move_metadata:\n dest_lc.metadata.update(source_metadata)\n\n if len(dest_lc) > 0:\n document.add(dest_lc, dest_lid, with_metadata=True)\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"layers\", type=LayerType(accept_multiple=True))\[email protected](\n \"-k\", \"--keep\", is_flag=True, help=\"Specified layers must be kept instead of deleted.\"\n)\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path deletion probability (default: 1.0).\",\n)\n@global_processor\ndef ldelete(document: vp.Document, layers, keep: bool, prob: float | None) -> vp.Document:\n \"\"\"Delete one or more layers.\n\n LAYERS can be a single layer ID, the string 'all' (to delete all layers), or a\n coma-separated, whitespace-free list of layer IDs.\n\n If the `--keep` option is used, the specified layers are kept and, instead, all other\n layers deleted.\n\n The `--prob` option controls the probability with which each path is deleted. With a value\n lower than 1.0, some paths will not be deleted.\n \"\"\"\n\n lids = set(multiple_to_layer_ids(layers, document))\n\n if keep:\n lids = document.layers.keys() - lids\n\n for lid in lids:\n if prob is not None:\n lc = document.layers[lid].clone()\n for line in document[lid]:\n if not random.random() < prob:\n lc.append(line)\n\n if len(lc) == 0:\n document.pop(lid)\n else:\n document[lid] = lc\n else:\n document.pop(lid)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"first\", type=LayerType(accept_multiple=False, accept_new=False))\[email protected](\"second\", type=LayerType(accept_multiple=False, accept_new=False))\[email protected](\n \"-p\",\n \"--prob\",\n type=click.FloatRange(0.0, 1.0),\n help=\"Path deletion probability (default: 1.0).\",\n)\[email protected](\"-m\", \"--no-prop\", is_flag=True, help=\"Do not move metadata.\")\n@global_processor\ndef lswap(\n document: vp.Document, first: int, second: int, prob: float | None, no_prop: bool\n) -> vp.Document:\n \"\"\"Swap the content between two layers\n\n This command swaps the content of layers FIRST and SECOND. Both FIRST and SECOND must be\n existing layer ids.\n\n The `--prob` option controls the probability with which each path are swapped. With a value\n lower than 1.0, some paths will remain in their original layer.\n\n If the `--prob` option is not used, the layer properties are swapped between layers as\n well. 
This behaviour can be disabled with the `--no-prop` option.\n \"\"\"\n\n first_lid = single_to_layer_id(first, document, must_exist=True)\n second_lid = single_to_layer_id(second, document, must_exist=True)\n\n if prob is None:\n document.swap_content(first_lid, second_lid)\n if not no_prop:\n document.layers[first_lid].metadata, document.layers[second_lid].metadata = (\n document.layers[second_lid].metadata,\n document.layers[first_lid].metadata,\n )\n else:\n new_first = vp.LineCollection()\n new_second = vp.LineCollection()\n\n for line in document.layers[first_lid]:\n (new_second if random.random() < prob else new_first).append(line)\n for line in document.layers[second_lid]:\n (new_first if random.random() < prob else new_second).append(line)\n\n document.replace(new_first, first_lid)\n document.replace(new_second, second_lid)\n\n return document\n\n\[email protected](group=\"Layers\")\[email protected](\"layers\", type=LayerType(accept_multiple=True, accept_new=False))\n@global_processor\ndef lreverse(document: vp.Document, layers) -> vp.Document:\n \"\"\"Reverse the path order within one or more layers.\n\n This command reverses the order in which paths are ordered within layer(s) LAYERS. LAYERS\n may be a single layer ID, multiple layer IDs (coma-separated without whitespace) or `all`\n (to refer to every exising layers).\n\n Examples:\n Delete layer one:\n\n $ vpype [...] ldelete 1 [...]\n\n Delete layers 1 and 2:\n\n $ vpype [...] ldelete 1,2 [...]\n\n Delete all layers:\n\n $ vpype [...] ldelete all [...]\n \"\"\"\n\n lids = set(multiple_to_layer_ids(layers, document))\n\n for layer_id in lids:\n document.layers[layer_id].reverse()\n\n return document\n", "path": "vpype_cli/layerops.py"}]}
| 3,774 | 175 |
gh_patches_debug_50127
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-4481
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
responsetypes.py ResponseTypes.from_headers typo as 'Content-type'
Shall this line:
https://github.com/scrapy/scrapy/blob/8845773d44329194ee73fa4985a94e768bf664e7/scrapy/responsetypes.py#L74
be changed to below?
`
content_type=headers[b'Content-Type'],
`
I guess this typo is with all the versions until today.
</issue>
<code>
[start of scrapy/responsetypes.py]
1 """
2 This module implements a class which returns the appropriate Response class
3 based on different criteria.
4 """
5 from mimetypes import MimeTypes
6 from pkgutil import get_data
7 from io import StringIO
8
9 from scrapy.http import Response
10 from scrapy.utils.misc import load_object
11 from scrapy.utils.python import binary_is_text, to_bytes, to_unicode
12
13
14 class ResponseTypes:
15
16 CLASSES = {
17 'text/html': 'scrapy.http.HtmlResponse',
18 'application/atom+xml': 'scrapy.http.XmlResponse',
19 'application/rdf+xml': 'scrapy.http.XmlResponse',
20 'application/rss+xml': 'scrapy.http.XmlResponse',
21 'application/xhtml+xml': 'scrapy.http.HtmlResponse',
22 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',
23 'application/xml': 'scrapy.http.XmlResponse',
24 'application/json': 'scrapy.http.TextResponse',
25 'application/x-json': 'scrapy.http.TextResponse',
26 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',
27 'application/javascript': 'scrapy.http.TextResponse',
28 'application/x-javascript': 'scrapy.http.TextResponse',
29 'text/xml': 'scrapy.http.XmlResponse',
30 'text/*': 'scrapy.http.TextResponse',
31 }
32
33 def __init__(self):
34 self.classes = {}
35 self.mimetypes = MimeTypes()
36 mimedata = get_data('scrapy', 'mime.types').decode('utf8')
37 self.mimetypes.readfp(StringIO(mimedata))
38 for mimetype, cls in self.CLASSES.items():
39 self.classes[mimetype] = load_object(cls)
40
41 def from_mimetype(self, mimetype):
42 """Return the most appropriate Response class for the given mimetype"""
43 if mimetype is None:
44 return Response
45 elif mimetype in self.classes:
46 return self.classes[mimetype]
47 else:
48 basetype = "%s/*" % mimetype.split('/')[0]
49 return self.classes.get(basetype, Response)
50
51 def from_content_type(self, content_type, content_encoding=None):
52 """Return the most appropriate Response class from an HTTP Content-Type
53 header """
54 if content_encoding:
55 return Response
56 mimetype = to_unicode(content_type).split(';')[0].strip().lower()
57 return self.from_mimetype(mimetype)
58
59 def from_content_disposition(self, content_disposition):
60 try:
61 filename = to_unicode(content_disposition,
62 encoding='latin-1', errors='replace').split(';')[1].split('=')[1]
63 filename = filename.strip('"\'')
64 return self.from_filename(filename)
65 except IndexError:
66 return Response
67
68 def from_headers(self, headers):
69 """Return the most appropriate Response class by looking at the HTTP
70 headers"""
71 cls = Response
72 if b'Content-Type' in headers:
73 cls = self.from_content_type(
74 content_type=headers[b'Content-type'],
75 content_encoding=headers.get(b'Content-Encoding')
76 )
77 if cls is Response and b'Content-Disposition' in headers:
78 cls = self.from_content_disposition(headers[b'Content-Disposition'])
79 return cls
80
81 def from_filename(self, filename):
82 """Return the most appropriate Response class from a file name"""
83 mimetype, encoding = self.mimetypes.guess_type(filename)
84 if mimetype and not encoding:
85 return self.from_mimetype(mimetype)
86 else:
87 return Response
88
89 def from_body(self, body):
90 """Try to guess the appropriate response based on the body content.
91 This method is a bit magic and could be improved in the future, but
92 it's not meant to be used except for special cases where response types
93 cannot be guess using more straightforward methods."""
94 chunk = body[:5000]
95 chunk = to_bytes(chunk)
96 if not binary_is_text(chunk):
97 return self.from_mimetype('application/octet-stream')
98 elif b"<html>" in chunk.lower():
99 return self.from_mimetype('text/html')
100 elif b"<?xml" in chunk.lower():
101 return self.from_mimetype('text/xml')
102 else:
103 return self.from_mimetype('text')
104
105 def from_args(self, headers=None, url=None, filename=None, body=None):
106 """Guess the most appropriate Response class based on
107 the given arguments."""
108 cls = Response
109 if headers is not None:
110 cls = self.from_headers(headers)
111 if cls is Response and url is not None:
112 cls = self.from_filename(url)
113 if cls is Response and filename is not None:
114 cls = self.from_filename(filename)
115 if cls is Response and body is not None:
116 cls = self.from_body(body)
117 return cls
118
119
120 responsetypes = ResponseTypes()
121
[end of scrapy/responsetypes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py
--- a/scrapy/responsetypes.py
+++ b/scrapy/responsetypes.py
@@ -71,7 +71,7 @@
cls = Response
if b'Content-Type' in headers:
cls = self.from_content_type(
- content_type=headers[b'Content-type'],
+ content_type=headers[b'Content-Type'],
content_encoding=headers.get(b'Content-Encoding')
)
if cls is Response and b'Content-Disposition' in headers:
|
{"golden_diff": "diff --git a/scrapy/responsetypes.py b/scrapy/responsetypes.py\n--- a/scrapy/responsetypes.py\n+++ b/scrapy/responsetypes.py\n@@ -71,7 +71,7 @@\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n- content_type=headers[b'Content-type'],\n+ content_type=headers[b'Content-Type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n", "issue": "responsetypes.py ResponseTypes.from_headers typo as 'Content-type'\nShall this line:\r\n\r\nhttps://github.com/scrapy/scrapy/blob/8845773d44329194ee73fa4985a94e768bf664e7/scrapy/responsetypes.py#L74\r\n\r\nbe changed to below?\r\n`\r\ncontent_type=headers[b'Content-Type'],\r\n`\r\nI guess this typo is with all the versions until today.\n", "before_files": [{"content": "\"\"\"\nThis module implements a class which returns the appropriate Response class\nbased on different criteria.\n\"\"\"\nfrom mimetypes import MimeTypes\nfrom pkgutil import get_data\nfrom io import StringIO\n\nfrom scrapy.http import Response\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.python import binary_is_text, to_bytes, to_unicode\n\n\nclass ResponseTypes:\n\n CLASSES = {\n 'text/html': 'scrapy.http.HtmlResponse',\n 'application/atom+xml': 'scrapy.http.XmlResponse',\n 'application/rdf+xml': 'scrapy.http.XmlResponse',\n 'application/rss+xml': 'scrapy.http.XmlResponse',\n 'application/xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/vnd.wap.xhtml+xml': 'scrapy.http.HtmlResponse',\n 'application/xml': 'scrapy.http.XmlResponse',\n 'application/json': 'scrapy.http.TextResponse',\n 'application/x-json': 'scrapy.http.TextResponse',\n 'application/json-amazonui-streaming': 'scrapy.http.TextResponse',\n 'application/javascript': 'scrapy.http.TextResponse',\n 'application/x-javascript': 'scrapy.http.TextResponse',\n 'text/xml': 'scrapy.http.XmlResponse',\n 'text/*': 'scrapy.http.TextResponse',\n }\n\n def __init__(self):\n self.classes = {}\n self.mimetypes = MimeTypes()\n mimedata = get_data('scrapy', 'mime.types').decode('utf8')\n self.mimetypes.readfp(StringIO(mimedata))\n for mimetype, cls in self.CLASSES.items():\n self.classes[mimetype] = load_object(cls)\n\n def from_mimetype(self, mimetype):\n \"\"\"Return the most appropriate Response class for the given mimetype\"\"\"\n if mimetype is None:\n return Response\n elif mimetype in self.classes:\n return self.classes[mimetype]\n else:\n basetype = \"%s/*\" % mimetype.split('/')[0]\n return self.classes.get(basetype, Response)\n\n def from_content_type(self, content_type, content_encoding=None):\n \"\"\"Return the most appropriate Response class from an HTTP Content-Type\n header \"\"\"\n if content_encoding:\n return Response\n mimetype = to_unicode(content_type).split(';')[0].strip().lower()\n return self.from_mimetype(mimetype)\n\n def from_content_disposition(self, content_disposition):\n try:\n filename = to_unicode(content_disposition,\n encoding='latin-1', errors='replace').split(';')[1].split('=')[1]\n filename = filename.strip('\"\\'')\n return self.from_filename(filename)\n except IndexError:\n return Response\n\n def from_headers(self, headers):\n \"\"\"Return the most appropriate Response class by looking at the HTTP\n headers\"\"\"\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-type'],\n content_encoding=headers.get(b'Content-Encoding')\n )\n if cls is Response and b'Content-Disposition' in headers:\n cls = 
self.from_content_disposition(headers[b'Content-Disposition'])\n return cls\n\n def from_filename(self, filename):\n \"\"\"Return the most appropriate Response class from a file name\"\"\"\n mimetype, encoding = self.mimetypes.guess_type(filename)\n if mimetype and not encoding:\n return self.from_mimetype(mimetype)\n else:\n return Response\n\n def from_body(self, body):\n \"\"\"Try to guess the appropriate response based on the body content.\n This method is a bit magic and could be improved in the future, but\n it's not meant to be used except for special cases where response types\n cannot be guess using more straightforward methods.\"\"\"\n chunk = body[:5000]\n chunk = to_bytes(chunk)\n if not binary_is_text(chunk):\n return self.from_mimetype('application/octet-stream')\n elif b\"<html>\" in chunk.lower():\n return self.from_mimetype('text/html')\n elif b\"<?xml\" in chunk.lower():\n return self.from_mimetype('text/xml')\n else:\n return self.from_mimetype('text')\n\n def from_args(self, headers=None, url=None, filename=None, body=None):\n \"\"\"Guess the most appropriate Response class based on\n the given arguments.\"\"\"\n cls = Response\n if headers is not None:\n cls = self.from_headers(headers)\n if cls is Response and url is not None:\n cls = self.from_filename(url)\n if cls is Response and filename is not None:\n cls = self.from_filename(filename)\n if cls is Response and body is not None:\n cls = self.from_body(body)\n return cls\n\n\nresponsetypes = ResponseTypes()\n", "path": "scrapy/responsetypes.py"}]}
| 1,893 | 121 |