problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_13191
|
rasdani/github-patches
|
git_diff
|
davanstrien__flyswot-156
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
catch no files found before running prediction function
</issue>
<code>
[start of src/flyswot/inference.py]
1 """Inference functionality"""
2 import csv
3 import mimetypes
4 import time
5 from abc import ABC
6 from abc import abstractmethod
7 from dataclasses import asdict
8 from dataclasses import dataclass
9 from datetime import datetime
10 from datetime import timedelta
11 from pathlib import Path
12 from typing import Iterable
13 from typing import Iterator
14 from typing import List
15 from typing import Union
16
17 import numpy as np
18 import onnxruntime as rt # type: ignore
19 import typer
20 from PIL import Image # type: ignore
21 from rich.table import Table
22 from toolz import itertoolz
23
24 from flyswot import core
25 from flyswot import models
26 from flyswot.console import console
27
28 app = typer.Typer()
29
30
31 @dataclass
32 class ImagePredictionItem:
33 """Prediction for an image.
34
35 Attributes:
36 path: The Path to the image
37 predicted_label: The predicted label i.e. the argmax value for the prediction tensor
38 condidence: The confidence for `predicted_label` i.e. the max value for prediction tensor
39 """
40
41 path: Path
42 predicted_label: str
43 confidence: float
44
45 def __post_init__(self) -> Union[Path, None]:
46 """attempt to get absolute path"""
47 try:
48 self.path: Path = self.path.absolute()
49 except AttributeError:
50 pass
51
52
53 @dataclass
54 class PredictionBatch:
55 """Container for ImagePredictionItems"""
56
57 batch: List[ImagePredictionItem]
58
59 def __post_init__(self):
60 """Returns a list of all predicted labels in batch"""
61 self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch)
62
63
64 image_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith("image/")}
65
66
67 @app.command()
68 def predict_image(
69 image: Path = typer.Argument(..., readable=True, resolve_path=True)
70 ) -> None:
71 """Predict a single image"""
72 pass # pragma: no cover
73
74
75 @app.command(name="directory")
76 def predict_directory(
77 directory: Path = typer.Argument(
78 ...,
79 readable=True,
80 resolve_path=True,
81 help="Directory to start searching for images from",
82 ),
83 csv_save_dir: Path = typer.Argument(
84 ...,
85 writable=True,
86 resolve_path=True,
87 help="Directory used to store the csv report",
88 ),
89 pattern: str = typer.Option("fse", help="Pattern used to filter image filenames"),
90 bs: int = typer.Option(16, help="Batch Size"),
91 image_format: str = typer.Option(
92 ".tif", help="Image format for flyswot to use for predictions"
93 ),
94 check_latest: bool = typer.Option(True, help="Use latest available model"),
95 ):
96 """Predicts against all images stored under DIRECTORY which match PATTERN in the filename.
97
98 By default searches for filenames containing 'fse'.
99
100 Creates a CSV report saved to `csv_save_dir`
101 """
102 start_time = time.perf_counter()
103 model_dir = models.ensure_model_dir()
104 # TODO add load learner function that can be passed a model name
105 model_parts = models.ensure_model(model_dir, check_latest)
106 model = model_parts.model
107 vocab = models.load_vocab(model_parts.vocab)
108 onnxinference = OnnxInferenceSession(model, vocab)
109 files = list(core.get_image_files_from_pattern(directory, pattern, image_format))
110 typer.echo(f"Found {len(files)} files matching {pattern} in {directory}")
111 csv_fname = create_csv_fname(csv_save_dir)
112 create_csv_header(csv_fname)
113 with typer.progressbar(length=len(files)) as progress:
114 all_preds = []
115 predictions = []
116 for batch in itertoolz.partition_all(bs, files):
117 batch_predictions = onnxinference.predict_batch(batch, bs)
118 all_preds.append(batch_predictions.batch_labels)
119 predictions.append(batch_predictions)
120 progress.update(len(batch))
121 write_batch_preds_to_csv(csv_fname, batch_predictions)
122 all_preds = list(itertoolz.concat(all_preds))
123 typer.echo(f"CSV report stored in {csv_fname}")
124 delta = timedelta(seconds=time.perf_counter() - start_time)
125 typer.echo(f"Time taken to run: {str(delta)}")
126 print_table(all_preds)
127
128
129 def print_table(decoded) -> None:
130 """Prints table summary of predicted labels"""
131 table = Table(show_header=True, title="Prediction summary")
132 table.add_column(
133 "Class",
134 )
135 table.add_column("Count")
136 table.add_column("Percentage")
137 total = len(decoded)
138 frequencies = itertoolz.frequencies(decoded)
139 for is_last_element, var in core.signal_last(frequencies.items()):
140 key, value = var
141 count = value
142 percentage = round((count / total) * 100, 2)
143 if is_last_element:
144 table.add_row(key, str(count), f"{percentage}", end_section=True)
145 table.add_row("Total", str(total), "")
146 else:
147 table.add_row(key, str(count), f"{percentage}")
148 console.print(table)
149
150
151 def create_csv_fname(csv_directory: Path) -> Path:
152 """Creates a csv filename"""
153 date_now = datetime.now()
154 date_now = date_now.strftime("%Y_%m_%d_%H_%M")
155 fname = Path(date_now + ".csv")
156 return Path(csv_directory / fname)
157
158
159 def create_csv_header(csv_path: Path) -> None:
160 """Creates a header for csv `csv_path`"""
161 with open(csv_path, mode="w", newline="") as csv_file:
162 field_names = ["path", "directory", "predicted_label", "confidence"]
163 writer = csv.DictWriter(csv_file, fieldnames=field_names)
164 writer.writeheader()
165
166
167 def write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:
168 """Appends `predictions` batch to `csv_path`"""
169 with open(csv_fpath, mode="a", newline="") as csv_file:
170 field_names = ["path", "directory", "predicted_label", "confidence"]
171 writer = csv.DictWriter(csv_file, fieldnames=field_names)
172 for pred in predictions.batch:
173 row = asdict(pred)
174 row["directory"] = pred.path.parent
175 writer.writerow(row)
176
177
178 class InferenceSession(ABC):
179 """Abstract class for inference sessions"""
180
181 @abstractmethod
182 def __init__(self, model: Path, vocab: List):
183 """Inference Sessions should init from a model file and vocab"""
184 self.model = model
185 self.vocab = vocab
186
187 @abstractmethod
188 def predict_image(self, image: Path):
189 """Predict a single image"""
190 pass
191
192 @abstractmethod
193 def predict_batch(self, model: Path, batch: Iterable[Path], bs: int):
194 """Predict a batch"""
195 pass
196
197
198 def softmax(x):
199 """return softmax of `x`"""
200 x = x.reshape(-1)
201 e_x = np.exp(x - np.max(x))
202 return e_x / e_x.sum(axis=0)
203
204
205 # class FastaiInferenceModel(InferenceSession):
206 # def __init__(self, model):
207 # self.model = model
208 # self.learn = load_learner(model)
209
210 # def predict_image(self, image: Path) -> Any:
211 # return self.learn.predict(image)
212
213 # def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch:
214 # test_dl = self.learn.dls.test_dl(batch, bs=bs)
215 # vocab = dict(enumerate(self.learn.dls.vocab))
216 # with self.learn.no_bar():
217 # fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True)
218 # prediction_tensors: Iterable[Any] = fastai_preds[0]
219 # prediction_items = []
220 # for file, pred in zip(batch, prediction_tensors):
221 # arg_max = int(np.array(pred).argmax())
222 # predicted_label = vocab[int(arg_max)]
223 # confidence = float(np.array(pred).max())
224 # prediction_items.append(
225 # ImagePredictionItem(file, predicted_label, confidence)
226 # )
227 # return PredictionBatch(prediction_items)
228
229
230 class OnnxInferenceSession(InferenceSession):
231 """onnx inference session"""
232
233 def __init__(self, model: Path, vocab: Path):
234 """Create onnx session"""
235 self.model = model
236 self.session = rt.InferenceSession(str(model))
237
238 self.vocab = vocab
239 self.vocab_mapping = dict(enumerate(self.vocab))
240
241 def _load_vocab(self, vocab: Path) -> List:
242 with open(vocab, "r") as f:
243 return [item.strip("\n") for item in f.readlines()]
244
245 def predict_image(self, image: Path):
246 """Predict a single image"""
247 img = self._load_image(image)
248 raw_result = self.session.run(["output"], {"image": img})
249 pred = self._postprocess(raw_result)
250 arg_max = int(np.array(pred).argmax())
251 predicted_label = self.vocab_mapping[int(arg_max)]
252 confidence = float(np.array(pred).max())
253 return ImagePredictionItem(image, predicted_label, confidence)
254
255 def _preprocess(self, input_data: np.ndarray) -> np.ndarray:
256 # converts the input data into the float32 input for onnx
257 img_data = input_data.astype("float32")
258
259 # normalize
260 mean_vec = np.array([0.485, 0.456, 0.406])
261 stddev_vec = np.array([0.229, 0.224, 0.225])
262 norm_img_data = np.zeros(img_data.shape).astype("float32")
263 for i in range(img_data.shape[0]):
264 norm_img_data[i, :, :] = (
265 img_data[i, :, :] / 255 - mean_vec[i]
266 ) / stddev_vec[i]
267
268 # add batch channel
269 norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype("float32")
270 return norm_img_data
271
272 def _load_image(self, file: Path) -> np.ndarray:
273 """loads image and carries out preprocessing for inference"""
274 image = Image.open(file, mode="r")
275 image = image.resize((512, 512), Image.BILINEAR)
276 image_data = np.array(image).transpose(2, 0, 1)
277 return self._preprocess(image_data)
278
279 def _postprocess(self, result: List):
280 """process results from onnx session"""
281 return softmax(np.array(result)).tolist()
282
283 def predict_batch(self, batch: Iterable[Path], bs: int):
284 """predicts a batch of images"""
285 prediction_items = [self.predict_image(file) for file in batch]
286 return PredictionBatch(prediction_items)
287
288
289 if __name__ == "__main__":
290 app()
291
[end of src/flyswot/inference.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/flyswot/inference.py b/src/flyswot/inference.py
--- a/src/flyswot/inference.py
+++ b/src/flyswot/inference.py
@@ -107,6 +107,11 @@
vocab = models.load_vocab(model_parts.vocab)
onnxinference = OnnxInferenceSession(model, vocab)
files = list(core.get_image_files_from_pattern(directory, pattern, image_format))
+ if not files:
+ typer.echo(
+ f"Didn't find any files maching {pattern} in {directory}, please check the inputs to flyswot"
+ )
+ raise typer.Exit(code=1)
typer.echo(f"Found {len(files)} files matching {pattern} in {directory}")
csv_fname = create_csv_fname(csv_save_dir)
create_csv_header(csv_fname)
|
{"golden_diff": "diff --git a/src/flyswot/inference.py b/src/flyswot/inference.py\n--- a/src/flyswot/inference.py\n+++ b/src/flyswot/inference.py\n@@ -107,6 +107,11 @@\n vocab = models.load_vocab(model_parts.vocab)\n onnxinference = OnnxInferenceSession(model, vocab)\n files = list(core.get_image_files_from_pattern(directory, pattern, image_format))\n+ if not files:\n+ typer.echo(\n+ f\"Didn't find any files maching {pattern} in {directory}, please check the inputs to flyswot\"\n+ )\n+ raise typer.Exit(code=1)\n typer.echo(f\"Found {len(files)} files matching {pattern} in {directory}\")\n csv_fname = create_csv_fname(csv_save_dir)\n create_csv_header(csv_fname)\n", "issue": "catch no files found before running prediction function\n\n", "before_files": [{"content": "\"\"\"Inference functionality\"\"\"\nimport csv\nimport mimetypes\nimport time\nfrom abc import ABC\nfrom abc import abstractmethod\nfrom dataclasses import asdict\nfrom dataclasses import dataclass\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Union\n\nimport numpy as np\nimport onnxruntime as rt # type: ignore\nimport typer\nfrom PIL import Image # type: ignore\nfrom rich.table import Table\nfrom toolz import itertoolz\n\nfrom flyswot import core\nfrom flyswot import models\nfrom flyswot.console import console\n\napp = typer.Typer()\n\n\n@dataclass\nclass ImagePredictionItem:\n \"\"\"Prediction for an image.\n\n Attributes:\n path: The Path to the image\n predicted_label: The predicted label i.e. the argmax value for the prediction tensor\n condidence: The confidence for `predicted_label` i.e. the max value for prediction tensor\n \"\"\"\n\n path: Path\n predicted_label: str\n confidence: float\n\n def __post_init__(self) -> Union[Path, None]:\n \"\"\"attempt to get absolute path\"\"\"\n try:\n self.path: Path = self.path.absolute()\n except AttributeError:\n pass\n\n\n@dataclass\nclass PredictionBatch:\n \"\"\"Container for ImagePredictionItems\"\"\"\n\n batch: List[ImagePredictionItem]\n\n def __post_init__(self):\n \"\"\"Returns a list of all predicted labels in batch\"\"\"\n self.batch_labels: Iterator[str] = (item.predicted_label for item in self.batch)\n\n\nimage_extensions = {k for k, v in mimetypes.types_map.items() if v.startswith(\"image/\")}\n\n\[email protected]()\ndef predict_image(\n image: Path = typer.Argument(..., readable=True, resolve_path=True)\n) -> None:\n \"\"\"Predict a single image\"\"\"\n pass # pragma: no cover\n\n\[email protected](name=\"directory\")\ndef predict_directory(\n directory: Path = typer.Argument(\n ...,\n readable=True,\n resolve_path=True,\n help=\"Directory to start searching for images from\",\n ),\n csv_save_dir: Path = typer.Argument(\n ...,\n writable=True,\n resolve_path=True,\n help=\"Directory used to store the csv report\",\n ),\n pattern: str = typer.Option(\"fse\", help=\"Pattern used to filter image filenames\"),\n bs: int = typer.Option(16, help=\"Batch Size\"),\n image_format: str = typer.Option(\n \".tif\", help=\"Image format for flyswot to use for predictions\"\n ),\n check_latest: bool = typer.Option(True, help=\"Use latest available model\"),\n):\n \"\"\"Predicts against all images stored under DIRECTORY which match PATTERN in the filename.\n\n By default searches for filenames containing 'fse'.\n\n Creates a CSV report saved to `csv_save_dir`\n \"\"\"\n start_time = time.perf_counter()\n model_dir = 
models.ensure_model_dir()\n # TODO add load learner function that can be passed a model name\n model_parts = models.ensure_model(model_dir, check_latest)\n model = model_parts.model\n vocab = models.load_vocab(model_parts.vocab)\n onnxinference = OnnxInferenceSession(model, vocab)\n files = list(core.get_image_files_from_pattern(directory, pattern, image_format))\n typer.echo(f\"Found {len(files)} files matching {pattern} in {directory}\")\n csv_fname = create_csv_fname(csv_save_dir)\n create_csv_header(csv_fname)\n with typer.progressbar(length=len(files)) as progress:\n all_preds = []\n predictions = []\n for batch in itertoolz.partition_all(bs, files):\n batch_predictions = onnxinference.predict_batch(batch, bs)\n all_preds.append(batch_predictions.batch_labels)\n predictions.append(batch_predictions)\n progress.update(len(batch))\n write_batch_preds_to_csv(csv_fname, batch_predictions)\n all_preds = list(itertoolz.concat(all_preds))\n typer.echo(f\"CSV report stored in {csv_fname}\")\n delta = timedelta(seconds=time.perf_counter() - start_time)\n typer.echo(f\"Time taken to run: {str(delta)}\")\n print_table(all_preds)\n\n\ndef print_table(decoded) -> None:\n \"\"\"Prints table summary of predicted labels\"\"\"\n table = Table(show_header=True, title=\"Prediction summary\")\n table.add_column(\n \"Class\",\n )\n table.add_column(\"Count\")\n table.add_column(\"Percentage\")\n total = len(decoded)\n frequencies = itertoolz.frequencies(decoded)\n for is_last_element, var in core.signal_last(frequencies.items()):\n key, value = var\n count = value\n percentage = round((count / total) * 100, 2)\n if is_last_element:\n table.add_row(key, str(count), f\"{percentage}\", end_section=True)\n table.add_row(\"Total\", str(total), \"\")\n else:\n table.add_row(key, str(count), f\"{percentage}\")\n console.print(table)\n\n\ndef create_csv_fname(csv_directory: Path) -> Path:\n \"\"\"Creates a csv filename\"\"\"\n date_now = datetime.now()\n date_now = date_now.strftime(\"%Y_%m_%d_%H_%M\")\n fname = Path(date_now + \".csv\")\n return Path(csv_directory / fname)\n\n\ndef create_csv_header(csv_path: Path) -> None:\n \"\"\"Creates a header for csv `csv_path`\"\"\"\n with open(csv_path, mode=\"w\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n writer.writeheader()\n\n\ndef write_batch_preds_to_csv(csv_fpath: Path, predictions: PredictionBatch) -> None:\n \"\"\"Appends `predictions` batch to `csv_path`\"\"\"\n with open(csv_fpath, mode=\"a\", newline=\"\") as csv_file:\n field_names = [\"path\", \"directory\", \"predicted_label\", \"confidence\"]\n writer = csv.DictWriter(csv_file, fieldnames=field_names)\n for pred in predictions.batch:\n row = asdict(pred)\n row[\"directory\"] = pred.path.parent\n writer.writerow(row)\n\n\nclass InferenceSession(ABC):\n \"\"\"Abstract class for inference sessions\"\"\"\n\n @abstractmethod\n def __init__(self, model: Path, vocab: List):\n \"\"\"Inference Sessions should init from a model file and vocab\"\"\"\n self.model = model\n self.vocab = vocab\n\n @abstractmethod\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n pass\n\n @abstractmethod\n def predict_batch(self, model: Path, batch: Iterable[Path], bs: int):\n \"\"\"Predict a batch\"\"\"\n pass\n\n\ndef softmax(x):\n \"\"\"return softmax of `x`\"\"\"\n x = x.reshape(-1)\n e_x = np.exp(x - np.max(x))\n return e_x / e_x.sum(axis=0)\n\n\n# class 
FastaiInferenceModel(InferenceSession):\n# def __init__(self, model):\n# self.model = model\n# self.learn = load_learner(model)\n\n# def predict_image(self, image: Path) -> Any:\n# return self.learn.predict(image)\n\n# def predict_batch(self, batch: Iterable[Path], bs: int) -> PredictionBatch:\n# test_dl = self.learn.dls.test_dl(batch, bs=bs)\n# vocab = dict(enumerate(self.learn.dls.vocab))\n# with self.learn.no_bar():\n# fastai_preds: Any = self.learn.get_preds(dl=test_dl, with_decoded=True)\n# prediction_tensors: Iterable[Any] = fastai_preds[0]\n# prediction_items = []\n# for file, pred in zip(batch, prediction_tensors):\n# arg_max = int(np.array(pred).argmax())\n# predicted_label = vocab[int(arg_max)]\n# confidence = float(np.array(pred).max())\n# prediction_items.append(\n# ImagePredictionItem(file, predicted_label, confidence)\n# )\n# return PredictionBatch(prediction_items)\n\n\nclass OnnxInferenceSession(InferenceSession):\n \"\"\"onnx inference session\"\"\"\n\n def __init__(self, model: Path, vocab: Path):\n \"\"\"Create onnx session\"\"\"\n self.model = model\n self.session = rt.InferenceSession(str(model))\n\n self.vocab = vocab\n self.vocab_mapping = dict(enumerate(self.vocab))\n\n def _load_vocab(self, vocab: Path) -> List:\n with open(vocab, \"r\") as f:\n return [item.strip(\"\\n\") for item in f.readlines()]\n\n def predict_image(self, image: Path):\n \"\"\"Predict a single image\"\"\"\n img = self._load_image(image)\n raw_result = self.session.run([\"output\"], {\"image\": img})\n pred = self._postprocess(raw_result)\n arg_max = int(np.array(pred).argmax())\n predicted_label = self.vocab_mapping[int(arg_max)]\n confidence = float(np.array(pred).max())\n return ImagePredictionItem(image, predicted_label, confidence)\n\n def _preprocess(self, input_data: np.ndarray) -> np.ndarray:\n # converts the input data into the float32 input for onnx\n img_data = input_data.astype(\"float32\")\n\n # normalize\n mean_vec = np.array([0.485, 0.456, 0.406])\n stddev_vec = np.array([0.229, 0.224, 0.225])\n norm_img_data = np.zeros(img_data.shape).astype(\"float32\")\n for i in range(img_data.shape[0]):\n norm_img_data[i, :, :] = (\n img_data[i, :, :] / 255 - mean_vec[i]\n ) / stddev_vec[i]\n\n # add batch channel\n norm_img_data = norm_img_data.reshape(1, 3, 512, 512).astype(\"float32\")\n return norm_img_data\n\n def _load_image(self, file: Path) -> np.ndarray:\n \"\"\"loads image and carries out preprocessing for inference\"\"\"\n image = Image.open(file, mode=\"r\")\n image = image.resize((512, 512), Image.BILINEAR)\n image_data = np.array(image).transpose(2, 0, 1)\n return self._preprocess(image_data)\n\n def _postprocess(self, result: List):\n \"\"\"process results from onnx session\"\"\"\n return softmax(np.array(result)).tolist()\n\n def predict_batch(self, batch: Iterable[Path], bs: int):\n \"\"\"predicts a batch of images\"\"\"\n prediction_items = [self.predict_image(file) for file in batch]\n return PredictionBatch(prediction_items)\n\n\nif __name__ == \"__main__\":\n app()\n", "path": "src/flyswot/inference.py"}]}
| 3,683 | 191 |
gh_patches_debug_7406
|
rasdani/github-patches
|
git_diff
|
interlegis__sapl-1191
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Integração do SAPL 3.1 e Portal Modelo
</issue>
<code>
[start of sapl/base/templatetags/common_tags.py]
1 from compressor.utils import get_class
2 from django import template
3
4 from sapl.base.models import AppConfig
5 from sapl.materia.models import DocumentoAcessorio, MateriaLegislativa
6 from sapl.norma.models import NormaJuridica
7 from sapl.parlamentares.models import Filiacao
8
9 register = template.Library()
10
11
12 @register.simple_tag
13 def field_verbose_name(instance, field_name):
14 return instance._meta.get_field(field_name).verbose_name
15
16
17 @register.simple_tag
18 def fieldclass_verbose_name(class_name, field_name):
19 cls = get_class(class_name)
20 return cls._meta.get_field(field_name).verbose_name
21
22
23 @register.simple_tag
24 def model_verbose_name(class_name):
25 model = get_class(class_name)
26 return model._meta.verbose_name
27
28
29 @register.simple_tag
30 def model_verbose_name_plural(class_name):
31 model = get_class(class_name)
32 return model._meta.verbose_name_plural
33
34
35 @register.filter
36 def lookup(d, key):
37 return d[key] if key in d else []
38
39
40 @register.filter
41 def isinst(value, class_str):
42 classe = value.__class__.__name__
43 return classe == class_str
44
45
46 @register.filter
47 def get_add_perm(value, arg):
48 perm = value
49 view = arg
50
51 try:
52 nome_app = view.__class__.model._meta.app_label
53 except AttributeError:
54 return None
55 nome_model = view.__class__.model.__name__.lower()
56 can_add = '.add_' + nome_model
57
58 return perm.__contains__(nome_app + can_add)
59
60
61 @register.filter
62 def get_change_perm(value, arg):
63 perm = value
64 view = arg
65
66 try:
67 nome_app = view.__class__.model._meta.app_label
68 except AttributeError:
69 return None
70 nome_model = view.__class__.model.__name__.lower()
71 can_change = '.change_' + nome_model
72
73 return perm.__contains__(nome_app + can_change)
74
75
76 @register.filter
77 def get_delete_perm(value, arg):
78 perm = value
79 view = arg
80
81 try:
82 nome_app = view.__class__.model._meta.app_label
83 except AttributeError:
84 return None
85 nome_model = view.__class__.model.__name__.lower()
86 can_delete = '.delete_' + nome_model
87
88 return perm.__contains__(nome_app + can_delete)
89
90
91 @register.filter
92 def ultima_filiacao(value):
93 parlamentar = value
94
95 ultima_filiacao = Filiacao.objects.filter(
96 parlamentar=parlamentar).order_by('-data').first()
97
98 if ultima_filiacao:
99 return ultima_filiacao.partido
100 else:
101 return None
102
103
104 @register.filter
105 def get_config_attr(attribute):
106 return AppConfig.attr(attribute)
107
108
109 @register.filter
110 def str2intabs(value):
111 if not isinstance(value, str):
112 return ''
113 try:
114 v = int(value)
115 v = abs(v)
116 return v
117 except:
118 return ''
119
120
121 @register.filter
122 def url(value):
123 if value.startswith('http://') or value.startswith('https://'):
124 return True
125 return False
126
127
128 @register.filter
129 def cronometro_to_seconds(value):
130 if not AppConfig.attr('cronometro_' + value):
131 return 0
132
133 m, s, x = AppConfig.attr(
134 'cronometro_' + value).isoformat().split(':')
135
136 return 60 * int(m) + int(s)
137
138
139 @register.filter
140 def to_list_pk(object_list):
141 return [o.pk for o in object_list]
142
143
144 @register.filter
145 def search_get_model(object):
146 if type(object) == MateriaLegislativa:
147 return 'm'
148 elif type(object) == DocumentoAcessorio:
149 return 'd'
150 elif type(object) == NormaJuridica:
151 return 'n'
152
153 return None
154
[end of sapl/base/templatetags/common_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sapl/base/templatetags/common_tags.py b/sapl/base/templatetags/common_tags.py
--- a/sapl/base/templatetags/common_tags.py
+++ b/sapl/base/templatetags/common_tags.py
@@ -117,6 +117,23 @@
except:
return ''
[email protected]
+def has_iframe(request):
+
+ iframe = request.session.get('iframe', False)
+ if not iframe and 'iframe' in request.GET:
+ ival = request.GET['iframe']
+ if ival and int(ival) == 1:
+ request.session['iframe'] = True
+ return True
+ elif 'iframe' in request.GET:
+ ival = request.GET['iframe']
+ if ival and int(ival) == 0:
+ del request.session['iframe']
+ return False
+
+ return iframe
+
@register.filter
def url(value):
|
{"golden_diff": "diff --git a/sapl/base/templatetags/common_tags.py b/sapl/base/templatetags/common_tags.py\n--- a/sapl/base/templatetags/common_tags.py\n+++ b/sapl/base/templatetags/common_tags.py\n@@ -117,6 +117,23 @@\n except:\n return ''\n \[email protected]\n+def has_iframe(request):\n+\n+ iframe = request.session.get('iframe', False)\n+ if not iframe and 'iframe' in request.GET:\n+ ival = request.GET['iframe']\n+ if ival and int(ival) == 1:\n+ request.session['iframe'] = True\n+ return True\n+ elif 'iframe' in request.GET:\n+ ival = request.GET['iframe']\n+ if ival and int(ival) == 0:\n+ del request.session['iframe']\n+ return False\n+\n+ return iframe\n+\n \n @register.filter\n def url(value):\n", "issue": "Integra\u00e7\u00e3o do SAPL 3.1 e Portal Modelo\n\n", "before_files": [{"content": "from compressor.utils import get_class\nfrom django import template\n\nfrom sapl.base.models import AppConfig\nfrom sapl.materia.models import DocumentoAcessorio, MateriaLegislativa\nfrom sapl.norma.models import NormaJuridica\nfrom sapl.parlamentares.models import Filiacao\n\nregister = template.Library()\n\n\[email protected]_tag\ndef field_verbose_name(instance, field_name):\n return instance._meta.get_field(field_name).verbose_name\n\n\[email protected]_tag\ndef fieldclass_verbose_name(class_name, field_name):\n cls = get_class(class_name)\n return cls._meta.get_field(field_name).verbose_name\n\n\[email protected]_tag\ndef model_verbose_name(class_name):\n model = get_class(class_name)\n return model._meta.verbose_name\n\n\[email protected]_tag\ndef model_verbose_name_plural(class_name):\n model = get_class(class_name)\n return model._meta.verbose_name_plural\n\n\[email protected]\ndef lookup(d, key):\n return d[key] if key in d else []\n\n\[email protected]\ndef isinst(value, class_str):\n classe = value.__class__.__name__\n return classe == class_str\n\n\[email protected]\ndef get_add_perm(value, arg):\n perm = value\n view = arg\n\n try:\n nome_app = view.__class__.model._meta.app_label\n except AttributeError:\n return None\n nome_model = view.__class__.model.__name__.lower()\n can_add = '.add_' + nome_model\n\n return perm.__contains__(nome_app + can_add)\n\n\[email protected]\ndef get_change_perm(value, arg):\n perm = value\n view = arg\n\n try:\n nome_app = view.__class__.model._meta.app_label\n except AttributeError:\n return None\n nome_model = view.__class__.model.__name__.lower()\n can_change = '.change_' + nome_model\n\n return perm.__contains__(nome_app + can_change)\n\n\[email protected]\ndef get_delete_perm(value, arg):\n perm = value\n view = arg\n\n try:\n nome_app = view.__class__.model._meta.app_label\n except AttributeError:\n return None\n nome_model = view.__class__.model.__name__.lower()\n can_delete = '.delete_' + nome_model\n\n return perm.__contains__(nome_app + can_delete)\n\n\[email protected]\ndef ultima_filiacao(value):\n parlamentar = value\n\n ultima_filiacao = Filiacao.objects.filter(\n parlamentar=parlamentar).order_by('-data').first()\n\n if ultima_filiacao:\n return ultima_filiacao.partido\n else:\n return None\n\n\[email protected]\ndef get_config_attr(attribute):\n return AppConfig.attr(attribute)\n\n\[email protected]\ndef str2intabs(value):\n if not isinstance(value, str):\n return ''\n try:\n v = int(value)\n v = abs(v)\n return v\n except:\n return ''\n\n\[email protected]\ndef url(value):\n if value.startswith('http://') or value.startswith('https://'):\n return True\n return False\n\n\[email protected]\ndef cronometro_to_seconds(value):\n if not 
AppConfig.attr('cronometro_' + value):\n return 0\n\n m, s, x = AppConfig.attr(\n 'cronometro_' + value).isoformat().split(':')\n\n return 60 * int(m) + int(s)\n\n\[email protected]\ndef to_list_pk(object_list):\n return [o.pk for o in object_list]\n\n\[email protected]\ndef search_get_model(object):\n if type(object) == MateriaLegislativa:\n return 'm'\n elif type(object) == DocumentoAcessorio:\n return 'd'\n elif type(object) == NormaJuridica:\n return 'n'\n\n return None\n", "path": "sapl/base/templatetags/common_tags.py"}]}
| 1,778 | 218 |
gh_patches_debug_29041
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-1699
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary ping event
**Environment**:
- CTFd Version/Commit: 3.1.1, latest commit
- Operating System: any
- Web Browser and Version: any
in the comment you said "Immediately yield a ping event to force Response headers to be set", but this event seems to lies inside the while True loop, which results to an unnecessary ping event every 5 seconds.
I believe that's an unintended behavior, though it doesn't break anything.
https://github.com/CTFd/CTFd/blob/4c31dc23e8cfa0308367732d603b16e01871b00e/CTFd/utils/events/__init__.py#L57-L67
</issue>
<code>
[start of CTFd/utils/events/__init__.py]
1 import json
2 from collections import defaultdict
3 from queue import Queue
4
5 from gevent import Timeout, spawn
6 from tenacity import retry, wait_exponential
7
8 from CTFd.cache import cache
9 from CTFd.utils import string_types
10
11
12 class ServerSentEvent(object):
13 def __init__(self, data, type=None, id=None):
14 self.data = data
15 self.type = type
16 self.id = id
17
18 def __str__(self):
19 if isinstance(self.data, string_types):
20 data = self.data
21 else:
22 data = json.dumps(self.data)
23 lines = ["data:{value}".format(value=line) for line in data.splitlines()]
24 if self.type:
25 lines.insert(0, "event:{value}".format(value=self.type))
26 if self.id:
27 lines.append("id:{value}".format(value=self.id))
28 return "\n".join(lines) + "\n\n"
29
30 def to_dict(self):
31 d = {"data": self.data}
32 if self.type:
33 d["type"] = self.type
34 if self.id:
35 d["id"] = self.id
36 return d
37
38
39 class EventManager(object):
40 def __init__(self):
41 self.clients = {}
42
43 def publish(self, data, type=None, channel="ctf"):
44 event = ServerSentEvent(data, type=type)
45 message = event.to_dict()
46 for client in list(self.clients.values()):
47 client[channel].put(message)
48 return len(self.clients)
49
50 def listen(self):
51 pass
52
53 def subscribe(self, channel="ctf"):
54 q = defaultdict(Queue)
55 self.clients[id(q)] = q
56 try:
57 while True:
58 try:
59 # Immediately yield a ping event to force Response headers to be set
60 # or else some reverse proxies will incorrectly buffer SSE
61 yield ServerSentEvent(data="", type="ping")
62
63 with Timeout(5):
64 message = q[channel].get()
65 yield ServerSentEvent(**message)
66 except Timeout:
67 yield ServerSentEvent(data="", type="ping")
68 finally:
69 del self.clients[id(q)]
70 del q
71
72
73 class RedisEventManager(EventManager):
74 def __init__(self):
75 super(EventManager, self).__init__()
76 self.client = cache.cache._write_client
77 self.clients = {}
78
79 def publish(self, data, type=None, channel="ctf"):
80 event = ServerSentEvent(data, type=type)
81 message = json.dumps(event.to_dict())
82 return self.client.publish(message=message, channel=channel)
83
84 def listen(self, channel="ctf"):
85 @retry(wait=wait_exponential(min=1, max=30))
86 def _listen():
87 while True:
88 pubsub = self.client.pubsub()
89 pubsub.subscribe(channel)
90 try:
91 while True:
92 message = pubsub.get_message(
93 ignore_subscribe_messages=True, timeout=5
94 )
95 if message:
96 if message["type"] == "message":
97 event = json.loads(message["data"])
98 for client in list(self.clients.values()):
99 client[channel].put(event)
100 finally:
101 pubsub.close()
102
103 spawn(_listen)
104
105 def subscribe(self, channel="ctf"):
106 q = defaultdict(Queue)
107 self.clients[id(q)] = q
108 try:
109 while True:
110 try:
111 # Immediately yield a ping event to force Response headers to be set
112 # or else some reverse proxies will incorrectly buffer SSE
113 yield ServerSentEvent(data="", type="ping")
114
115 with Timeout(5):
116 message = q[channel].get()
117 yield ServerSentEvent(**message)
118 except Timeout:
119 yield ServerSentEvent(data="", type="ping")
120 finally:
121 del self.clients[id(q)]
122 del q
123
[end of CTFd/utils/events/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/CTFd/utils/events/__init__.py b/CTFd/utils/events/__init__.py
--- a/CTFd/utils/events/__init__.py
+++ b/CTFd/utils/events/__init__.py
@@ -54,12 +54,11 @@
q = defaultdict(Queue)
self.clients[id(q)] = q
try:
+ # Immediately yield a ping event to force Response headers to be set
+ # or else some reverse proxies will incorrectly buffer SSE
+ yield ServerSentEvent(data="", type="ping")
while True:
try:
- # Immediately yield a ping event to force Response headers to be set
- # or else some reverse proxies will incorrectly buffer SSE
- yield ServerSentEvent(data="", type="ping")
-
with Timeout(5):
message = q[channel].get()
yield ServerSentEvent(**message)
@@ -106,12 +105,11 @@
q = defaultdict(Queue)
self.clients[id(q)] = q
try:
+ # Immediately yield a ping event to force Response headers to be set
+ # or else some reverse proxies will incorrectly buffer SSE
+ yield ServerSentEvent(data="", type="ping")
while True:
try:
- # Immediately yield a ping event to force Response headers to be set
- # or else some reverse proxies will incorrectly buffer SSE
- yield ServerSentEvent(data="", type="ping")
-
with Timeout(5):
message = q[channel].get()
yield ServerSentEvent(**message)
|
{"golden_diff": "diff --git a/CTFd/utils/events/__init__.py b/CTFd/utils/events/__init__.py\n--- a/CTFd/utils/events/__init__.py\n+++ b/CTFd/utils/events/__init__.py\n@@ -54,12 +54,11 @@\n q = defaultdict(Queue)\n self.clients[id(q)] = q\n try:\n+ # Immediately yield a ping event to force Response headers to be set\n+ # or else some reverse proxies will incorrectly buffer SSE\n+ yield ServerSentEvent(data=\"\", type=\"ping\")\n while True:\n try:\n- # Immediately yield a ping event to force Response headers to be set\n- # or else some reverse proxies will incorrectly buffer SSE\n- yield ServerSentEvent(data=\"\", type=\"ping\")\n-\n with Timeout(5):\n message = q[channel].get()\n yield ServerSentEvent(**message)\n@@ -106,12 +105,11 @@\n q = defaultdict(Queue)\n self.clients[id(q)] = q\n try:\n+ # Immediately yield a ping event to force Response headers to be set\n+ # or else some reverse proxies will incorrectly buffer SSE\n+ yield ServerSentEvent(data=\"\", type=\"ping\")\n while True:\n try:\n- # Immediately yield a ping event to force Response headers to be set\n- # or else some reverse proxies will incorrectly buffer SSE\n- yield ServerSentEvent(data=\"\", type=\"ping\")\n-\n with Timeout(5):\n message = q[channel].get()\n yield ServerSentEvent(**message)\n", "issue": "Unnecessary ping event\n**Environment**:\r\n\r\n- CTFd Version/Commit: 3.1.1, latest commit\r\n- Operating System: any\r\n- Web Browser and Version: any\r\n\r\nin the comment you said \"Immediately yield a ping event to force Response headers to be set\", but this event seems to lies inside the while True loop, which results to an unnecessary ping event every 5 seconds.\r\nI believe that's an unintended behavior, though it doesn't break anything.\r\n\r\nhttps://github.com/CTFd/CTFd/blob/4c31dc23e8cfa0308367732d603b16e01871b00e/CTFd/utils/events/__init__.py#L57-L67\n", "before_files": [{"content": "import json\nfrom collections import defaultdict\nfrom queue import Queue\n\nfrom gevent import Timeout, spawn\nfrom tenacity import retry, wait_exponential\n\nfrom CTFd.cache import cache\nfrom CTFd.utils import string_types\n\n\nclass ServerSentEvent(object):\n def __init__(self, data, type=None, id=None):\n self.data = data\n self.type = type\n self.id = id\n\n def __str__(self):\n if isinstance(self.data, string_types):\n data = self.data\n else:\n data = json.dumps(self.data)\n lines = [\"data:{value}\".format(value=line) for line in data.splitlines()]\n if self.type:\n lines.insert(0, \"event:{value}\".format(value=self.type))\n if self.id:\n lines.append(\"id:{value}\".format(value=self.id))\n return \"\\n\".join(lines) + \"\\n\\n\"\n\n def to_dict(self):\n d = {\"data\": self.data}\n if self.type:\n d[\"type\"] = self.type\n if self.id:\n d[\"id\"] = self.id\n return d\n\n\nclass EventManager(object):\n def __init__(self):\n self.clients = {}\n\n def publish(self, data, type=None, channel=\"ctf\"):\n event = ServerSentEvent(data, type=type)\n message = event.to_dict()\n for client in list(self.clients.values()):\n client[channel].put(message)\n return len(self.clients)\n\n def listen(self):\n pass\n\n def subscribe(self, channel=\"ctf\"):\n q = defaultdict(Queue)\n self.clients[id(q)] = q\n try:\n while True:\n try:\n # Immediately yield a ping event to force Response headers to be set\n # or else some reverse proxies will incorrectly buffer SSE\n yield ServerSentEvent(data=\"\", type=\"ping\")\n\n with Timeout(5):\n message = q[channel].get()\n yield ServerSentEvent(**message)\n except Timeout:\n yield 
ServerSentEvent(data=\"\", type=\"ping\")\n finally:\n del self.clients[id(q)]\n del q\n\n\nclass RedisEventManager(EventManager):\n def __init__(self):\n super(EventManager, self).__init__()\n self.client = cache.cache._write_client\n self.clients = {}\n\n def publish(self, data, type=None, channel=\"ctf\"):\n event = ServerSentEvent(data, type=type)\n message = json.dumps(event.to_dict())\n return self.client.publish(message=message, channel=channel)\n\n def listen(self, channel=\"ctf\"):\n @retry(wait=wait_exponential(min=1, max=30))\n def _listen():\n while True:\n pubsub = self.client.pubsub()\n pubsub.subscribe(channel)\n try:\n while True:\n message = pubsub.get_message(\n ignore_subscribe_messages=True, timeout=5\n )\n if message:\n if message[\"type\"] == \"message\":\n event = json.loads(message[\"data\"])\n for client in list(self.clients.values()):\n client[channel].put(event)\n finally:\n pubsub.close()\n\n spawn(_listen)\n\n def subscribe(self, channel=\"ctf\"):\n q = defaultdict(Queue)\n self.clients[id(q)] = q\n try:\n while True:\n try:\n # Immediately yield a ping event to force Response headers to be set\n # or else some reverse proxies will incorrectly buffer SSE\n yield ServerSentEvent(data=\"\", type=\"ping\")\n\n with Timeout(5):\n message = q[channel].get()\n yield ServerSentEvent(**message)\n except Timeout:\n yield ServerSentEvent(data=\"\", type=\"ping\")\n finally:\n del self.clients[id(q)]\n del q\n", "path": "CTFd/utils/events/__init__.py"}]}
| 1,773 | 340 |
gh_patches_debug_36465
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-464
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Provide a free text field for the "Other" license and remove the CC-noDerives license
</issue>
<code>
[start of ckanext-metadata_fields/ckanext/metadata_fields/plugin.py]
1 '''
2 Created on Apr 10, 2014
3
4 @author:alexandru-m-g
5 '''
6 import logging
7
8 import ckan.plugins as plugins
9 import ckan.plugins.toolkit as tk
10 from routes.mapper import SubMapper
11
12 import ckanext.metadata_fields.custom_validator as vd
13 import ckanext.metadata_fields.update as update
14
15 def list_of_all_groups():
16 groups = tk.get_action('group_list')(data_dict={'all_fields': True})
17 return groups
18
19
20 class HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):
21 plugins.implements(plugins.IConfigurer, inherit=False)
22 plugins.implements(plugins.IRoutes, inherit=True)
23 plugins.implements(plugins.IDatasetForm, inherit=False)
24 plugins.implements(plugins.ITemplateHelpers)
25 plugins.implements(plugins.IActions)
26
27 def update_config(self, config):
28 tk.add_template_directory(config, 'templates')
29
30 def before_map(self, map):
31 with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:
32 m.connect('add dataset', '/dataset/new', action='new')
33 m.connect('/dataset/{action}/{id}',
34 requirements=dict(action='|'.join([
35 'new_metadata',
36 'new_resource',
37 ])))
38 return map
39
40 def is_fallback(self):
41 return True
42
43 def package_types(self):
44 # default - no specific package type
45 return []
46
47 def _modify_package_schema(self, schema):
48
49 schema.update({
50 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required
51 'package_creator': [tk.get_validator('not_empty'),
52 tk.get_converter('convert_to_extras')],
53 'groups_list': [vd.groups_not_empty],
54 'caveats' : [tk.get_validator('ignore_missing'),
55 tk.get_converter('convert_to_extras')],
56 'dataset_source' : [tk.get_validator('not_empty'),
57 tk.get_converter('convert_to_extras')],
58 'dataset_date' : [tk.get_validator('ignore_missing'),
59 tk.get_converter('convert_to_extras')],
60 'methodology' : [tk.get_validator('ignore_missing'),
61 tk.get_converter('convert_to_extras')],
62 })
63
64 return schema
65
66
67 def create_package_schema(self):
68 schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()
69 schema = self._modify_package_schema(schema)
70 return schema
71
72 def update_package_schema(self):
73 schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()
74 schema = self._modify_package_schema(schema)
75 return schema
76
77 def show_package_schema(self):
78 schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()
79 schema.update({
80 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required
81 'package_creator': [tk.get_converter('convert_from_extras'),
82 tk.get_validator('ignore_missing')],
83 'caveats' : [tk.get_converter('convert_from_extras'),
84 tk.get_validator('ignore_missing')],
85 'dataset_source' : [tk.get_converter('convert_from_extras'),
86 tk.get_validator('ignore_missing')],
87 'dataset_date' : [tk.get_converter('convert_from_extras'),
88 tk.get_validator('ignore_missing')],
89 'methodology' : [tk.get_converter('convert_from_extras'),
90 tk.get_validator('ignore_missing')],
91 })
92 return schema
93
94
95 def get_helpers(self):
96 return {'list_of_all_groups': list_of_all_groups}
97
98 def get_actions(self):
99 return {'package_update': update.package_update}
100
101
102
[end of ckanext-metadata_fields/ckanext/metadata_fields/plugin.py]
[start of ckanext-hdx_theme/ckanext/hdx_theme/licenses.py]
1 '''
2 Created on May 12, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 from ckan.common import _
8 from ckan.model.license import DefaultLicense
9
10
11 class LicenseCreativeCommonsIntergovernmentalOrgs(DefaultLicense):
12 # domain_content = True
13 # domain_data = True
14 id = "cc-by-igo"
15 is_okd_compliant = False
16 url = "http://creativecommons.org/licenses/by/3.0/igo/legalcode"
17
18 @property
19 def title(self):
20 return _("Creative Commons Attribution for Intergovernmental Organisations")
21
22 class LicenseCreativeCommonsNoDerives(DefaultLicense):
23 # domain_content = True
24 # domain_data = True
25 id = "cc-by-nd"
26 is_okd_compliant = False
27 url = "http://creativecommons.org/licenses/by-nd/3.0/legalcode"
28
29 @property
30 def title(self):
31 return _("Creative Commons Attribution-NoDerives")
32
33 class LicenseOtherPublicDomainNoRestrictions(DefaultLicense):
34 # domain_content = True
35 id = "other-pd-nr"
36 is_generic = True
37 is_okd_compliant = True
38
39 @property
40 def title(self):
41 return _("Public Domain / No Restrictions")
42
43 class LicenseHdxOther(DefaultLicense):
44 # domain_content = True
45 id = "hdx-other"
46 # is_generic = True
47 # is_okd_compliant = True
48
49 @property
50 def title(self):
51 return _("Other")
52
53
[end of ckanext-hdx_theme/ckanext/hdx_theme/licenses.py]
[start of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py]
1 import ckanext.hdx_theme.licenses as hdx_licenses
2 from beaker.cache import cache_regions
3
4 import ckan.plugins as plugins
5 import ckan.plugins.toolkit as toolkit
6 import ckan.model.package as package
7 import ckan.model.license as license
8 import version;
9
10 cache_regions.update({
11 'hdx_memory_cache':{
12 'expire': 172800, # 2 days
13 'type':'memory',
14 'key_length': 250
15 }
16 })
17
18 def _generate_license_list():
19 package.Package._license_register = license.LicenseRegister()
20 package.Package._license_register.licenses = [
21 license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),
22 license.License(license.LicenseCreativeCommonsAttribution()),
23 license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
24 license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),
25 license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
26 license.License(hdx_licenses.LicenseHdxOther())
27 ]
28
29 class HDXThemePlugin(plugins.SingletonPlugin):
30 plugins.implements(plugins.IConfigurer)
31 plugins.implements(plugins.IRoutes, inherit=True)
32 plugins.implements(plugins.ITemplateHelpers)
33 plugins.implements(plugins.IActions)
34
35 def update_config(self, config):
36 toolkit.add_template_directory(config, 'templates')
37 toolkit.add_public_directory(config, 'public')
38 toolkit.add_resource('fanstatic', 'hdx_theme')
39
40 def before_map(self, map):
41 map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')
42 map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')
43 map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')
44 map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')
45 map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')
46 map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')
47
48 map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')
49
50 # this is actually a HACK to force the customization of the license list.
51 # the license list should be changed to be based on a JSON rest service
52 _generate_license_list()
53
54 return map
55
56 def get_helpers(self):
57 from ckanext.hdx_theme import helpers as hdx_helpers
58 return {
59 'is_downloadable': hdx_helpers.is_downloadable,
60 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,
61 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,
62 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,
63 'get_last_revision_package':hdx_helpers.get_last_revision_package,
64 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,
65 'get_last_revision_group':hdx_helpers.get_last_revision_group,
66 'get_group_followers':hdx_helpers.get_group_followers,
67 'get_group_members':hdx_helpers.get_group_members,
68 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,
69 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,
70 'hdx_version':hdx_helpers.hdx_version,
71 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,
72 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items
73 }
74
75 def get_actions(self):
76 from ckanext.hdx_theme import actions as hdx_actions
77 return {
78 'organization_list_for_user':hdx_actions.organization_list_for_user
79 }
80
81
82
83
[end of ckanext-hdx_theme/ckanext/hdx_theme/plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py
@@ -40,6 +40,16 @@
def title(self):
return _("Public Domain / No Restrictions")
+class LicenseHdxMultiple(DefaultLicense):
+# domain_content = True
+ id = "hdx-multi"
+# is_generic = True
+# is_okd_compliant = True
+
+ @property
+ def title(self):
+ return _("Multiple Licenses")
+
class LicenseHdxOther(DefaultLicense):
# domain_content = True
id = "hdx-other"
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py
@@ -23,6 +23,7 @@
license.License(license.LicenseCreativeCommonsAttributionShareAlike()),
license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),
license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),
+ license.License(hdx_licenses.LicenseHdxMultiple()),
license.License(hdx_licenses.LicenseHdxOther())
]
diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py
@@ -59,6 +59,8 @@
tk.get_converter('convert_to_extras')],
'methodology' : [tk.get_validator('ignore_missing'),
tk.get_converter('convert_to_extras')],
+ 'license_other' : [tk.get_validator('ignore_missing'),
+ tk.get_converter('convert_to_extras')],
})
return schema
@@ -88,6 +90,8 @@
tk.get_validator('ignore_missing')],
'methodology' : [tk.get_converter('convert_from_extras'),
tk.get_validator('ignore_missing')],
+ 'license_other' : [tk.get_converter('convert_from_extras'),
+ tk.get_validator('ignore_missing')],
})
return schema
|
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/licenses.py\n@@ -40,6 +40,16 @@\n def title(self):\n return _(\"Public Domain / No Restrictions\")\n \n+class LicenseHdxMultiple(DefaultLicense):\n+# domain_content = True\n+ id = \"hdx-multi\"\n+# is_generic = True\n+# is_okd_compliant = True\n+\n+ @property\n+ def title(self):\n+ return _(\"Multiple Licenses\")\n+\n class LicenseHdxOther(DefaultLicense):\n # domain_content = True\n id = \"hdx-other\"\ndiff --git a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/plugin.py\n@@ -23,6 +23,7 @@\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n+ license.License(hdx_licenses.LicenseHdxMultiple()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n \ndiff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n@@ -59,6 +59,8 @@\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n+ 'license_other' : [tk.get_validator('ignore_missing'),\n+ tk.get_converter('convert_to_extras')],\n })\n \n return schema\n@@ -88,6 +90,8 @@\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n+ 'license_other' : [tk.get_converter('convert_from_extras'),\n+ tk.get_validator('ignore_missing')],\n })\n return schema\n", "issue": "Provide a free text field for the \"Other\" license and remove the CC-noDerives license\n\n", "before_files": [{"content": "'''\nCreated on Apr 10, 2014\n\n@author:alexandru-m-g\n'''\nimport logging\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nfrom routes.mapper import SubMapper\n\nimport ckanext.metadata_fields.custom_validator as vd\nimport ckanext.metadata_fields.update as update\n\ndef list_of_all_groups():\n groups = tk.get_action('group_list')(data_dict={'all_fields': True})\n return groups\n\n\nclass HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IDatasetForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def before_map(self, map):\n with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:\n m.connect('add dataset', '/dataset/new', action='new')\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'new_metadata',\n 'new_resource',\n ])))\n return map\n \n def is_fallback(self):\n return True\n\n def package_types(self):\n # default - no specific package type\n return []\n\n def _modify_package_schema(self, schema):\n \n schema.update({\n 'notes': [tk.get_validator('not_empty')], #Notes == 
description. Makes description required\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n 'caveats' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'dataset_source' : [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'dataset_date' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n })\n\n return schema\n\n\n def create_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n schema.update({\n 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_source' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_date' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n })\n return schema\n \n \n def get_helpers(self):\n return {'list_of_all_groups': list_of_all_groups}\n \n def get_actions(self):\n return {'package_update': update.package_update}\n\n\n", "path": "ckanext-metadata_fields/ckanext/metadata_fields/plugin.py"}, {"content": "'''\nCreated on May 12, 2014\n\n@author: alexandru-m-g\n'''\n\nfrom ckan.common import _\nfrom ckan.model.license import DefaultLicense\n\n\nclass LicenseCreativeCommonsIntergovernmentalOrgs(DefaultLicense):\n# domain_content = True\n# domain_data = True\n id = \"cc-by-igo\"\n is_okd_compliant = False\n url = \"http://creativecommons.org/licenses/by/3.0/igo/legalcode\"\n\n @property\n def title(self):\n return _(\"Creative Commons Attribution for Intergovernmental Organisations\")\n \nclass LicenseCreativeCommonsNoDerives(DefaultLicense):\n# domain_content = True\n# domain_data = True\n id = \"cc-by-nd\"\n is_okd_compliant = False\n url = \"http://creativecommons.org/licenses/by-nd/3.0/legalcode\"\n\n @property\n def title(self):\n return _(\"Creative Commons Attribution-NoDerives\")\n \nclass LicenseOtherPublicDomainNoRestrictions(DefaultLicense):\n# domain_content = True\n id = \"other-pd-nr\"\n is_generic = True\n is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Public Domain / No Restrictions\")\n\nclass LicenseHdxOther(DefaultLicense):\n# domain_content = True\n id = \"hdx-other\"\n# is_generic = True\n# is_okd_compliant = True\n\n @property\n def title(self):\n return _(\"Other\")\n\n ", "path": "ckanext-hdx_theme/ckanext/hdx_theme/licenses.py"}, {"content": "import ckanext.hdx_theme.licenses as hdx_licenses\nfrom beaker.cache import cache_regions\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nimport ckan.model.package as package\nimport ckan.model.license as license\nimport version;\n\ncache_regions.update({\n 'hdx_memory_cache':{\n 'expire': 172800, # 2 
days\n 'type':'memory',\n 'key_length': 250\n }\n })\n\ndef _generate_license_list():\n package.Package._license_register = license.LicenseRegister() \n package.Package._license_register.licenses = [\n license.License(hdx_licenses.LicenseCreativeCommonsIntergovernmentalOrgs()),\n license.License(license.LicenseCreativeCommonsAttribution()),\n license.License(license.LicenseCreativeCommonsAttributionShareAlike()),\n license.License(hdx_licenses.LicenseCreativeCommonsNoDerives()),\n license.License(hdx_licenses.LicenseOtherPublicDomainNoRestrictions()),\n license.License(hdx_licenses.LicenseHdxOther())\n ]\n\nclass HDXThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'hdx_theme')\n\n def before_map(self, map):\n map.connect('home', '/', controller='ckanext.hdx_theme.splash_page:SplashPageController', action='index')\n map.connect('/count/dataset', controller='ckanext.hdx_theme.count:CountController', action='dataset')\n map.connect('/count/country', controller='ckanext.hdx_theme.count:CountController', action='country')\n map.connect('/count/source', controller='ckanext.hdx_theme.count:CountController', action='source')\n map.connect('/user/logged_in', controller='ckanext.hdx_theme.login:LoginController', action='logged_in')\n map.connect('/contribute', controller='ckanext.hdx_theme.login:LoginController', action='contribute')\n \n map.connect('/count/test', controller='ckanext.hdx_theme.count:CountController', action='test')\n \n # this is actually a HACK to force the customization of the license list.\n # the license list should be changed to be based on a JSON rest service\n _generate_license_list()\n \n return map\n\n def get_helpers(self):\n from ckanext.hdx_theme import helpers as hdx_helpers\n return {\n 'is_downloadable': hdx_helpers.is_downloadable,\n 'get_facet_items_dict':hdx_helpers.get_facet_items_dict,\n 'get_last_modifier_user': hdx_helpers.get_last_modifier_user,\n 'get_filtered_params_list':hdx_helpers.get_filtered_params_list,\n 'get_last_revision_package':hdx_helpers.get_last_revision_package,\n 'get_last_modifier_user':hdx_helpers.get_last_modifier_user,\n 'get_last_revision_group':hdx_helpers.get_last_revision_group,\n 'get_group_followers':hdx_helpers.get_group_followers,\n 'get_group_members':hdx_helpers.get_group_members,\n 'markdown_extract_strip':hdx_helpers.markdown_extract_strip,\n 'render_date_from_concat_str':hdx_helpers.render_date_from_concat_str,\n 'hdx_version':hdx_helpers.hdx_version,\n 'hdx_build_nav_icon_with_message':hdx_helpers.hdx_build_nav_icon_with_message,\n 'hdx_num_of_new_related_items':hdx_helpers.hdx_num_of_new_related_items\n }\n \n def get_actions(self):\n from ckanext.hdx_theme import actions as hdx_actions\n return {\n 'organization_list_for_user':hdx_actions.organization_list_for_user\n }\n \n \n\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/plugin.py"}]}
| 3,071 | 588 |
gh_patches_debug_2780
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-40614
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
asa_config Python3 Compatibility Issue for "backup"
<!---
Verify first that your issue/request is not already reported on GitHub.
THIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED.
Also test if the latest release, and devel branch are affected too.
ALWAYS add information AFTER (OUTSIDE) these html comments.
Otherwise it may end up being automatically closed by our bot. -->
##### SUMMARY
"backup" in asa_config fails on Python 3.6.3 with Ansible 2.5.2. Same issue as [36717](https://github.com/ansible/ansible/issues/36717) but for asa_config.
Changing line 58 of asa_config.py from: `for key in result.keys():`
To either: `for key in result.copy().keys():`
Or: `for key in list(result):`
Should sort this out for py2 or py3.
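For reference, a minimal sketch (made-up `result` contents, not taken from the module) of why a snapshot of the keys is needed on Python 3:
```python
import re

PRIVATE_KEYS_RE = re.compile('__.+__')

# Made-up result dict, purely to illustrate the behaviour.
result = {'__backup__': 'running-config', 'changed': True, 'failed': False}

# On Python 3, result.keys() is a live view: deleting entries while
# iterating over it raises "RuntimeError: dictionary changed size during
# iteration", and dict_keys has no .copy() method (hence the traceback).
# Iterating over a snapshot works on both Python 2 and Python 3:
for key in list(result):
    if PRIVATE_KEYS_RE.match(key):
        del result[key]

print(result)  # {'changed': True, 'failed': False}
```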
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
<!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature.
Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path-->
asa_config
##### ANSIBLE VERSION
<!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below -->
```
ansible 2.5.2
config file = /home/ignw/my_network_as_code/ansible.cfg
configured module search path = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/modules']
ansible python module location = /usr/local/lib/python3.6/dist-packages/ansible
executable location = /usr/local/bin/ansible
python version = 3.6.3 (default, Oct 3 2017, 21:45:48) [GCC 7.2.0]
```
##### CONFIGURATION
<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).-->
DEFAULT_ACTION_PLUGIN_PATH(/home/ignw/my_network_as_code/ansible.cfg) = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/plug
DEFAULT_HOST_LIST(/home/ignw/my_network_as_code/ansible.cfg) = ['/home/ignw/my_network_as_code/inventory']
DEFAULT_MODULE_PATH(/home/ignw/my_network_as_code/ansible.cfg) = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/modules']
HOST_KEY_CHECKING(/home/ignw/my_network_as_code/ansible.cfg) = False
RETRY_FILES_ENABLED(/home/ignw/my_network_as_code/ansible.cfg) = False
##### OS / ENVIRONMENT
<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.-->
Distributor ID: Ubuntu
Description: Ubuntu 17.10
Release: 17.10
Codename: artful
Network device (Cisco ASAv):
Cisco Adaptive Security Appliance Software Version 9.9(2)
Firepower Extensible Operating System Version 2.3(1.84)
Device Manager Version 7.9(2)
Compiled on Sun 25-Mar-18 17:34 PDT by builders
System image file is "boot:/asa992-smp-k8.bin"
Config file at boot was "startup-config"
Hardware: ASAv, 1024 MB RAM, CPU Clarkdale 2300 MHz,
Model Id: ASAv5
Internal ATA Compact Flash, 1024MB
Slot 1: ATA Compact Flash, 8192MB
BIOS Flash Firmware Hub @ 0x0, 0KB
##### STEPS TO REPRODUCE
<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used. -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: Backup Cisco ASA Configurations
connection: local
hosts: cisco-asa
gather_facts: no
vars:
creds:
host: "{{ ansible_host }}"
username: "{{ username }}"
password: "{{ username }}"
authorize: yes
auth_pass: "{{ enable_password }}"
tags: asa
tasks:
- asa_config:
provider: "{{ creds }}"
backup: yes
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
Backup of configuration to be placed in backup directory
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes below -->
```
<10.0.0.8> <10.0.0.8> ssh connection has completed successfully
<10.0.0.8> connection to remote device started successfully
<10.0.0.8> local domain socket listeners started successfully
<10.0.0.8>
<10.0.0.8> local domain socket path is /home/ignw/.ansible/pc/8617761c70
<10.0.0.8> socket_path: /home/ignw/.ansible/pc/8617761c70
Using module file /usr/local/lib/python3.6/dist-packages/ansible/modules/network/asa/asa_config.py
<10.0.0.8> ESTABLISH LOCAL CONNECTION FOR USER: ignw
<10.0.0.8> EXEC /bin/sh -c 'echo ~ && sleep 0'
<10.0.0.8> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411 `" && echo ansible-tmp-1526941893.6014657-134187020317411="` echo /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411 `" ) && sleep 0'
<10.0.0.8> PUT /home/ignw/.ansible/tmp/ansible-local-24856l3y7x_n7/tmpq9jw7ue_ TO /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py
<10.0.0.8> EXEC /bin/sh -c 'chmod u+x /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/ /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py && sleep 0'
<10.0.0.8> EXEC /bin/sh -c '/usr/bin/python3 /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py && sleep 0'
<10.0.0.8> EXEC /bin/sh -c 'rm -f -r /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/ > /dev/null 2>&1 && sleep 0'
The full traceback is:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ansible/executor/task_executor.py", line 138, in run
res = self._execute()
File "/usr/local/lib/python3.6/dist-packages/ansible/executor/task_executor.py", line 558, in _execute
result = self._handler.run(task_vars=variables)
File "/usr/local/lib/python3.6/dist-packages/ansible/plugins/action/asa_config.py", line 58, in run
for key in result.keys().copy():
AttributeError: 'dict_keys' object has no attribute 'copy'
fatal: [acme-sea-asa1]: FAILED! => {
"msg": "Unexpected failure during module execution.",
"stdout": ""
}
PLAY RECAP *************************************************************************************************************************
acme-sea-asa1 : ok=0 changed=0 unreachable=0 failed=1
```
</issue>
<code>
[start of lib/ansible/plugins/action/asa_config.py]
1 #
2 # (c) 2017, Red Hat, Inc.
3 #
4 # This file is part of Ansible
5 #
6 # Ansible is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # Ansible is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
18 #
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 import os
23 import re
24 import time
25 import glob
26
27 from ansible.plugins.action.asa import ActionModule as _ActionModule
28 from ansible.module_utils._text import to_text
29 from ansible.module_utils.six.moves.urllib.parse import urlsplit
30 from ansible.utils.vars import merge_hash
31
32 PRIVATE_KEYS_RE = re.compile('__.+__')
33
34
35 class ActionModule(_ActionModule):
36
37 def run(self, tmp=None, task_vars=None):
38
39 if self._task.args.get('src'):
40 try:
41 self._handle_template()
42 except ValueError as exc:
43 return dict(failed=True, msg=exc.message)
44
45 result = super(ActionModule, self).run(tmp, task_vars)
46 del tmp # tmp no longer has any effect
47
48 if self._task.args.get('backup') and result.get('__backup__'):
49 # User requested backup and no error occurred in module.
50 # NOTE: If there is a parameter error, _backup key may not be in results.
51 filepath = self._write_backup(task_vars['inventory_hostname'],
52 result['__backup__'])
53
54 result['backup_path'] = filepath
55
56 # strip out any keys that have two leading and two trailing
57 # underscore characters
58 for key in result.keys():
59 if PRIVATE_KEYS_RE.match(key):
60 del result[key]
61
62 return result
63
64 def _get_working_path(self):
65 cwd = self._loader.get_basedir()
66 if self._task._role is not None:
67 cwd = self._task._role._role_path
68 return cwd
69
70 def _write_backup(self, host, contents):
71 backup_path = self._get_working_path() + '/backup'
72 if not os.path.exists(backup_path):
73 os.mkdir(backup_path)
74 for fn in glob.glob('%s/%s*' % (backup_path, host)):
75 os.remove(fn)
76 tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
77 filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
78 open(filename, 'w').write(contents)
79 return filename
80
81 def _handle_template(self):
82 src = self._task.args.get('src')
83 working_path = self._get_working_path()
84
85 if os.path.isabs(src) or urlsplit('src').scheme:
86 source = src
87 else:
88 source = self._loader.path_dwim_relative(working_path, 'templates', src)
89 if not source:
90 source = self._loader.path_dwim_relative(working_path, src)
91
92 if not os.path.exists(source):
93 raise ValueError('path specified in src not found')
94
95 try:
96 with open(source, 'r') as f:
97 template_data = to_text(f.read())
98 except IOError:
99 return dict(failed=True, msg='unable to load src file')
100
101 # Create a template search path in the following order:
102 # [working_path, self_role_path, dependent_role_paths, dirname(source)]
103 searchpath = [working_path]
104 if self._task._role is not None:
105 searchpath.append(self._task._role._role_path)
106 if hasattr(self._task, "_block:"):
107 dep_chain = self._task._block.get_dep_chain()
108 if dep_chain is not None:
109 for role in dep_chain:
110 searchpath.append(role._role_path)
111 searchpath.append(os.path.dirname(source))
112 self._templar.environment.loader.searchpath = searchpath
113 self._task.args['src'] = self._templar.template(template_data)
114
[end of lib/ansible/plugins/action/asa_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/ansible/plugins/action/asa_config.py b/lib/ansible/plugins/action/asa_config.py
--- a/lib/ansible/plugins/action/asa_config.py
+++ b/lib/ansible/plugins/action/asa_config.py
@@ -55,7 +55,7 @@
# strip out any keys that have two leading and two trailing
# underscore characters
- for key in result.keys():
+ for key in list(result):
if PRIVATE_KEYS_RE.match(key):
del result[key]
|
{"golden_diff": "diff --git a/lib/ansible/plugins/action/asa_config.py b/lib/ansible/plugins/action/asa_config.py\n--- a/lib/ansible/plugins/action/asa_config.py\n+++ b/lib/ansible/plugins/action/asa_config.py\n@@ -55,7 +55,7 @@\n \n # strip out any keys that have two leading and two trailing\n # underscore characters\n- for key in result.keys():\n+ for key in list(result):\n if PRIVATE_KEYS_RE.match(key):\n del result[key]\n", "issue": "asa_config Python3 Compatibility Issue for \"backup\"\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nTHIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED.\r\nAlso test if the latest release, and devel branch are affected too.\r\nALWAYS add information AFTER (OUTSIDE) these html comments.\r\nOtherwise it may end up being automatically closed by our bot. -->\r\n\r\n##### SUMMARY\r\n\"backup\" in asa_config fails on Python 3.6.3 with Ansible 2.5.2. Same issue as [36717](https://github.com/ansible/ansible/issues/36717) but for asa_config.\r\n\r\nChanging line 58 of asa_config.py from:` for key in result.keys()`\r\nTo either: `for key in result.copy().keys():`\r\nOr: `for key in list(result)`\r\n\r\nShould sort this out for py2 or py3.\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature.\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path-->\r\nasa_config\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.5.2\r\n config file = /home/ignw/my_network_as_code/ansible.cfg\r\n configured module search path = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/modules']\r\n ansible python module location = /usr/local/lib/python3.6/dist-packages/ansible\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.6.3 (default, Oct 3 2017, 21:45:48) [GCC 7.2.0]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).-->\r\nDEFAULT_ACTION_PLUGIN_PATH(/home/ignw/my_network_as_code/ansible.cfg) = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/plug\r\nDEFAULT_HOST_LIST(/home/ignw/my_network_as_code/ansible.cfg) = ['/home/ignw/my_network_as_code/inventory']\r\nDEFAULT_MODULE_PATH(/home/ignw/my_network_as_code/ansible.cfg) = ['/usr/local/lib/python3.6/dist-packages/napalm_ansible/modules']\r\nHOST_KEY_CHECKING(/home/ignw/my_network_as_code/ansible.cfg) = False\r\nRETRY_FILES_ENABLED(/home/ignw/my_network_as_code/ansible.cfg) = False\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. 
if this is a network bug the version of firmware on the network device.-->\r\nDistributor ID:\tUbuntu\r\nDescription:\tUbuntu 17.10\r\nRelease:\t17.10\r\nCodename:\tartful\r\n\r\nNetwork device (Cisco ASAv):\r\nCisco Adaptive Security Appliance Software Version 9.9(2)\r\nFirepower Extensible Operating System Version 2.3(1.84)\r\nDevice Manager Version 7.9(2)\r\n\r\nCompiled on Sun 25-Mar-18 17:34 PDT by builders\r\nSystem image file is \"boot:/asa992-smp-k8.bin\"\r\nConfig file at boot was \"startup-config\"\r\n\r\nHardware: ASAv, 1024 MB RAM, CPU Clarkdale 2300 MHz,\r\nModel Id: ASAv5\r\nInternal ATA Compact Flash, 1024MB\r\nSlot 1: ATA Compact Flash, 8192MB\r\nBIOS Flash Firmware Hub @ 0x0, 0KB\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: Backup Cisco ASA Configurations\r\n connection: local\r\n hosts: cisco-asa\r\n gather_facts: no\r\n vars:\r\n creds:\r\n host: \"{{ ansible_host }}\"\r\n username: \"{{ username }}\"\r\n password: \"{{ username }}\"\r\n authorize: yes\r\n auth_pass: \"{{ enable_password }}\"\r\n tags: asa\r\n tasks:\r\n - asa_config:\r\n provider: \"{{ creds }}\"\r\n backup: yes\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nBackup of configuration to be placed in backup directory\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\n<10.0.0.8> <10.0.0.8> ssh connection has completed successfully\r\n<10.0.0.8> connection to remote device started successfully\r\n<10.0.0.8> local domain socket listeners started successfully\r\n<10.0.0.8>\r\n<10.0.0.8> local domain socket path is /home/ignw/.ansible/pc/8617761c70\r\n<10.0.0.8> socket_path: /home/ignw/.ansible/pc/8617761c70\r\nUsing module file /usr/local/lib/python3.6/dist-packages/ansible/modules/network/asa/asa_config.py\r\n<10.0.0.8> ESTABLISH LOCAL CONNECTION FOR USER: ignw\r\n<10.0.0.8> EXEC /bin/sh -c 'echo ~ && sleep 0'\r\n<10.0.0.8> EXEC /bin/sh -c '( umask 77 && mkdir -p \"` echo /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411 `\" && echo ansible-tmp-1526941893.6014657-134187020317411=\"` echo /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411 `\" ) && sleep 0'\r\n<10.0.0.8> PUT /home/ignw/.ansible/tmp/ansible-local-24856l3y7x_n7/tmpq9jw7ue_ TO /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py\r\n<10.0.0.8> EXEC /bin/sh -c 'chmod u+x /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/ /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py && sleep 0'\r\n<10.0.0.8> EXEC /bin/sh -c '/usr/bin/python3 /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/asa_config.py && sleep 0'\r\n<10.0.0.8> EXEC /bin/sh -c 'rm -f -r /home/ignw/.ansible/tmp/ansible-tmp-1526941893.6014657-134187020317411/ > /dev/null 2>&1 && sleep 0'\r\nThe full traceback is:\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.6/dist-packages/ansible/executor/task_executor.py\", line 138, in run\r\n res = self._execute()\r\n File 
\"/usr/local/lib/python3.6/dist-packages/ansible/executor/task_executor.py\", line 558, in _execute\r\n result = self._handler.run(task_vars=variables)\r\n File \"/usr/local/lib/python3.6/dist-packages/ansible/plugins/action/asa_config.py\", line 58, in run\r\n for key in result.keys().copy():\r\nAttributeError: 'dict_keys' object has no attribute 'copy'\r\n\r\nfatal: [acme-sea-asa1]: FAILED! => {\r\n \"msg\": \"Unexpected failure during module execution.\",\r\n \"stdout\": \"\"\r\n}\r\n\r\nPLAY RECAP *************************************************************************************************************************\r\nacme-sea-asa1 : ok=0 changed=0 unreachable=0 failed=1\r\n```\r\n\n", "before_files": [{"content": "#\n# (c) 2017, Red Hat, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\nimport re\nimport time\nimport glob\n\nfrom ansible.plugins.action.asa import ActionModule as _ActionModule\nfrom ansible.module_utils._text import to_text\nfrom ansible.module_utils.six.moves.urllib.parse import urlsplit\nfrom ansible.utils.vars import merge_hash\n\nPRIVATE_KEYS_RE = re.compile('__.+__')\n\n\nclass ActionModule(_ActionModule):\n\n def run(self, tmp=None, task_vars=None):\n\n if self._task.args.get('src'):\n try:\n self._handle_template()\n except ValueError as exc:\n return dict(failed=True, msg=exc.message)\n\n result = super(ActionModule, self).run(tmp, task_vars)\n del tmp # tmp no longer has any effect\n\n if self._task.args.get('backup') and result.get('__backup__'):\n # User requested backup and no error occurred in module.\n # NOTE: If there is a parameter error, _backup key may not be in results.\n filepath = self._write_backup(task_vars['inventory_hostname'],\n result['__backup__'])\n\n result['backup_path'] = filepath\n\n # strip out any keys that have two leading and two trailing\n # underscore characters\n for key in result.keys():\n if PRIVATE_KEYS_RE.match(key):\n del result[key]\n\n return result\n\n def _get_working_path(self):\n cwd = self._loader.get_basedir()\n if self._task._role is not None:\n cwd = self._task._role._role_path\n return cwd\n\n def _write_backup(self, host, contents):\n backup_path = self._get_working_path() + '/backup'\n if not os.path.exists(backup_path):\n os.mkdir(backup_path)\n for fn in glob.glob('%s/%s*' % (backup_path, host)):\n os.remove(fn)\n tstamp = time.strftime(\"%Y-%m-%d@%H:%M:%S\", time.localtime(time.time()))\n filename = '%s/%s_config.%s' % (backup_path, host, tstamp)\n open(filename, 'w').write(contents)\n return filename\n\n def _handle_template(self):\n src = self._task.args.get('src')\n working_path = self._get_working_path()\n\n if os.path.isabs(src) or urlsplit('src').scheme:\n source = src\n else:\n source = self._loader.path_dwim_relative(working_path, 'templates', src)\n if not source:\n source = 
self._loader.path_dwim_relative(working_path, src)\n\n if not os.path.exists(source):\n raise ValueError('path specified in src not found')\n\n try:\n with open(source, 'r') as f:\n template_data = to_text(f.read())\n except IOError:\n return dict(failed=True, msg='unable to load src file')\n\n # Create a template search path in the following order:\n # [working_path, self_role_path, dependent_role_paths, dirname(source)]\n searchpath = [working_path]\n if self._task._role is not None:\n searchpath.append(self._task._role._role_path)\n if hasattr(self._task, \"_block:\"):\n dep_chain = self._task._block.get_dep_chain()\n if dep_chain is not None:\n for role in dep_chain:\n searchpath.append(role._role_path)\n searchpath.append(os.path.dirname(source))\n self._templar.environment.loader.searchpath = searchpath\n self._task.args['src'] = self._templar.template(template_data)\n", "path": "lib/ansible/plugins/action/asa_config.py"}]}
| 3,830 | 109 |
gh_patches_debug_6853
|
rasdani/github-patches
|
git_diff
|
encode__httpx-2355
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multipart doesn't support tuple data value
This works:
```python
client.post(
url,
data={"foo": ("1", "2")}, # tuple
)
```
This works:
```python
client.post(
url,
data={"foo": ["1", "2"]}, # list
files={"test": b"test"},
)
```
This fails:
```python
client.post(
url,
data={"foo": ("1", "2")}, # tuple
files={"test": b"test"},
)
```
<details>
<summary>Traceback</summary>
```
File "httpx/_client.py", line 356, in build_request
return Request(
File "httpx/_models.py", line 336, in __init__
headers, stream = encode_request(content, data, files, json)
File "httpx/_content.py", line 210, in encode_request
return encode_multipart_data(data or {}, files, boundary)
File "httpx/_content.py", line 155, in encode_multipart_data
multipart = MultipartStream(data=data, files=files, boundary=boundary)
File "httpx/_multipart.py", line 188, in __init__
self.fields = list(self._iter_fields(data, files))
File "httpx/_multipart.py", line 198, in _iter_fields
yield DataField(name=name, value=value)
File "httpx/_multipart.py", line 36, in __init__
raise TypeError(
TypeError: Invalid type for value. Expected primitive type, got <class 'tuple'>: ('1', '2')
```
</details>
I guess this line:
https://github.com/encode/httpx/blob/93de1980fa77f15c6b23cbaf2422c0a812caf243/httpx/_multipart.py#L194
should be implemented in the same way as this line:
https://github.com/encode/httpx/blob/93de1980fa77f15c6b23cbaf2422c0a812caf243/httpx/_content.py#L141
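A minimal standalone sketch of that suggestion (just the field expansion, outside the real `MultipartStream` class):
```python
import typing

def iter_data_fields(data: dict) -> typing.Iterator[typing.Tuple[str, typing.Any]]:
    # Accept both tuples and lists as multi-value fields,
    # mirroring the urlencoded code path in _content.py.
    for name, value in data.items():
        if isinstance(value, (tuple, list)):
            for item in value:
                yield name, item
        else:
            yield name, value

print(list(iter_data_fields({"foo": ("1", "2"), "bar": "3"})))
# [('foo', '1'), ('foo', '2'), ('bar', '3')]
```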
</issue>
<code>
[start of httpx/_multipart.py]
1 import binascii
2 import io
3 import os
4 import typing
5 from pathlib import Path
6
7 from ._types import (
8 AsyncByteStream,
9 FileContent,
10 FileTypes,
11 RequestFiles,
12 SyncByteStream,
13 )
14 from ._utils import (
15 format_form_param,
16 guess_content_type,
17 peek_filelike_length,
18 primitive_value_to_str,
19 to_bytes,
20 )
21
22
23 def get_multipart_boundary_from_content_type(
24 content_type: typing.Optional[bytes],
25 ) -> typing.Optional[bytes]:
26 if not content_type or not content_type.startswith(b"multipart/form-data"):
27 return None
28 # parse boundary according to
29 # https://www.rfc-editor.org/rfc/rfc2046#section-5.1.1
30 if b";" in content_type:
31 for section in content_type.split(b";"):
32 if section.strip().lower().startswith(b"boundary="):
33 return section.strip()[len(b"boundary=") :].strip(b'"')
34 return None
35
36
37 class DataField:
38 """
39 A single form field item, within a multipart form field.
40 """
41
42 def __init__(
43 self, name: str, value: typing.Union[str, bytes, int, float, None]
44 ) -> None:
45 if not isinstance(name, str):
46 raise TypeError(
47 f"Invalid type for name. Expected str, got {type(name)}: {name!r}"
48 )
49 if value is not None and not isinstance(value, (str, bytes, int, float)):
50 raise TypeError(
51 f"Invalid type for value. Expected primitive type, got {type(value)}: {value!r}"
52 )
53 self.name = name
54 self.value: typing.Union[str, bytes] = (
55 value if isinstance(value, bytes) else primitive_value_to_str(value)
56 )
57
58 def render_headers(self) -> bytes:
59 if not hasattr(self, "_headers"):
60 name = format_form_param("name", self.name)
61 self._headers = b"".join(
62 [b"Content-Disposition: form-data; ", name, b"\r\n\r\n"]
63 )
64
65 return self._headers
66
67 def render_data(self) -> bytes:
68 if not hasattr(self, "_data"):
69 self._data = to_bytes(self.value)
70
71 return self._data
72
73 def get_length(self) -> int:
74 headers = self.render_headers()
75 data = self.render_data()
76 return len(headers) + len(data)
77
78 def render(self) -> typing.Iterator[bytes]:
79 yield self.render_headers()
80 yield self.render_data()
81
82
83 class FileField:
84 """
85 A single file field item, within a multipart form field.
86 """
87
88 CHUNK_SIZE = 64 * 1024
89
90 def __init__(self, name: str, value: FileTypes) -> None:
91 self.name = name
92
93 fileobj: FileContent
94
95 headers: typing.Dict[str, str] = {}
96 content_type: typing.Optional[str] = None
97
98 # This large tuple based API largely mirror's requests' API
99 # It would be good to think of better APIs for this that we could include in httpx 2.0
100 # since variable length tuples (especially of 4 elements) are quite unwieldly
101 if isinstance(value, tuple):
102 if len(value) == 2:
103 # neither the 3rd parameter (content_type) nor the 4th (headers) was included
104 filename, fileobj = value # type: ignore
105 elif len(value) == 3:
106 filename, fileobj, content_type = value # type: ignore
107 else:
108 # all 4 parameters included
109 filename, fileobj, content_type, headers = value # type: ignore
110 else:
111 filename = Path(str(getattr(value, "name", "upload"))).name
112 fileobj = value
113
114 if content_type is None:
115 content_type = guess_content_type(filename)
116
117 has_content_type_header = any("content-type" in key.lower() for key in headers)
118 if content_type is not None and not has_content_type_header:
119 # note that unlike requests, we ignore the content_type
120 # provided in the 3rd tuple element if it is also included in the headers
121 # requests does the opposite (it overwrites the header with the 3rd tuple element)
122 headers["Content-Type"] = content_type
123
124 if isinstance(fileobj, (str, io.StringIO)):
125 raise TypeError(f"Expected bytes or bytes-like object got: {type(fileobj)}")
126
127 self.filename = filename
128 self.file = fileobj
129 self.headers = headers
130
131 def get_length(self) -> int:
132 headers = self.render_headers()
133
134 if isinstance(self.file, (str, bytes)):
135 return len(headers) + len(to_bytes(self.file))
136
137 # Let's do our best not to read `file` into memory.
138 file_length = peek_filelike_length(self.file)
139 if file_length is None:
140 # As a last resort, read file and cache contents for later.
141 assert not hasattr(self, "_data")
142 self._data = to_bytes(self.file.read())
143 file_length = len(self._data)
144
145 return len(headers) + file_length
146
147 def render_headers(self) -> bytes:
148 if not hasattr(self, "_headers"):
149 parts = [
150 b"Content-Disposition: form-data; ",
151 format_form_param("name", self.name),
152 ]
153 if self.filename:
154 filename = format_form_param("filename", self.filename)
155 parts.extend([b"; ", filename])
156 for header_name, header_value in self.headers.items():
157 key, val = f"\r\n{header_name}: ".encode(), header_value.encode()
158 parts.extend([key, val])
159 parts.append(b"\r\n\r\n")
160 self._headers = b"".join(parts)
161
162 return self._headers
163
164 def render_data(self) -> typing.Iterator[bytes]:
165 if isinstance(self.file, (str, bytes)):
166 yield to_bytes(self.file)
167 return
168
169 if hasattr(self, "_data"):
170 # Already rendered.
171 yield self._data
172 return
173
174 if hasattr(self.file, "seek"):
175 self.file.seek(0)
176
177 chunk = self.file.read(self.CHUNK_SIZE)
178 while chunk:
179 yield to_bytes(chunk)
180 chunk = self.file.read(self.CHUNK_SIZE)
181
182 def render(self) -> typing.Iterator[bytes]:
183 yield self.render_headers()
184 yield from self.render_data()
185
186
187 class MultipartStream(SyncByteStream, AsyncByteStream):
188 """
189 Request content as streaming multipart encoded form data.
190 """
191
192 def __init__(
193 self, data: dict, files: RequestFiles, boundary: typing.Optional[bytes] = None
194 ) -> None:
195 if boundary is None:
196 boundary = binascii.hexlify(os.urandom(16))
197
198 self.boundary = boundary
199 self.content_type = "multipart/form-data; boundary=%s" % boundary.decode(
200 "ascii"
201 )
202 self.fields = list(self._iter_fields(data, files))
203
204 def _iter_fields(
205 self, data: dict, files: RequestFiles
206 ) -> typing.Iterator[typing.Union[FileField, DataField]]:
207 for name, value in data.items():
208 if isinstance(value, list):
209 for item in value:
210 yield DataField(name=name, value=item)
211 else:
212 yield DataField(name=name, value=value)
213
214 file_items = files.items() if isinstance(files, typing.Mapping) else files
215 for name, value in file_items:
216 yield FileField(name=name, value=value)
217
218 def iter_chunks(self) -> typing.Iterator[bytes]:
219 for field in self.fields:
220 yield b"--%s\r\n" % self.boundary
221 yield from field.render()
222 yield b"\r\n"
223 yield b"--%s--\r\n" % self.boundary
224
225 def iter_chunks_lengths(self) -> typing.Iterator[int]:
226 boundary_length = len(self.boundary)
227 # Follow closely what `.iter_chunks()` does.
228 for field in self.fields:
229 yield 2 + boundary_length + 2
230 yield field.get_length()
231 yield 2
232 yield 2 + boundary_length + 4
233
234 def get_content_length(self) -> int:
235 return sum(self.iter_chunks_lengths())
236
237 # Content stream interface.
238
239 def get_headers(self) -> typing.Dict[str, str]:
240 content_length = str(self.get_content_length())
241 content_type = self.content_type
242 return {"Content-Length": content_length, "Content-Type": content_type}
243
244 def __iter__(self) -> typing.Iterator[bytes]:
245 for chunk in self.iter_chunks():
246 yield chunk
247
248 async def __aiter__(self) -> typing.AsyncIterator[bytes]:
249 for chunk in self.iter_chunks():
250 yield chunk
251
[end of httpx/_multipart.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/httpx/_multipart.py b/httpx/_multipart.py
--- a/httpx/_multipart.py
+++ b/httpx/_multipart.py
@@ -205,7 +205,7 @@
self, data: dict, files: RequestFiles
) -> typing.Iterator[typing.Union[FileField, DataField]]:
for name, value in data.items():
- if isinstance(value, list):
+ if isinstance(value, (tuple, list)):
for item in value:
yield DataField(name=name, value=item)
else:
|
{"golden_diff": "diff --git a/httpx/_multipart.py b/httpx/_multipart.py\n--- a/httpx/_multipart.py\n+++ b/httpx/_multipart.py\n@@ -205,7 +205,7 @@\n self, data: dict, files: RequestFiles\n ) -> typing.Iterator[typing.Union[FileField, DataField]]:\n for name, value in data.items():\n- if isinstance(value, list):\n+ if isinstance(value, (tuple, list)):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n", "issue": "Multipart doesn't support tuple data value\nThis works:\r\n\r\n```python\r\nclient.post(\r\n url,\r\n data={\"foo\": (\"1\", \"2\")}, # tuple\r\n)\r\n```\r\n\r\nThis works:\r\n\r\n```python\r\nclient.post(\r\n url,\r\n data={\"foo\": [\"1\", \"2\"]}, # list\r\n files={\"test\": b\"test\"},\r\n)\r\n```\r\n\r\nThis fails:\r\n\r\n```python\r\nclient.post(\r\n url,\r\n data={\"foo\": (\"1\", \"2\")}, # tuple\r\n files={\"test\": b\"test\"},\r\n)\r\n```\r\n\r\n<details>\r\n<summary>Traceback</summary>\r\n\r\n```\r\n File \"httpx/_client.py\", line 356, in build_request\r\n return Request(\r\n File \"httpx/_models.py\", line 336, in __init__\r\n headers, stream = encode_request(content, data, files, json)\r\n File \"httpx/_content.py\", line 210, in encode_request\r\n return encode_multipart_data(data or {}, files, boundary)\r\n File \"httpx/_content.py\", line 155, in encode_multipart_data\r\n multipart = MultipartStream(data=data, files=files, boundary=boundary)\r\n File \"httpx/_multipart.py\", line 188, in __init__\r\n self.fields = list(self._iter_fields(data, files))\r\n File \"httpx/_multipart.py\", line 198, in _iter_fields\r\n yield DataField(name=name, value=value)\r\n File \"httpx/_multipart.py\", line 36, in __init__\r\n raise TypeError(\r\nTypeError: Invalid type for value. Expected primitive type, got <class 'tuple'>: ('1', '2')\r\n```\r\n\r\n</details>\r\n\r\nI guess this line:\r\n\r\nhttps://github.com/encode/httpx/blob/93de1980fa77f15c6b23cbaf2422c0a812caf243/httpx/_multipart.py#L194\r\n\r\nshould be implemented in the same way as this line:\r\n\r\nhttps://github.com/encode/httpx/blob/93de1980fa77f15c6b23cbaf2422c0a812caf243/httpx/_content.py#L141\n", "before_files": [{"content": "import binascii\nimport io\nimport os\nimport typing\nfrom pathlib import Path\n\nfrom ._types import (\n AsyncByteStream,\n FileContent,\n FileTypes,\n RequestFiles,\n SyncByteStream,\n)\nfrom ._utils import (\n format_form_param,\n guess_content_type,\n peek_filelike_length,\n primitive_value_to_str,\n to_bytes,\n)\n\n\ndef get_multipart_boundary_from_content_type(\n content_type: typing.Optional[bytes],\n) -> typing.Optional[bytes]:\n if not content_type or not content_type.startswith(b\"multipart/form-data\"):\n return None\n # parse boundary according to\n # https://www.rfc-editor.org/rfc/rfc2046#section-5.1.1\n if b\";\" in content_type:\n for section in content_type.split(b\";\"):\n if section.strip().lower().startswith(b\"boundary=\"):\n return section.strip()[len(b\"boundary=\") :].strip(b'\"')\n return None\n\n\nclass DataField:\n \"\"\"\n A single form field item, within a multipart form field.\n \"\"\"\n\n def __init__(\n self, name: str, value: typing.Union[str, bytes, int, float, None]\n ) -> None:\n if not isinstance(name, str):\n raise TypeError(\n f\"Invalid type for name. Expected str, got {type(name)}: {name!r}\"\n )\n if value is not None and not isinstance(value, (str, bytes, int, float)):\n raise TypeError(\n f\"Invalid type for value. 
Expected primitive type, got {type(value)}: {value!r}\"\n )\n self.name = name\n self.value: typing.Union[str, bytes] = (\n value if isinstance(value, bytes) else primitive_value_to_str(value)\n )\n\n def render_headers(self) -> bytes:\n if not hasattr(self, \"_headers\"):\n name = format_form_param(\"name\", self.name)\n self._headers = b\"\".join(\n [b\"Content-Disposition: form-data; \", name, b\"\\r\\n\\r\\n\"]\n )\n\n return self._headers\n\n def render_data(self) -> bytes:\n if not hasattr(self, \"_data\"):\n self._data = to_bytes(self.value)\n\n return self._data\n\n def get_length(self) -> int:\n headers = self.render_headers()\n data = self.render_data()\n return len(headers) + len(data)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield self.render_data()\n\n\nclass FileField:\n \"\"\"\n A single file field item, within a multipart form field.\n \"\"\"\n\n CHUNK_SIZE = 64 * 1024\n\n def __init__(self, name: str, value: FileTypes) -> None:\n self.name = name\n\n fileobj: FileContent\n\n headers: typing.Dict[str, str] = {}\n content_type: typing.Optional[str] = None\n\n # This large tuple based API largely mirror's requests' API\n # It would be good to think of better APIs for this that we could include in httpx 2.0\n # since variable length tuples (especially of 4 elements) are quite unwieldly\n if isinstance(value, tuple):\n if len(value) == 2:\n # neither the 3rd parameter (content_type) nor the 4th (headers) was included\n filename, fileobj = value # type: ignore\n elif len(value) == 3:\n filename, fileobj, content_type = value # type: ignore\n else:\n # all 4 parameters included\n filename, fileobj, content_type, headers = value # type: ignore\n else:\n filename = Path(str(getattr(value, \"name\", \"upload\"))).name\n fileobj = value\n\n if content_type is None:\n content_type = guess_content_type(filename)\n\n has_content_type_header = any(\"content-type\" in key.lower() for key in headers)\n if content_type is not None and not has_content_type_header:\n # note that unlike requests, we ignore the content_type\n # provided in the 3rd tuple element if it is also included in the headers\n # requests does the opposite (it overwrites the header with the 3rd tuple element)\n headers[\"Content-Type\"] = content_type\n\n if isinstance(fileobj, (str, io.StringIO)):\n raise TypeError(f\"Expected bytes or bytes-like object got: {type(fileobj)}\")\n\n self.filename = filename\n self.file = fileobj\n self.headers = headers\n\n def get_length(self) -> int:\n headers = self.render_headers()\n\n if isinstance(self.file, (str, bytes)):\n return len(headers) + len(to_bytes(self.file))\n\n # Let's do our best not to read `file` into memory.\n file_length = peek_filelike_length(self.file)\n if file_length is None:\n # As a last resort, read file and cache contents for later.\n assert not hasattr(self, \"_data\")\n self._data = to_bytes(self.file.read())\n file_length = len(self._data)\n\n return len(headers) + file_length\n\n def render_headers(self) -> bytes:\n if not hasattr(self, \"_headers\"):\n parts = [\n b\"Content-Disposition: form-data; \",\n format_form_param(\"name\", self.name),\n ]\n if self.filename:\n filename = format_form_param(\"filename\", self.filename)\n parts.extend([b\"; \", filename])\n for header_name, header_value in self.headers.items():\n key, val = f\"\\r\\n{header_name}: \".encode(), header_value.encode()\n parts.extend([key, val])\n parts.append(b\"\\r\\n\\r\\n\")\n self._headers = b\"\".join(parts)\n\n return self._headers\n\n 
def render_data(self) -> typing.Iterator[bytes]:\n if isinstance(self.file, (str, bytes)):\n yield to_bytes(self.file)\n return\n\n if hasattr(self, \"_data\"):\n # Already rendered.\n yield self._data\n return\n\n if hasattr(self.file, \"seek\"):\n self.file.seek(0)\n\n chunk = self.file.read(self.CHUNK_SIZE)\n while chunk:\n yield to_bytes(chunk)\n chunk = self.file.read(self.CHUNK_SIZE)\n\n def render(self) -> typing.Iterator[bytes]:\n yield self.render_headers()\n yield from self.render_data()\n\n\nclass MultipartStream(SyncByteStream, AsyncByteStream):\n \"\"\"\n Request content as streaming multipart encoded form data.\n \"\"\"\n\n def __init__(\n self, data: dict, files: RequestFiles, boundary: typing.Optional[bytes] = None\n ) -> None:\n if boundary is None:\n boundary = binascii.hexlify(os.urandom(16))\n\n self.boundary = boundary\n self.content_type = \"multipart/form-data; boundary=%s\" % boundary.decode(\n \"ascii\"\n )\n self.fields = list(self._iter_fields(data, files))\n\n def _iter_fields(\n self, data: dict, files: RequestFiles\n ) -> typing.Iterator[typing.Union[FileField, DataField]]:\n for name, value in data.items():\n if isinstance(value, list):\n for item in value:\n yield DataField(name=name, value=item)\n else:\n yield DataField(name=name, value=value)\n\n file_items = files.items() if isinstance(files, typing.Mapping) else files\n for name, value in file_items:\n yield FileField(name=name, value=value)\n\n def iter_chunks(self) -> typing.Iterator[bytes]:\n for field in self.fields:\n yield b\"--%s\\r\\n\" % self.boundary\n yield from field.render()\n yield b\"\\r\\n\"\n yield b\"--%s--\\r\\n\" % self.boundary\n\n def iter_chunks_lengths(self) -> typing.Iterator[int]:\n boundary_length = len(self.boundary)\n # Follow closely what `.iter_chunks()` does.\n for field in self.fields:\n yield 2 + boundary_length + 2\n yield field.get_length()\n yield 2\n yield 2 + boundary_length + 4\n\n def get_content_length(self) -> int:\n return sum(self.iter_chunks_lengths())\n\n # Content stream interface.\n\n def get_headers(self) -> typing.Dict[str, str]:\n content_length = str(self.get_content_length())\n content_type = self.content_type\n return {\"Content-Length\": content_length, \"Content-Type\": content_type}\n\n def __iter__(self) -> typing.Iterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n\n async def __aiter__(self) -> typing.AsyncIterator[bytes]:\n for chunk in self.iter_chunks():\n yield chunk\n", "path": "httpx/_multipart.py"}]}
| 3,591 | 121 |
gh_patches_debug_971
|
rasdani/github-patches
|
git_diff
|
docker__docker-py-1204
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issue with requests dependency
I found that commit 95d9306d2a1fd22dffb12a0548abf2d2f744ed9d excludes requests 2.11 because of a bug that is now fixed in requests 2.11.1, and that's giving me a version conflict with another module in my project:
```
pkg_resources.ContextualVersionConflict: (requests 2.11.1 (..............), Requirement.parse('requests<2.11,>=2.5.2'), {'docker-py'})
```
Can we allow requests 2.11.1?
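Two illustrative ways the pin could be relaxed in `setup.py` (the specifier strings below are suggestions only, not the project's decision):
```python
# Allow any newer requests release:
requirements = ['requests >= 2.5.2']

# Or exclude only the broken 2.11.0 release (PEP 440 allows != exclusions):
requirements = ['requests >= 2.5.2, != 2.11.0']
```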
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 from setuptools import setup
6
7
8 ROOT_DIR = os.path.dirname(__file__)
9 SOURCE_DIR = os.path.join(ROOT_DIR)
10
11 requirements = [
12 'requests >= 2.5.2, < 2.11',
13 'six >= 1.4.0',
14 'websocket-client >= 0.32.0',
15 'docker-pycreds >= 0.2.1'
16 ]
17
18 if sys.platform == 'win32':
19 requirements.append('pypiwin32 >= 219')
20
21 extras_require = {
22 ':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
23 ':python_version < "3.3"': 'ipaddress >= 1.0.16',
24 }
25
26 version = None
27 exec(open('docker/version.py').read())
28
29 with open('./test-requirements.txt') as test_reqs_txt:
30 test_requirements = [line for line in test_reqs_txt]
31
32
33 setup(
34 name="docker-py",
35 version=version,
36 description="Python client for Docker.",
37 url='https://github.com/docker/docker-py/',
38 packages=[
39 'docker', 'docker.api', 'docker.auth', 'docker.transport',
40 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',
41 'docker.types',
42 ],
43 install_requires=requirements,
44 tests_require=test_requirements,
45 extras_require=extras_require,
46 zip_safe=False,
47 test_suite='tests',
48 classifiers=[
49 'Development Status :: 4 - Beta',
50 'Environment :: Other Environment',
51 'Intended Audience :: Developers',
52 'Operating System :: OS Independent',
53 'Programming Language :: Python',
54 'Programming Language :: Python :: 2',
55 'Programming Language :: Python :: 2.6',
56 'Programming Language :: Python :: 2.7',
57 'Programming Language :: Python :: 3',
58 'Programming Language :: Python :: 3.3',
59 'Programming Language :: Python :: 3.4',
60 'Programming Language :: Python :: 3.5',
61 'Topic :: Utilities',
62 'License :: OSI Approved :: Apache Software License',
63 ],
64 )
65
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
- 'requests >= 2.5.2, < 2.11',
+ 'requests >= 2.5.2',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
'docker-pycreds >= 0.2.1'
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n SOURCE_DIR = os.path.join(ROOT_DIR)\n \n requirements = [\n- 'requests >= 2.5.2, < 2.11',\n+ 'requests >= 2.5.2',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n", "issue": "Issue with requests dependency\nI found that commit 95d9306d2a1fd22dffb12a0548abf2d2f744ed9d excludes requests 2.11 for a bug that is fixed now on requests 2.11.1. And that's giving me a version conflict with another of the modules on my project:\n\n```\npkg_resources.ContextualVersionConflict: (requests 2.11.1 (..............), Requirement.parse('requests<2.11,>=2.5.2'), {'docker-py'})\n```\n\nCan we allow requests 2.11.1 ?\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\nROOT_DIR = os.path.dirname(__file__)\nSOURCE_DIR = os.path.join(ROOT_DIR)\n\nrequirements = [\n 'requests >= 2.5.2, < 2.11',\n 'six >= 1.4.0',\n 'websocket-client >= 0.32.0',\n 'docker-pycreds >= 0.2.1'\n]\n\nif sys.platform == 'win32':\n requirements.append('pypiwin32 >= 219')\n\nextras_require = {\n ':python_version < \"3.5\"': 'backports.ssl_match_hostname >= 3.5',\n ':python_version < \"3.3\"': 'ipaddress >= 1.0.16',\n}\n\nversion = None\nexec(open('docker/version.py').read())\n\nwith open('./test-requirements.txt') as test_reqs_txt:\n test_requirements = [line for line in test_reqs_txt]\n\n\nsetup(\n name=\"docker-py\",\n version=version,\n description=\"Python client for Docker.\",\n url='https://github.com/docker/docker-py/',\n packages=[\n 'docker', 'docker.api', 'docker.auth', 'docker.transport',\n 'docker.utils', 'docker.utils.ports', 'docker.ssladapter',\n 'docker.types',\n ],\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require=extras_require,\n zip_safe=False,\n test_suite='tests',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Other Environment',\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: Apache Software License',\n ],\n)\n", "path": "setup.py"}]}
| 1,266 | 112 |
gh_patches_debug_6506
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-4142
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Sensitivity module] Deleted zones are still present in the open-air export
A clause is missing to exclude deleted sensitive areas in OpenAir API queryset.
assigned to myself.
</issue>
<code>
[start of geotrek/sensitivity/views.py]
1 import json
2 import logging
3 from datetime import datetime
4
5 from django.conf import settings
6 from django.contrib.gis.db.models.functions import Transform
7 from django.http import HttpResponse
8 from django.utils.translation import gettext_lazy as _
9 from django.views.generic import ListView
10 from django.views.generic.detail import BaseDetailView
11 from mapentity.views import (MapEntityCreate, MapEntityUpdate, MapEntityList, MapEntityDetail,
12 MapEntityDelete, MapEntityFormat, LastModifiedMixin)
13
14 from geotrek.authent.decorators import same_structure_required
15 from geotrek.common.mixins.views import CustomColumnsMixin
16 from geotrek.common.permissions import PublicOrReadPermMixin
17 from geotrek.common.viewsets import GeotrekMapentityViewSet
18 from .filters import SensitiveAreaFilterSet
19 from .forms import SensitiveAreaForm, RegulatorySensitiveAreaForm
20 from .models import SensitiveArea, Species, SportPractice
21 from .serializers import SensitiveAreaSerializer, SensitiveAreaGeojsonSerializer
22
23
24 logger = logging.getLogger(__name__)
25
26
27 class SensitiveAreaList(CustomColumnsMixin, MapEntityList):
28 queryset = SensitiveArea.objects.existing()
29 filterform = SensitiveAreaFilterSet
30 mandatory_columns = ['id', 'species']
31 default_extra_columns = ['category']
32
33
34 class SensitiveAreaFormatList(MapEntityFormat, SensitiveAreaList):
35 mandatory_columns = ['id']
36 default_extra_columns = [
37 'species', 'published', 'description', 'contact', 'radius', 'pretty_period', 'pretty_practices',
38 ]
39
40
41 class SensitiveAreaDetail(MapEntityDetail):
42 queryset = SensitiveArea.objects.existing()
43
44 def get_context_data(self, *args, **kwargs):
45 context = super().get_context_data(*args, **kwargs)
46 context['can_edit'] = self.object.same_structure(self.request.user)
47 return context
48
49
50 class SensitiveAreaRadiiMixin:
51 def get_context_data(self, *args, **kwargs):
52 context = super().get_context_data(*args, **kwargs)
53 species = Species.objects.filter(category=Species.SPECIES)
54 context['radii'] = json.dumps({
55 str(s.id): settings.SENSITIVITY_DEFAULT_RADIUS if s.radius is None else s.radius for s in species
56 })
57 return context
58
59
60 class SensitiveAreaCreate(SensitiveAreaRadiiMixin, MapEntityCreate):
61 model = SensitiveArea
62
63 def get_form_class(self):
64 if self.request.GET.get('category') == str(Species.REGULATORY):
65 return RegulatorySensitiveAreaForm
66 return SensitiveAreaForm
67
68
69 class SensitiveAreaUpdate(SensitiveAreaRadiiMixin, MapEntityUpdate):
70 queryset = SensitiveArea.objects.existing()
71
72 def get_form_class(self):
73 if self.object.species.category == Species.REGULATORY:
74 return RegulatorySensitiveAreaForm
75 return SensitiveAreaForm
76
77 @same_structure_required('sensitivity:sensitivearea_detail')
78 def dispatch(self, *args, **kwargs):
79 return super().dispatch(*args, **kwargs)
80
81
82 class SensitiveAreaDelete(MapEntityDelete):
83 model = SensitiveArea
84
85 @same_structure_required('sensitivity:sensitivearea_detail')
86 def dispatch(self, *args, **kwargs):
87 return super().dispatch(*args, **kwargs)
88
89
90 class SensitiveAreaViewSet(GeotrekMapentityViewSet):
91 model = SensitiveArea
92 serializer_class = SensitiveAreaSerializer
93 geojson_serializer_class = SensitiveAreaGeojsonSerializer
94 filterset_class = SensitiveAreaFilterSet
95 mapentity_list_class = SensitiveAreaList
96
97 def get_queryset(self):
98 qs = self.model.objects.existing().select_related('species')
99 if self.format_kwarg == 'geojson':
100 qs = qs.annotate(api_geom=Transform('geom', settings.API_SRID))
101 qs = qs.only('id', 'species')
102 return qs
103
104
105 class SensitiveAreaKMLDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):
106 queryset = SensitiveArea.objects.existing()
107
108 def render_to_response(self, context):
109 area = self.get_object()
110 response = HttpResponse(area.kml(),
111 content_type='application/vnd.google-earth.kml+xml')
112 return response
113
114
115 class SensitiveAreaOpenAirDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):
116 queryset = SensitiveArea.objects.existing()
117
118 def render_to_response(self, context):
119 area = self.get_object()
120 file_header = """* This file has been produced from GeoTrek sensitivity (https://geotrek.fr/) module from website {scheme}://{domain}
121 * Using pyopenair library (https://github.com/lpoaura/pyopenair)
122 * This file was created on: {timestamp}\n\n""".format(scheme=self.request.scheme, domain=self.request.headers['host'], timestamp=datetime.now())
123 is_aerial = area.species.practices.filter(name__in=settings.SENSITIVITY_OPENAIR_SPORT_PRACTICES).exists()
124 if is_aerial and area.openair():
125 result = file_header + area.openair()
126 response = HttpResponse(result, content_type='application/octet-stream; charset=UTF-8')
127 response['Content-Disposition'] = 'inline; filename=sensitivearea_openair_' + str(area.id) + '.txt'
128 return response
129 else:
130 message = _('This is not an aerial area')
131 response = HttpResponse(message, content_type='text/plain; charset=UTF-8')
132
133 return response
134
135
136 class SensitiveAreaOpenAirList(PublicOrReadPermMixin, ListView):
137
138 def get_queryset(self):
139 aerial_practice = SportPractice.objects.filter(name__in=settings.SENSITIVITY_OPENAIR_SPORT_PRACTICES)
140 return SensitiveArea.objects.filter(
141 species__practices__in=aerial_practice, published=True
142 ).select_related('species')
143
144 def render_to_response(self, context):
145 areas = self.get_queryset()
146 file_header = """* This file has been produced from GeoTrek sensitivity (https://geotrek.fr/) module from website {scheme}://{domain}
147 * Using pyopenair library (https://github.com/lpoaura/pyopenair)
148 * This file was created on: {timestamp}\n\n""".format(scheme=self.request.scheme, domain=self.request.headers['host'], timestamp=datetime.now())
149 airspace_list = [a.openair() for a in areas if a.openair()]
150 airspace_core = '\n\n'.join(airspace_list)
151 airspace_file = file_header + airspace_core
152 response = HttpResponse(airspace_file, content_type='application/octet-stream; charset=UTF-8')
153 response['Content-Disposition'] = 'inline; filename=sensitivearea_openair.txt'
154 return response
155
[end of geotrek/sensitivity/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/geotrek/sensitivity/views.py b/geotrek/sensitivity/views.py
--- a/geotrek/sensitivity/views.py
+++ b/geotrek/sensitivity/views.py
@@ -137,7 +137,7 @@
def get_queryset(self):
aerial_practice = SportPractice.objects.filter(name__in=settings.SENSITIVITY_OPENAIR_SPORT_PRACTICES)
- return SensitiveArea.objects.filter(
+ return SensitiveArea.objects.existing().filter(
species__practices__in=aerial_practice, published=True
).select_related('species')
|
{"golden_diff": "diff --git a/geotrek/sensitivity/views.py b/geotrek/sensitivity/views.py\n--- a/geotrek/sensitivity/views.py\n+++ b/geotrek/sensitivity/views.py\n@@ -137,7 +137,7 @@\n \n def get_queryset(self):\n aerial_practice = SportPractice.objects.filter(name__in=settings.SENSITIVITY_OPENAIR_SPORT_PRACTICES)\n- return SensitiveArea.objects.filter(\n+ return SensitiveArea.objects.existing().filter(\n species__practices__in=aerial_practice, published=True\n ).select_related('species')\n", "issue": "[Sensitivity module] Deleted zones are still present in the open-air export\nA clause is missing to exclude deleted sensitive areas in OpenAir API queryset.\r\n\r\nassigned to my self.\n[Sensitivity module] Deleted zones are still present in the open-air export\nA clause is missing to exclude deleted sensitive areas in OpenAir API queryset.\r\n\r\nassigned to my self.\n", "before_files": [{"content": "import json\nimport logging\nfrom datetime import datetime\n\nfrom django.conf import settings\nfrom django.contrib.gis.db.models.functions import Transform\nfrom django.http import HttpResponse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import ListView\nfrom django.views.generic.detail import BaseDetailView\nfrom mapentity.views import (MapEntityCreate, MapEntityUpdate, MapEntityList, MapEntityDetail,\n MapEntityDelete, MapEntityFormat, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import same_structure_required\nfrom geotrek.common.mixins.views import CustomColumnsMixin\nfrom geotrek.common.permissions import PublicOrReadPermMixin\nfrom geotrek.common.viewsets import GeotrekMapentityViewSet\nfrom .filters import SensitiveAreaFilterSet\nfrom .forms import SensitiveAreaForm, RegulatorySensitiveAreaForm\nfrom .models import SensitiveArea, Species, SportPractice\nfrom .serializers import SensitiveAreaSerializer, SensitiveAreaGeojsonSerializer\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass SensitiveAreaList(CustomColumnsMixin, MapEntityList):\n queryset = SensitiveArea.objects.existing()\n filterform = SensitiveAreaFilterSet\n mandatory_columns = ['id', 'species']\n default_extra_columns = ['category']\n\n\nclass SensitiveAreaFormatList(MapEntityFormat, SensitiveAreaList):\n mandatory_columns = ['id']\n default_extra_columns = [\n 'species', 'published', 'description', 'contact', 'radius', 'pretty_period', 'pretty_practices',\n ]\n\n\nclass SensitiveAreaDetail(MapEntityDetail):\n queryset = SensitiveArea.objects.existing()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context['can_edit'] = self.object.same_structure(self.request.user)\n return context\n\n\nclass SensitiveAreaRadiiMixin:\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n species = Species.objects.filter(category=Species.SPECIES)\n context['radii'] = json.dumps({\n str(s.id): settings.SENSITIVITY_DEFAULT_RADIUS if s.radius is None else s.radius for s in species\n })\n return context\n\n\nclass SensitiveAreaCreate(SensitiveAreaRadiiMixin, MapEntityCreate):\n model = SensitiveArea\n\n def get_form_class(self):\n if self.request.GET.get('category') == str(Species.REGULATORY):\n return RegulatorySensitiveAreaForm\n return SensitiveAreaForm\n\n\nclass SensitiveAreaUpdate(SensitiveAreaRadiiMixin, MapEntityUpdate):\n queryset = SensitiveArea.objects.existing()\n\n def get_form_class(self):\n if self.object.species.category == Species.REGULATORY:\n return 
RegulatorySensitiveAreaForm\n return SensitiveAreaForm\n\n @same_structure_required('sensitivity:sensitivearea_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass SensitiveAreaDelete(MapEntityDelete):\n model = SensitiveArea\n\n @same_structure_required('sensitivity:sensitivearea_detail')\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n\nclass SensitiveAreaViewSet(GeotrekMapentityViewSet):\n model = SensitiveArea\n serializer_class = SensitiveAreaSerializer\n geojson_serializer_class = SensitiveAreaGeojsonSerializer\n filterset_class = SensitiveAreaFilterSet\n mapentity_list_class = SensitiveAreaList\n\n def get_queryset(self):\n qs = self.model.objects.existing().select_related('species')\n if self.format_kwarg == 'geojson':\n qs = qs.annotate(api_geom=Transform('geom', settings.API_SRID))\n qs = qs.only('id', 'species')\n return qs\n\n\nclass SensitiveAreaKMLDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = SensitiveArea.objects.existing()\n\n def render_to_response(self, context):\n area = self.get_object()\n response = HttpResponse(area.kml(),\n content_type='application/vnd.google-earth.kml+xml')\n return response\n\n\nclass SensitiveAreaOpenAirDetail(LastModifiedMixin, PublicOrReadPermMixin, BaseDetailView):\n queryset = SensitiveArea.objects.existing()\n\n def render_to_response(self, context):\n area = self.get_object()\n file_header = \"\"\"* This file has been produced from GeoTrek sensitivity (https://geotrek.fr/) module from website {scheme}://{domain}\n* Using pyopenair library (https://github.com/lpoaura/pyopenair)\n* This file was created on: {timestamp}\\n\\n\"\"\".format(scheme=self.request.scheme, domain=self.request.headers['host'], timestamp=datetime.now())\n is_aerial = area.species.practices.filter(name__in=settings.SENSITIVITY_OPENAIR_SPORT_PRACTICES).exists()\n if is_aerial and area.openair():\n result = file_header + area.openair()\n response = HttpResponse(result, content_type='application/octet-stream; charset=UTF-8')\n response['Content-Disposition'] = 'inline; filename=sensitivearea_openair_' + str(area.id) + '.txt'\n return response\n else:\n message = _('This is not an aerial area')\n response = HttpResponse(message, content_type='text/plain; charset=UTF-8')\n\n return response\n\n\nclass SensitiveAreaOpenAirList(PublicOrReadPermMixin, ListView):\n\n def get_queryset(self):\n aerial_practice = SportPractice.objects.filter(name__in=settings.SENSITIVITY_OPENAIR_SPORT_PRACTICES)\n return SensitiveArea.objects.filter(\n species__practices__in=aerial_practice, published=True\n ).select_related('species')\n\n def render_to_response(self, context):\n areas = self.get_queryset()\n file_header = \"\"\"* This file has been produced from GeoTrek sensitivity (https://geotrek.fr/) module from website {scheme}://{domain}\n* Using pyopenair library (https://github.com/lpoaura/pyopenair)\n* This file was created on: {timestamp}\\n\\n\"\"\".format(scheme=self.request.scheme, domain=self.request.headers['host'], timestamp=datetime.now())\n airspace_list = [a.openair() for a in areas if a.openair()]\n airspace_core = '\\n\\n'.join(airspace_list)\n airspace_file = file_header + airspace_core\n response = HttpResponse(airspace_file, content_type='application/octet-stream; charset=UTF-8')\n response['Content-Disposition'] = 'inline; filename=sensitivearea_openair.txt'\n return response\n", "path": "geotrek/sensitivity/views.py"}]}
| 2,398 | 129 |
gh_patches_debug_63551
|
rasdani/github-patches
|
git_diff
|
falconry__falcon-602
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hoist HTTPStatus into falcon top-level namespace
I.e., add an import line to `falcon/__init__.py`
</issue>
<code>
[start of falcon/__init__.py]
1 # Copyright 2013 by Rackspace Hosting, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 HTTP_METHODS = (
16 'CONNECT',
17 'DELETE',
18 'GET',
19 'HEAD',
20 'OPTIONS',
21 'PATCH',
22 'POST',
23 'PUT',
24 'TRACE',
25 )
26
27 DEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'
28
29
30 # Hoist classes and functions into the falcon namespace
31 from falcon.version import __version__ # NOQA
32 from falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA
33 from falcon.status_codes import * # NOQA
34 from falcon.errors import * # NOQA
35 from falcon.redirects import * # NOQA
36 from falcon.http_error import HTTPError # NOQA
37 from falcon.util import * # NOQA
38 from falcon.hooks import before, after # NOQA
39 from falcon.request import Request, RequestOptions # NOQA
40 from falcon.response import Response # NOQA
41
[end of falcon/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/falcon/__init__.py b/falcon/__init__.py
--- a/falcon/__init__.py
+++ b/falcon/__init__.py
@@ -34,6 +34,7 @@
from falcon.errors import * # NOQA
from falcon.redirects import * # NOQA
from falcon.http_error import HTTPError # NOQA
+from falcon.http_status import HTTPStatus # NOQA
from falcon.util import * # NOQA
from falcon.hooks import before, after # NOQA
from falcon.request import Request, RequestOptions # NOQA
|
{"golden_diff": "diff --git a/falcon/__init__.py b/falcon/__init__.py\n--- a/falcon/__init__.py\n+++ b/falcon/__init__.py\n@@ -34,6 +34,7 @@\n from falcon.errors import * # NOQA\n from falcon.redirects import * # NOQA\n from falcon.http_error import HTTPError # NOQA\n+from falcon.http_status import HTTPStatus # NOQA\n from falcon.util import * # NOQA\n from falcon.hooks import before, after # NOQA\n from falcon.request import Request, RequestOptions # NOQA\n", "issue": "Hoist HTTPStatus into falcon top-level namespace\nI.e., add an import line to `falcon/__init__.py`\n\n", "before_files": [{"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nHTTP_METHODS = (\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n)\n\nDEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'\n\n\n# Hoist classes and functions into the falcon namespace\nfrom falcon.version import __version__ # NOQA\nfrom falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA\nfrom falcon.status_codes import * # NOQA\nfrom falcon.errors import * # NOQA\nfrom falcon.redirects import * # NOQA\nfrom falcon.http_error import HTTPError # NOQA\nfrom falcon.util import * # NOQA\nfrom falcon.hooks import before, after # NOQA\nfrom falcon.request import Request, RequestOptions # NOQA\nfrom falcon.response import Response # NOQA\n", "path": "falcon/__init__.py"}]}
| 969 | 136 |
gh_patches_debug_13977
|
rasdani/github-patches
|
git_diff
|
Parsl__parsl-374
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OS to usage stats collection
</issue>
<code>
[start of parsl/dataflow/usage_tracking/usage.py]
1 import uuid
2 import time
3 import hashlib
4 import os
5 import getpass
6 import json
7 import logging
8 import socket
9 import sys
10 import multiprocessing as mp
11
12 from parsl.dataflow.states import States
13 from parsl.version import VERSION as PARSL_VERSION
14
15 logger = logging.getLogger(__name__)
16
17
18 def async_process(fn):
19 """ Decorator function to launch a function as a separate process """
20
21 def run(*args, **kwargs):
22 proc = mp.Process(target=fn, args=args, kwargs=kwargs)
23 proc.start()
24 return proc
25
26 return run
27
28
29 @async_process
30 def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
31 """Send UDP messages to usage tracker asynchronously
32
33 This multiprocessing based messenger was written to overcome the limitations
34 of signalling/terminating a thread that is blocked on a system call. This
35 messenger is created as a separate process, and initialized with 2 queues,
36 to_send to receive messages to be sent to the internet.
37
38 Args:
39 - domain_name (str) : Domain name string
40 - UDP_IP (str) : IP address YYY.YYY.YYY.YYY
41 - UDP_PORT (int) : UDP port to send out on
42 - sock_timeout (int) : Socket timeout
43 - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet
44 """
45 try:
46 if message is None:
47 raise ValueError("message was none")
48
49 encoded_message = bytes(message, "utf-8")
50
51 if encoded_message is None:
52 raise ValueError("utf-8 encoding of message failed")
53
54 if domain_name:
55 try:
56 UDP_IP = socket.gethostbyname(domain_name)
57 except Exception:
58 # (False, "Domain lookup failed, defaulting to {0}".format(UDP_IP))
59 pass
60
61 if UDP_IP is None:
62 raise Exception("UDP_IP is None")
63
64 if UDP_PORT is None:
65 raise Exception("UDP_PORT is None")
66
67 sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
68 sock.settimeout(sock_timeout)
69 sock.sendto(bytes(message, "utf-8"), (UDP_IP, UDP_PORT))
70 sock.close()
71
72 except socket.timeout:
73 logger.debug("Failed to send usage tracking data: socket timeout")
74 except OSError as e:
75 logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
76 except Exception as e:
77 logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
78
79
80 class UsageTracker (object):
81 """Anonymized Usage Tracking for Parsl.
82
83 Client for this is here : https://github.com/Parsl/parsl_tracking
84 This issue captures the discussion that went into functionality
85 implemented here : https://github.com/Parsl/parsl/issues/34
86
87 """
88
89 def __init__(self, dfk, ip='52.3.111.203', port=50077,
90 domain_name='tracking.parsl-project.org'):
91 """Initialize usage tracking unless the user has opted-out.
92
93 We will try to resolve the hostname specified in kwarg:domain_name
94 and if that fails attempt to use the kwarg:ip. Determining the
95 IP and sending message is threaded to avoid slowing down DFK
96 initialization.
97
98 Tracks usage stats by inspecting the internal state of the dfk.
99
100 Args:
101 - dfk (DFK object) : Data Flow Kernel object
102
103 KWargs:
104 - ip (string) : IP address
105 - port (int) : Port number, Default:50077
106 - domain_name (string) : Domain name, will override IP
107 Default: tracking.parsl-project.org
108 """
109
110 self.domain_name = domain_name
111 self.ip = ip
112 # The sock timeout will only apply to UDP send and not domain resolution
113 self.sock_timeout = 5
114 self.UDP_PORT = port
115 self.UDP_IP = None
116 self.procs = []
117 self.dfk = dfk
118 self.config = self.dfk.config
119 self.uuid = str(uuid.uuid4())
120 self.parsl_version = PARSL_VERSION
121 self.python_version = "{}.{}.{}".format(sys.version_info.major,
122 sys.version_info.minor,
123 sys.version_info.micro)
124 self.test_mode, self.tracking_enabled = self.check_tracking_enabled()
125 logger.debug("Tracking status: {}".format(self.tracking_enabled))
126 logger.debug("Testing mode : {}".format(self.test_mode))
127 self.initialized = False # Once first message is sent this will be True
128
129 def check_tracking_enabled(self):
130 """By default tracking is enabled.
131
132 If Test mode is set via env variable PARSL_TESTING, a test flag is set
133
134 Tracking is disabled if :
135 1. config["globals"]["usageTracking"] is set to False (Bool)
136 2. Environment variable PARSL_TRACKING is set to false (case insensitive)
137
138 """
139 track = True # By default we track usage
140 test = False # By default we are not in testing mode
141
142 testvar = str(os.environ.get("PARSL_TESTING", 'None')).lower()
143 if testvar == 'true':
144 test = True
145
146 if not self.config.usage_tracking:
147 track = False
148
149 envvar = str(os.environ.get("PARSL_TRACKING", True)).lower()
150 if envvar == "false":
151 track = False
152
153 return test, track
154
155 def construct_start_message(self):
156 """Collect preliminary run info at the start of the DFK.
157
158 Returns :
159 - Message dict dumped as json string, ready for UDP
160 """
161 uname = getpass.getuser().encode('latin1')
162 hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
163 hname = socket.gethostname().encode('latin1')
164 hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
165 message = {'uuid': self.uuid,
166 'uname': hashed_username,
167 'hname': hashed_hostname,
168 'test': self.test_mode,
169 'parsl_v': self.parsl_version,
170 'python_v': self.python_version,
171 'start': time.time()}
172
173 return json.dumps(message)
174
175 def construct_end_message(self):
176 """Collect the final run information at the time of DFK cleanup.
177
178 Returns:
179 - Message dict dumped as json string, ready for UDP
180 """
181 app_count = self.dfk.task_count
182
183 site_count = len([x for x in self.dfk.config.executors if x.managed])
184
185 failed_states = (States.failed, States.dep_fail)
186 app_fails = len([t for t in self.dfk.tasks if
187 self.dfk.tasks[t]['status'] in failed_states])
188
189 message = {'uuid': self.uuid,
190 'end': time.time(),
191 't_apps': app_count,
192 'sites': site_count,
193 'c_time': None,
194 'failed': app_fails,
195 'test': self.test_mode,
196 }
197
198 return json.dumps(message)
199
200 def send_UDP_message(self, message):
201 """Send UDP message."""
202 x = 0
203 if self.tracking_enabled:
204 try:
205 proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
206 self.procs.append(proc)
207 except Exception as e:
208 logger.debug("Usage tracking failed: {}".format(e))
209 else:
210 x = -1
211
212 return x
213
214 def send_message(self):
215 """Send message over UDP.
216
217 If tracking is disables, the bytes_sent will always be set to -1
218
219 Returns:
220 (bytes_sent, time_taken)
221 """
222 start = time.time()
223 message = None
224 if not self.initialized:
225 message = self.construct_start_message()
226 self.initialized = True
227 else:
228 message = self.construct_end_message()
229
230 self.send_UDP_message(message)
231 end = time.time()
232
233 return end - start
234
235 def __del__(self):
236 return self.close()
237
238 def close(self):
239 """We terminate (SIGTERM) the processes added to the self.procs list """
240 for proc in self.procs:
241 proc.terminate()
242
243
244 if __name__ == '__main__':
245
246 from parsl import *
247
248 set_stream_logger()
249 workers = ThreadPoolExecutor(max_workers=4)
250 dfk = DataFlowKernel(executors=[workers])
251
252 dfk.cleanup()
253
[end of parsl/dataflow/usage_tracking/usage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py
--- a/parsl/dataflow/usage_tracking/usage.py
+++ b/parsl/dataflow/usage_tracking/usage.py
@@ -7,6 +7,7 @@
import logging
import socket
import sys
+import platform
import multiprocessing as mp
from parsl.dataflow.states import States
@@ -168,6 +169,8 @@
'test': self.test_mode,
'parsl_v': self.parsl_version,
'python_v': self.python_version,
+ 'os': platform.system(),
+ 'os_v': platform.release(),
'start': time.time()}
return json.dumps(message)
|
{"golden_diff": "diff --git a/parsl/dataflow/usage_tracking/usage.py b/parsl/dataflow/usage_tracking/usage.py\n--- a/parsl/dataflow/usage_tracking/usage.py\n+++ b/parsl/dataflow/usage_tracking/usage.py\n@@ -7,6 +7,7 @@\n import logging\n import socket\n import sys\n+import platform\n import multiprocessing as mp\n \n from parsl.dataflow.states import States\n@@ -168,6 +169,8 @@\n 'test': self.test_mode,\n 'parsl_v': self.parsl_version,\n 'python_v': self.python_version,\n+ 'os': platform.system(),\n+ 'os_v': platform.release(),\n 'start': time.time()}\n \n return json.dumps(message)\n", "issue": "Add OS to usage stats collection\n\n", "before_files": [{"content": "import uuid\nimport time\nimport hashlib\nimport os\nimport getpass\nimport json\nimport logging\nimport socket\nimport sys\nimport multiprocessing as mp\n\nfrom parsl.dataflow.states import States\nfrom parsl.version import VERSION as PARSL_VERSION\n\nlogger = logging.getLogger(__name__)\n\n\ndef async_process(fn):\n \"\"\" Decorator function to launch a function as a separate process \"\"\"\n\n def run(*args, **kwargs):\n proc = mp.Process(target=fn, args=args, kwargs=kwargs)\n proc.start()\n return proc\n\n return run\n\n\n@async_process\ndef udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):\n \"\"\"Send UDP messages to usage tracker asynchronously\n\n This multiprocessing based messenger was written to overcome the limitations\n of signalling/terminating a thread that is blocked on a system call. This\n messenger is created as a separate process, and initialized with 2 queues,\n to_send to receive messages to be sent to the internet.\n\n Args:\n - domain_name (str) : Domain name string\n - UDP_IP (str) : IP address YYY.YYY.YYY.YYY\n - UDP_PORT (int) : UDP port to send out on\n - sock_timeout (int) : Socket timeout\n - to_send (multiprocessing.Queue) : Queue of outgoing messages to internet\n \"\"\"\n try:\n if message is None:\n raise ValueError(\"message was none\")\n\n encoded_message = bytes(message, \"utf-8\")\n\n if encoded_message is None:\n raise ValueError(\"utf-8 encoding of message failed\")\n\n if domain_name:\n try:\n UDP_IP = socket.gethostbyname(domain_name)\n except Exception:\n # (False, \"Domain lookup failed, defaulting to {0}\".format(UDP_IP))\n pass\n\n if UDP_IP is None:\n raise Exception(\"UDP_IP is None\")\n\n if UDP_PORT is None:\n raise Exception(\"UDP_PORT is None\")\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP\n sock.settimeout(sock_timeout)\n sock.sendto(bytes(message, \"utf-8\"), (UDP_IP, UDP_PORT))\n sock.close()\n\n except socket.timeout:\n logger.debug(\"Failed to send usage tracking data: socket timeout\")\n except OSError as e:\n logger.debug(\"Failed to send usage tracking data: OSError: {}\".format(e))\n except Exception as e:\n logger.debug(\"Failed to send usage tracking data: Exception: {}\".format(e))\n\n\nclass UsageTracker (object):\n \"\"\"Anonymized Usage Tracking for Parsl.\n\n Client for this is here : https://github.com/Parsl/parsl_tracking\n This issue captures the discussion that went into functionality\n implemented here : https://github.com/Parsl/parsl/issues/34\n\n \"\"\"\n\n def __init__(self, dfk, ip='52.3.111.203', port=50077,\n domain_name='tracking.parsl-project.org'):\n \"\"\"Initialize usage tracking unless the user has opted-out.\n\n We will try to resolve the hostname specified in kwarg:domain_name\n and if that fails attempt to use the kwarg:ip. 
Determining the\n IP and sending message is threaded to avoid slowing down DFK\n initialization.\n\n Tracks usage stats by inspecting the internal state of the dfk.\n\n Args:\n - dfk (DFK object) : Data Flow Kernel object\n\n KWargs:\n - ip (string) : IP address\n - port (int) : Port number, Default:50077\n - domain_name (string) : Domain name, will override IP\n Default: tracking.parsl-project.org\n \"\"\"\n\n self.domain_name = domain_name\n self.ip = ip\n # The sock timeout will only apply to UDP send and not domain resolution\n self.sock_timeout = 5\n self.UDP_PORT = port\n self.UDP_IP = None\n self.procs = []\n self.dfk = dfk\n self.config = self.dfk.config\n self.uuid = str(uuid.uuid4())\n self.parsl_version = PARSL_VERSION\n self.python_version = \"{}.{}.{}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n self.test_mode, self.tracking_enabled = self.check_tracking_enabled()\n logger.debug(\"Tracking status: {}\".format(self.tracking_enabled))\n logger.debug(\"Testing mode : {}\".format(self.test_mode))\n self.initialized = False # Once first message is sent this will be True\n\n def check_tracking_enabled(self):\n \"\"\"By default tracking is enabled.\n\n If Test mode is set via env variable PARSL_TESTING, a test flag is set\n\n Tracking is disabled if :\n 1. config[\"globals\"][\"usageTracking\"] is set to False (Bool)\n 2. Environment variable PARSL_TRACKING is set to false (case insensitive)\n\n \"\"\"\n track = True # By default we track usage\n test = False # By default we are not in testing mode\n\n testvar = str(os.environ.get(\"PARSL_TESTING\", 'None')).lower()\n if testvar == 'true':\n test = True\n\n if not self.config.usage_tracking:\n track = False\n\n envvar = str(os.environ.get(\"PARSL_TRACKING\", True)).lower()\n if envvar == \"false\":\n track = False\n\n return test, track\n\n def construct_start_message(self):\n \"\"\"Collect preliminary run info at the start of the DFK.\n\n Returns :\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n uname = getpass.getuser().encode('latin1')\n hashed_username = hashlib.sha256(uname).hexdigest()[0:10]\n hname = socket.gethostname().encode('latin1')\n hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]\n message = {'uuid': self.uuid,\n 'uname': hashed_username,\n 'hname': hashed_hostname,\n 'test': self.test_mode,\n 'parsl_v': self.parsl_version,\n 'python_v': self.python_version,\n 'start': time.time()}\n\n return json.dumps(message)\n\n def construct_end_message(self):\n \"\"\"Collect the final run information at the time of DFK cleanup.\n\n Returns:\n - Message dict dumped as json string, ready for UDP\n \"\"\"\n app_count = self.dfk.task_count\n\n site_count = len([x for x in self.dfk.config.executors if x.managed])\n\n failed_states = (States.failed, States.dep_fail)\n app_fails = len([t for t in self.dfk.tasks if\n self.dfk.tasks[t]['status'] in failed_states])\n\n message = {'uuid': self.uuid,\n 'end': time.time(),\n 't_apps': app_count,\n 'sites': site_count,\n 'c_time': None,\n 'failed': app_fails,\n 'test': self.test_mode,\n }\n\n return json.dumps(message)\n\n def send_UDP_message(self, message):\n \"\"\"Send UDP message.\"\"\"\n x = 0\n if self.tracking_enabled:\n try:\n proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)\n self.procs.append(proc)\n except Exception as e:\n logger.debug(\"Usage tracking failed: {}\".format(e))\n else:\n x = -1\n\n return x\n\n def send_message(self):\n \"\"\"Send message over UDP.\n\n If 
tracking is disables, the bytes_sent will always be set to -1\n\n Returns:\n (bytes_sent, time_taken)\n \"\"\"\n start = time.time()\n message = None\n if not self.initialized:\n message = self.construct_start_message()\n self.initialized = True\n else:\n message = self.construct_end_message()\n\n self.send_UDP_message(message)\n end = time.time()\n\n return end - start\n\n def __del__(self):\n return self.close()\n\n def close(self):\n \"\"\"We terminate (SIGTERM) the processes added to the self.procs list \"\"\"\n for proc in self.procs:\n proc.terminate()\n\n\nif __name__ == '__main__':\n\n from parsl import *\n\n set_stream_logger()\n workers = ThreadPoolExecutor(max_workers=4)\n dfk = DataFlowKernel(executors=[workers])\n\n dfk.cleanup()\n", "path": "parsl/dataflow/usage_tracking/usage.py"}]}
| 3,064 | 167 |
gh_patches_debug_18108
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-1355
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
refactor: Remove dependency on jQuery
We should replace the `$(...)` with vanilla JS.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import re
3 import os
4 import urllib.request
5 import zipfile
6 import shutil
7
8 from setuptools import setup, find_packages
9 from codecs import open
10
11 requires = ["click", "cookiecutter", "networkx", "numpy", "pandas", "tornado", "tqdm"]
12
13 extras_require = {
14 "dev": ["black", "coverage", "flake8", "pytest >= 4.6", "pytest-cov", "sphinx"],
15 "docs": ["sphinx", "ipython"],
16 }
17
18 version = ""
19 with open("mesa/__init__.py") as fd:
20 version = re.search(
21 r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE
22 ).group(1)
23
24 with open("README.rst", "rb", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Ensure JS dependencies are downloaded
28 external_dir = "mesa/visualization/templates/external"
29 # We use a different path for single-file JS because some of them are loaded
30 # the same way as Mesa JS files
31 external_dir_single = "mesa/visualization/templates/js/external"
32 # First, ensure that the external directories exists
33 os.makedirs(external_dir, exist_ok=True)
34 os.makedirs(external_dir_single, exist_ok=True)
35
36
37 def ensure_JS_dep(dirname, url):
38 dst_path = os.path.join(external_dir, dirname)
39 if os.path.isdir(dst_path):
40 # Do nothing if already downloaded
41 return
42 print(f"Downloading the {dirname} dependency from the internet...")
43 zip_file = dirname + ".zip"
44 urllib.request.urlretrieve(url, zip_file)
45 with zipfile.ZipFile(zip_file, "r") as zip_ref:
46 zip_ref.extractall()
47 shutil.move(dirname, dst_path)
48 # Cleanup
49 os.remove(zip_file)
50 print("Done")
51
52
53 def ensure_JS_dep_single(url, out_name=None):
54 # Used for downloading e.g. jQuery single file
55 if out_name is None:
56 out_name = url.split("/")[-1]
57 dst_path = os.path.join(external_dir_single, out_name)
58 if os.path.isfile(dst_path):
59 return
60 print(f"Downloading the {out_name} dependency from the internet...")
61 urllib.request.urlretrieve(url, out_name)
62 shutil.move(out_name, dst_path)
63
64
65 # Important: when you update JS dependency version, make sure to also update the
66 # hardcoded included files and versions in: mesa/visualization/templates/modular_template.html
67
68 # Ensure Bootstrap
69 bootstrap_version = "5.1.3"
70 ensure_JS_dep(
71 f"bootstrap-{bootstrap_version}-dist",
72 f"https://github.com/twbs/bootstrap/releases/download/v{bootstrap_version}/bootstrap-{bootstrap_version}-dist.zip",
73 )
74
75 # Ensure Bootstrap Slider
76 bootstrap_slider_version = "11.0.2"
77 ensure_JS_dep(
78 f"bootstrap-slider-{bootstrap_slider_version}",
79 f"https://github.com/seiyria/bootstrap-slider/archive/refs/tags/v{bootstrap_slider_version}.zip",
80 )
81
82 jquery_version = "2.2.4"
83 ensure_JS_dep_single(
84 f"https://code.jquery.com/jquery-{jquery_version}.min.js",
85 )
86 # Important: when updating the D3 version, make sure to update the constant
87 # D3_JS_FILE in mesa/visualization/ModularVisualization.py.
88 d3_version = "7.4.3"
89 ensure_JS_dep_single(
90 f"https://cdnjs.cloudflare.com/ajax/libs/d3/{d3_version}/d3.min.js",
91 out_name=f"d3-{d3_version}.min.js",
92 )
93 # Important: Make sure to update CHART_JS_FILE in
94 # mesa/visualization/ModularVisualization.py.
95 chartjs_version = "3.6.1"
96 ensure_JS_dep_single(
97 f"https://cdn.jsdelivr.net/npm/chart.js@{chartjs_version}/dist/chart.min.js",
98 out_name=f"chart-{chartjs_version}.min.js",
99 )
100
101
102 setup(
103 name="Mesa",
104 version=version,
105 description="Agent-based modeling (ABM) in Python 3+",
106 long_description=readme,
107 author="Project Mesa Team",
108 author_email="[email protected]",
109 url="https://github.com/projectmesa/mesa",
110 packages=find_packages(),
111 package_data={
112 "mesa": [
113 "visualization/templates/*.html",
114 "visualization/templates/css/*",
115 "visualization/templates/js/*",
116 "visualization/templates/external/**/*",
117 ],
118 "cookiecutter-mesa": ["cookiecutter-mesa/*"],
119 },
120 include_package_data=True,
121 install_requires=requires,
122 extras_require=extras_require,
123 keywords="agent based modeling model ABM simulation multi-agent",
124 license="Apache 2.0",
125 zip_safe=False,
126 classifiers=[
127 "Topic :: Scientific/Engineering",
128 "Topic :: Scientific/Engineering :: Artificial Life",
129 "Topic :: Scientific/Engineering :: Artificial Intelligence",
130 "Intended Audience :: Science/Research",
131 "Programming Language :: Python :: 3 :: Only",
132 "Programming Language :: Python :: 3.7",
133 "Programming Language :: Python :: 3.8",
134 "Programming Language :: Python :: 3.9",
135 "Programming Language :: Python :: 3.10",
136 "License :: OSI Approved :: Apache Software License",
137 "Operating System :: OS Independent",
138 "Development Status :: 3 - Alpha",
139 "Natural Language :: English",
140 ],
141 entry_points="""
142 [console_scripts]
143 mesa=mesa.main:cli
144 """,
145 python_requires=">=3.7",
146 )
147
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
def ensure_JS_dep_single(url, out_name=None):
- # Used for downloading e.g. jQuery single file
+ # Used for downloading e.g. D3.js single file
if out_name is None:
out_name = url.split("/")[-1]
dst_path = os.path.join(external_dir_single, out_name)
@@ -79,10 +79,6 @@
f"https://github.com/seiyria/bootstrap-slider/archive/refs/tags/v{bootstrap_slider_version}.zip",
)
-jquery_version = "2.2.4"
-ensure_JS_dep_single(
- f"https://code.jquery.com/jquery-{jquery_version}.min.js",
-)
# Important: when updating the D3 version, make sure to update the constant
# D3_JS_FILE in mesa/visualization/ModularVisualization.py.
d3_version = "7.4.3"
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n \n \n def ensure_JS_dep_single(url, out_name=None):\n- # Used for downloading e.g. jQuery single file\n+ # Used for downloading e.g. D3.js single file\n if out_name is None:\n out_name = url.split(\"/\")[-1]\n dst_path = os.path.join(external_dir_single, out_name)\n@@ -79,10 +79,6 @@\n f\"https://github.com/seiyria/bootstrap-slider/archive/refs/tags/v{bootstrap_slider_version}.zip\",\n )\n \n-jquery_version = \"2.2.4\"\n-ensure_JS_dep_single(\n- f\"https://code.jquery.com/jquery-{jquery_version}.min.js\",\n-)\n # Important: when updating the D3 version, make sure to update the constant\n # D3_JS_FILE in mesa/visualization/ModularVisualization.py.\n d3_version = \"7.4.3\"\n", "issue": "refactor: Remove dependency on jQuery\nWe should replace the `$(...)` with vanilla JS.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\nimport os\nimport urllib.request\nimport zipfile\nimport shutil\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\n\nrequires = [\"click\", \"cookiecutter\", \"networkx\", \"numpy\", \"pandas\", \"tornado\", \"tqdm\"]\n\nextras_require = {\n \"dev\": [\"black\", \"coverage\", \"flake8\", \"pytest >= 4.6\", \"pytest-cov\", \"sphinx\"],\n \"docs\": [\"sphinx\", \"ipython\"],\n}\n\nversion = \"\"\nwith open(\"mesa/__init__.py\") as fd:\n version = re.search(\n r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE\n ).group(1)\n\nwith open(\"README.rst\", \"rb\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n# Ensure JS dependencies are downloaded\nexternal_dir = \"mesa/visualization/templates/external\"\n# We use a different path for single-file JS because some of them are loaded\n# the same way as Mesa JS files\nexternal_dir_single = \"mesa/visualization/templates/js/external\"\n# First, ensure that the external directories exists\nos.makedirs(external_dir, exist_ok=True)\nos.makedirs(external_dir_single, exist_ok=True)\n\n\ndef ensure_JS_dep(dirname, url):\n dst_path = os.path.join(external_dir, dirname)\n if os.path.isdir(dst_path):\n # Do nothing if already downloaded\n return\n print(f\"Downloading the {dirname} dependency from the internet...\")\n zip_file = dirname + \".zip\"\n urllib.request.urlretrieve(url, zip_file)\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall()\n shutil.move(dirname, dst_path)\n # Cleanup\n os.remove(zip_file)\n print(\"Done\")\n\n\ndef ensure_JS_dep_single(url, out_name=None):\n # Used for downloading e.g. 
jQuery single file\n if out_name is None:\n out_name = url.split(\"/\")[-1]\n dst_path = os.path.join(external_dir_single, out_name)\n if os.path.isfile(dst_path):\n return\n print(f\"Downloading the {out_name} dependency from the internet...\")\n urllib.request.urlretrieve(url, out_name)\n shutil.move(out_name, dst_path)\n\n\n# Important: when you update JS dependency version, make sure to also update the\n# hardcoded included files and versions in: mesa/visualization/templates/modular_template.html\n\n# Ensure Bootstrap\nbootstrap_version = \"5.1.3\"\nensure_JS_dep(\n f\"bootstrap-{bootstrap_version}-dist\",\n f\"https://github.com/twbs/bootstrap/releases/download/v{bootstrap_version}/bootstrap-{bootstrap_version}-dist.zip\",\n)\n\n# Ensure Bootstrap Slider\nbootstrap_slider_version = \"11.0.2\"\nensure_JS_dep(\n f\"bootstrap-slider-{bootstrap_slider_version}\",\n f\"https://github.com/seiyria/bootstrap-slider/archive/refs/tags/v{bootstrap_slider_version}.zip\",\n)\n\njquery_version = \"2.2.4\"\nensure_JS_dep_single(\n f\"https://code.jquery.com/jquery-{jquery_version}.min.js\",\n)\n# Important: when updating the D3 version, make sure to update the constant\n# D3_JS_FILE in mesa/visualization/ModularVisualization.py.\nd3_version = \"7.4.3\"\nensure_JS_dep_single(\n f\"https://cdnjs.cloudflare.com/ajax/libs/d3/{d3_version}/d3.min.js\",\n out_name=f\"d3-{d3_version}.min.js\",\n)\n# Important: Make sure to update CHART_JS_FILE in\n# mesa/visualization/ModularVisualization.py.\nchartjs_version = \"3.6.1\"\nensure_JS_dep_single(\n f\"https://cdn.jsdelivr.net/npm/chart.js@{chartjs_version}/dist/chart.min.js\",\n out_name=f\"chart-{chartjs_version}.min.js\",\n)\n\n\nsetup(\n name=\"Mesa\",\n version=version,\n description=\"Agent-based modeling (ABM) in Python 3+\",\n long_description=readme,\n author=\"Project Mesa Team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/projectmesa/mesa\",\n packages=find_packages(),\n package_data={\n \"mesa\": [\n \"visualization/templates/*.html\",\n \"visualization/templates/css/*\",\n \"visualization/templates/js/*\",\n \"visualization/templates/external/**/*\",\n ],\n \"cookiecutter-mesa\": [\"cookiecutter-mesa/*\"],\n },\n include_package_data=True,\n install_requires=requires,\n extras_require=extras_require,\n keywords=\"agent based modeling model ABM simulation multi-agent\",\n license=\"Apache 2.0\",\n zip_safe=False,\n classifiers=[\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Life\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n \"Natural Language :: English\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n mesa=mesa.main:cli\n \"\"\",\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}]}
| 2,088 | 220 |
gh_patches_debug_22620
|
rasdani/github-patches
|
git_diff
|
getnikola__nikola-1582
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build fails with IPython 3.0
Trying to use ipython notebooks with the current dev version of IPython (3.0.0) fails building with some warnings etc. because the `nbformat` interface has changed a little:
```
...WARNING: UserWarning: .../ipython-dev/IPython/nbformat/current.py:19: IPython.nbformat.current is deprecated.
- use IPython.nbformat for read/write/validate public API
- use IPython.nbformat.vX directly to composing notebooks of a particular version
...
... WARNING: UserWarning: .../ipython-dev/IPython/nbformat/current.py:75: reads_json is deprecated, use reads
...
AttributeError: cells
```
This is fairly easily fixed and I will send a PR shortly.
</issue>
<code>
[start of nikola/plugins/compile/ipynb/__init__.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2013-2015 Damián Avila and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Implementation of compile_html based on nbconvert."""
28
29 from __future__ import unicode_literals, print_function
30 import io
31 import os
32
33 try:
34 from IPython.nbconvert.exporters import HTMLExporter
35 from IPython.nbformat import current as nbformat
36 from IPython.config import Config
37 flag = True
38 except ImportError:
39 flag = None
40
41 from nikola.plugin_categories import PageCompiler
42 from nikola.utils import makedirs, req_missing
43
44
45 class CompileIPynb(PageCompiler):
46 """Compile IPynb into HTML."""
47
48 name = "ipynb"
49 supports_onefile = False
50 demote_headers = True
51
52 def compile_html(self, source, dest, is_two_file=True):
53 if flag is None:
54 req_missing(['ipython>=1.1.0'], 'build this site (compile ipynb)')
55 makedirs(os.path.dirname(dest))
56 HTMLExporter.default_template = 'basic'
57 c = Config(self.site.config['IPYNB_CONFIG'])
58 exportHtml = HTMLExporter(config=c)
59 with io.open(dest, "w+", encoding="utf8") as out_file:
60 with io.open(source, "r", encoding="utf8") as in_file:
61 nb = in_file.read()
62 nb_json = nbformat.reads_json(nb)
63 (body, resources) = exportHtml.from_notebook_node(nb_json)
64 out_file.write(body)
65
66 def create_post(self, path, **kw):
67 content = kw.pop('content', None)
68 onefile = kw.pop('onefile', False)
69 # is_page is not needed to create the file
70 kw.pop('is_page', False)
71
72 makedirs(os.path.dirname(path))
73 if onefile:
74 raise Exception('The one-file format is not supported by this compiler.')
75 with io.open(path, "w+", encoding="utf8") as fd:
76 if not content.startswith("Write your"):
77 fd.write(content)
78 else:
79 fd.write("""{
80 "metadata": {
81 "name": ""
82 },
83 "nbformat": 3,
84 "nbformat_minor": 0,
85 "worksheets": [
86 {
87 "cells": [
88 {
89 "cell_type": "code",
90 "collapsed": false,
91 "input": [],
92 "language": "python",
93 "metadata": {},
94 "outputs": []
95 }
96 ],
97 "metadata": {}
98 }
99 ]
100 }""")
101
[end of nikola/plugins/compile/ipynb/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nikola/plugins/compile/ipynb/__init__.py b/nikola/plugins/compile/ipynb/__init__.py
--- a/nikola/plugins/compile/ipynb/__init__.py
+++ b/nikola/plugins/compile/ipynb/__init__.py
@@ -31,8 +31,15 @@
import os
try:
+ import IPython
from IPython.nbconvert.exporters import HTMLExporter
- from IPython.nbformat import current as nbformat
+ if IPython.version_info[0] >= 3: # API changed with 3.0.0
+ from IPython import nbformat
+ current_nbformat = nbformat.current_nbformat
+ else:
+ import IPython.nbformat.current as nbformat
+ current_nbformat = 'json'
+
from IPython.config import Config
flag = True
except ImportError:
@@ -58,8 +65,7 @@
exportHtml = HTMLExporter(config=c)
with io.open(dest, "w+", encoding="utf8") as out_file:
with io.open(source, "r", encoding="utf8") as in_file:
- nb = in_file.read()
- nb_json = nbformat.reads_json(nb)
+ nb_json = nbformat.read(in_file, current_nbformat)
(body, resources) = exportHtml.from_notebook_node(nb_json)
out_file.write(body)
|
{"golden_diff": "diff --git a/nikola/plugins/compile/ipynb/__init__.py b/nikola/plugins/compile/ipynb/__init__.py\n--- a/nikola/plugins/compile/ipynb/__init__.py\n+++ b/nikola/plugins/compile/ipynb/__init__.py\n@@ -31,8 +31,15 @@\n import os\n \n try:\n+ import IPython\n from IPython.nbconvert.exporters import HTMLExporter\n- from IPython.nbformat import current as nbformat\n+ if IPython.version_info[0] >= 3: # API changed with 3.0.0\n+ from IPython import nbformat\n+ current_nbformat = nbformat.current_nbformat\n+ else:\n+ import IPython.nbformat.current as nbformat\n+ current_nbformat = 'json'\n+\n from IPython.config import Config\n flag = True\n except ImportError:\n@@ -58,8 +65,7 @@\n exportHtml = HTMLExporter(config=c)\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n- nb = in_file.read()\n- nb_json = nbformat.reads_json(nb)\n+ nb_json = nbformat.read(in_file, current_nbformat)\n (body, resources) = exportHtml.from_notebook_node(nb_json)\n out_file.write(body)\n", "issue": "Build fails with IPython 3.0\nTrying to use ipython notebooks with the current dev version of IPython (3.0.0) fails building with some warnings etc. because the `nbformat` interface has changed a little:\n\n```\n...WARNING: UserWarning: .../ipython-dev/IPython/nbformat/current.py:19: IPython.nbformat.current is deprecated.\n\n- use IPython.nbformat for read/write/validate public API\n- use IPython.nbformat.vX directly to composing notebooks of a particular version\n...\n... WARNING: UserWarning: .../ipython-dev/IPython/nbformat/current.py:75: reads_json is deprecated, use reads\n...\nAttributeError: cells\n```\n\nThis is fairly easily fixed and I will send a PR shortly.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2013-2015 Dami\u00e1n Avila and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Implementation of compile_html based on nbconvert.\"\"\"\n\nfrom __future__ import unicode_literals, print_function\nimport io\nimport os\n\ntry:\n from IPython.nbconvert.exporters import HTMLExporter\n from IPython.nbformat import current as nbformat\n from IPython.config import Config\n flag = True\nexcept ImportError:\n flag = None\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import makedirs, req_missing\n\n\nclass CompileIPynb(PageCompiler):\n \"\"\"Compile IPynb into HTML.\"\"\"\n\n name = \"ipynb\"\n supports_onefile = False\n demote_headers = True\n\n def compile_html(self, source, dest, is_two_file=True):\n if flag is None:\n req_missing(['ipython>=1.1.0'], 'build this site (compile ipynb)')\n makedirs(os.path.dirname(dest))\n HTMLExporter.default_template = 'basic'\n c = Config(self.site.config['IPYNB_CONFIG'])\n exportHtml = HTMLExporter(config=c)\n with io.open(dest, \"w+\", encoding=\"utf8\") as out_file:\n with io.open(source, \"r\", encoding=\"utf8\") as in_file:\n nb = in_file.read()\n nb_json = nbformat.reads_json(nb)\n (body, resources) = exportHtml.from_notebook_node(nb_json)\n out_file.write(body)\n\n def create_post(self, path, **kw):\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not needed to create the file\n kw.pop('is_page', False)\n\n makedirs(os.path.dirname(path))\n if onefile:\n raise Exception('The one-file format is not supported by this compiler.')\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if not content.startswith(\"Write your\"):\n fd.write(content)\n else:\n fd.write(\"\"\"{\n \"metadata\": {\n \"name\": \"\"\n },\n \"nbformat\": 3,\n \"nbformat_minor\": 0,\n \"worksheets\": [\n {\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"collapsed\": false,\n \"input\": [],\n \"language\": \"python\",\n \"metadata\": {},\n \"outputs\": []\n }\n ],\n \"metadata\": {}\n }\n ]\n}\"\"\")\n", "path": "nikola/plugins/compile/ipynb/__init__.py"}]}
| 1,688 | 316 |
gh_patches_debug_10289
|
rasdani/github-patches
|
git_diff
|
alltheplaces__alltheplaces-5661
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Include crawl date in data
I'm looking at an old output directory, trying to work out which release it is.
I think we could add the crawl time and/or build id to the dataset attributes easily.
I think @rjw62 asked for this before, which I promptly forgot. Sorry.
I'll look at this later or on Monday.
</issue>
<code>
[start of locations/exporters/geojson.py]
1 import base64
2 import hashlib
3 import io
4 import json
5 import logging
6 import uuid
7
8 from scrapy.exporters import JsonItemExporter
9 from scrapy.utils.misc import walk_modules
10 from scrapy.utils.python import to_bytes
11 from scrapy.utils.spider import iter_spider_classes
12
13 from locations.settings import SPIDER_MODULES
14
15 mapping = (
16 ("addr_full", "addr:full"),
17 ("housenumber", "addr:housenumber"),
18 ("street", "addr:street"),
19 ("street_address", "addr:street_address"),
20 ("city", "addr:city"),
21 ("state", "addr:state"),
22 ("postcode", "addr:postcode"),
23 ("country", "addr:country"),
24 ("name", "name"),
25 ("phone", "phone"),
26 ("website", "website"),
27 ("twitter", "contact:twitter"),
28 ("facebook", "contact:facebook"),
29 ("email", "contact:email"),
30 ("opening_hours", "opening_hours"),
31 ("image", "image"),
32 ("brand", "brand"),
33 ("brand_wikidata", "brand:wikidata"),
34 ("located_in", "located_in"),
35 ("located_in_wikidata", "located_in:wikidata"),
36 ("nsi_id", "nsi_id"),
37 )
38
39
40 def item_to_properties(item):
41 props = {}
42
43 # Ref is required, unless `no_refs = True` is set in spider
44 if ref := item.get("ref"):
45 props["ref"] = str(ref)
46
47 # Add in the extra bits
48 if extras := item.get("extras"):
49 for key, value in extras.items():
50 if value:
51 # Only export populated values
52 props[key] = value
53
54 # Bring in the optional stuff
55 for map_from, map_to in mapping:
56 if item_value := item.get(map_from):
57 props[map_to] = item_value
58
59 return props
60
61
62 def compute_hash(item):
63 ref = str(item.get("ref") or uuid.uuid1()).encode("utf8")
64 sha1 = hashlib.sha1(ref)
65
66 if spider_name := item.get("extras", {}).get("@spider"):
67 sha1.update(spider_name.encode("utf8"))
68
69 return base64.urlsafe_b64encode(sha1.digest()).decode("utf8")
70
71
72 def find_spider_class(spider_name):
73 if not spider_name:
74 return None
75 for mod in SPIDER_MODULES:
76 for module in walk_modules(mod):
77 for spider_class in iter_spider_classes(module):
78 if spider_name == spider_class.name:
79 return spider_class
80 return None
81
82
83 def get_dataset_attributes(spider_name) -> {}:
84 spider_class = find_spider_class(spider_name)
85 dataset_attributes = getattr(spider_class, "dataset_attributes", {})
86 settings = getattr(spider_class, "custom_settings", {}) or {}
87 if not settings.get("ROBOTSTXT_OBEY", True):
88 # See https://github.com/alltheplaces/alltheplaces/issues/4537
89 dataset_attributes["spider:robots_txt"] = "ignored"
90 dataset_attributes["@spider"] = spider_name
91
92 return dataset_attributes
93
94
95 class GeoJsonExporter(JsonItemExporter):
96 def __init__(self, file, **kwargs):
97 super().__init__(file, **kwargs)
98 self.spider_name = None
99
100 def start_exporting(self):
101 pass
102
103 def export_item(self, item):
104 spider_name = item.get("extras", {}).get("@spider")
105 if self.first_item:
106 self.spider_name = spider_name
107 self.write_geojson_header()
108 if spider_name != self.spider_name:
109 # It really should not happen that a single exporter instance
110 # handles output from different spiders. If it does happen,
111 # we rather crash than emit GeoJSON with the wrong dataset
112 # properties, which may include legally relevant license tags.
113 raise ValueError(
114 f"harvest from multiple spiders ({spider_name, self.spider_name}) cannot be written to same GeoJSON file"
115 )
116
117 super().export_item(item)
118
119 def _get_serialized_fields(self, item, default_value=None, include_empty=None):
120 feature = []
121 feature.append(("type", "Feature"))
122 feature.append(("id", compute_hash(item)))
123 feature.append(("properties", item_to_properties(item)))
124
125 lat = item.get("lat")
126 lon = item.get("lon")
127 geometry = item.get("geometry")
128 if lat and lon and not geometry:
129 try:
130 geometry = {
131 "type": "Point",
132 "coordinates": [float(item["lon"]), float(item["lat"])],
133 }
134 except ValueError:
135 logging.warning("Couldn't convert lat (%s) and lon (%s) to float", lat, lon)
136 feature.append(("geometry", geometry))
137
138 return feature
139
140 def write_geojson_header(self):
141 header = io.StringIO()
142 header.write('{"type":"FeatureCollection","dataset_attributes":')
143 json.dump(
144 get_dataset_attributes(self.spider_name), header, ensure_ascii=False, separators=(",", ":"), sort_keys=True
145 )
146 header.write(',"features":[\n')
147 self.file.write(to_bytes(header.getvalue(), self.encoding))
148
149 def finish_exporting(self):
150 self.file.write(b"\n]}\n")
151
[end of locations/exporters/geojson.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/locations/exporters/geojson.py b/locations/exporters/geojson.py
--- a/locations/exporters/geojson.py
+++ b/locations/exporters/geojson.py
@@ -1,4 +1,5 @@
import base64
+import datetime
import hashlib
import io
import json
@@ -88,6 +89,7 @@
# See https://github.com/alltheplaces/alltheplaces/issues/4537
dataset_attributes["spider:robots_txt"] = "ignored"
dataset_attributes["@spider"] = spider_name
+ dataset_attributes["spider:collection_time"] = datetime.datetime.now().isoformat()
return dataset_attributes
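For reference, a small sketch of what the added attribute looks like at export time; the surrounding dictionary values are illustrative, and only the `spider:collection_time` key comes from the diff above.

```python
# Sketch: the attribute added above is an ISO-8601 timestamp taken at export time.
import datetime

dataset_attributes = {"@spider": "example_spider"}  # illustrative value
dataset_attributes["spider:collection_time"] = datetime.datetime.now().isoformat()
print(dataset_attributes["spider:collection_time"])  # e.g. '2024-01-01T12:34:56.789012'
```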
|
{"golden_diff": "diff --git a/locations/exporters/geojson.py b/locations/exporters/geojson.py\n--- a/locations/exporters/geojson.py\n+++ b/locations/exporters/geojson.py\n@@ -1,4 +1,5 @@\n import base64\n+import datetime\n import hashlib\n import io\n import json\n@@ -88,6 +89,7 @@\n # See https://github.com/alltheplaces/alltheplaces/issues/4537\n dataset_attributes[\"spider:robots_txt\"] = \"ignored\"\n dataset_attributes[\"@spider\"] = spider_name\n+ dataset_attributes[\"spider:collection_time\"] = datetime.datetime.now().isoformat()\n \n return dataset_attributes\n", "issue": "Include crawl date in data\nI'm looking at an old output directory, trying to workout which release it is.\r\n\r\nI think we could add the crawl time and/or build id to the dataset attributes easily.\r\n\r\nI think @rjw62 asked for this before. Which I promptly forgot. Sorry.\r\n\r\nI'll look at this later or Monday.\n", "before_files": [{"content": "import base64\nimport hashlib\nimport io\nimport json\nimport logging\nimport uuid\n\nfrom scrapy.exporters import JsonItemExporter\nfrom scrapy.utils.misc import walk_modules\nfrom scrapy.utils.python import to_bytes\nfrom scrapy.utils.spider import iter_spider_classes\n\nfrom locations.settings import SPIDER_MODULES\n\nmapping = (\n (\"addr_full\", \"addr:full\"),\n (\"housenumber\", \"addr:housenumber\"),\n (\"street\", \"addr:street\"),\n (\"street_address\", \"addr:street_address\"),\n (\"city\", \"addr:city\"),\n (\"state\", \"addr:state\"),\n (\"postcode\", \"addr:postcode\"),\n (\"country\", \"addr:country\"),\n (\"name\", \"name\"),\n (\"phone\", \"phone\"),\n (\"website\", \"website\"),\n (\"twitter\", \"contact:twitter\"),\n (\"facebook\", \"contact:facebook\"),\n (\"email\", \"contact:email\"),\n (\"opening_hours\", \"opening_hours\"),\n (\"image\", \"image\"),\n (\"brand\", \"brand\"),\n (\"brand_wikidata\", \"brand:wikidata\"),\n (\"located_in\", \"located_in\"),\n (\"located_in_wikidata\", \"located_in:wikidata\"),\n (\"nsi_id\", \"nsi_id\"),\n)\n\n\ndef item_to_properties(item):\n props = {}\n\n # Ref is required, unless `no_refs = True` is set in spider\n if ref := item.get(\"ref\"):\n props[\"ref\"] = str(ref)\n\n # Add in the extra bits\n if extras := item.get(\"extras\"):\n for key, value in extras.items():\n if value:\n # Only export populated values\n props[key] = value\n\n # Bring in the optional stuff\n for map_from, map_to in mapping:\n if item_value := item.get(map_from):\n props[map_to] = item_value\n\n return props\n\n\ndef compute_hash(item):\n ref = str(item.get(\"ref\") or uuid.uuid1()).encode(\"utf8\")\n sha1 = hashlib.sha1(ref)\n\n if spider_name := item.get(\"extras\", {}).get(\"@spider\"):\n sha1.update(spider_name.encode(\"utf8\"))\n\n return base64.urlsafe_b64encode(sha1.digest()).decode(\"utf8\")\n\n\ndef find_spider_class(spider_name):\n if not spider_name:\n return None\n for mod in SPIDER_MODULES:\n for module in walk_modules(mod):\n for spider_class in iter_spider_classes(module):\n if spider_name == spider_class.name:\n return spider_class\n return None\n\n\ndef get_dataset_attributes(spider_name) -> {}:\n spider_class = find_spider_class(spider_name)\n dataset_attributes = getattr(spider_class, \"dataset_attributes\", {})\n settings = getattr(spider_class, \"custom_settings\", {}) or {}\n if not settings.get(\"ROBOTSTXT_OBEY\", True):\n # See https://github.com/alltheplaces/alltheplaces/issues/4537\n dataset_attributes[\"spider:robots_txt\"] = \"ignored\"\n dataset_attributes[\"@spider\"] = spider_name\n\n return 
dataset_attributes\n\n\nclass GeoJsonExporter(JsonItemExporter):\n def __init__(self, file, **kwargs):\n super().__init__(file, **kwargs)\n self.spider_name = None\n\n def start_exporting(self):\n pass\n\n def export_item(self, item):\n spider_name = item.get(\"extras\", {}).get(\"@spider\")\n if self.first_item:\n self.spider_name = spider_name\n self.write_geojson_header()\n if spider_name != self.spider_name:\n # It really should not happen that a single exporter instance\n # handles output from different spiders. If it does happen,\n # we rather crash than emit GeoJSON with the wrong dataset\n # properties, which may include legally relevant license tags.\n raise ValueError(\n f\"harvest from multiple spiders ({spider_name, self.spider_name}) cannot be written to same GeoJSON file\"\n )\n\n super().export_item(item)\n\n def _get_serialized_fields(self, item, default_value=None, include_empty=None):\n feature = []\n feature.append((\"type\", \"Feature\"))\n feature.append((\"id\", compute_hash(item)))\n feature.append((\"properties\", item_to_properties(item)))\n\n lat = item.get(\"lat\")\n lon = item.get(\"lon\")\n geometry = item.get(\"geometry\")\n if lat and lon and not geometry:\n try:\n geometry = {\n \"type\": \"Point\",\n \"coordinates\": [float(item[\"lon\"]), float(item[\"lat\"])],\n }\n except ValueError:\n logging.warning(\"Couldn't convert lat (%s) and lon (%s) to float\", lat, lon)\n feature.append((\"geometry\", geometry))\n\n return feature\n\n def write_geojson_header(self):\n header = io.StringIO()\n header.write('{\"type\":\"FeatureCollection\",\"dataset_attributes\":')\n json.dump(\n get_dataset_attributes(self.spider_name), header, ensure_ascii=False, separators=(\",\", \":\"), sort_keys=True\n )\n header.write(',\"features\":[\\n')\n self.file.write(to_bytes(header.getvalue(), self.encoding))\n\n def finish_exporting(self):\n self.file.write(b\"\\n]}\\n\")\n", "path": "locations/exporters/geojson.py"}]}
| 2,096 | 154 |
gh_patches_debug_11428
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-11825
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Unable to update Warehouse address
### What are you trying to achieve?
I'm trying to update the warehouse address with the country set to "UK". According to the addressValidationRules query, the required fields are
```
streetAddress1",
"city",
"postalCode"
```
### Steps to reproduce the problem
1. In shipping zone update (or when creating a new one), select country UK
2. Fill all fields with the necessary information
3. Try to save changes
### What did you expect to happen?
Being able to update the warehouse address properly.
### Logs
API responds with error -> Error code REQUIRED on field countryArea
### Environment
Saleor version: 3.10
</issue>
<code>
[start of saleor/account/forms.py]
1 from phonenumbers.phonenumberutil import country_code_for_region
2
3 from .i18n import AddressMetaForm, get_address_form_class
4
5
6 def get_address_form(
7 data, country_code, initial=None, instance=None, enable_normalization=True, **kwargs
8 ):
9 country_form = AddressMetaForm(data, initial=initial)
10 if country_form.is_valid():
11 country_code = country_form.cleaned_data["country"]
12
13 if initial is None and country_code:
14 initial = {}
15 if country_code:
16 initial["phone"] = "+{}".format(country_code_for_region(country_code))
17
18 address_form_class = get_address_form_class(country_code)
19
20 if instance is not None:
21 address_form_class = get_address_form_class(instance.country.code)
22 address_form = address_form_class(
23 data, instance=instance, enable_normalization=enable_normalization, **kwargs
24 )
25 else:
26 initial_address = initial
27 address_form = address_form_class(
28 data or None,
29 initial=initial_address,
30 enable_normalization=enable_normalization,
31 **kwargs,
32 )
33
34 if hasattr(address_form.fields["country_area"], "choices"):
35 choices = address_form.fields["country_area"].choices
36 choices = [(choice[1], choice[1]) for choice in choices]
37 address_form.fields["country_area"].choices = choices
38 return address_form
39
[end of saleor/account/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/account/forms.py b/saleor/account/forms.py
--- a/saleor/account/forms.py
+++ b/saleor/account/forms.py
@@ -14,11 +14,9 @@
initial = {}
if country_code:
initial["phone"] = "+{}".format(country_code_for_region(country_code))
-
address_form_class = get_address_form_class(country_code)
if instance is not None:
- address_form_class = get_address_form_class(instance.country.code)
address_form = address_form_class(
data, instance=instance, enable_normalization=enable_normalization, **kwargs
)
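A short sketch of exercising the patched helper; the payload field names and the `existing_address` placeholder are assumptions for illustration, and only `get_address_form` itself comes from the listing above.

```python
# Sketch: with the patch, the form class follows the requested country code,
# so UK validation rules (street address, city, postcode) apply as expected.
from saleor.account.forms import get_address_form

payload = {  # assumed field names, for illustration only
    "country": "GB",
    "street_address_1": "1 Example Street",
    "city": "London",
    "postal_code": "SW1A 1AA",
}
existing_address = None  # in practice, the existing Address instance being edited (assumption)
form = get_address_form(payload, country_code="GB", instance=existing_address)
if not form.is_valid():
    print(form.errors)  # should no longer report country_area as required
```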
|
{"golden_diff": "diff --git a/saleor/account/forms.py b/saleor/account/forms.py\n--- a/saleor/account/forms.py\n+++ b/saleor/account/forms.py\n@@ -14,11 +14,9 @@\n initial = {}\n if country_code:\n initial[\"phone\"] = \"+{}\".format(country_code_for_region(country_code))\n-\n address_form_class = get_address_form_class(country_code)\n \n if instance is not None:\n- address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(\n data, instance=instance, enable_normalization=enable_normalization, **kwargs\n )\n", "issue": "Bug: Unable to update Warehouse address\n### What are you trying to achieve?\n\nI'm trying to update the warehouse update, with the country set to \"UK\", according to addressValidationRules query, the required fields are \r\n```\r\nstreetAddress1\",\r\n\"city\",\r\n\"postalCode\"\r\n```\n\n### Steps to reproduce the problem\n\n1. In shipping zone update/creating a new on select country UK\r\n2. Fill all fields with the necessary information\r\n3. Try to save changes\n\n### What did you expect to happen?\n\nBeing able to update the warehouse address properly.\n\n### Logs\n\nApi responds with error -> Error code REQUIRED on field countryAreaAPI\n\n### Environment\n\nSaleor version: 3.10\r\n\n", "before_files": [{"content": "from phonenumbers.phonenumberutil import country_code_for_region\n\nfrom .i18n import AddressMetaForm, get_address_form_class\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, enable_normalization=True, **kwargs\n):\n country_form = AddressMetaForm(data, initial=initial)\n if country_form.is_valid():\n country_code = country_form.cleaned_data[\"country\"]\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial[\"phone\"] = \"+{}\".format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if instance is not None:\n address_form_class = get_address_form_class(instance.country.code)\n address_form = address_form_class(\n data, instance=instance, enable_normalization=enable_normalization, **kwargs\n )\n else:\n initial_address = initial\n address_form = address_form_class(\n data or None,\n initial=initial_address,\n enable_normalization=enable_normalization,\n **kwargs,\n )\n\n if hasattr(address_form.fields[\"country_area\"], \"choices\"):\n choices = address_form.fields[\"country_area\"].choices\n choices = [(choice[1], choice[1]) for choice in choices]\n address_form.fields[\"country_area\"].choices = choices\n return address_form\n", "path": "saleor/account/forms.py"}]}
| 1,040 | 137 |
gh_patches_debug_35033
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-2635
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
feat: LightGlue-ONNX
#### Changes
<!-- Please include a summary of the change and which issue is fixed. -->
This PR adds a wrapper class for loading and running LightGlue-ONNX models via ONNXRuntime.
<!-- Please also include relevant motivation and context. -->
Re: https://github.com/fabio-sim/LightGlue-ONNX/issues/40
<!-- List any dependencies that are required for this change. -->
`onnxruntime-gpu>=1.16` is required as a dependency to instantiate the new `OnnxLightGlue` class (importing the class still works without it installed).
related to #2559
#### Type of change
<!-- Please delete options that are not relevant. -->
- [x] 📚 Documentation Update
- [x] 🧪 Tests Cases
- [x] 🔬 New feature (non-breaking change which adds functionality)
- [x] 📝 This change requires a documentation update
#### Checklist
- [x] My code follows the style guidelines of this project
- [x] I have performed a self-review of my own code
- [x] I have commented my code, particularly in hard-to-understand areas
- [x] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings
- [ ] Did you update CHANGELOG in case of a major change?
#### Example Usage
Sample images can be found [here](https://github.com/fabio-sim/LightGlue-ONNX/tree/main/assets).
```python
import torch
from kornia.feature import DISK, OnnxLightGlue
from kornia.io import ImageLoadType, load_image
device = torch.device("cuda")
img0 = load_image("sacre_coeur1.jpg", ImageLoadType.RGB32, device=device)[None]
img1 = load_image("sacre_coeur2.jpg", ImageLoadType.RGB32, device=device)[None]
extractor = DISK.from_pretrained("depth", device=device).eval().to(device)
data = {}
with torch.no_grad():
for key, img in [("image0", img0), ("image1", img1)]:
features = extractor(img, n=None, window_size=5, score_threshold=0.0, pad_if_not_divisible=True)
data[key] = {
"image": img,
"keypoints": features[0].keypoints[None],
"keypoint_scores": features[0].detection_scores[None],
"descriptors": features[0].descriptors[None],
}
matcher = OnnxLightGlue(weights="disk_fp16", device=device)
result = matcher(data)
print(result)
```
</issue>
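Building on the example usage above, a brief sketch of consuming the matcher output; it assumes only the output contract documented in the wrapper below (`matches` of shape S x 2 and `scores` of shape S).

```python
# Sketch: gather matched keypoint coordinates from the result dictionary.
matches = result["matches"]    # (S, 2) index pairs into the two keypoint sets
scores = result["scores"]      # (S,) confidence per match
kpts0 = data["image0"]["keypoints"][0][matches[:, 0]]  # matched points in image 0
kpts1 = data["image1"]["keypoints"][0][matches[:, 1]]  # matched points in image 1
print(f"{len(scores)} matches, mean confidence {scores.mean().item():.3f}")
```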
<code>
[start of kornia/feature/lightglue_onnx/lightglue.py]
1 from __future__ import annotations
2
3 from typing import ClassVar
4
5 import torch
6
7 from kornia.core import Device, Tensor
8 from kornia.core.check import KORNIA_CHECK, KORNIA_CHECK_SAME_DEVICES, KORNIA_CHECK_SHAPE
9
10 from .utils import download_onnx_from_url, normalize_keypoints
11
12 try:
13 import numpy as np
14 import onnxruntime as ort
15 except ImportError:
16 np = None # type: ignore
17 ort = None
18
19 __all__ = ["OnnxLightGlue"]
20
21
22 class OnnxLightGlue:
23 r"""Wrapper for loading LightGlue-ONNX models and running inference via ONNXRuntime.
24
25 LightGlue :cite:`LightGlue2023` performs fast descriptor-based deep keypoint matching.
26 This module requires `onnxruntime` to be installed.
27
28 If you have trained your own LightGlue model, see https://github.com/fabio-sim/LightGlue-ONNX
29 for how to export the model to ONNX and optimize it.
30
31 Args:
32 weights: Pretrained weights, or a path to your own exported ONNX model. Available pretrained weights are:
33 `disk`, `superpoint`, `disk_fp16`, and `superpoint_fp16`. Defaults to `disk_fp16`.
34 device: Device to run inference on. Currently, only `cuda` is supported. Defaults to `cuda`.
35 """
36 MODEL_URLS: ClassVar[dict[str, str]] = {
37 "disk": "https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/disk_lightglue_fused.onnx",
38 "superpoint": "https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/superpoint_lightglue_fused.onnx",
39 "disk_fp16": "https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/disk_lightglue_fused_fp16.onnx",
40 "superpoint_fp16": "https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/superpoint_lightglue_fused_fp16.onnx",
41 }
42
43 required_data_keys: ClassVar[list[str]] = ["image0", "image1"]
44
45 def __init__(self, weights: str = "disk_fp16", device: Device = None) -> None:
46 KORNIA_CHECK(ort is not None, "onnxruntime is not installed.")
47 KORNIA_CHECK(np is not None, "numpy is not installed.")
48
49 if device is None:
50 device = torch.device("cuda")
51 elif isinstance(device, str):
52 device = torch.device(device)
53 self.device = device
54
55 if device.type == "cpu":
56 raise NotImplementedError("CPUExecutionProvider is not supported yet for Multihead-Attention op.")
57 elif device.type == "cuda":
58 providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
59 else:
60 raise ValueError(f"Unsupported device {device}")
61
62 if weights in self.MODEL_URLS:
63 weights = download_onnx_from_url(self.MODEL_URLS[weights])
64
65 self.session = ort.InferenceSession(weights, providers=providers)
66
67 def __call__(self, data: dict[str, dict[str, Tensor]]) -> dict[str, Tensor]:
68 return self.forward(data)
69
70 def forward(self, data: dict[str, dict[str, Tensor]]) -> dict[str, Tensor]:
71 r"""Match keypoints and descriptors between two images.
72
73 The output contains the matches (the indices of the matching keypoint pairs between the first and second image)
74 and the corresponding confidence scores.
75 Only a batch size of 1 is supported.
76
77 Args:
78 data: Dictionary containing both images and the keypoints and descriptors thereof.
79
80 Returns:
81 output: Dictionary containing the following matches and scores.
82
83 `data`:
84 image0: dict
85 keypoints (`float32`): [1 x M x 2]
86 descriptors (`float32`): [1 x M x D]
87 image: [1 x C x H x W] or image_size: [1 x 2]
88 image1: dict
89 keypoints (`float32`): [1 x N x 2]
90 descriptors (`float32`): [1 x N x D]
91 image: [1 x C x H x W] or image_size: [1 x 2]
92
93 `output`:
94 matches (`int64`): [S x 2]
95 scores (`float32`): [S]
96 """
97 # Input validation.
98 for key in self.required_data_keys:
99 KORNIA_CHECK(key in data, f'Missing key {key} in data')
100 data0, data1 = data['image0'], data['image1']
101 kpts0_, kpts1_ = data0['keypoints'].contiguous(), data1['keypoints'].contiguous()
102 desc0, desc1 = data0['descriptors'].contiguous(), data1['descriptors'].contiguous()
103 KORNIA_CHECK_SAME_DEVICES([kpts0_, desc0, kpts1_, desc1], "Wrong device")
104 KORNIA_CHECK(kpts0_.device.type == self.device.type, "Wrong device")
105 KORNIA_CHECK(torch.float32 == kpts0_.dtype == kpts1_.dtype == desc0.dtype == desc1.dtype, "Wrong dtype")
106 KORNIA_CHECK_SHAPE(kpts0_, ["1", "M", "2"])
107 KORNIA_CHECK_SHAPE(kpts1_, ["1", "N", "2"])
108 KORNIA_CHECK_SHAPE(desc0, ["1", "M", "D"])
109 KORNIA_CHECK_SHAPE(desc1, ["1", "N", "D"])
110 KORNIA_CHECK(kpts0_.shape[1] == desc0.shape[1], "Number of keypoints does not match number of descriptors")
111 KORNIA_CHECK(kpts1_.shape[1] == desc1.shape[1], "Number of keypoints does not match number of descriptors")
112 KORNIA_CHECK(desc0.shape[2] == desc1.shape[2], "Descriptors' dimensions do not match")
113
114 # Normalize keypoints.
115 size0, size1 = data0.get('image_size'), data1.get('image_size')
116 size0 = size0 if size0 is not None else data0['image'].shape[-2:][::-1] # type: ignore
117 size1 = size1 if size1 is not None else data1['image'].shape[-2:][::-1] # type: ignore
118
119 kpts0 = normalize_keypoints(kpts0_, size=size0) # type: ignore
120 kpts1 = normalize_keypoints(kpts1_, size=size1) # type: ignore
121
122 KORNIA_CHECK(torch.all(kpts0 >= -1).item() and torch.all(kpts0 <= 1).item(), "") # type: ignore
123 KORNIA_CHECK(torch.all(kpts1 >= -1).item() and torch.all(kpts1 <= 1).item(), "") # type: ignore
124
125 # Inference.
126 lightglue_inputs = {"kpts0": kpts0, "kpts1": kpts1, "desc0": desc0, "desc1": desc1}
127 lightglue_outputs = ["matches0", "mscores0"]
128 binding = self.session.io_binding()
129
130 for name, tensor in lightglue_inputs.items():
131 binding.bind_input(
132 name,
133 device_type=self.device.type,
134 device_id=0,
135 element_type=np.float32,
136 shape=tuple(tensor.shape),
137 buffer_ptr=tensor.data_ptr(),
138 )
139
140 for name in lightglue_outputs:
141 binding.bind_output(name, device_type=self.device.type, device_id=0)
142
143 self.session.run_with_iobinding(binding)
144
145 matches, mscores = binding.get_outputs()
146
147 # TODO: The following is an unnecessary copy. Replace with a better solution when torch supports
148 # constructing a tensor from a data pointer, or when ORT supports converting to torch tensor.
149 # https://github.com/microsoft/onnxruntime/issues/15963
150 outputs = {
151 "matches": torch.from_dlpack(matches.numpy()).to(self.device),
152 "scores": torch.from_dlpack(mscores.numpy()).to(self.device),
153 }
154 return outputs
155
[end of kornia/feature/lightglue_onnx/lightglue.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kornia/feature/lightglue_onnx/lightglue.py b/kornia/feature/lightglue_onnx/lightglue.py
--- a/kornia/feature/lightglue_onnx/lightglue.py
+++ b/kornia/feature/lightglue_onnx/lightglue.py
@@ -29,9 +29,9 @@
for how to export the model to ONNX and optimize it.
Args:
- weights: Pretrained weights, or a path to your own exported ONNX model. Available pretrained weights are:
- `disk`, `superpoint`, `disk_fp16`, and `superpoint_fp16`. Defaults to `disk_fp16`.
- device: Device to run inference on. Currently, only `cuda` is supported. Defaults to `cuda`.
+ weights: Pretrained weights, or a path to your own exported ONNX model. Available pretrained weights
+ are ``'disk'``, ``'superpoint'``, ``'disk_fp16'``, and ``'superpoint_fp16'``.
+ device: Device to run inference on. Currently, only ``'cuda'`` is supported. Defaults to ``'cuda'``.
"""
MODEL_URLS: ClassVar[dict[str, str]] = {
"disk": "https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/disk_lightglue_fused.onnx",
@@ -78,21 +78,27 @@
data: Dictionary containing both images and the keypoints and descriptors thereof.
Returns:
- output: Dictionary containing the following matches and scores.
-
- `data`:
- image0: dict
- keypoints (`float32`): [1 x M x 2]
- descriptors (`float32`): [1 x M x D]
- image: [1 x C x H x W] or image_size: [1 x 2]
- image1: dict
- keypoints (`float32`): [1 x N x 2]
- descriptors (`float32`): [1 x N x D]
- image: [1 x C x H x W] or image_size: [1 x 2]
-
- `output`:
- matches (`int64`): [S x 2]
- scores (`float32`): [S]
+ Dictionary containing the matches and scores.
+
+ ``data`` (``dict``):
+ ``image0`` (``dict``):
+ ``keypoints`` (`float32`): :math:`(1, M, 2)`
+
+ ``descriptors`` (`float32`): :math:`(1, M, D)`
+
+ ``image``: :math:`(1, C, H, W)` or ``image_size``: :math:`(1, 2)`
+
+ ``image1`` (``dict``):
+ ``keypoints`` (`float32`): :math:`(1, N, 2)`
+
+ ``descriptors`` (`float32`): :math:`(1, N, D)`
+
+ ``image``: :math:`(1, C, H, W)` or ``image_size``: :math:`(1, 2)`
+
+ ``output`` (``dict``):
+ ``matches`` (`int64`): :math:`(S, 2)`
+
+ ``scores`` (`float32`): :math:`(S)`
"""
# Input validation.
for key in self.required_data_keys:
|
{"golden_diff": "diff --git a/kornia/feature/lightglue_onnx/lightglue.py b/kornia/feature/lightglue_onnx/lightglue.py\n--- a/kornia/feature/lightglue_onnx/lightglue.py\n+++ b/kornia/feature/lightglue_onnx/lightglue.py\n@@ -29,9 +29,9 @@\n for how to export the model to ONNX and optimize it.\n \n Args:\n- weights: Pretrained weights, or a path to your own exported ONNX model. Available pretrained weights are:\n- `disk`, `superpoint`, `disk_fp16`, and `superpoint_fp16`. Defaults to `disk_fp16`.\n- device: Device to run inference on. Currently, only `cuda` is supported. Defaults to `cuda`.\n+ weights: Pretrained weights, or a path to your own exported ONNX model. Available pretrained weights\n+ are ``'disk'``, ``'superpoint'``, ``'disk_fp16'``, and ``'superpoint_fp16'``.\n+ device: Device to run inference on. Currently, only ``'cuda'`` is supported. Defaults to ``'cuda'``.\n \"\"\"\n MODEL_URLS: ClassVar[dict[str, str]] = {\n \"disk\": \"https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/disk_lightglue_fused.onnx\",\n@@ -78,21 +78,27 @@\n data: Dictionary containing both images and the keypoints and descriptors thereof.\n \n Returns:\n- output: Dictionary containing the following matches and scores.\n-\n- `data`:\n- image0: dict\n- keypoints (`float32`): [1 x M x 2]\n- descriptors (`float32`): [1 x M x D]\n- image: [1 x C x H x W] or image_size: [1 x 2]\n- image1: dict\n- keypoints (`float32`): [1 x N x 2]\n- descriptors (`float32`): [1 x N x D]\n- image: [1 x C x H x W] or image_size: [1 x 2]\n-\n- `output`:\n- matches (`int64`): [S x 2]\n- scores (`float32`): [S]\n+ Dictionary containing the matches and scores.\n+\n+ ``data`` (``dict``):\n+ ``image0`` (``dict``):\n+ ``keypoints`` (`float32`): :math:`(1, M, 2)`\n+\n+ ``descriptors`` (`float32`): :math:`(1, M, D)`\n+\n+ ``image``: :math:`(1, C, H, W)` or ``image_size``: :math:`(1, 2)`\n+\n+ ``image1`` (``dict``):\n+ ``keypoints`` (`float32`): :math:`(1, N, 2)`\n+\n+ ``descriptors`` (`float32`): :math:`(1, N, D)`\n+\n+ ``image``: :math:`(1, C, H, W)` or ``image_size``: :math:`(1, 2)`\n+\n+ ``output`` (``dict``):\n+ ``matches`` (`int64`): :math:`(S, 2)`\n+\n+ ``scores`` (`float32`): :math:`(S)`\n \"\"\"\n # Input validation.\n for key in self.required_data_keys:\n", "issue": "feat: LightGlue-ONNX\n#### Changes\r\n<!-- Please include a summary of the change and which issue is fixed. -->\r\nThis PR adds a wrapper class for loading and running LightGlue-ONNX models via ONNXRuntime.\r\n<!-- Please also include relevant motivation and context. -->\r\nRe: https://github.com/fabio-sim/LightGlue-ONNX/issues/40\r\n<!-- List any dependencies that are required for this change. -->\r\n`onnxruntime-gpu>=1.16` is required as a dependency to instantiate the new `OnnxLightGlue` class. (Importing it will still work without installing).\r\nrelated to #2559 \r\n\r\n#### Type of change\r\n<!-- Please delete options that are not relevant. 
-->\r\n- [x] \ud83d\udcda Documentation Update\r\n- [x] \ud83e\uddea Tests Cases\r\n- [x] \ud83d\udd2c New feature (non-breaking change which adds functionality)\r\n- [x] \ud83d\udcdd This change requires a documentation update\r\n\r\n#### Checklist\r\n\r\n- [x] My code follows the style guidelines of this project\r\n- [x] I have performed a self-review of my own code\r\n- [x] I have commented my code, particularly in hard-to-understand areas\r\n- [x] I have made corresponding changes to the documentation\r\n- [x] My changes generate no new warnings\r\n- [ ] Did you update CHANGELOG in case of a major change?\r\n\r\n#### Example Usage\r\nSample images can be found [here](https://github.com/fabio-sim/LightGlue-ONNX/tree/main/assets).\r\n```python\r\nimport torch\r\n\r\nfrom kornia.feature import DISK, OnnxLightGlue\r\nfrom kornia.io import ImageLoadType, load_image\r\n\r\ndevice = torch.device(\"cuda\")\r\n\r\nimg0 = load_image(\"sacre_coeur1.jpg\", ImageLoadType.RGB32, device=device)[None]\r\nimg1 = load_image(\"sacre_coeur2.jpg\", ImageLoadType.RGB32, device=device)[None]\r\n\r\nextractor = DISK.from_pretrained(\"depth\", device=device).eval().to(device)\r\n\r\ndata = {}\r\nwith torch.no_grad():\r\n for key, img in [(\"image0\", img0), (\"image1\", img1)]:\r\n features = extractor(img, n=None, window_size=5, score_threshold=0.0, pad_if_not_divisible=True)\r\n data[key] = {\r\n \"image\": img,\r\n \"keypoints\": features[0].keypoints[None],\r\n \"keypoint_scores\": features[0].detection_scores[None],\r\n \"descriptors\": features[0].descriptors[None],\r\n }\r\n\r\nmatcher = OnnxLightGlue(weights=\"disk_fp16\", device=device)\r\nresult = matcher(data)\r\nprint(result)\r\n```\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import ClassVar\n\nimport torch\n\nfrom kornia.core import Device, Tensor\nfrom kornia.core.check import KORNIA_CHECK, KORNIA_CHECK_SAME_DEVICES, KORNIA_CHECK_SHAPE\n\nfrom .utils import download_onnx_from_url, normalize_keypoints\n\ntry:\n import numpy as np\n import onnxruntime as ort\nexcept ImportError:\n np = None # type: ignore\n ort = None\n\n__all__ = [\"OnnxLightGlue\"]\n\n\nclass OnnxLightGlue:\n r\"\"\"Wrapper for loading LightGlue-ONNX models and running inference via ONNXRuntime.\n\n LightGlue :cite:`LightGlue2023` performs fast descriptor-based deep keypoint matching.\n This module requires `onnxruntime` to be installed.\n\n If you have trained your own LightGlue model, see https://github.com/fabio-sim/LightGlue-ONNX\n for how to export the model to ONNX and optimize it.\n\n Args:\n weights: Pretrained weights, or a path to your own exported ONNX model. Available pretrained weights are:\n `disk`, `superpoint`, `disk_fp16`, and `superpoint_fp16`. Defaults to `disk_fp16`.\n device: Device to run inference on. Currently, only `cuda` is supported. 
Defaults to `cuda`.\n \"\"\"\n MODEL_URLS: ClassVar[dict[str, str]] = {\n \"disk\": \"https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/disk_lightglue_fused.onnx\",\n \"superpoint\": \"https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/superpoint_lightglue_fused.onnx\",\n \"disk_fp16\": \"https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/disk_lightglue_fused_fp16.onnx\",\n \"superpoint_fp16\": \"https://github.com/fabio-sim/LightGlue-ONNX/releases/download/v1.0.0/superpoint_lightglue_fused_fp16.onnx\",\n }\n\n required_data_keys: ClassVar[list[str]] = [\"image0\", \"image1\"]\n\n def __init__(self, weights: str = \"disk_fp16\", device: Device = None) -> None:\n KORNIA_CHECK(ort is not None, \"onnxruntime is not installed.\")\n KORNIA_CHECK(np is not None, \"numpy is not installed.\")\n\n if device is None:\n device = torch.device(\"cuda\")\n elif isinstance(device, str):\n device = torch.device(device)\n self.device = device\n\n if device.type == \"cpu\":\n raise NotImplementedError(\"CPUExecutionProvider is not supported yet for Multihead-Attention op.\")\n elif device.type == \"cuda\":\n providers = [\"CUDAExecutionProvider\", \"CPUExecutionProvider\"]\n else:\n raise ValueError(f\"Unsupported device {device}\")\n\n if weights in self.MODEL_URLS:\n weights = download_onnx_from_url(self.MODEL_URLS[weights])\n\n self.session = ort.InferenceSession(weights, providers=providers)\n\n def __call__(self, data: dict[str, dict[str, Tensor]]) -> dict[str, Tensor]:\n return self.forward(data)\n\n def forward(self, data: dict[str, dict[str, Tensor]]) -> dict[str, Tensor]:\n r\"\"\"Match keypoints and descriptors between two images.\n\n The output contains the matches (the indices of the matching keypoint pairs between the first and second image)\n and the corresponding confidence scores.\n Only a batch size of 1 is supported.\n\n Args:\n data: Dictionary containing both images and the keypoints and descriptors thereof.\n\n Returns:\n output: Dictionary containing the following matches and scores.\n\n `data`:\n image0: dict\n keypoints (`float32`): [1 x M x 2]\n descriptors (`float32`): [1 x M x D]\n image: [1 x C x H x W] or image_size: [1 x 2]\n image1: dict\n keypoints (`float32`): [1 x N x 2]\n descriptors (`float32`): [1 x N x D]\n image: [1 x C x H x W] or image_size: [1 x 2]\n\n `output`:\n matches (`int64`): [S x 2]\n scores (`float32`): [S]\n \"\"\"\n # Input validation.\n for key in self.required_data_keys:\n KORNIA_CHECK(key in data, f'Missing key {key} in data')\n data0, data1 = data['image0'], data['image1']\n kpts0_, kpts1_ = data0['keypoints'].contiguous(), data1['keypoints'].contiguous()\n desc0, desc1 = data0['descriptors'].contiguous(), data1['descriptors'].contiguous()\n KORNIA_CHECK_SAME_DEVICES([kpts0_, desc0, kpts1_, desc1], \"Wrong device\")\n KORNIA_CHECK(kpts0_.device.type == self.device.type, \"Wrong device\")\n KORNIA_CHECK(torch.float32 == kpts0_.dtype == kpts1_.dtype == desc0.dtype == desc1.dtype, \"Wrong dtype\")\n KORNIA_CHECK_SHAPE(kpts0_, [\"1\", \"M\", \"2\"])\n KORNIA_CHECK_SHAPE(kpts1_, [\"1\", \"N\", \"2\"])\n KORNIA_CHECK_SHAPE(desc0, [\"1\", \"M\", \"D\"])\n KORNIA_CHECK_SHAPE(desc1, [\"1\", \"N\", \"D\"])\n KORNIA_CHECK(kpts0_.shape[1] == desc0.shape[1], \"Number of keypoints does not match number of descriptors\")\n KORNIA_CHECK(kpts1_.shape[1] == desc1.shape[1], \"Number of keypoints does not match number of descriptors\")\n KORNIA_CHECK(desc0.shape[2] == desc1.shape[2], \"Descriptors' dimensions do not 
match\")\n\n # Normalize keypoints.\n size0, size1 = data0.get('image_size'), data1.get('image_size')\n size0 = size0 if size0 is not None else data0['image'].shape[-2:][::-1] # type: ignore\n size1 = size1 if size1 is not None else data1['image'].shape[-2:][::-1] # type: ignore\n\n kpts0 = normalize_keypoints(kpts0_, size=size0) # type: ignore\n kpts1 = normalize_keypoints(kpts1_, size=size1) # type: ignore\n\n KORNIA_CHECK(torch.all(kpts0 >= -1).item() and torch.all(kpts0 <= 1).item(), \"\") # type: ignore\n KORNIA_CHECK(torch.all(kpts1 >= -1).item() and torch.all(kpts1 <= 1).item(), \"\") # type: ignore\n\n # Inference.\n lightglue_inputs = {\"kpts0\": kpts0, \"kpts1\": kpts1, \"desc0\": desc0, \"desc1\": desc1}\n lightglue_outputs = [\"matches0\", \"mscores0\"]\n binding = self.session.io_binding()\n\n for name, tensor in lightglue_inputs.items():\n binding.bind_input(\n name,\n device_type=self.device.type,\n device_id=0,\n element_type=np.float32,\n shape=tuple(tensor.shape),\n buffer_ptr=tensor.data_ptr(),\n )\n\n for name in lightglue_outputs:\n binding.bind_output(name, device_type=self.device.type, device_id=0)\n\n self.session.run_with_iobinding(binding)\n\n matches, mscores = binding.get_outputs()\n\n # TODO: The following is an unnecessary copy. Replace with a better solution when torch supports\n # constructing a tensor from a data pointer, or when ORT supports converting to torch tensor.\n # https://github.com/microsoft/onnxruntime/issues/15963\n outputs = {\n \"matches\": torch.from_dlpack(matches.numpy()).to(self.device),\n \"scores\": torch.from_dlpack(mscores.numpy()).to(self.device),\n }\n return outputs\n", "path": "kornia/feature/lightglue_onnx/lightglue.py"}]}
| 3,337 | 797 |
gh_patches_debug_24695
|
rasdani/github-patches
|
git_diff
|
cornellius-gp__gpytorch-484
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Recursively initialize Module parameters
Say I have an `ExactGP` called `gp`. It would be great if I could just do `gp.initialize(kwargs)` and have all the parameters initialized recursively. This would allow us to put all the initialization values in one place.
Note that it would have to raise an error if there were a parameter name collision.
</issue>
<code>
[start of gpytorch/module.py]
1 #!/usr/bin/env python3
2
3 from collections import OrderedDict
4
5 import torch
6 from torch import nn
7 from torch.distributions import Distribution
8
9 from .lazy import LazyTensor
10 from .utils.deprecation import DeprecationError
11
12
13 class Module(nn.Module):
14 def __init__(self):
15 super().__init__()
16 self._added_loss_terms = OrderedDict()
17 self._priors = OrderedDict()
18
19 def __call__(self, *inputs, **kwargs):
20 outputs = self.forward(*inputs, **kwargs)
21 if isinstance(outputs, list):
22 return [_validate_module_outputs(output) for output in outputs]
23 return _validate_module_outputs(outputs)
24
25 def _get_module_and_name(self, parameter_name):
26 """Get module and name from full parameter name."""
27 module, name = parameter_name.split(".", 1)
28 if module in self._modules:
29 return self.__getattr__(module), name
30 else:
31 raise AttributeError(
32 "Invalid parameter name {}. {} has no module {}".format(parameter_name, type(self).__name__, module)
33 )
34
35 def added_loss_terms(self):
36 for _, strategy in self.named_added_loss_terms():
37 yield strategy
38
39 def forward(self, *inputs, **kwargs):
40 raise NotImplementedError
41
42 def hyperparameters(self):
43 for _, param in self.named_hyperparameters():
44 yield param
45
46 def initialize(self, **kwargs):
47 """
48 Set a value for a parameter
49
50 kwargs: (param_name, value) - parameter to initialize
51 Value can take the form of a tensor, a float, or an int
52 """
53
54 for name, val in kwargs.items():
55 if isinstance(val, int):
56 val = float(val)
57 if not hasattr(self, name):
58 raise AttributeError("Unknown parameter {p} for {c}".format(p=name, c=self.__class__.__name__))
59 elif name not in self._parameters:
60 setattr(self, name, val)
61 elif torch.is_tensor(val):
62 try:
63 self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))
64 except RuntimeError:
65 self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))
66
67 elif isinstance(val, float):
68 self.__getattr__(name).data.fill_(val)
69 else:
70 raise AttributeError("Type {t} not valid for initializing parameter {p}".format(t=type(val), p=name))
71
72 # Ensure value is contained in support of prior (if present)
73 prior_name = "_".join([name, "prior"])
74 if prior_name in self._priors:
75 prior, closure, _ = self._priors[prior_name]
76 try:
77 prior._validate_sample(closure())
78 except ValueError as e:
79 raise ValueError("Invalid input value for prior {}. Error:\n{}".format(prior_name, e))
80
81 return self
82
83 def named_added_loss_terms(self):
84 """Returns an iterator over module variational strategies, yielding both
85 the name of the variational strategy as well as the strategy itself.
86
87 Yields:
88 (string, VariationalStrategy): Tuple containing the name of the
89 strategy and the strategy
90
91 """
92 return _extract_named_added_loss_terms(module=self, memo=None, prefix="")
93
94 def named_hyperparameters(self):
95 for name, param in self.named_parameters():
96 if "variational_" not in name:
97 yield name, param
98
99 def named_priors(self, memo=None, prefix=""):
100 """Returns an iterator over the module's priors, yielding the name of the prior,
101 the prior, the associated parameter names, and the transformation callable.
102
103 Yields:
104 (string, Prior, tuple((Parameter, callable)), callable): Tuple containing:
105 - the name of the prior
106 - the prior
107 - a tuple of tuples (param, transform), one for each of the parameters associated with the prior
108 - the prior's transform to be called on the parameters
109 """
110 return _extract_named_priors(module=self, memo=None, prefix="")
111
112 def named_variational_parameters(self):
113 for name, param in self.named_parameters():
114 if "variational_" in name:
115 yield name, param
116
117 def register_added_loss_term(self, name):
118 self._added_loss_terms[name] = None
119
120 def register_parameter(self, name, parameter, prior=None):
121 r"""
122 Adds a parameter to the module. The parameter can be accessed as an attribute using the given name.
123
124 Args:
125 :attr:`name` (str):
126 The name of the parameter
127 :attr:`parameter` (torch.nn.Parameter):
128 The parameter
129 """
130 if prior is not None:
131 raise DeprecationError(
132 "Setting a prior upon registering a parameter is deprecated. Please use "
133 ".register_prior('{name}_prior', prior, '{name}') instead.".format(name=name)
134 )
135 if "_parameters" not in self.__dict__:
136 raise AttributeError("Cannot assign parameter before Module.__init__() call")
137 super().register_parameter(name, parameter)
138
139 def register_prior(self, name, prior, param_or_closure, setting_closure=None):
140 """
141 Adds a prior to the module. The prior can be accessed as an attribute using the given name.
142
143 Args:
144 :attr:`name` (str):
145 The name of the prior
146 :attr:`prior` (Prior):
147 The prior to be registered`
148 :attr:`param_or_closure` (string or callable):
149 Either the name of the parameter, or a closure (which upon calling evalutes a function on
150 one or more parameters):
151 single parameter without a transform: `.register_prior("foo_prior", foo_prior, "foo_param")`
152 transform a single parameter (e.g. put a log-Normal prior on it):
153 `.register_prior("foo_prior", NormalPrior(0, 1), lambda: torch.log(self.foo_param))`
154 function of multiple parameters:
155 `.register_prior("foo2_prior", foo2_prior, lambda: f(self.param1, self.param2)))`
156 :attr:`setting_closure` (callable, optional):
157 A function taking in a tensor in (transformed) parameter space and initializing the
158 internal parameter representation to the proper value by applying the inverse transform.
159 Enables setting parametres directly in the transformed space, as well as sampling
160 parameter values from priors (see `sample_from_prior`)
161
162 """
163 if isinstance(param_or_closure, str):
164 if param_or_closure not in self._parameters:
165 raise AttributeError(
166 "Unknown parameter {name} for {module}".format(
167 name=param_or_closure, module=self.__class__.__name__
168 )
169 + "Make sure the parameter is registered before registering a prior."
170 )
171
172 def closure():
173 return self._parameters[param_or_closure]
174
175 if setting_closure is not None:
176 raise RuntimeError("Must specify a closure instead of a parameter name when providing setting_closure")
177
178 def setting_closure(val):
179 return self.initialize(**{param_or_closure: val})
180
181 else:
182 closure = param_or_closure
183 self.add_module(name, prior)
184 self._priors[name] = (prior, closure, setting_closure)
185
186 def sample_from_prior(self, prior_name):
187 """Sample parameter values from prior. Modifies the module's parameters in-place."""
188 if prior_name not in self._priors:
189 raise RuntimeError("Unknown prior name '{}'".format(prior_name))
190 prior, _, setting_closure = self._priors[prior_name]
191 if setting_closure is None:
192 raise RuntimeError("Must provide inverse transform to be able to sample from prior.")
193 setting_closure(prior.sample())
194
195 def update_added_loss_term(self, name, added_loss_term):
196 from .mlls import AddedLossTerm
197
198 if not isinstance(added_loss_term, AddedLossTerm):
199 raise RuntimeError("added_loss_term must be a AddedLossTerm")
200 if name not in self._added_loss_terms.keys():
201 raise RuntimeError("added_loss_term {} not registered".format(name))
202 self._added_loss_terms[name] = added_loss_term
203
204 def variational_parameters(self):
205 for _, param in self.named_variational_parameters():
206 yield param
207
208 def __getattr__(self, name):
209 try:
210 return super().__getattr__(name)
211 except AttributeError as e:
212 try:
213 return super().__getattribute__(name)
214 except AttributeError:
215 raise e
216
217
218 def _validate_module_outputs(outputs):
219 if isinstance(outputs, tuple):
220 if not all(
221 torch.is_tensor(output) or isinstance(output, Distribution) or isinstance(output, LazyTensor)
222 for output in outputs
223 ):
224 raise RuntimeError(
225 "All outputs must be a Distribution, torch.Tensor, or LazyTensor. "
226 "Got {}".format([output.__class__.__name__ for output in outputs])
227 )
228 if len(outputs) == 1:
229 outputs = outputs[0]
230 return outputs
231 elif torch.is_tensor(outputs) or isinstance(outputs, Distribution) or isinstance(outputs, LazyTensor):
232 return outputs
233 else:
234 raise RuntimeError(
235 "Output must be a Distribution, torch.Tensor, or LazyTensor. Got {}".format(outputs.__class__.__name__)
236 )
237
238
239 def _extract_named_added_loss_terms(module, memo=None, prefix=""):
240 if memo is None:
241 memo = set()
242 if hasattr(module, "_added_loss_terms"):
243 for name, strategy in module._added_loss_terms.items():
244 if strategy is not None and strategy not in memo:
245 memo.add(strategy)
246 yield prefix + ("." if prefix else "") + name, strategy
247 for mname, module_ in module.named_children():
248 submodule_prefix = prefix + ("." if prefix else "") + mname
249 for name, strategy in _extract_named_added_loss_terms(module=module_, memo=memo, prefix=submodule_prefix):
250 yield name, strategy
251
252
253 def _extract_named_priors(module, memo=None, prefix=""):
254 if memo is None:
255 memo = set()
256 if hasattr(module, "_priors"):
257 for name, (prior, closure, inv_closure) in module._priors.items():
258 if prior is not None and prior not in memo:
259 memo.add(prior)
260 full_name = ("." if prefix else "").join([prefix, name])
261 yield full_name, prior, closure, inv_closure
262 for mname, module_ in module.named_children():
263 submodule_prefix = prefix + ("." if prefix else "") + mname
264 for name, prior, closure, inv_closure in _extract_named_priors(module_, memo=memo, prefix=submodule_prefix):
265 yield name, prior, closure, inv_closure
266
[end of gpytorch/module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gpytorch/module.py b/gpytorch/module.py
--- a/gpytorch/module.py
+++ b/gpytorch/module.py
@@ -47,14 +47,26 @@
"""
Set a value for a parameter
- kwargs: (param_name, value) - parameter to initialize
+ kwargs: (param_name, value) - parameter to initialize.
+ Can also initialize recursively by passing in the full name of a
+ parameter. For example if model has attribute model.likelihood,
+ we can initialize the noise with either
+ `model.initialize(**{'likelihood.noise': 0.1})`
+ or
+ `model.likelihood.initialize(noise=0.1)`.
+ The former method would allow users to more easily store the
+ initialization values as one object.
+
Value can take the form of a tensor, a float, or an int
"""
for name, val in kwargs.items():
if isinstance(val, int):
val = float(val)
- if not hasattr(self, name):
+ if '.' in name:
+ module, name = self._get_module_and_name(name)
+ module.initialize(**{name: val})
+ elif not hasattr(self, name):
raise AttributeError("Unknown parameter {p} for {c}".format(p=name, c=self.__class__.__name__))
elif name not in self._parameters:
setattr(self, name, val)
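A minimal sketch of the recursive call pattern enabled by the change above; the `likelihood.noise` name follows the new docstring, while the model object and the kernel lengthscale name are illustrative assumptions about the module structure.

```python
# Sketch: set nested hyperparameters through dotted names in a single call.
# Assumes `model` is a GPyTorch model with a `likelihood` submodule and a
# `covar_module.base_kernel` exposing a lengthscale (assumption).
hypers = {
    "likelihood.noise": 0.1,
    "covar_module.base_kernel.lengthscale": 0.5,
}
model.initialize(**hypers)
```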
|
{"golden_diff": "diff --git a/gpytorch/module.py b/gpytorch/module.py\n--- a/gpytorch/module.py\n+++ b/gpytorch/module.py\n@@ -47,14 +47,26 @@\n \"\"\"\n Set a value for a parameter\n \n- kwargs: (param_name, value) - parameter to initialize\n+ kwargs: (param_name, value) - parameter to initialize.\n+ Can also initialize recursively by passing in the full name of a\n+ parameter. For example if model has attribute model.likelihood,\n+ we can initialize the noise with either\n+ `model.initialize(**{'likelihood.noise': 0.1})`\n+ or\n+ `model.likelihood.initialize(noise=0.1)`.\n+ The former method would allow users to more easily store the\n+ initialization values as one object.\n+\n Value can take the form of a tensor, a float, or an int\n \"\"\"\n \n for name, val in kwargs.items():\n if isinstance(val, int):\n val = float(val)\n- if not hasattr(self, name):\n+ if '.' in name:\n+ module, name = self._get_module_and_name(name)\n+ module.initialize(**{name: val})\n+ elif not hasattr(self, name):\n raise AttributeError(\"Unknown parameter {p} for {c}\".format(p=name, c=self.__class__.__name__))\n elif name not in self._parameters:\n setattr(self, name, val)\n", "issue": "Recursively initialize Module parameters\nSay I have an `ExactGP` called `gp`. It would be great if I could just do `gp.initialize(kwargs)` and all the parameters will be initialized recursively. This would allow us to put all the initialization values in one place. \r\n\r\nNote that it would have to raise an error if there were a parameter name collision.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom collections import OrderedDict\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import Distribution\n\nfrom .lazy import LazyTensor\nfrom .utils.deprecation import DeprecationError\n\n\nclass Module(nn.Module):\n def __init__(self):\n super().__init__()\n self._added_loss_terms = OrderedDict()\n self._priors = OrderedDict()\n\n def __call__(self, *inputs, **kwargs):\n outputs = self.forward(*inputs, **kwargs)\n if isinstance(outputs, list):\n return [_validate_module_outputs(output) for output in outputs]\n return _validate_module_outputs(outputs)\n\n def _get_module_and_name(self, parameter_name):\n \"\"\"Get module and name from full parameter name.\"\"\"\n module, name = parameter_name.split(\".\", 1)\n if module in self._modules:\n return self.__getattr__(module), name\n else:\n raise AttributeError(\n \"Invalid parameter name {}. 
{} has no module {}\".format(parameter_name, type(self).__name__, module)\n )\n\n def added_loss_terms(self):\n for _, strategy in self.named_added_loss_terms():\n yield strategy\n\n def forward(self, *inputs, **kwargs):\n raise NotImplementedError\n\n def hyperparameters(self):\n for _, param in self.named_hyperparameters():\n yield param\n\n def initialize(self, **kwargs):\n \"\"\"\n Set a value for a parameter\n\n kwargs: (param_name, value) - parameter to initialize\n Value can take the form of a tensor, a float, or an int\n \"\"\"\n\n for name, val in kwargs.items():\n if isinstance(val, int):\n val = float(val)\n if not hasattr(self, name):\n raise AttributeError(\"Unknown parameter {p} for {c}\".format(p=name, c=self.__class__.__name__))\n elif name not in self._parameters:\n setattr(self, name, val)\n elif torch.is_tensor(val):\n try:\n self.__getattr__(name).data.copy_(val.expand_as(self.__getattr__(name)))\n except RuntimeError:\n self.__getattr__(name).data.copy_(val.view_as(self.__getattr__(name)))\n\n elif isinstance(val, float):\n self.__getattr__(name).data.fill_(val)\n else:\n raise AttributeError(\"Type {t} not valid for initializing parameter {p}\".format(t=type(val), p=name))\n\n # Ensure value is contained in support of prior (if present)\n prior_name = \"_\".join([name, \"prior\"])\n if prior_name in self._priors:\n prior, closure, _ = self._priors[prior_name]\n try:\n prior._validate_sample(closure())\n except ValueError as e:\n raise ValueError(\"Invalid input value for prior {}. Error:\\n{}\".format(prior_name, e))\n\n return self\n\n def named_added_loss_terms(self):\n \"\"\"Returns an iterator over module variational strategies, yielding both\n the name of the variational strategy as well as the strategy itself.\n\n Yields:\n (string, VariationalStrategy): Tuple containing the name of the\n strategy and the strategy\n\n \"\"\"\n return _extract_named_added_loss_terms(module=self, memo=None, prefix=\"\")\n\n def named_hyperparameters(self):\n for name, param in self.named_parameters():\n if \"variational_\" not in name:\n yield name, param\n\n def named_priors(self, memo=None, prefix=\"\"):\n \"\"\"Returns an iterator over the module's priors, yielding the name of the prior,\n the prior, the associated parameter names, and the transformation callable.\n\n Yields:\n (string, Prior, tuple((Parameter, callable)), callable): Tuple containing:\n - the name of the prior\n - the prior\n - a tuple of tuples (param, transform), one for each of the parameters associated with the prior\n - the prior's transform to be called on the parameters\n \"\"\"\n return _extract_named_priors(module=self, memo=None, prefix=\"\")\n\n def named_variational_parameters(self):\n for name, param in self.named_parameters():\n if \"variational_\" in name:\n yield name, param\n\n def register_added_loss_term(self, name):\n self._added_loss_terms[name] = None\n\n def register_parameter(self, name, parameter, prior=None):\n r\"\"\"\n Adds a parameter to the module. The parameter can be accessed as an attribute using the given name.\n\n Args:\n :attr:`name` (str):\n The name of the parameter\n :attr:`parameter` (torch.nn.Parameter):\n The parameter\n \"\"\"\n if prior is not None:\n raise DeprecationError(\n \"Setting a prior upon registering a parameter is deprecated. 
Please use \"\n \".register_prior('{name}_prior', prior, '{name}') instead.\".format(name=name)\n )\n if \"_parameters\" not in self.__dict__:\n raise AttributeError(\"Cannot assign parameter before Module.__init__() call\")\n super().register_parameter(name, parameter)\n\n def register_prior(self, name, prior, param_or_closure, setting_closure=None):\n \"\"\"\n Adds a prior to the module. The prior can be accessed as an attribute using the given name.\n\n Args:\n :attr:`name` (str):\n The name of the prior\n :attr:`prior` (Prior):\n The prior to be registered`\n :attr:`param_or_closure` (string or callable):\n Either the name of the parameter, or a closure (which upon calling evalutes a function on\n one or more parameters):\n single parameter without a transform: `.register_prior(\"foo_prior\", foo_prior, \"foo_param\")`\n transform a single parameter (e.g. put a log-Normal prior on it):\n `.register_prior(\"foo_prior\", NormalPrior(0, 1), lambda: torch.log(self.foo_param))`\n function of multiple parameters:\n `.register_prior(\"foo2_prior\", foo2_prior, lambda: f(self.param1, self.param2)))`\n :attr:`setting_closure` (callable, optional):\n A function taking in a tensor in (transformed) parameter space and initializing the\n internal parameter representation to the proper value by applying the inverse transform.\n Enables setting parametres directly in the transformed space, as well as sampling\n parameter values from priors (see `sample_from_prior`)\n\n \"\"\"\n if isinstance(param_or_closure, str):\n if param_or_closure not in self._parameters:\n raise AttributeError(\n \"Unknown parameter {name} for {module}\".format(\n name=param_or_closure, module=self.__class__.__name__\n )\n + \"Make sure the parameter is registered before registering a prior.\"\n )\n\n def closure():\n return self._parameters[param_or_closure]\n\n if setting_closure is not None:\n raise RuntimeError(\"Must specify a closure instead of a parameter name when providing setting_closure\")\n\n def setting_closure(val):\n return self.initialize(**{param_or_closure: val})\n\n else:\n closure = param_or_closure\n self.add_module(name, prior)\n self._priors[name] = (prior, closure, setting_closure)\n\n def sample_from_prior(self, prior_name):\n \"\"\"Sample parameter values from prior. 
Modifies the module's parameters in-place.\"\"\"\n if prior_name not in self._priors:\n raise RuntimeError(\"Unknown prior name '{}'\".format(prior_name))\n prior, _, setting_closure = self._priors[prior_name]\n if setting_closure is None:\n raise RuntimeError(\"Must provide inverse transform to be able to sample from prior.\")\n setting_closure(prior.sample())\n\n def update_added_loss_term(self, name, added_loss_term):\n from .mlls import AddedLossTerm\n\n if not isinstance(added_loss_term, AddedLossTerm):\n raise RuntimeError(\"added_loss_term must be a AddedLossTerm\")\n if name not in self._added_loss_terms.keys():\n raise RuntimeError(\"added_loss_term {} not registered\".format(name))\n self._added_loss_terms[name] = added_loss_term\n\n def variational_parameters(self):\n for _, param in self.named_variational_parameters():\n yield param\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError as e:\n try:\n return super().__getattribute__(name)\n except AttributeError:\n raise e\n\n\ndef _validate_module_outputs(outputs):\n if isinstance(outputs, tuple):\n if not all(\n torch.is_tensor(output) or isinstance(output, Distribution) or isinstance(output, LazyTensor)\n for output in outputs\n ):\n raise RuntimeError(\n \"All outputs must be a Distribution, torch.Tensor, or LazyTensor. \"\n \"Got {}\".format([output.__class__.__name__ for output in outputs])\n )\n if len(outputs) == 1:\n outputs = outputs[0]\n return outputs\n elif torch.is_tensor(outputs) or isinstance(outputs, Distribution) or isinstance(outputs, LazyTensor):\n return outputs\n else:\n raise RuntimeError(\n \"Output must be a Distribution, torch.Tensor, or LazyTensor. Got {}\".format(outputs.__class__.__name__)\n )\n\n\ndef _extract_named_added_loss_terms(module, memo=None, prefix=\"\"):\n if memo is None:\n memo = set()\n if hasattr(module, \"_added_loss_terms\"):\n for name, strategy in module._added_loss_terms.items():\n if strategy is not None and strategy not in memo:\n memo.add(strategy)\n yield prefix + (\".\" if prefix else \"\") + name, strategy\n for mname, module_ in module.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n for name, strategy in _extract_named_added_loss_terms(module=module_, memo=memo, prefix=submodule_prefix):\n yield name, strategy\n\n\ndef _extract_named_priors(module, memo=None, prefix=\"\"):\n if memo is None:\n memo = set()\n if hasattr(module, \"_priors\"):\n for name, (prior, closure, inv_closure) in module._priors.items():\n if prior is not None and prior not in memo:\n memo.add(prior)\n full_name = (\".\" if prefix else \"\").join([prefix, name])\n yield full_name, prior, closure, inv_closure\n for mname, module_ in module.named_children():\n submodule_prefix = prefix + (\".\" if prefix else \"\") + mname\n for name, prior, closure, inv_closure in _extract_named_priors(module_, memo=memo, prefix=submodule_prefix):\n yield name, prior, closure, inv_closure\n", "path": "gpytorch/module.py"}]}
| 3,562 | 316 |
gh_patches_debug_27471
|
rasdani/github-patches
|
git_diff
|
wger-project__wger-235
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicate weight entries in CSV import
It seems it's possible to trigger a uniqueness constraint error using the import CSV function for the weight entries. I could have sworn this was already fixed, but it looks like it isn't.
During import the view should make sure that duplicate entries are not saved.
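To make the expected behaviour concrete, here is a rough, illustrative sketch of the deduplication the import needs (this is not the actual wger code -- the real view would additionally have to skip dates that already exist in the database via `WeightEntry.objects.filter(date=..., user=...)`):

```python
import datetime
import decimal

# Illustrative stand-ins for rows already parsed from the CSV: (date, weight) pairs.
rows = [
    (datetime.date(2015, 1, 1), decimal.Decimal("80.5")),
    (datetime.date(2015, 1, 1), decimal.Decimal("80.7")),  # same date -> duplicate
    (datetime.date(2015, 1, 2), decimal.Decimal("80.2")),
]

seen_dates = set()
distinct_entries = []
rejected_rows = []

for parsed_date, parsed_weight in rows:
    if parsed_date in seen_dates:
        # A second entry for the same date would violate the uniqueness
        # constraint, so report it back instead of saving it.
        rejected_rows.append((parsed_date, parsed_weight))
        continue
    seen_dates.add(parsed_date)
    distinct_entries.append((parsed_date, parsed_weight))

print(distinct_entries)  # one entry per date
print(rejected_rows)     # duplicates that should be shown back to the user
```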
</issue>
<code>
[start of wger/weight/helpers.py]
1 # -*- coding: utf-8 -*-
2
3 # This file is part of wger Workout Manager.
4 #
5 # wger Workout Manager is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # wger Workout Manager is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU Affero General Public License
16
17 import logging
18 import six
19 import datetime
20 import decimal
21 import csv
22 import json
23 from collections import OrderedDict
24
25 from django.core.cache import cache
26
27 from wger.utils.helpers import DecimalJsonEncoder
28 from wger.utils.cache import cache_mapper
29 from wger.weight.models import WeightEntry
30 from wger.manager.models import WorkoutSession
31 from wger.manager.models import WorkoutLog
32
33 logger = logging.getLogger(__name__)
34
35
36 def parse_weight_csv(request, cleaned_data):
37
38 try:
39 dialect = csv.Sniffer().sniff(cleaned_data['csv_input'])
40 except csv.Error:
41 dialect = 'excel'
42
43 # csv.reader expects a file-like object, so use StringIO
44 parsed_csv = csv.reader(six.StringIO(cleaned_data['csv_input']),
45 dialect)
46 distinct_weight_entries = []
47 weight_list = []
48 error_list = []
49
50 # Process the CSV items first
51 for row in parsed_csv:
52 try:
53 parsed_date = datetime.datetime.strptime(row[0], cleaned_data['date_format'])
54 parsed_weight = decimal.Decimal(row[1].replace(',', '.'))
55 duplicate_date_in_db = WeightEntry.objects.filter(date=parsed_date,
56 user=request.user).exists()
57 # within the list there are no duplicates
58 unique_among_csv = (parsed_date, parsed_weight) not in distinct_weight_entries
59 # there is no existing weight entry in the database for that date
60 unique_in_db = not duplicate_date_in_db
61
62 if unique_among_csv and unique_in_db:
63 distinct_weight_entries.append((parsed_date, parsed_weight))
64 else:
65 error_list.append(row)
66
67 except (ValueError, IndexError, decimal.InvalidOperation):
68 error_list.append(row)
69
70 # Create the valid weight entries
71 for date, weight in distinct_weight_entries:
72 weight_list.append(WeightEntry(date=date,
73 weight=weight,
74 user=request.user))
75
76 return (weight_list, error_list)
77
78
79 def group_log_entries(user, year, month, day=None):
80 '''
81 Processes and regroups a list of log entries so they can be more easily
82 used in the different calendar pages
83
84 :param user: the user to filter the logs for
85 :param year: year
86 :param month: month
87 :param day: optional, day
88
89 :return: a dictionary with grouped logs by date and exercise
90 '''
91 if day:
92 log_hash = hash((user.pk, year, month, day))
93 else:
94 log_hash = hash((user.pk, year, month))
95
96 # There can be workout sessions without any associated log entries, so it is
97 # not enough so simply iterate through the logs
98 if day:
99 filter_date = datetime.date(year, month, day)
100 logs = WorkoutLog.objects.filter(user=user, date=filter_date)
101 sessions = WorkoutSession.objects.filter(user=user, date=filter_date)
102
103 else:
104 logs = WorkoutLog.objects.filter(user=user,
105 date__year=year,
106 date__month=month)
107
108 sessions = WorkoutSession.objects.filter(user=user,
109 date__year=year,
110 date__month=month)
111
112 logs = logs.order_by('date', 'id')
113 out = cache.get(cache_mapper.get_workout_log_list(log_hash))
114 # out = OrderedDict()
115
116 if not out:
117 out = OrderedDict()
118
119 # Logs
120 for entry in logs:
121 if not out.get(entry.date):
122 out[entry.date] = {'date': entry.date,
123 'workout': entry.workout,
124 'session': entry.get_workout_session(),
125 'logs': OrderedDict()}
126
127 if not out[entry.date]['logs'].get(entry.exercise):
128 out[entry.date]['logs'][entry.exercise] = []
129
130 out[entry.date]['logs'][entry.exercise].append(entry)
131
132 # Sessions
133 for entry in sessions:
134 if not out.get(entry.date):
135 out[entry.date] = {'date': entry.date,
136 'workout': entry.workout,
137 'session': entry,
138 'logs': {}}
139
140 cache.set(cache_mapper.get_workout_log_list(log_hash), out)
141 return out
142
143
144 def process_log_entries(logs):
145 '''
146 Processes and regroups a list of log entries so they can be rendered
147 and passed to the D3 library to render a chart
148 '''
149
150 reps = []
151 entry_log = OrderedDict()
152 chart_data = []
153 max_weight = {}
154
155 # Group by date
156 for entry in logs:
157 if entry.reps not in reps:
158 reps.append(entry.reps)
159
160 if not entry_log.get(entry.date):
161 entry_log[entry.date] = []
162 entry_log[entry.date].append(entry)
163
164 # Find the maximum weight per date per repetition.
165 # If on a day there are several entries with the same number of
166 # repetitions, but different weights, only the entry with the
167 # higher weight is shown in the chart
168 if not max_weight.get(entry.date):
169 max_weight[entry.date] = {entry.reps: entry.weight}
170
171 if not max_weight[entry.date].get(entry.reps):
172 max_weight[entry.date][entry.reps] = entry.weight
173
174 if entry.weight > max_weight[entry.date][entry.reps]:
175 max_weight[entry.date][entry.reps] = entry.weight
176
177 # Group by repetitions
178 reps_list = {}
179 for entry in logs:
180 temp = {'date': '%s' % entry.date,
181 'id': 'manager:workout:log-%s' % entry.id}
182
183 # Only unique date, rep and weight combinations
184 if reps_list.get((entry.date, entry.reps, entry.weight)):
185 continue
186 else:
187 reps_list[(entry.date, entry.reps, entry.weight)] = True
188
189 # Only add if weight is the maximum for the day
190 if entry.weight != max_weight[entry.date][entry.reps]:
191 continue
192
193 for rep in reps:
194 if entry.reps == rep:
195 temp[rep] = entry.weight
196 else:
197 # Mark entries without data, this is later filtered out by D3.
198 # We use the string 'n.a' instead of 0 to differentiate actual exercises
199 # where no weight was used.
200 temp[rep] = 'n.a'
201 chart_data.append(temp)
202
203 return entry_log, json.dumps(chart_data, cls=DecimalJsonEncoder)
204
[end of wger/weight/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wger/weight/helpers.py b/wger/weight/helpers.py
--- a/wger/weight/helpers.py
+++ b/wger/weight/helpers.py
@@ -44,6 +44,7 @@
parsed_csv = csv.reader(six.StringIO(cleaned_data['csv_input']),
dialect)
distinct_weight_entries = []
+ entry_dates = set()
weight_list = []
error_list = []
@@ -54,13 +55,15 @@
parsed_weight = decimal.Decimal(row[1].replace(',', '.'))
duplicate_date_in_db = WeightEntry.objects.filter(date=parsed_date,
user=request.user).exists()
- # within the list there are no duplicates
- unique_among_csv = (parsed_date, parsed_weight) not in distinct_weight_entries
+ # within the list there are no duplicate dates
+ unique_among_csv = parsed_date not in entry_dates
+
# there is no existing weight entry in the database for that date
unique_in_db = not duplicate_date_in_db
if unique_among_csv and unique_in_db:
distinct_weight_entries.append((parsed_date, parsed_weight))
+ entry_dates.add(parsed_date)
else:
error_list.append(row)
|
{"golden_diff": "diff --git a/wger/weight/helpers.py b/wger/weight/helpers.py\n--- a/wger/weight/helpers.py\n+++ b/wger/weight/helpers.py\n@@ -44,6 +44,7 @@\n parsed_csv = csv.reader(six.StringIO(cleaned_data['csv_input']),\n dialect)\n distinct_weight_entries = []\n+ entry_dates = set()\n weight_list = []\n error_list = []\n \n@@ -54,13 +55,15 @@\n parsed_weight = decimal.Decimal(row[1].replace(',', '.'))\n duplicate_date_in_db = WeightEntry.objects.filter(date=parsed_date,\n user=request.user).exists()\n- # within the list there are no duplicates\n- unique_among_csv = (parsed_date, parsed_weight) not in distinct_weight_entries\n+ # within the list there are no duplicate dates\n+ unique_among_csv = parsed_date not in entry_dates\n+\n # there is no existing weight entry in the database for that date\n unique_in_db = not duplicate_date_in_db\n \n if unique_among_csv and unique_in_db:\n distinct_weight_entries.append((parsed_date, parsed_weight))\n+ entry_dates.add(parsed_date)\n else:\n error_list.append(row)\n", "issue": "Duplicate weight entries in CSV import\nIt seems it's possible to trigger a uniqueness constraint error using the import CSV function for the weight entries. I could have sworn this was already fixed, but it looks it isn't.\n\nDuring import the view should make sure that duplicate entries are not saved.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This file is part of wger Workout Manager.\n#\n# wger Workout Manager is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# wger Workout Manager is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n\nimport logging\nimport six\nimport datetime\nimport decimal\nimport csv\nimport json\nfrom collections import OrderedDict\n\nfrom django.core.cache import cache\n\nfrom wger.utils.helpers import DecimalJsonEncoder\nfrom wger.utils.cache import cache_mapper\nfrom wger.weight.models import WeightEntry\nfrom wger.manager.models import WorkoutSession\nfrom wger.manager.models import WorkoutLog\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_weight_csv(request, cleaned_data):\n\n try:\n dialect = csv.Sniffer().sniff(cleaned_data['csv_input'])\n except csv.Error:\n dialect = 'excel'\n\n # csv.reader expects a file-like object, so use StringIO\n parsed_csv = csv.reader(six.StringIO(cleaned_data['csv_input']),\n dialect)\n distinct_weight_entries = []\n weight_list = []\n error_list = []\n\n # Process the CSV items first\n for row in parsed_csv:\n try:\n parsed_date = datetime.datetime.strptime(row[0], cleaned_data['date_format'])\n parsed_weight = decimal.Decimal(row[1].replace(',', '.'))\n duplicate_date_in_db = WeightEntry.objects.filter(date=parsed_date,\n user=request.user).exists()\n # within the list there are no duplicates\n unique_among_csv = (parsed_date, parsed_weight) not in distinct_weight_entries\n # there is no existing weight entry in the database for that date\n unique_in_db = not duplicate_date_in_db\n\n if unique_among_csv and unique_in_db:\n distinct_weight_entries.append((parsed_date, parsed_weight))\n else:\n error_list.append(row)\n\n except (ValueError, IndexError, decimal.InvalidOperation):\n error_list.append(row)\n\n # Create the valid weight entries\n for date, weight in distinct_weight_entries:\n weight_list.append(WeightEntry(date=date,\n weight=weight,\n user=request.user))\n\n return (weight_list, error_list)\n\n\ndef group_log_entries(user, year, month, day=None):\n '''\n Processes and regroups a list of log entries so they can be more easily\n used in the different calendar pages\n\n :param user: the user to filter the logs for\n :param year: year\n :param month: month\n :param day: optional, day\n\n :return: a dictionary with grouped logs by date and exercise\n '''\n if day:\n log_hash = hash((user.pk, year, month, day))\n else:\n log_hash = hash((user.pk, year, month))\n\n # There can be workout sessions without any associated log entries, so it is\n # not enough so simply iterate through the logs\n if day:\n filter_date = datetime.date(year, month, day)\n logs = WorkoutLog.objects.filter(user=user, date=filter_date)\n sessions = WorkoutSession.objects.filter(user=user, date=filter_date)\n\n else:\n logs = WorkoutLog.objects.filter(user=user,\n date__year=year,\n date__month=month)\n\n sessions = WorkoutSession.objects.filter(user=user,\n date__year=year,\n date__month=month)\n\n logs = logs.order_by('date', 'id')\n out = cache.get(cache_mapper.get_workout_log_list(log_hash))\n # out = OrderedDict()\n\n if not out:\n out = OrderedDict()\n\n # Logs\n for entry in logs:\n if not out.get(entry.date):\n out[entry.date] = {'date': entry.date,\n 'workout': entry.workout,\n 'session': entry.get_workout_session(),\n 'logs': OrderedDict()}\n\n if not out[entry.date]['logs'].get(entry.exercise):\n out[entry.date]['logs'][entry.exercise] = []\n\n out[entry.date]['logs'][entry.exercise].append(entry)\n\n # Sessions\n for entry in sessions:\n if not out.get(entry.date):\n out[entry.date] = {'date': entry.date,\n 'workout': 
entry.workout,\n 'session': entry,\n 'logs': {}}\n\n cache.set(cache_mapper.get_workout_log_list(log_hash), out)\n return out\n\n\ndef process_log_entries(logs):\n '''\n Processes and regroups a list of log entries so they can be rendered\n and passed to the D3 library to render a chart\n '''\n\n reps = []\n entry_log = OrderedDict()\n chart_data = []\n max_weight = {}\n\n # Group by date\n for entry in logs:\n if entry.reps not in reps:\n reps.append(entry.reps)\n\n if not entry_log.get(entry.date):\n entry_log[entry.date] = []\n entry_log[entry.date].append(entry)\n\n # Find the maximum weight per date per repetition.\n # If on a day there are several entries with the same number of\n # repetitions, but different weights, only the entry with the\n # higher weight is shown in the chart\n if not max_weight.get(entry.date):\n max_weight[entry.date] = {entry.reps: entry.weight}\n\n if not max_weight[entry.date].get(entry.reps):\n max_weight[entry.date][entry.reps] = entry.weight\n\n if entry.weight > max_weight[entry.date][entry.reps]:\n max_weight[entry.date][entry.reps] = entry.weight\n\n # Group by repetitions\n reps_list = {}\n for entry in logs:\n temp = {'date': '%s' % entry.date,\n 'id': 'manager:workout:log-%s' % entry.id}\n\n # Only unique date, rep and weight combinations\n if reps_list.get((entry.date, entry.reps, entry.weight)):\n continue\n else:\n reps_list[(entry.date, entry.reps, entry.weight)] = True\n\n # Only add if weight is the maximum for the day\n if entry.weight != max_weight[entry.date][entry.reps]:\n continue\n\n for rep in reps:\n if entry.reps == rep:\n temp[rep] = entry.weight\n else:\n # Mark entries without data, this is later filtered out by D3.\n # We use the string 'n.a' instead of 0 to differentiate actual exercises\n # where no weight was used.\n temp[rep] = 'n.a'\n chart_data.append(temp)\n\n return entry_log, json.dumps(chart_data, cls=DecimalJsonEncoder)\n", "path": "wger/weight/helpers.py"}]}
| 2,629 | 266 |
gh_patches_debug_14810
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-1720
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup_requires="string" not handled by PEP 517 backend
Does this need to be fixed in setuptools instead, since the PEP says the return value needs to be a list of strings? https://www.python.org/dev/peps/pep-0517/#get-requires-for-build-wheel
It looks like here is the setuptools code: https://github.com/pypa/setuptools/blob/cdb5eeae678d8ccc90bf7d4348013a294f11be75/setuptools/build_meta.py#L138
_Originally posted by @cjerdonek in https://github.com/pypa/pip/issues/6255#issuecomment-462468517_
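As a purely illustrative sketch (not necessarily how setuptools resolves this), the value collected from `setup_requires` could be normalised into the list-of-strings shape PEP 517 expects by running it through `pkg_resources.parse_requirements`, which accepts either a single specifier string or an iterable of strings:

```python
from pkg_resources import parse_requirements

# Both the string form and the list form end up as a plain list of strings.
for specifiers in ("setuptools_scm>=1.15", ["wheel", "setuptools_scm>=1.15"]):
    normalised = [str(req) for req in parse_requirements(specifiers)]
    print(normalised)  # e.g. ['setuptools_scm>=1.15'], then ['wheel', 'setuptools_scm>=1.15']
```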
</issue>
<code>
[start of setuptools/build_meta.py]
1 """A PEP 517 interface to setuptools
2
3 Previously, when a user or a command line tool (let's call it a "frontend")
4 needed to make a request of setuptools to take a certain action, for
5 example, generating a list of installation requirements, the frontend would
6 would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
7
8 PEP 517 defines a different method of interfacing with setuptools. Rather
9 than calling "setup.py" directly, the frontend should:
10
11 1. Set the current directory to the directory with a setup.py file
12 2. Import this module into a safe python interpreter (one in which
13 setuptools can potentially set global variables or crash hard).
14 3. Call one of the functions defined in PEP 517.
15
16 What each function does is defined in PEP 517. However, here is a "casual"
17 definition of the functions (this definition should not be relied on for
18 bug reports or API stability):
19
20 - `build_wheel`: build a wheel in the folder and return the basename
21 - `get_requires_for_build_wheel`: get the `setup_requires` to build
22 - `prepare_metadata_for_build_wheel`: get the `install_requires`
23 - `build_sdist`: build an sdist in the folder and return the basename
24 - `get_requires_for_build_sdist`: get the `setup_requires` to build
25
26 Again, this is not a formal definition! Just a "taste" of the module.
27 """
28
29 import io
30 import os
31 import sys
32 import tokenize
33 import shutil
34 import contextlib
35
36 import setuptools
37 import distutils
38
39 __all__ = ['get_requires_for_build_sdist',
40 'get_requires_for_build_wheel',
41 'prepare_metadata_for_build_wheel',
42 'build_wheel',
43 'build_sdist',
44 '__legacy__',
45 'SetupRequirementsError']
46
47 class SetupRequirementsError(BaseException):
48 def __init__(self, specifiers):
49 self.specifiers = specifiers
50
51
52 class Distribution(setuptools.dist.Distribution):
53 def fetch_build_eggs(self, specifiers):
54 raise SetupRequirementsError(specifiers)
55
56 @classmethod
57 @contextlib.contextmanager
58 def patch(cls):
59 """
60 Replace
61 distutils.dist.Distribution with this class
62 for the duration of this context.
63 """
64 orig = distutils.core.Distribution
65 distutils.core.Distribution = cls
66 try:
67 yield
68 finally:
69 distutils.core.Distribution = orig
70
71
72 def _to_str(s):
73 """
74 Convert a filename to a string (on Python 2, explicitly
75 a byte string, not Unicode) as distutils checks for the
76 exact type str.
77 """
78 if sys.version_info[0] == 2 and not isinstance(s, str):
79 # Assume it's Unicode, as that's what the PEP says
80 # should be provided.
81 return s.encode(sys.getfilesystemencoding())
82 return s
83
84
85 def _get_immediate_subdirectories(a_dir):
86 return [name for name in os.listdir(a_dir)
87 if os.path.isdir(os.path.join(a_dir, name))]
88
89
90 def _file_with_extension(directory, extension):
91 matching = (
92 f for f in os.listdir(directory)
93 if f.endswith(extension)
94 )
95 file, = matching
96 return file
97
98
99 def _open_setup_script(setup_script):
100 if not os.path.exists(setup_script):
101 # Supply a default setup.py
102 return io.StringIO(u"from setuptools import setup; setup()")
103
104 return getattr(tokenize, 'open', open)(setup_script)
105
106
107 class _BuildMetaBackend(object):
108
109 def _fix_config(self, config_settings):
110 config_settings = config_settings or {}
111 config_settings.setdefault('--global-option', [])
112 return config_settings
113
114 def _get_build_requires(self, config_settings, requirements):
115 config_settings = self._fix_config(config_settings)
116
117 sys.argv = sys.argv[:1] + ['egg_info'] + \
118 config_settings["--global-option"]
119 try:
120 with Distribution.patch():
121 self.run_setup()
122 except SetupRequirementsError as e:
123 requirements += e.specifiers
124
125 return requirements
126
127 def run_setup(self, setup_script='setup.py'):
128 # Note that we can reuse our build directory between calls
129 # Correctness comes first, then optimization later
130 __file__ = setup_script
131 __name__ = '__main__'
132
133 with _open_setup_script(__file__) as f:
134 code = f.read().replace(r'\r\n', r'\n')
135
136 exec(compile(code, __file__, 'exec'), locals())
137
138 def get_requires_for_build_wheel(self, config_settings=None):
139 config_settings = self._fix_config(config_settings)
140 return self._get_build_requires(config_settings, requirements=['wheel'])
141
142 def get_requires_for_build_sdist(self, config_settings=None):
143 config_settings = self._fix_config(config_settings)
144 return self._get_build_requires(config_settings, requirements=[])
145
146 def prepare_metadata_for_build_wheel(self, metadata_directory,
147 config_settings=None):
148 sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',
149 _to_str(metadata_directory)]
150 self.run_setup()
151
152 dist_info_directory = metadata_directory
153 while True:
154 dist_infos = [f for f in os.listdir(dist_info_directory)
155 if f.endswith('.dist-info')]
156
157 if (len(dist_infos) == 0 and
158 len(_get_immediate_subdirectories(dist_info_directory)) == 1):
159
160 dist_info_directory = os.path.join(
161 dist_info_directory, os.listdir(dist_info_directory)[0])
162 continue
163
164 assert len(dist_infos) == 1
165 break
166
167 # PEP 517 requires that the .dist-info directory be placed in the
168 # metadata_directory. To comply, we MUST copy the directory to the root
169 if dist_info_directory != metadata_directory:
170 shutil.move(
171 os.path.join(dist_info_directory, dist_infos[0]),
172 metadata_directory)
173 shutil.rmtree(dist_info_directory, ignore_errors=True)
174
175 return dist_infos[0]
176
177 def build_wheel(self, wheel_directory, config_settings=None,
178 metadata_directory=None):
179 config_settings = self._fix_config(config_settings)
180 wheel_directory = os.path.abspath(wheel_directory)
181 sys.argv = sys.argv[:1] + ['bdist_wheel'] + \
182 config_settings["--global-option"]
183 self.run_setup()
184 if wheel_directory != 'dist':
185 shutil.rmtree(wheel_directory)
186 shutil.copytree('dist', wheel_directory)
187
188 return _file_with_extension(wheel_directory, '.whl')
189
190 def build_sdist(self, sdist_directory, config_settings=None):
191 config_settings = self._fix_config(config_settings)
192 sdist_directory = os.path.abspath(sdist_directory)
193 sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \
194 config_settings["--global-option"] + \
195 ["--dist-dir", sdist_directory]
196 self.run_setup()
197
198 return _file_with_extension(sdist_directory, '.tar.gz')
199
200
201 class _BuildMetaLegacyBackend(_BuildMetaBackend):
202 """Compatibility backend for setuptools
203
204 This is a version of setuptools.build_meta that endeavors to maintain backwards
205 compatibility with pre-PEP 517 modes of invocation. It exists as a temporary
206 bridge between the old packaging mechanism and the new packaging mechanism,
207 and will eventually be removed.
208 """
209 def run_setup(self, setup_script='setup.py'):
210 # In order to maintain compatibility with scripts assuming that
211 # the setup.py script is in a directory on the PYTHONPATH, inject
212 # '' into sys.path. (pypa/setuptools#1642)
213 sys_path = list(sys.path) # Save the original path
214
215 script_dir = os.path.dirname(os.path.abspath(setup_script))
216 if script_dir not in sys.path:
217 sys.path.insert(0, script_dir)
218
219 try:
220 super(_BuildMetaLegacyBackend,
221 self).run_setup(setup_script=setup_script)
222 finally:
223 # While PEP 517 frontends should be calling each hook in a fresh
224 # subprocess according to the standard (and thus it should not be
225 # strictly necessary to restore the old sys.path), we'll restore
226 # the original path so that the path manipulation does not persist
227 # within the hook after run_setup is called.
228 sys.path[:] = sys_path
229
230 # The primary backend
231 _BACKEND = _BuildMetaBackend()
232
233 get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
234 get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
235 prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
236 build_wheel = _BACKEND.build_wheel
237 build_sdist = _BACKEND.build_sdist
238
239
240 # The legacy backend
241 __legacy__ = _BuildMetaLegacyBackend()
242
[end of setuptools/build_meta.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py
--- a/setuptools/build_meta.py
+++ b/setuptools/build_meta.py
@@ -36,6 +36,8 @@
import setuptools
import distutils
+from pkg_resources import parse_requirements
+
__all__ = ['get_requires_for_build_sdist',
'get_requires_for_build_wheel',
'prepare_metadata_for_build_wheel',
@@ -51,7 +53,9 @@
class Distribution(setuptools.dist.Distribution):
def fetch_build_eggs(self, specifiers):
- raise SetupRequirementsError(specifiers)
+ specifier_list = list(map(str, parse_requirements(specifiers)))
+
+ raise SetupRequirementsError(specifier_list)
@classmethod
@contextlib.contextmanager
|
{"golden_diff": "diff --git a/setuptools/build_meta.py b/setuptools/build_meta.py\n--- a/setuptools/build_meta.py\n+++ b/setuptools/build_meta.py\n@@ -36,6 +36,8 @@\n import setuptools\n import distutils\n \n+from pkg_resources import parse_requirements\n+\n __all__ = ['get_requires_for_build_sdist',\n 'get_requires_for_build_wheel',\n 'prepare_metadata_for_build_wheel',\n@@ -51,7 +53,9 @@\n \n class Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n- raise SetupRequirementsError(specifiers)\n+ specifier_list = list(map(str, parse_requirements(specifiers)))\n+\n+ raise SetupRequirementsError(specifier_list)\n \n @classmethod\n @contextlib.contextmanager\n", "issue": "setup_requires=\"string\" not handled by PEP 517 backend\nDoes this need to be fixed in setuptools rather since the PEP says the return value needs to be a list of strings? https://www.python.org/dev/peps/pep-0517/#get-requires-for-build-wheel\r\n\r\nIt looks like here is the setuptools code: https://github.com/pypa/setuptools/blob/cdb5eeae678d8ccc90bf7d4348013a294f11be75/setuptools/build_meta.py#L138\r\n\r\n_Originally posted by @cjerdonek in https://github.com/pypa/pip/issues/6255#issuecomment-462468517_\n", "before_files": [{"content": "\"\"\"A PEP 517 interface to setuptools\n\nPreviously, when a user or a command line tool (let's call it a \"frontend\")\nneeded to make a request of setuptools to take a certain action, for\nexample, generating a list of installation requirements, the frontend would\nwould call \"setup.py egg_info\" or \"setup.py bdist_wheel\" on the command line.\n\nPEP 517 defines a different method of interfacing with setuptools. Rather\nthan calling \"setup.py\" directly, the frontend should:\n\n 1. Set the current directory to the directory with a setup.py file\n 2. Import this module into a safe python interpreter (one in which\n setuptools can potentially set global variables or crash hard).\n 3. Call one of the functions defined in PEP 517.\n\nWhat each function does is defined in PEP 517. However, here is a \"casual\"\ndefinition of the functions (this definition should not be relied on for\nbug reports or API stability):\n\n - `build_wheel`: build a wheel in the folder and return the basename\n - `get_requires_for_build_wheel`: get the `setup_requires` to build\n - `prepare_metadata_for_build_wheel`: get the `install_requires`\n - `build_sdist`: build an sdist in the folder and return the basename\n - `get_requires_for_build_sdist`: get the `setup_requires` to build\n\nAgain, this is not a formal definition! 
Just a \"taste\" of the module.\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport tokenize\nimport shutil\nimport contextlib\n\nimport setuptools\nimport distutils\n\n__all__ = ['get_requires_for_build_sdist',\n 'get_requires_for_build_wheel',\n 'prepare_metadata_for_build_wheel',\n 'build_wheel',\n 'build_sdist',\n '__legacy__',\n 'SetupRequirementsError']\n\nclass SetupRequirementsError(BaseException):\n def __init__(self, specifiers):\n self.specifiers = specifiers\n\n\nclass Distribution(setuptools.dist.Distribution):\n def fetch_build_eggs(self, specifiers):\n raise SetupRequirementsError(specifiers)\n\n @classmethod\n @contextlib.contextmanager\n def patch(cls):\n \"\"\"\n Replace\n distutils.dist.Distribution with this class\n for the duration of this context.\n \"\"\"\n orig = distutils.core.Distribution\n distutils.core.Distribution = cls\n try:\n yield\n finally:\n distutils.core.Distribution = orig\n\n\ndef _to_str(s):\n \"\"\"\n Convert a filename to a string (on Python 2, explicitly\n a byte string, not Unicode) as distutils checks for the\n exact type str.\n \"\"\"\n if sys.version_info[0] == 2 and not isinstance(s, str):\n # Assume it's Unicode, as that's what the PEP says\n # should be provided.\n return s.encode(sys.getfilesystemencoding())\n return s\n\n\ndef _get_immediate_subdirectories(a_dir):\n return [name for name in os.listdir(a_dir)\n if os.path.isdir(os.path.join(a_dir, name))]\n\n\ndef _file_with_extension(directory, extension):\n matching = (\n f for f in os.listdir(directory)\n if f.endswith(extension)\n )\n file, = matching\n return file\n\n\ndef _open_setup_script(setup_script):\n if not os.path.exists(setup_script):\n # Supply a default setup.py\n return io.StringIO(u\"from setuptools import setup; setup()\")\n\n return getattr(tokenize, 'open', open)(setup_script)\n\n\nclass _BuildMetaBackend(object):\n\n def _fix_config(self, config_settings):\n config_settings = config_settings or {}\n config_settings.setdefault('--global-option', [])\n return config_settings\n\n def _get_build_requires(self, config_settings, requirements):\n config_settings = self._fix_config(config_settings)\n\n sys.argv = sys.argv[:1] + ['egg_info'] + \\\n config_settings[\"--global-option\"]\n try:\n with Distribution.patch():\n self.run_setup()\n except SetupRequirementsError as e:\n requirements += e.specifiers\n\n return requirements\n\n def run_setup(self, setup_script='setup.py'):\n # Note that we can reuse our build directory between calls\n # Correctness comes first, then optimization later\n __file__ = setup_script\n __name__ = '__main__'\n\n with _open_setup_script(__file__) as f:\n code = f.read().replace(r'\\r\\n', r'\\n')\n\n exec(compile(code, __file__, 'exec'), locals())\n\n def get_requires_for_build_wheel(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=['wheel'])\n\n def get_requires_for_build_sdist(self, config_settings=None):\n config_settings = self._fix_config(config_settings)\n return self._get_build_requires(config_settings, requirements=[])\n\n def prepare_metadata_for_build_wheel(self, metadata_directory,\n config_settings=None):\n sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',\n _to_str(metadata_directory)]\n self.run_setup()\n\n dist_info_directory = metadata_directory\n while True:\n dist_infos = [f for f in os.listdir(dist_info_directory)\n if f.endswith('.dist-info')]\n\n if (len(dist_infos) == 0 and\n 
len(_get_immediate_subdirectories(dist_info_directory)) == 1):\n\n dist_info_directory = os.path.join(\n dist_info_directory, os.listdir(dist_info_directory)[0])\n continue\n\n assert len(dist_infos) == 1\n break\n\n # PEP 517 requires that the .dist-info directory be placed in the\n # metadata_directory. To comply, we MUST copy the directory to the root\n if dist_info_directory != metadata_directory:\n shutil.move(\n os.path.join(dist_info_directory, dist_infos[0]),\n metadata_directory)\n shutil.rmtree(dist_info_directory, ignore_errors=True)\n\n return dist_infos[0]\n\n def build_wheel(self, wheel_directory, config_settings=None,\n metadata_directory=None):\n config_settings = self._fix_config(config_settings)\n wheel_directory = os.path.abspath(wheel_directory)\n sys.argv = sys.argv[:1] + ['bdist_wheel'] + \\\n config_settings[\"--global-option\"]\n self.run_setup()\n if wheel_directory != 'dist':\n shutil.rmtree(wheel_directory)\n shutil.copytree('dist', wheel_directory)\n\n return _file_with_extension(wheel_directory, '.whl')\n\n def build_sdist(self, sdist_directory, config_settings=None):\n config_settings = self._fix_config(config_settings)\n sdist_directory = os.path.abspath(sdist_directory)\n sys.argv = sys.argv[:1] + ['sdist', '--formats', 'gztar'] + \\\n config_settings[\"--global-option\"] + \\\n [\"--dist-dir\", sdist_directory]\n self.run_setup()\n\n return _file_with_extension(sdist_directory, '.tar.gz')\n\n\nclass _BuildMetaLegacyBackend(_BuildMetaBackend):\n \"\"\"Compatibility backend for setuptools\n\n This is a version of setuptools.build_meta that endeavors to maintain backwards\n compatibility with pre-PEP 517 modes of invocation. It exists as a temporary\n bridge between the old packaging mechanism and the new packaging mechanism,\n and will eventually be removed.\n \"\"\"\n def run_setup(self, setup_script='setup.py'):\n # In order to maintain compatibility with scripts assuming that\n # the setup.py script is in a directory on the PYTHONPATH, inject\n # '' into sys.path. (pypa/setuptools#1642)\n sys_path = list(sys.path) # Save the original path\n\n script_dir = os.path.dirname(os.path.abspath(setup_script))\n if script_dir not in sys.path:\n sys.path.insert(0, script_dir)\n\n try:\n super(_BuildMetaLegacyBackend,\n self).run_setup(setup_script=setup_script)\n finally:\n # While PEP 517 frontends should be calling each hook in a fresh\n # subprocess according to the standard (and thus it should not be\n # strictly necessary to restore the old sys.path), we'll restore\n # the original path so that the path manipulation does not persist\n # within the hook after run_setup is called.\n sys.path[:] = sys_path\n\n# The primary backend\n_BACKEND = _BuildMetaBackend()\n\nget_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel\nget_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist\nprepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel\nbuild_wheel = _BACKEND.build_wheel\nbuild_sdist = _BACKEND.build_sdist\n\n\n# The legacy backend\n__legacy__ = _BuildMetaLegacyBackend()\n", "path": "setuptools/build_meta.py"}]}
| 3,251 | 166 |
gh_patches_debug_19322
|
rasdani/github-patches
|
git_diff
|
psf__black-3282
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support formatting Jupyter Notebooks in GitHub Actions
**Is your feature request related to a problem? Please describe.**
I'm trying to set up a GitHub Action that runs Black on a project that includes *.py and *.ipynb files, but the default action does not include the Jupyter extra. I followed the integration described in [this piece of documentation](https://black.readthedocs.io/en/stable/integrations/github_actions.html), but the option to include the Jupyter extra (`black[jupyter]`) is not available.
**Describe the solution you'd like**
If the action included an argument to include the Jupyter extra, the GitHub Action would work as expected (when using `pip install black[jupyter]` locally).
**Describe alternatives you've considered**
I considered a custom GitHub Action and installing Black manually, but found out that modifying part of the action available in this repository is cleaner and would bring support to users with a similar need without affecting those that already use the GitHub Action.
**Additional context**
I was trying different things out and arrived at a solution that works as expected and can be included in this project without affecting users that already use the GitHub Action. **Add a new option to the GitHub Action to enable the Jupyter extra dependency**. I think that a boolean value might do the trick, and using `false` as the default maintains the current behavior.
``` diff
diff --git a/action.yml b/action.yml
index cfa6ef9..ed6c32e 100644
--- a/action.yml
+++ b/action.yml
@@ -8,6 +8,10 @@ inputs:
'--check --diff'"
required: false
default: "--check --diff"
+ jupyter:
+ description: "Include the required extra dependencies to format Jupyter Notebooks."
+ required: false
+ default: false
src:
description: "Source to run Black. Default: '.'"
required: false
@@ -38,6 +42,7 @@ runs:
# TODO: Remove once https://github.com/actions/runner/issues/665 is fixed.
INPUT_OPTIONS: ${{ inputs.options }}
INPUT_SRC: ${{ inputs.src }}
+ INPUT_JUPYTER: ${{ inputs.jupyter }}
INPUT_BLACK_ARGS: ${{ inputs.black_args }}
INPUT_VERSION: ${{ inputs.version }}
pythonioencoding: utf-8
```
In this file, if the flag is enabled (if the `INPUT_JUPYTER` envar has a true value) then the `jupyter` extra is included in the installation step. Colorama is already included by default.
```diff
diff --git a/action/main.py b/action/main.py
index cd920f5..fbf6e73 100644
--- a/action/main.py
+++ b/action/main.py
@@ -10,11 +10,16 @@ ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin")
OPTIONS = os.getenv("INPUT_OPTIONS", default="")
SRC = os.getenv("INPUT_SRC", default="")
BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="")
+JUPYTER = os.getenv("INPUT_JUPYTER")
VERSION = os.getenv("INPUT_VERSION", default="")
run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True)
-req = "black[colorama]"
+
+if JUPYTER:
+ req = "black[colorama,jupyter]"
+else:
+ req = "black[colorama]"
if VERSION:
req += f"=={VERSION}"
pip_proc = run(
```
The only difference would be visible if I want to use the Jupyter extra, which can be enabled by passing the value explicitly:
```diff
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: psf/black@stable
+ jupyter: true
options: "--check --diff --verbose"
```
I forked this project to test the GitHub Action and it does work as expected (https://github.com/aaossa/black/commit/7af4287355003cd44e0febd8fe88e92f205db324). If you agree with this feature request, I can submit a PR with these changes and update the relevant documentation 👌
</issue>
<code>
[start of action/main.py]
1 import os
2 import shlex
3 import sys
4 from pathlib import Path
5 from subprocess import PIPE, STDOUT, run
6
7 ACTION_PATH = Path(os.environ["GITHUB_ACTION_PATH"])
8 ENV_PATH = ACTION_PATH / ".black-env"
9 ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin")
10 OPTIONS = os.getenv("INPUT_OPTIONS", default="")
11 SRC = os.getenv("INPUT_SRC", default="")
12 BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="")
13 VERSION = os.getenv("INPUT_VERSION", default="")
14
15 run([sys.executable, "-m", "venv", str(ENV_PATH)], check=True)
16
17 version_specifier = VERSION
18 if VERSION and VERSION[0] in "0123456789":
19 version_specifier = f"=={VERSION}"
20 req = f"black[colorama]{version_specifier}"
21 pip_proc = run(
22 [str(ENV_BIN / "python"), "-m", "pip", "install", req],
23 stdout=PIPE,
24 stderr=STDOUT,
25 encoding="utf-8",
26 )
27 if pip_proc.returncode:
28 print(pip_proc.stdout)
29 print("::error::Failed to install Black.", flush=True)
30 sys.exit(pip_proc.returncode)
31
32
33 base_cmd = [str(ENV_BIN / "black")]
34 if BLACK_ARGS:
35 # TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS.
36 proc = run([*base_cmd, *shlex.split(BLACK_ARGS)])
37 else:
38 proc = run([*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)])
39
40 sys.exit(proc.returncode)
41
[end of action/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/action/main.py b/action/main.py
--- a/action/main.py
+++ b/action/main.py
@@ -9,6 +9,7 @@
ENV_BIN = ENV_PATH / ("Scripts" if sys.platform == "win32" else "bin")
OPTIONS = os.getenv("INPUT_OPTIONS", default="")
SRC = os.getenv("INPUT_SRC", default="")
+JUPYTER = os.getenv("INPUT_JUPYTER") == "true"
BLACK_ARGS = os.getenv("INPUT_BLACK_ARGS", default="")
VERSION = os.getenv("INPUT_VERSION", default="")
@@ -17,7 +18,11 @@
version_specifier = VERSION
if VERSION and VERSION[0] in "0123456789":
version_specifier = f"=={VERSION}"
-req = f"black[colorama]{version_specifier}"
+if JUPYTER:
+ extra_deps = "[colorama,jupyter]"
+else:
+ extra_deps = "[colorama]"
+req = f"black{extra_deps}{version_specifier}"
pip_proc = run(
[str(ENV_BIN / "python"), "-m", "pip", "install", req],
stdout=PIPE,
|
{"golden_diff": "diff --git a/action/main.py b/action/main.py\n--- a/action/main.py\n+++ b/action/main.py\n@@ -9,6 +9,7 @@\n ENV_BIN = ENV_PATH / (\"Scripts\" if sys.platform == \"win32\" else \"bin\")\n OPTIONS = os.getenv(\"INPUT_OPTIONS\", default=\"\")\n SRC = os.getenv(\"INPUT_SRC\", default=\"\")\n+JUPYTER = os.getenv(\"INPUT_JUPYTER\") == \"true\"\n BLACK_ARGS = os.getenv(\"INPUT_BLACK_ARGS\", default=\"\")\n VERSION = os.getenv(\"INPUT_VERSION\", default=\"\")\n \n@@ -17,7 +18,11 @@\n version_specifier = VERSION\n if VERSION and VERSION[0] in \"0123456789\":\n version_specifier = f\"=={VERSION}\"\n-req = f\"black[colorama]{version_specifier}\"\n+if JUPYTER:\n+ extra_deps = \"[colorama,jupyter]\"\n+else:\n+ extra_deps = \"[colorama]\"\n+req = f\"black{extra_deps}{version_specifier}\"\n pip_proc = run(\n [str(ENV_BIN / \"python\"), \"-m\", \"pip\", \"install\", req],\n stdout=PIPE,\n", "issue": "Support formatting Jupyter Notebooks in GitHub Actions\n**Is your feature request related to a problem? Please describe.**\r\n\r\nI'm trying to setup a GitHub Action that runs Black on a project that includes *.py and *.ipynb files, but the default action does not include the Jupyter extra. I followed the integration described in [this piece of documentation](https://black.readthedocs.io/en/stable/integrations/github_actions.html) but the option to include the Jupyter extra (`black[jupyter]`) is not available.\r\n\r\n**Describe the solution you'd like**\r\n\r\nIf the action included an argument to include the Jupyter extra, the GitHub Action would work in as expected (when using `pip install black[jupyter]` locally).\r\n\r\n**Describe alternatives you've considered**\r\n\r\nI considered a custom GitHub Action and installing Black manually, but found out that modifying part of the action available in this repository is cleaner and would bring support to users with a similar need without affecting those that already use the GitHub Action.\r\n\r\n**Additional context**\r\n\r\nI was trying different things out and arrived to a solution that works as expected and can be included in this project without affecting users that already use the GitHub Action. **Add a new option to the GitHub Action to enable the Jupyter extra dependency**. I think that a boolean value might do the trick and using `false` as default maintains the current behavior.\r\n\r\n``` diff\r\ndiff --git a/action.yml b/action.yml\r\nindex cfa6ef9..ed6c32e 100644\r\n--- a/action.yml\r\n+++ b/action.yml\r\n@@ -8,6 +8,10 @@ inputs:\r\n '--check --diff'\"\r\n required: false\r\n default: \"--check --diff\"\r\n+ jupyter:\r\n+ description: \"Include the required extra dependencies to format Jupyter Notebooks.\"\r\n+ required: false\r\n+ default: false\r\n src:\r\n description: \"Source to run Black. Default: '.'\"\r\n required: false\r\n@@ -38,6 +42,7 @@ runs:\r\n # TODO: Remove once https://github.com/actions/runner/issues/665 is fixed.\r\n INPUT_OPTIONS: ${{ inputs.options }}\r\n INPUT_SRC: ${{ inputs.src }}\r\n+ INPUT_JUPYTER: ${{ inputs.jupyter }}\r\n INPUT_BLACK_ARGS: ${{ inputs.black_args }}\r\n INPUT_VERSION: ${{ inputs.version }}\r\n pythonioencoding: utf-8\r\n```\r\n\r\nIn this file, if the flag is enabled (if the `INPUT_JUPYTER` envar has a true value) then the `jupyter` extra is included in the installation step. Colorama is already included by default. 
\r\n\r\n```diff\r\ndiff --git a/action/main.py b/action/main.py\r\nindex cd920f5..fbf6e73 100644\r\n--- a/action/main.py\r\n+++ b/action/main.py\r\n@@ -10,11 +10,16 @@ ENV_BIN = ENV_PATH / (\"Scripts\" if sys.platform == \"win32\" else \"bin\")\r\n OPTIONS = os.getenv(\"INPUT_OPTIONS\", default=\"\")\r\n SRC = os.getenv(\"INPUT_SRC\", default=\"\")\r\n BLACK_ARGS = os.getenv(\"INPUT_BLACK_ARGS\", default=\"\")\r\n+JUPYTER = os.getenv(\"INPUT_JUPYTER\")\r\n VERSION = os.getenv(\"INPUT_VERSION\", default=\"\")\r\n\r\n run([sys.executable, \"-m\", \"venv\", str(ENV_PATH)], check=True)\r\n\r\n-req = \"black[colorama]\"\r\n+\r\n+if JUPYTER:\r\n+ req = \"black[colorama,jupyter]\"\r\n+else:\r\n+ req = \"black[colorama]\"\r\n if VERSION:\r\n req += f\"=={VERSION}\"\r\n pip_proc = run(\r\n```\r\n\r\nThe only difference would be visible in case I want to use the Jupyter extra, which can be enabled by passing the value explicitly:\r\n\r\n```diff\r\njobs:\r\n lint:\r\n runs-on: ubuntu-latest\r\n steps:\r\n - uses: actions/checkout@v2\r\n - uses: psf/black@stable\r\n+ jupyter: true\r\n options: \"--check --diff --verbose\"\r\n\r\n```\r\n\r\nI forked this project to test the GitHub Action and it does work as expected (https://github.com/aaossa/black/commit/7af4287355003cd44e0febd8fe88e92f205db324). If you agree with this feature request, I can submit a PR with these changes and update the relevant documentation \ud83d\udc4c \r\n\r\n\n", "before_files": [{"content": "import os\nimport shlex\nimport sys\nfrom pathlib import Path\nfrom subprocess import PIPE, STDOUT, run\n\nACTION_PATH = Path(os.environ[\"GITHUB_ACTION_PATH\"])\nENV_PATH = ACTION_PATH / \".black-env\"\nENV_BIN = ENV_PATH / (\"Scripts\" if sys.platform == \"win32\" else \"bin\")\nOPTIONS = os.getenv(\"INPUT_OPTIONS\", default=\"\")\nSRC = os.getenv(\"INPUT_SRC\", default=\"\")\nBLACK_ARGS = os.getenv(\"INPUT_BLACK_ARGS\", default=\"\")\nVERSION = os.getenv(\"INPUT_VERSION\", default=\"\")\n\nrun([sys.executable, \"-m\", \"venv\", str(ENV_PATH)], check=True)\n\nversion_specifier = VERSION\nif VERSION and VERSION[0] in \"0123456789\":\n version_specifier = f\"=={VERSION}\"\nreq = f\"black[colorama]{version_specifier}\"\npip_proc = run(\n [str(ENV_BIN / \"python\"), \"-m\", \"pip\", \"install\", req],\n stdout=PIPE,\n stderr=STDOUT,\n encoding=\"utf-8\",\n)\nif pip_proc.returncode:\n print(pip_proc.stdout)\n print(\"::error::Failed to install Black.\", flush=True)\n sys.exit(pip_proc.returncode)\n\n\nbase_cmd = [str(ENV_BIN / \"black\")]\nif BLACK_ARGS:\n # TODO: remove after a while since this is deprecated in favour of SRC + OPTIONS.\n proc = run([*base_cmd, *shlex.split(BLACK_ARGS)])\nelse:\n proc = run([*base_cmd, *shlex.split(OPTIONS), *shlex.split(SRC)])\n\nsys.exit(proc.returncode)\n", "path": "action/main.py"}]}
| 1,924 | 256 |
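The golden diff in the record above boils down to reading one extra action input and folding it into the pip requirement string before installing Black. The sketch below isolates that logic as a standalone helper for illustration; `build_black_requirement` is a hypothetical name, and only the `INPUT_JUPYTER`/`INPUT_VERSION` variable names, the digit-pinning rule, and the extras selection come from the diff itself.

```python
# Hypothetical helper mirroring the requirement-string logic from the golden
# diff above; it is not part of the action's actual main.py.
import os


def build_black_requirement(env: dict) -> str:
    """Return the pip requirement for Black based on the action's inputs."""
    jupyter = env.get("INPUT_JUPYTER") == "true"
    version = env.get("INPUT_VERSION", "")

    # A version that starts with a digit becomes an exact pin, e.g. "==23.1.0".
    version_specifier = f"=={version}" if version and version[0] in "0123456789" else version

    # The jupyter extra pulls in what Black needs to format notebooks.
    extra_deps = "[colorama,jupyter]" if jupyter else "[colorama]"
    return f"black{extra_deps}{version_specifier}"


if __name__ == "__main__":
    print(build_black_requirement({"INPUT_JUPYTER": "true", "INPUT_VERSION": "23.1.0"}))
    # -> black[colorama,jupyter]==23.1.0
    print(build_black_requirement(dict(os.environ)))
```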
gh_patches_debug_27301
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-684
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Next sentinel host is not contacted after socket timeout
If there is a socket timeout with a sentinel host, `redis.exceptions.TimeoutError` is raised and none of the other sentinel hosts are contacted.
If there is a connection timeout or the connection is refused, the next host is tried. It would be great if there were a way to try the next sentinel host in the same way for socket timeout errors.
(You can set `retry_on_timeout=True` to retry the same sentinel host once, but if you get a socket timeout a second time, `redis.exceptions.TimeoutError` is raised.)
</issue>
<code>
[start of redis/sentinel.py]
1 import os
2 import random
3 import weakref
4
5 from redis.client import StrictRedis
6 from redis.connection import ConnectionPool, Connection
7 from redis.exceptions import ConnectionError, ResponseError, ReadOnlyError
8 from redis._compat import iteritems, nativestr, xrange
9
10
11 class MasterNotFoundError(ConnectionError):
12 pass
13
14
15 class SlaveNotFoundError(ConnectionError):
16 pass
17
18
19 class SentinelManagedConnection(Connection):
20 def __init__(self, **kwargs):
21 self.connection_pool = kwargs.pop('connection_pool')
22 super(SentinelManagedConnection, self).__init__(**kwargs)
23
24 def __repr__(self):
25 pool = self.connection_pool
26 s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
27 if self.host:
28 host_info = ',host=%s,port=%s' % (self.host, self.port)
29 s = s % host_info
30 return s
31
32 def connect_to(self, address):
33 self.host, self.port = address
34 super(SentinelManagedConnection, self).connect()
35 if self.connection_pool.check_connection:
36 self.send_command('PING')
37 if nativestr(self.read_response()) != 'PONG':
38 raise ConnectionError('PING failed')
39
40 def connect(self):
41 if self._sock:
42 return # already connected
43 if self.connection_pool.is_master:
44 self.connect_to(self.connection_pool.get_master_address())
45 else:
46 for slave in self.connection_pool.rotate_slaves():
47 try:
48 return self.connect_to(slave)
49 except ConnectionError:
50 continue
51 raise SlaveNotFoundError # Never be here
52
53 def read_response(self):
54 try:
55 return super(SentinelManagedConnection, self).read_response()
56 except ReadOnlyError:
57 if self.connection_pool.is_master:
58 # When talking to a master, a ReadOnlyError when likely
59 # indicates that the previous master that we're still connected
60 # to has been demoted to a slave and there's a new master.
61 # calling disconnect will force the connection to re-query
62 # sentinel during the next connect() attempt.
63 self.disconnect()
64 raise ConnectionError('The previous master is now a slave')
65 raise
66
67
68 class SentinelConnectionPool(ConnectionPool):
69 """
70 Sentinel backed connection pool.
71
72 If ``check_connection`` flag is set to True, SentinelManagedConnection
73 sends a PING command right after establishing the connection.
74 """
75
76 def __init__(self, service_name, sentinel_manager, **kwargs):
77 kwargs['connection_class'] = kwargs.get(
78 'connection_class', SentinelManagedConnection)
79 self.is_master = kwargs.pop('is_master', True)
80 self.check_connection = kwargs.pop('check_connection', False)
81 super(SentinelConnectionPool, self).__init__(**kwargs)
82 self.connection_kwargs['connection_pool'] = weakref.proxy(self)
83 self.service_name = service_name
84 self.sentinel_manager = sentinel_manager
85
86 def __repr__(self):
87 return "%s<service=%s(%s)" % (
88 type(self).__name__,
89 self.service_name,
90 self.is_master and 'master' or 'slave',
91 )
92
93 def reset(self):
94 super(SentinelConnectionPool, self).reset()
95 self.master_address = None
96 self.slave_rr_counter = None
97
98 def get_master_address(self):
99 master_address = self.sentinel_manager.discover_master(
100 self.service_name)
101 if self.is_master:
102 if self.master_address is None:
103 self.master_address = master_address
104 elif master_address != self.master_address:
105 # Master address changed, disconnect all clients in this pool
106 self.disconnect()
107 return master_address
108
109 def rotate_slaves(self):
110 "Round-robin slave balancer"
111 slaves = self.sentinel_manager.discover_slaves(self.service_name)
112 if slaves:
113 if self.slave_rr_counter is None:
114 self.slave_rr_counter = random.randint(0, len(slaves) - 1)
115 for _ in xrange(len(slaves)):
116 self.slave_rr_counter = (
117 self.slave_rr_counter + 1) % len(slaves)
118 slave = slaves[self.slave_rr_counter]
119 yield slave
120 # Fallback to the master connection
121 try:
122 yield self.get_master_address()
123 except MasterNotFoundError:
124 pass
125 raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
126
127 def _checkpid(self):
128 if self.pid != os.getpid():
129 self.disconnect()
130 self.reset()
131 self.__init__(self.service_name, self.sentinel_manager,
132 is_master=self.is_master,
133 check_connection=self.check_connection,
134 connection_class=self.connection_class,
135 max_connections=self.max_connections,
136 **self.connection_kwargs)
137
138
139 class Sentinel(object):
140 """
141 Redis Sentinel cluster client
142
143 >>> from redis.sentinel import Sentinel
144 >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
145 >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
146 >>> master.set('foo', 'bar')
147 >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
148 >>> slave.get('foo')
149 'bar'
150
151 ``sentinels`` is a list of sentinel nodes. Each node is represented by
152 a pair (hostname, port).
153
154 ``min_other_sentinels`` defined a minimum number of peers for a sentinel.
155 When querying a sentinel, if it doesn't meet this threshold, responses
156 from that sentinel won't be considered valid.
157
158 ``sentinel_kwargs`` is a dictionary of connection arguments used when
159 connecting to sentinel instances. Any argument that can be passed to
160 a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
161 not specified, any socket_timeout and socket_keepalive options specified
162 in ``connection_kwargs`` will be used.
163
164 ``connection_kwargs`` are keyword arguments that will be used when
165 establishing a connection to a Redis server.
166 """
167
168 def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
169 **connection_kwargs):
170 # if sentinel_kwargs isn't defined, use the socket_* options from
171 # connection_kwargs
172 if sentinel_kwargs is None:
173 sentinel_kwargs = dict([(k, v)
174 for k, v in iteritems(connection_kwargs)
175 if k.startswith('socket_')
176 ])
177 self.sentinel_kwargs = sentinel_kwargs
178
179 self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs)
180 for hostname, port in sentinels]
181 self.min_other_sentinels = min_other_sentinels
182 self.connection_kwargs = connection_kwargs
183
184 def __repr__(self):
185 sentinel_addresses = []
186 for sentinel in self.sentinels:
187 sentinel_addresses.append('%s:%s' % (
188 sentinel.connection_pool.connection_kwargs['host'],
189 sentinel.connection_pool.connection_kwargs['port'],
190 ))
191 return '%s<sentinels=[%s]>' % (
192 type(self).__name__,
193 ','.join(sentinel_addresses))
194
195 def check_master_state(self, state, service_name):
196 if not state['is_master'] or state['is_sdown'] or state['is_odown']:
197 return False
198 # Check if our sentinel doesn't see other nodes
199 if state['num-other-sentinels'] < self.min_other_sentinels:
200 return False
201 return True
202
203 def discover_master(self, service_name):
204 """
205 Asks sentinel servers for the Redis master's address corresponding
206 to the service labeled ``service_name``.
207
208 Returns a pair (address, port) or raises MasterNotFoundError if no
209 master is found.
210 """
211 for sentinel_no, sentinel in enumerate(self.sentinels):
212 try:
213 masters = sentinel.sentinel_masters()
214 except ConnectionError:
215 continue
216 state = masters.get(service_name)
217 if state and self.check_master_state(state, service_name):
218 # Put this sentinel at the top of the list
219 self.sentinels[0], self.sentinels[sentinel_no] = (
220 sentinel, self.sentinels[0])
221 return state['ip'], state['port']
222 raise MasterNotFoundError("No master found for %r" % (service_name,))
223
224 def filter_slaves(self, slaves):
225 "Remove slaves that are in an ODOWN or SDOWN state"
226 slaves_alive = []
227 for slave in slaves:
228 if slave['is_odown'] or slave['is_sdown']:
229 continue
230 slaves_alive.append((slave['ip'], slave['port']))
231 return slaves_alive
232
233 def discover_slaves(self, service_name):
234 "Returns a list of alive slaves for service ``service_name``"
235 for sentinel in self.sentinels:
236 try:
237 slaves = sentinel.sentinel_slaves(service_name)
238 except (ConnectionError, ResponseError):
239 continue
240 slaves = self.filter_slaves(slaves)
241 if slaves:
242 return slaves
243 return []
244
245 def master_for(self, service_name, redis_class=StrictRedis,
246 connection_pool_class=SentinelConnectionPool, **kwargs):
247 """
248 Returns a redis client instance for the ``service_name`` master.
249
250 A SentinelConnectionPool class is used to retrive the master's
251 address before establishing a new connection.
252
253 NOTE: If the master's address has changed, any cached connections to
254 the old master are closed.
255
256 By default clients will be a redis.StrictRedis instance. Specify a
257 different class to the ``redis_class`` argument if you desire
258 something different.
259
260 The ``connection_pool_class`` specifies the connection pool to use.
261 The SentinelConnectionPool will be used by default.
262
263 All other keyword arguments are merged with any connection_kwargs
264 passed to this class and passed to the connection pool as keyword
265 arguments to be used to initialize Redis connections.
266 """
267 kwargs['is_master'] = True
268 connection_kwargs = dict(self.connection_kwargs)
269 connection_kwargs.update(kwargs)
270 return redis_class(connection_pool=connection_pool_class(
271 service_name, self, **connection_kwargs))
272
273 def slave_for(self, service_name, redis_class=StrictRedis,
274 connection_pool_class=SentinelConnectionPool, **kwargs):
275 """
276 Returns redis client instance for the ``service_name`` slave(s).
277
278 A SentinelConnectionPool class is used to retrive the slave's
279 address before establishing a new connection.
280
281 By default clients will be a redis.StrictRedis instance. Specify a
282 different class to the ``redis_class`` argument if you desire
283 something different.
284
285 The ``connection_pool_class`` specifies the connection pool to use.
286 The SentinelConnectionPool will be used by default.
287
288 All other keyword arguments are merged with any connection_kwargs
289 passed to this class and passed to the connection pool as keyword
290 arguments to be used to initialize Redis connections.
291 """
292 kwargs['is_master'] = False
293 connection_kwargs = dict(self.connection_kwargs)
294 connection_kwargs.update(kwargs)
295 return redis_class(connection_pool=connection_pool_class(
296 service_name, self, **connection_kwargs))
297
[end of redis/sentinel.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/redis/sentinel.py b/redis/sentinel.py
--- a/redis/sentinel.py
+++ b/redis/sentinel.py
@@ -4,7 +4,8 @@
from redis.client import StrictRedis
from redis.connection import ConnectionPool, Connection
-from redis.exceptions import ConnectionError, ResponseError, ReadOnlyError
+from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,
+ TimeoutError)
from redis._compat import iteritems, nativestr, xrange
@@ -211,7 +212,7 @@
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = sentinel.sentinel_masters()
- except ConnectionError:
+ except (ConnectionError, TimeoutError):
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
@@ -235,7 +236,7 @@
for sentinel in self.sentinels:
try:
slaves = sentinel.sentinel_slaves(service_name)
- except (ConnectionError, ResponseError):
+ except (ConnectionError, ResponseError, TimeoutError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
|
{"golden_diff": "diff --git a/redis/sentinel.py b/redis/sentinel.py\n--- a/redis/sentinel.py\n+++ b/redis/sentinel.py\n@@ -4,7 +4,8 @@\n \n from redis.client import StrictRedis\n from redis.connection import ConnectionPool, Connection\n-from redis.exceptions import ConnectionError, ResponseError, ReadOnlyError\n+from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,\n+ TimeoutError)\n from redis._compat import iteritems, nativestr, xrange\n \n \n@@ -211,7 +212,7 @@\n for sentinel_no, sentinel in enumerate(self.sentinels):\n try:\n masters = sentinel.sentinel_masters()\n- except ConnectionError:\n+ except (ConnectionError, TimeoutError):\n continue\n state = masters.get(service_name)\n if state and self.check_master_state(state, service_name):\n@@ -235,7 +236,7 @@\n for sentinel in self.sentinels:\n try:\n slaves = sentinel.sentinel_slaves(service_name)\n- except (ConnectionError, ResponseError):\n+ except (ConnectionError, ResponseError, TimeoutError):\n continue\n slaves = self.filter_slaves(slaves)\n if slaves:\n", "issue": "Next sentinel host is not contacted after socket timeout\nIf a there is a socket timeout with a sentinel host, `redis.exceptions.TimeoutError` is returned and none of the other sentinel hosts are contacted.\n\nIf there is a connection timeout or connection refused, the next host is tried. It would great if there was way to try the next sentinel host in the same way for socket timeout errors.\n\n(You can `retry_on_timeout=True` to retry the same sentinel host once, but if you get a socket timeout a second time, `redis.exceptions.TimeoutError` is returned.)\n\n", "before_files": [{"content": "import os\nimport random\nimport weakref\n\nfrom redis.client import StrictRedis\nfrom redis.connection import ConnectionPool, Connection\nfrom redis.exceptions import ConnectionError, ResponseError, ReadOnlyError\nfrom redis._compat import iteritems, nativestr, xrange\n\n\nclass MasterNotFoundError(ConnectionError):\n pass\n\n\nclass SlaveNotFoundError(ConnectionError):\n pass\n\n\nclass SentinelManagedConnection(Connection):\n def __init__(self, **kwargs):\n self.connection_pool = kwargs.pop('connection_pool')\n super(SentinelManagedConnection, self).__init__(**kwargs)\n\n def __repr__(self):\n pool = self.connection_pool\n s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)\n if self.host:\n host_info = ',host=%s,port=%s' % (self.host, self.port)\n s = s % host_info\n return s\n\n def connect_to(self, address):\n self.host, self.port = address\n super(SentinelManagedConnection, self).connect()\n if self.connection_pool.check_connection:\n self.send_command('PING')\n if nativestr(self.read_response()) != 'PONG':\n raise ConnectionError('PING failed')\n\n def connect(self):\n if self._sock:\n return # already connected\n if self.connection_pool.is_master:\n self.connect_to(self.connection_pool.get_master_address())\n else:\n for slave in self.connection_pool.rotate_slaves():\n try:\n return self.connect_to(slave)\n except ConnectionError:\n continue\n raise SlaveNotFoundError # Never be here\n\n def read_response(self):\n try:\n return super(SentinelManagedConnection, self).read_response()\n except ReadOnlyError:\n if self.connection_pool.is_master:\n # When talking to a master, a ReadOnlyError when likely\n # indicates that the previous master that we're still connected\n # to has been demoted to a slave and there's a new master.\n # calling disconnect will force the connection to re-query\n # sentinel during the next connect() attempt.\n 
self.disconnect()\n raise ConnectionError('The previous master is now a slave')\n raise\n\n\nclass SentinelConnectionPool(ConnectionPool):\n \"\"\"\n Sentinel backed connection pool.\n\n If ``check_connection`` flag is set to True, SentinelManagedConnection\n sends a PING command right after establishing the connection.\n \"\"\"\n\n def __init__(self, service_name, sentinel_manager, **kwargs):\n kwargs['connection_class'] = kwargs.get(\n 'connection_class', SentinelManagedConnection)\n self.is_master = kwargs.pop('is_master', True)\n self.check_connection = kwargs.pop('check_connection', False)\n super(SentinelConnectionPool, self).__init__(**kwargs)\n self.connection_kwargs['connection_pool'] = weakref.proxy(self)\n self.service_name = service_name\n self.sentinel_manager = sentinel_manager\n\n def __repr__(self):\n return \"%s<service=%s(%s)\" % (\n type(self).__name__,\n self.service_name,\n self.is_master and 'master' or 'slave',\n )\n\n def reset(self):\n super(SentinelConnectionPool, self).reset()\n self.master_address = None\n self.slave_rr_counter = None\n\n def get_master_address(self):\n master_address = self.sentinel_manager.discover_master(\n self.service_name)\n if self.is_master:\n if self.master_address is None:\n self.master_address = master_address\n elif master_address != self.master_address:\n # Master address changed, disconnect all clients in this pool\n self.disconnect()\n return master_address\n\n def rotate_slaves(self):\n \"Round-robin slave balancer\"\n slaves = self.sentinel_manager.discover_slaves(self.service_name)\n if slaves:\n if self.slave_rr_counter is None:\n self.slave_rr_counter = random.randint(0, len(slaves) - 1)\n for _ in xrange(len(slaves)):\n self.slave_rr_counter = (\n self.slave_rr_counter + 1) % len(slaves)\n slave = slaves[self.slave_rr_counter]\n yield slave\n # Fallback to the master connection\n try:\n yield self.get_master_address()\n except MasterNotFoundError:\n pass\n raise SlaveNotFoundError('No slave found for %r' % (self.service_name))\n\n def _checkpid(self):\n if self.pid != os.getpid():\n self.disconnect()\n self.reset()\n self.__init__(self.service_name, self.sentinel_manager,\n is_master=self.is_master,\n check_connection=self.check_connection,\n connection_class=self.connection_class,\n max_connections=self.max_connections,\n **self.connection_kwargs)\n\n\nclass Sentinel(object):\n \"\"\"\n Redis Sentinel cluster client\n\n >>> from redis.sentinel import Sentinel\n >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)\n >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)\n >>> master.set('foo', 'bar')\n >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)\n >>> slave.get('foo')\n 'bar'\n\n ``sentinels`` is a list of sentinel nodes. Each node is represented by\n a pair (hostname, port).\n\n ``min_other_sentinels`` defined a minimum number of peers for a sentinel.\n When querying a sentinel, if it doesn't meet this threshold, responses\n from that sentinel won't be considered valid.\n\n ``sentinel_kwargs`` is a dictionary of connection arguments used when\n connecting to sentinel instances. Any argument that can be passed to\n a normal Redis connection can be specified here. 
If ``sentinel_kwargs`` is\n not specified, any socket_timeout and socket_keepalive options specified\n in ``connection_kwargs`` will be used.\n\n ``connection_kwargs`` are keyword arguments that will be used when\n establishing a connection to a Redis server.\n \"\"\"\n\n def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,\n **connection_kwargs):\n # if sentinel_kwargs isn't defined, use the socket_* options from\n # connection_kwargs\n if sentinel_kwargs is None:\n sentinel_kwargs = dict([(k, v)\n for k, v in iteritems(connection_kwargs)\n if k.startswith('socket_')\n ])\n self.sentinel_kwargs = sentinel_kwargs\n\n self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs)\n for hostname, port in sentinels]\n self.min_other_sentinels = min_other_sentinels\n self.connection_kwargs = connection_kwargs\n\n def __repr__(self):\n sentinel_addresses = []\n for sentinel in self.sentinels:\n sentinel_addresses.append('%s:%s' % (\n sentinel.connection_pool.connection_kwargs['host'],\n sentinel.connection_pool.connection_kwargs['port'],\n ))\n return '%s<sentinels=[%s]>' % (\n type(self).__name__,\n ','.join(sentinel_addresses))\n\n def check_master_state(self, state, service_name):\n if not state['is_master'] or state['is_sdown'] or state['is_odown']:\n return False\n # Check if our sentinel doesn't see other nodes\n if state['num-other-sentinels'] < self.min_other_sentinels:\n return False\n return True\n\n def discover_master(self, service_name):\n \"\"\"\n Asks sentinel servers for the Redis master's address corresponding\n to the service labeled ``service_name``.\n\n Returns a pair (address, port) or raises MasterNotFoundError if no\n master is found.\n \"\"\"\n for sentinel_no, sentinel in enumerate(self.sentinels):\n try:\n masters = sentinel.sentinel_masters()\n except ConnectionError:\n continue\n state = masters.get(service_name)\n if state and self.check_master_state(state, service_name):\n # Put this sentinel at the top of the list\n self.sentinels[0], self.sentinels[sentinel_no] = (\n sentinel, self.sentinels[0])\n return state['ip'], state['port']\n raise MasterNotFoundError(\"No master found for %r\" % (service_name,))\n\n def filter_slaves(self, slaves):\n \"Remove slaves that are in an ODOWN or SDOWN state\"\n slaves_alive = []\n for slave in slaves:\n if slave['is_odown'] or slave['is_sdown']:\n continue\n slaves_alive.append((slave['ip'], slave['port']))\n return slaves_alive\n\n def discover_slaves(self, service_name):\n \"Returns a list of alive slaves for service ``service_name``\"\n for sentinel in self.sentinels:\n try:\n slaves = sentinel.sentinel_slaves(service_name)\n except (ConnectionError, ResponseError):\n continue\n slaves = self.filter_slaves(slaves)\n if slaves:\n return slaves\n return []\n\n def master_for(self, service_name, redis_class=StrictRedis,\n connection_pool_class=SentinelConnectionPool, **kwargs):\n \"\"\"\n Returns a redis client instance for the ``service_name`` master.\n\n A SentinelConnectionPool class is used to retrive the master's\n address before establishing a new connection.\n\n NOTE: If the master's address has changed, any cached connections to\n the old master are closed.\n\n By default clients will be a redis.StrictRedis instance. 
Specify a\n different class to the ``redis_class`` argument if you desire\n something different.\n\n The ``connection_pool_class`` specifies the connection pool to use.\n The SentinelConnectionPool will be used by default.\n\n All other keyword arguments are merged with any connection_kwargs\n passed to this class and passed to the connection pool as keyword\n arguments to be used to initialize Redis connections.\n \"\"\"\n kwargs['is_master'] = True\n connection_kwargs = dict(self.connection_kwargs)\n connection_kwargs.update(kwargs)\n return redis_class(connection_pool=connection_pool_class(\n service_name, self, **connection_kwargs))\n\n def slave_for(self, service_name, redis_class=StrictRedis,\n connection_pool_class=SentinelConnectionPool, **kwargs):\n \"\"\"\n Returns redis client instance for the ``service_name`` slave(s).\n\n A SentinelConnectionPool class is used to retrive the slave's\n address before establishing a new connection.\n\n By default clients will be a redis.StrictRedis instance. Specify a\n different class to the ``redis_class`` argument if you desire\n something different.\n\n The ``connection_pool_class`` specifies the connection pool to use.\n The SentinelConnectionPool will be used by default.\n\n All other keyword arguments are merged with any connection_kwargs\n passed to this class and passed to the connection pool as keyword\n arguments to be used to initialize Redis connections.\n \"\"\"\n kwargs['is_master'] = False\n connection_kwargs = dict(self.connection_kwargs)\n connection_kwargs.update(kwargs)\n return redis_class(connection_pool=connection_pool_class(\n service_name, self, **connection_kwargs))\n", "path": "redis/sentinel.py"}]}
| 3,813 | 267 |
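The one-line nature of the fix above is easy to miss in context: master and slave discovery loop over the configured sentinels but only skipped hosts raising `ConnectionError`, so a read timeout escaped the loop and no further sentinel was asked. The snippet below is a stripped-down sketch of the corrected loop, not the library's actual method; the `sentinels` argument stands in for a list of `StrictRedis` clients.

```python
# Standalone sketch of sentinel discovery after the fix: both connection
# errors and socket timeouts fall through to the next sentinel host.
from redis.exceptions import ConnectionError, TimeoutError


class MasterNotFoundError(ConnectionError):
    """Raised when no sentinel can name a master for the service."""


def discover_master(sentinels, service_name):
    """Ask each sentinel in turn for the master address of ``service_name``."""
    for sentinel in sentinels:
        try:
            masters = sentinel.sentinel_masters()
        except (ConnectionError, TimeoutError):
            # Before the fix only ConnectionError was caught here, so a
            # socket timeout on one sentinel aborted discovery entirely.
            continue
        state = masters.get(service_name)
        if state and state.get("is_master"):
            return state["ip"], state["port"]
    raise MasterNotFoundError("No master found for %r" % (service_name,))
```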
gh_patches_debug_33798
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-43869
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
openvswitch_db: ovs-vsctl: "True" is not a valid boolean (use "true" or "false")
##### SUMMARY
`test/integration/targets/openvswitch_db/tests/basic.yaml:68`
```yaml
{
"changed": false,
"cmd": "/usr/bin/ovs-vsctl -t 5 set Bridge br-test stp_enable=True",
"msg": "ovs-vsctl: \"True\" is not a valid boolean (use \"true\" or \"false\")",
"rc": 1,
"stderr": "ovs-vsctl: \"True\" is not a valid boolean (use \"true\" or \"false\")\n",
"stderr_lines": [
"ovs-vsctl: \"True\" is not a valid boolean (use \"true\" or \"false\")"
],
"stdout": "",
"stdout_lines": []
}
```
Possibly caused by https://github.com/ansible/ansible/pull/42110
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
openvswitch_db
##### ANSIBLE VERSION
```
2.7
```
##### CONFIGURATION
<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).-->
##### OS / ENVIRONMENT
<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.-->
##### STEPS TO REPRODUCE
<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used. -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
<!--- Paste verbatim command output between quotes below -->
```
```
</issue>
<code>
[start of lib/ansible/modules/network/ovs/openvswitch_db.py]
1 #!/usr/bin/python
2 # coding: utf-8 -*-
3
4 #
5 # (c) 2015, Mark Hamilton <[email protected]>
6 # Portions copyright @ 2015 VMware, Inc.
7 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
8
9 from __future__ import absolute_import, division, print_function
10 __metaclass__ = type
11
12
13 ANSIBLE_METADATA = {'metadata_version': '1.1',
14 'status': ['preview'],
15 'supported_by': 'network'}
16
17
18 DOCUMENTATION = """
19 ---
20 module: openvswitch_db
21 author: "Mark Hamilton ([email protected])"
22 version_added: 2.0
23 short_description: Configure open vswitch database.
24 requirements: [ "ovs-vsctl >= 2.3.3" ]
25 description:
26 - Set column values in record in database table.
27 options:
28 state:
29 required: false
30 description:
31 - Configures the state of the key. When set
32 to I(present), the I(key) and I(value) pair will be set
33 on the I(record) and when set to I(absent) the I(key)
34 will not be set.
35 default: present
36 choices: ['present', 'absent']
37 version_added: "2.4"
38 table:
39 required: true
40 description:
41 - Identifies the table in the database.
42 record:
43 required: true
44 description:
45 - Identifies the recoard in the table.
46 col:
47 required: true
48 description:
49 - Identifies the column in the record.
50 key:
51 required: false
52 description:
53 - Identifies the key in the record column, when the column is a map
54 type.
55 value:
56 required: true
57 description:
58 - Expected value for the table, record, column and key.
59 timeout:
60 required: false
61 default: 5
62 description:
63 - How long to wait for ovs-vswitchd to respond
64 """
65
66 EXAMPLES = '''
67 # Increase the maximum idle time to 50 seconds before pruning unused kernel
68 # rules.
69 - openvswitch_db:
70 table: open_vswitch
71 record: .
72 col: other_config
73 key: max-idle
74 value: 50000
75
76 # Disable in band copy
77 - openvswitch_db:
78 table: Bridge
79 record: br-int
80 col: other_config
81 key: disable-in-band
82 value: true
83
84 # Remove in band key
85 - openvswitch_db:
86 state: present
87 table: Bridge
88 record: br-int
89 col: other_config
90 key: disable-in-band
91
92 # Mark port with tag 10
93 - openvswitch_db:
94 table: Port
95 record: port0
96 col: tag
97 value: 10
98 '''
99 import re
100
101 from ansible.module_utils.basic import AnsibleModule
102
103 # Regular expression for map type, must not be empty
104 NON_EMPTY_MAP_RE = re.compile(r'{.+}')
105 # Regular expression for a map column type
106 MAP_RE = re.compile(r'{.*}')
107
108
109 def map_obj_to_commands(want, have, module):
110 """ Define ovs-vsctl command to meet desired state """
111 commands = list()
112
113 if module.params['state'] == 'absent':
114 if 'key' in have.keys():
115 templatized_command = "%(ovs-vsctl)s -t %(timeout)s remove %(table)s %(record)s " \
116 "%(col)s %(key)s=%(value)s"
117 commands.append(templatized_command % module.params)
118 elif module.params['key'] is None:
119 templatized_command = "%(ovs-vsctl)s -t %(timeout)s remove %(table)s %(record)s " \
120 "%(col)s"
121 commands.append(templatized_command % module.params)
122 else:
123 if module.params['key'] is None:
124 templatized_command = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " \
125 "%(col)s=%(value)s"
126 commands.append(templatized_command % module.params)
127 elif 'key' not in have.keys():
128 templatized_command = "%(ovs-vsctl)s -t %(timeout)s add %(table)s %(record)s " \
129 "%(col)s %(key)s=%(value)s"
130 commands.append(templatized_command % module.params)
131 elif want['value'] != have['value']:
132 templatized_command = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " \
133 "%(col)s:%(key)s=%(value)s"
134 commands.append(templatized_command % module.params)
135
136 return commands
137
138
139 def map_config_to_obj(module):
140 templatized_command = "%(ovs-vsctl)s -t %(timeout)s list %(table)s %(record)s"
141 command = templatized_command % module.params
142 rc, out, err = module.run_command(command, check_rc=True)
143 if rc != 0:
144 module.fail_json(msg=err)
145
146 match = re.search(r'^' + module.params['col'] + r'(\s+):(\s+)(.*)$', out, re.M)
147
148 col_value = match.group(3)
149
150 # Map types require key argument
151 has_key = module.params['key'] is not None
152 is_map = MAP_RE.match(col_value)
153 if is_map and not has_key:
154 module.fail_json(
155 msg="missing required arguments: key for map type of column")
156
157 col_value_to_dict = {}
158 if NON_EMPTY_MAP_RE.match(col_value):
159 for kv in col_value[1:-1].split(', '):
160 k, v = kv.split('=')
161 col_value_to_dict[k.strip()] = v.strip()
162
163 obj = {
164 'table': module.params['table'],
165 'record': module.params['record'],
166 'col': module.params['col'],
167 }
168
169 if has_key and is_map:
170 if module.params['key'] in col_value_to_dict:
171 obj['key'] = module.params['key']
172 obj['value'] = col_value_to_dict[module.params['key']]
173 else:
174 obj['value'] = col_value.strip()
175
176 return obj
177
178
179 def map_params_to_obj(module):
180 obj = {
181 'table': module.params['table'],
182 'record': module.params['record'],
183 'col': module.params['col'],
184 'value': module.params['value']
185 }
186
187 key = module.params['key']
188 if key is not None:
189 obj['key'] = key
190
191 return obj
192
193
194 def main():
195 """ Entry point for ansible module. """
196 argument_spec = {
197 'state': {'default': 'present', 'choices': ['present', 'absent']},
198 'table': {'required': True},
199 'record': {'required': True},
200 'col': {'required': True},
201 'key': {'required': False},
202 'value': {'required': True},
203 'timeout': {'default': 5, 'type': 'int'},
204 }
205
206 module = AnsibleModule(argument_spec=argument_spec,
207 supports_check_mode=True)
208
209 result = {'changed': False}
210
211 # We add ovs-vsctl to module_params to later build up templatized commands
212 module.params["ovs-vsctl"] = module.get_bin_path("ovs-vsctl", True)
213
214 want = map_params_to_obj(module)
215 have = map_config_to_obj(module)
216
217 commands = map_obj_to_commands(want, have, module)
218 result['commands'] = commands
219
220 if commands:
221 if not module.check_mode:
222 for c in commands:
223 module.run_command(c, check_rc=True)
224 result['changed'] = True
225
226 module.exit_json(**result)
227
228
229 if __name__ == '__main__':
230 main()
231
[end of lib/ansible/modules/network/ovs/openvswitch_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/ansible/modules/network/ovs/openvswitch_db.py b/lib/ansible/modules/network/ovs/openvswitch_db.py
--- a/lib/ansible/modules/network/ovs/openvswitch_db.py
+++ b/lib/ansible/modules/network/ovs/openvswitch_db.py
@@ -120,15 +120,14 @@
"%(col)s"
commands.append(templatized_command % module.params)
else:
+ if want == have:
+ # Nothing to commit
+ return commands
if module.params['key'] is None:
templatized_command = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " \
"%(col)s=%(value)s"
commands.append(templatized_command % module.params)
- elif 'key' not in have.keys():
- templatized_command = "%(ovs-vsctl)s -t %(timeout)s add %(table)s %(record)s " \
- "%(col)s %(key)s=%(value)s"
- commands.append(templatized_command % module.params)
- elif want['value'] != have['value']:
+ else:
templatized_command = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " \
"%(col)s:%(key)s=%(value)s"
commands.append(templatized_command % module.params)
@@ -171,7 +170,7 @@
obj['key'] = module.params['key']
obj['value'] = col_value_to_dict[module.params['key']]
else:
- obj['value'] = col_value.strip()
+ obj['value'] = str(col_value.strip())
return obj
@@ -199,7 +198,7 @@
'record': {'required': True},
'col': {'required': True},
'key': {'required': False},
- 'value': {'required': True},
+ 'value': {'required': True, 'type': 'str'},
'timeout': {'default': 5, 'type': 'int'},
}
|
{"golden_diff": "diff --git a/lib/ansible/modules/network/ovs/openvswitch_db.py b/lib/ansible/modules/network/ovs/openvswitch_db.py\n--- a/lib/ansible/modules/network/ovs/openvswitch_db.py\n+++ b/lib/ansible/modules/network/ovs/openvswitch_db.py\n@@ -120,15 +120,14 @@\n \"%(col)s\"\n commands.append(templatized_command % module.params)\n else:\n+ if want == have:\n+ # Nothing to commit\n+ return commands\n if module.params['key'] is None:\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s \" \\\n \"%(col)s=%(value)s\"\n commands.append(templatized_command % module.params)\n- elif 'key' not in have.keys():\n- templatized_command = \"%(ovs-vsctl)s -t %(timeout)s add %(table)s %(record)s \" \\\n- \"%(col)s %(key)s=%(value)s\"\n- commands.append(templatized_command % module.params)\n- elif want['value'] != have['value']:\n+ else:\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s \" \\\n \"%(col)s:%(key)s=%(value)s\"\n commands.append(templatized_command % module.params)\n@@ -171,7 +170,7 @@\n obj['key'] = module.params['key']\n obj['value'] = col_value_to_dict[module.params['key']]\n else:\n- obj['value'] = col_value.strip()\n+ obj['value'] = str(col_value.strip())\n \n return obj\n \n@@ -199,7 +198,7 @@\n 'record': {'required': True},\n 'col': {'required': True},\n 'key': {'required': False},\n- 'value': {'required': True},\n+ 'value': {'required': True, 'type': 'str'},\n 'timeout': {'default': 5, 'type': 'int'},\n }\n", "issue": "openvswitch_db: ovs-vsctl: \"True\" is not a valid boolean (use \"true\" or \"false\") \n##### SUMMARY\r\n`test/integration/targets/openvswitch_db/tests/basic.yaml:68`\r\n\r\n```yaml\r\n{\r\n\"changed\": false, \r\n\"cmd\": \"/usr/bin/ovs-vsctl -t 5 set Bridge br-test stp_enable=True\", \r\n\"msg\": \"ovs-vsctl: \\\"True\\\" is not a valid boolean (use \\\"true\\\" or \\\"false\\\")\", \r\n\"rc\": 1, \r\n\"stderr\": \"ovs-vsctl: \\\"True\\\" is not a valid boolean (use \\\"true\\\" or \\\"false\\\")\\n\", \r\n\"stderr_lines\": [\r\n\"ovs-vsctl: \\\"True\\\" is not a valid boolean (use \\\"true\\\" or \\\"false\\\")\"\r\n], \r\n\"stdout\": \"\", \r\n\"stdout_lines\": []\r\n}\r\n```\r\n\r\nPossibly caused by https://github.com/ansible/ansible/pull/42110\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\nopenvswitch_db\r\n\r\n##### ANSIBLE VERSION\r\n```\r\n2.7\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).-->\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. if this is a network bug the version of firmware on the network device.-->\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? 
-->\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\n\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n#\n# (c) 2015, Mark Hamilton <[email protected]>\n# Portions copyright @ 2015 VMware, Inc.\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'network'}\n\n\nDOCUMENTATION = \"\"\"\n---\nmodule: openvswitch_db\nauthor: \"Mark Hamilton ([email protected])\"\nversion_added: 2.0\nshort_description: Configure open vswitch database.\nrequirements: [ \"ovs-vsctl >= 2.3.3\" ]\ndescription:\n - Set column values in record in database table.\noptions:\n state:\n required: false\n description:\n - Configures the state of the key. When set\n to I(present), the I(key) and I(value) pair will be set\n on the I(record) and when set to I(absent) the I(key)\n will not be set.\n default: present\n choices: ['present', 'absent']\n version_added: \"2.4\"\n table:\n required: true\n description:\n - Identifies the table in the database.\n record:\n required: true\n description:\n - Identifies the recoard in the table.\n col:\n required: true\n description:\n - Identifies the column in the record.\n key:\n required: false\n description:\n - Identifies the key in the record column, when the column is a map\n type.\n value:\n required: true\n description:\n - Expected value for the table, record, column and key.\n timeout:\n required: false\n default: 5\n description:\n - How long to wait for ovs-vswitchd to respond\n\"\"\"\n\nEXAMPLES = '''\n# Increase the maximum idle time to 50 seconds before pruning unused kernel\n# rules.\n- openvswitch_db:\n table: open_vswitch\n record: .\n col: other_config\n key: max-idle\n value: 50000\n\n# Disable in band copy\n- openvswitch_db:\n table: Bridge\n record: br-int\n col: other_config\n key: disable-in-band\n value: true\n\n# Remove in band key\n- openvswitch_db:\n state: present\n table: Bridge\n record: br-int\n col: other_config\n key: disable-in-band\n\n# Mark port with tag 10\n- openvswitch_db:\n table: Port\n record: port0\n col: tag\n value: 10\n'''\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n# Regular expression for map type, must not be empty\nNON_EMPTY_MAP_RE = re.compile(r'{.+}')\n# Regular expression for a map column type\nMAP_RE = re.compile(r'{.*}')\n\n\ndef map_obj_to_commands(want, have, module):\n \"\"\" Define ovs-vsctl command to meet desired state \"\"\"\n commands = list()\n\n if module.params['state'] == 'absent':\n if 'key' in have.keys():\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s remove %(table)s %(record)s \" \\\n \"%(col)s %(key)s=%(value)s\"\n commands.append(templatized_command % module.params)\n elif module.params['key'] is None:\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s remove %(table)s %(record)s \" \\\n \"%(col)s\"\n commands.append(templatized_command % module.params)\n else:\n if module.params['key'] is None:\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s \" \\\n \"%(col)s=%(value)s\"\n commands.append(templatized_command % module.params)\n elif 'key' not in have.keys():\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s add %(table)s %(record)s \" \\\n 
\"%(col)s %(key)s=%(value)s\"\n commands.append(templatized_command % module.params)\n elif want['value'] != have['value']:\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s \" \\\n \"%(col)s:%(key)s=%(value)s\"\n commands.append(templatized_command % module.params)\n\n return commands\n\n\ndef map_config_to_obj(module):\n templatized_command = \"%(ovs-vsctl)s -t %(timeout)s list %(table)s %(record)s\"\n command = templatized_command % module.params\n rc, out, err = module.run_command(command, check_rc=True)\n if rc != 0:\n module.fail_json(msg=err)\n\n match = re.search(r'^' + module.params['col'] + r'(\\s+):(\\s+)(.*)$', out, re.M)\n\n col_value = match.group(3)\n\n # Map types require key argument\n has_key = module.params['key'] is not None\n is_map = MAP_RE.match(col_value)\n if is_map and not has_key:\n module.fail_json(\n msg=\"missing required arguments: key for map type of column\")\n\n col_value_to_dict = {}\n if NON_EMPTY_MAP_RE.match(col_value):\n for kv in col_value[1:-1].split(', '):\n k, v = kv.split('=')\n col_value_to_dict[k.strip()] = v.strip()\n\n obj = {\n 'table': module.params['table'],\n 'record': module.params['record'],\n 'col': module.params['col'],\n }\n\n if has_key and is_map:\n if module.params['key'] in col_value_to_dict:\n obj['key'] = module.params['key']\n obj['value'] = col_value_to_dict[module.params['key']]\n else:\n obj['value'] = col_value.strip()\n\n return obj\n\n\ndef map_params_to_obj(module):\n obj = {\n 'table': module.params['table'],\n 'record': module.params['record'],\n 'col': module.params['col'],\n 'value': module.params['value']\n }\n\n key = module.params['key']\n if key is not None:\n obj['key'] = key\n\n return obj\n\n\ndef main():\n \"\"\" Entry point for ansible module. \"\"\"\n argument_spec = {\n 'state': {'default': 'present', 'choices': ['present', 'absent']},\n 'table': {'required': True},\n 'record': {'required': True},\n 'col': {'required': True},\n 'key': {'required': False},\n 'value': {'required': True},\n 'timeout': {'default': 5, 'type': 'int'},\n }\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n result = {'changed': False}\n\n # We add ovs-vsctl to module_params to later build up templatized commands\n module.params[\"ovs-vsctl\"] = module.get_bin_path(\"ovs-vsctl\", True)\n\n want = map_params_to_obj(module)\n have = map_config_to_obj(module)\n\n commands = map_obj_to_commands(want, have, module)\n result['commands'] = commands\n\n if commands:\n if not module.check_mode:\n for c in commands:\n module.run_command(c, check_rc=True)\n result['changed'] = True\n\n module.exit_json(**result)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/network/ovs/openvswitch_db.py"}]}
| 3,387 | 471 |
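The root cause in the record above is type coercion: with no explicit type on the `value` option, Ansible turned the playbook's `true` into the Python boolean `True`, which `ovs-vsctl` rejects. The golden diff pins `value` to `type: 'str'` and skips command generation when the desired and current state already match. The sketch below illustrates that comparison-and-command step only; `build_set_commands` and the sample `params` dict are made up for the example and are not the module's real interface.

```python
# Illustrative sketch of the post-fix behaviour: values stay plain strings
# ("true", never True) and no command is built when state already matches.
def build_set_commands(want, have, params):
    """Return the ovs-vsctl set commands needed to reach the wanted state."""
    commands = []
    if want == have:
        # Nothing to commit; this also keeps repeated runs idempotent.
        return commands
    base = "%(ovs-vsctl)s -t %(timeout)s set %(table)s %(record)s " % params
    if params.get("key") is None:
        commands.append(base + "%(col)s=%(value)s" % params)
    else:
        commands.append(base + "%(col)s:%(key)s=%(value)s" % params)
    return commands


params = {
    "ovs-vsctl": "/usr/bin/ovs-vsctl", "timeout": 5, "table": "Bridge",
    "record": "br-test", "col": "stp_enable", "key": None, "value": "true",
}
print(build_set_commands({"value": "true"}, {"value": "false"}, params))
# ['/usr/bin/ovs-vsctl -t 5 set Bridge br-test stp_enable=true']
```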
gh_patches_debug_14767
|
rasdani/github-patches
|
git_diff
|
pymedusa__Medusa-6656
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Torrent file content is empty
**Describe the bug**
I have many log entries like this:
2019-04-25 21:01:35 WARNING SNATCHQUEUE-SNATCH-274431 :: [f46bfac] Torrent file content is empty: Gotham.S04E20.VOSTFR.WebDl.720p.x264.-.Chris44
**Expected behavior**
torrents from yggtorrent don't get snatched
**Screenshots**
<img width="1174" alt="Capture d’écran 2019-04-26 à 08 26 25" src="https://user-images.githubusercontent.com/14791276/56787518-0af92e00-67fd-11e9-8c6e-72063f929f3a.png">
**Medusa (please complete the following information):**
- OS: debian9
- Branch: master
- Commit: Branch:master
Commit: f46bfacf8763204fbde4f26a5916095371d494d1
Version: 0.3.1
**Logs:**
2019-04-25 21:01:35 WARNING SNATCHQUEUE-SNATCH-274431 :: [f46bfac] Torrent file content is empty: Gotham.S04E20.VOSTFR.WebDl.720p.x264.-.Chris44</details>
**Additional context**
Add any other context about the problem here.
</issue>
<code>
[start of medusa/providers/torrent/torrent_provider.py]
1 # coding=utf-8
2
3 """Provider code for Generic Torrent Provider."""
4
5 from __future__ import unicode_literals
6
7 import logging
8 import os
9 import re
10 from base64 import b16encode, b32decode
11 from os.path import join
12 from random import shuffle
13
14 from bencode import BencodeDecodeError, bdecode
15
16 from feedparser.util import FeedParserDict
17
18 from medusa import app
19 from medusa.classes import TorrentSearchResult
20 from medusa.helper.common import sanitize_filename, try_int
21 from medusa.helpers import remove_file_failed
22 from medusa.logger.adapters.style import BraceAdapter
23 from medusa.providers.generic_provider import GenericProvider
24
25 log = BraceAdapter(logging.getLogger(__name__))
26 log.logger.addHandler(logging.NullHandler())
27
28
29 class TorrentProvider(GenericProvider):
30 """Generic Torrent provider."""
31
32 def __init__(self, name):
33 """Initialize the class."""
34 super(TorrentProvider, self).__init__(name)
35
36 self.ratio = None
37 self.provider_type = GenericProvider.TORRENT
38 self.minseed = 0
39 self.minleech = 0
40
41 def is_active(self):
42 """Check if provider is enabled."""
43 return bool(app.USE_TORRENTS) and self.is_enabled()
44
45 @property
46 def _custom_trackers(self):
47 """Check if provider has custom trackers."""
48 if not self.public or not app.TRACKERS_LIST:
49 return ''
50
51 return '&tr=' + '&tr='.join(x.strip() for x in app.TRACKERS_LIST if x.strip())
52
53 def _get_result(self, episodes):
54 """Return a provider result object."""
55 return TorrentSearchResult(episodes, provider=self)
56
57 def _get_size(self, item):
58 """Get result size."""
59 if isinstance(item, dict):
60 size = item.get('size', -1)
61 elif isinstance(item, (list, tuple)) and len(item) > 2:
62 size = item[2]
63 else:
64 size = -1
65
66 return try_int(size, -1)
67
68 def _get_storage_dir(self):
69 """Get torrent storage dir."""
70 return app.TORRENT_DIR
71
72 def _get_result_info(self, item):
73 """Return seeders and leechers from result."""
74 if isinstance(item, (dict, FeedParserDict)):
75 seeders = item.get('seeders', '-1')
76 leechers = item.get('leechers', '-1')
77
78 elif isinstance(item, (list, tuple)) and len(item) > 1:
79 seeders = item[3]
80 leechers = item[4]
81 else:
82 seeders = -1
83 leechers = -1
84
85 return seeders, leechers
86
87 def _get_title_and_url(self, item):
88 """Get title and url from result."""
89 if isinstance(item, (dict, FeedParserDict)):
90 download_url = item.get('url', '')
91 title = item.get('title', '')
92
93 if not download_url:
94 download_url = item.get('link', '')
95 elif isinstance(item, (list, tuple)) and len(item) > 1:
96 download_url = item[1]
97 title = item[0]
98 else:
99 download_url = ''
100 title = ''
101
102 if download_url:
103 download_url = download_url.replace('&', '&')
104
105 if title:
106 title = title.replace(' ', '.')
107
108 return title, download_url
109
110 def _verify_download(self, file_name=None):
111 """Validate torrent file."""
112 if not file_name or not os.path.isfile(file_name):
113 return False
114
115 try:
116 with open(file_name, 'rb') as f:
117 # `bencode.bdecode` is monkeypatched in `medusa.init`
118 meta_info = bdecode(f.read(), allow_extra_data=True)
119 return 'info' in meta_info and meta_info['info']
120 except BencodeDecodeError as error:
121 log.debug('Failed to validate torrent file: {name}. Error: {error}',
122 {'name': file_name, 'error': error})
123
124 remove_file_failed(file_name)
125 log.debug('{result} is not a valid torrent file',
126 {'result': file_name})
127
128 return False
129
130 def seed_ratio(self):
131 """Return seed ratio of provider."""
132 return self.ratio
133
134 def _get_pubdate(self, item):
135 """Return publish date of the item.
136
137 If provider doesnt have _get_pubdate function this will be used
138 """
139 if isinstance(item, dict):
140 pubdate = item.get('pubdate')
141 elif isinstance(item, (list, tuple)) and len(item) > 2:
142 pubdate = item[5]
143 else:
144 pubdate = None
145
146 return pubdate
147
148 def get_redirect_url(self, url):
149 """Get the address that the provided URL redirects to."""
150 log.debug('Retrieving redirect URL for {url}', {'url': url})
151
152 response = self.session.get(url, allow_redirects=False)
153 if response and response.headers.get('Location'):
154 return response.headers['Location']
155
156 log.debug('Unable to retrieve redirect URL for {url}', {'url': url})
157 return url
158
159 def _make_url(self, result):
160 """Return url if result is a magnet link."""
161 urls = []
162 filename = ''
163
164 if not result or not result.url:
165 return urls, filename
166
167 if result.url.startswith('magnet:'):
168 try:
169 info_hash = re.findall(r'urn:btih:([\w]{32,40})', result.url)[0].upper()
170
171 try:
172 torrent_name = re.findall('dn=([^&]+)', result.url)[0]
173 except Exception:
174 torrent_name = 'NO_DOWNLOAD_NAME'
175
176 if len(info_hash) == 32:
177 info_hash = b16encode(b32decode(info_hash)).upper()
178
179 if not info_hash:
180 log.error('Unable to extract torrent hash from magnet: {0}', result.url)
181 return urls, filename
182
183 urls = [x.format(info_hash=info_hash, torrent_name=torrent_name) for x in self.bt_cache_urls]
184 shuffle(urls)
185 except Exception:
186 log.error('Unable to extract torrent hash or name from magnet: {0}', result.url)
187 return urls, filename
188 else:
189 # Required for Jackett providers that use magnet redirects
190 # See: https://github.com/pymedusa/Medusa/issues/3435
191 if self.kind() == 'TorznabProvider':
192 redirect_url = self.get_redirect_url(result.url)
193 if redirect_url != result.url:
194 result.url = redirect_url
195 return self._make_url(result)
196
197 urls = [result.url]
198
199 result_name = sanitize_filename(result.name)
200 filename = join(self._get_storage_dir(), result_name + '.' + self.provider_type)
201
202 return urls, filename
203
[end of medusa/providers/torrent/torrent_provider.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/medusa/providers/torrent/torrent_provider.py b/medusa/providers/torrent/torrent_provider.py
--- a/medusa/providers/torrent/torrent_provider.py
+++ b/medusa/providers/torrent/torrent_provider.py
@@ -146,12 +146,13 @@
return pubdate
def get_redirect_url(self, url):
- """Get the address that the provided URL redirects to."""
+ """Get the final address that the provided URL redirects to."""
log.debug('Retrieving redirect URL for {url}', {'url': url})
- response = self.session.get(url, allow_redirects=False)
- if response and response.headers.get('Location'):
- return response.headers['Location']
+ response = self.session.get(url, stream=True)
+ if response:
+ response.close()
+ return response.url
log.debug('Unable to retrieve redirect URL for {url}', {'url': url})
return url
|
{"golden_diff": "diff --git a/medusa/providers/torrent/torrent_provider.py b/medusa/providers/torrent/torrent_provider.py\n--- a/medusa/providers/torrent/torrent_provider.py\n+++ b/medusa/providers/torrent/torrent_provider.py\n@@ -146,12 +146,13 @@\n return pubdate\n \n def get_redirect_url(self, url):\n- \"\"\"Get the address that the provided URL redirects to.\"\"\"\n+ \"\"\"Get the final address that the provided URL redirects to.\"\"\"\n log.debug('Retrieving redirect URL for {url}', {'url': url})\n \n- response = self.session.get(url, allow_redirects=False)\n- if response and response.headers.get('Location'):\n- return response.headers['Location']\n+ response = self.session.get(url, stream=True)\n+ if response:\n+ response.close()\n+ return response.url\n \n log.debug('Unable to retrieve redirect URL for {url}', {'url': url})\n return url\n", "issue": "Torrent file content is empty\n**Describe the bug**\r\n\r\nI have many log like this:\r\n\r\n2019-04-25 21:01:35 WARNING SNATCHQUEUE-SNATCH-274431 :: [f46bfac] Torrent file content is empty: Gotham.S04E20.VOSTFR.WebDl.720p.x264.-.Chris44\r\n\r\n**Expected behavior**\r\ntorrent from yggtorrent dont snatched\r\n\r\n**Screenshots**\r\n\r\n<img width=\"1174\" alt=\"Capture d\u2019e\u0301cran 2019-04-26 a\u0300 08 26 25\" src=\"https://user-images.githubusercontent.com/14791276/56787518-0af92e00-67fd-11e9-8c6e-72063f929f3a.png\">\r\n\r\n\r\n**Medusa (please complete the following information):**\r\n - OS: debian9\r\n - Branch: master\r\n - Commit: Branch:master \r\n Commit: f46bfacf8763204fbde4f26a5916095371d494d1 \r\n Version: 0.3.1\r\n\r\n**Logs:**\r\n\r\n2019-04-25 21:01:35 WARNING SNATCHQUEUE-SNATCH-274431 :: [f46bfac] Torrent file content is empty: Gotham.S04E20.VOSTFR.WebDl.720p.x264.-.Chris44</details>\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"Provider code for Generic Torrent Provider.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nfrom base64 import b16encode, b32decode\nfrom os.path import join\nfrom random import shuffle\n\nfrom bencode import BencodeDecodeError, bdecode\n\nfrom feedparser.util import FeedParserDict\n\nfrom medusa import app\nfrom medusa.classes import TorrentSearchResult\nfrom medusa.helper.common import sanitize_filename, try_int\nfrom medusa.helpers import remove_file_failed\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.generic_provider import GenericProvider\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass TorrentProvider(GenericProvider):\n \"\"\"Generic Torrent provider.\"\"\"\n\n def __init__(self, name):\n \"\"\"Initialize the class.\"\"\"\n super(TorrentProvider, self).__init__(name)\n\n self.ratio = None\n self.provider_type = GenericProvider.TORRENT\n self.minseed = 0\n self.minleech = 0\n\n def is_active(self):\n \"\"\"Check if provider is enabled.\"\"\"\n return bool(app.USE_TORRENTS) and self.is_enabled()\n\n @property\n def _custom_trackers(self):\n \"\"\"Check if provider has custom trackers.\"\"\"\n if not self.public or not app.TRACKERS_LIST:\n return ''\n\n return '&tr=' + '&tr='.join(x.strip() for x in app.TRACKERS_LIST if x.strip())\n\n def _get_result(self, episodes):\n \"\"\"Return a provider result object.\"\"\"\n return TorrentSearchResult(episodes, provider=self)\n\n def _get_size(self, item):\n \"\"\"Get result size.\"\"\"\n if isinstance(item, dict):\n size 
= item.get('size', -1)\n elif isinstance(item, (list, tuple)) and len(item) > 2:\n size = item[2]\n else:\n size = -1\n\n return try_int(size, -1)\n\n def _get_storage_dir(self):\n \"\"\"Get torrent storage dir.\"\"\"\n return app.TORRENT_DIR\n\n def _get_result_info(self, item):\n \"\"\"Return seeders and leechers from result.\"\"\"\n if isinstance(item, (dict, FeedParserDict)):\n seeders = item.get('seeders', '-1')\n leechers = item.get('leechers', '-1')\n\n elif isinstance(item, (list, tuple)) and len(item) > 1:\n seeders = item[3]\n leechers = item[4]\n else:\n seeders = -1\n leechers = -1\n\n return seeders, leechers\n\n def _get_title_and_url(self, item):\n \"\"\"Get title and url from result.\"\"\"\n if isinstance(item, (dict, FeedParserDict)):\n download_url = item.get('url', '')\n title = item.get('title', '')\n\n if not download_url:\n download_url = item.get('link', '')\n elif isinstance(item, (list, tuple)) and len(item) > 1:\n download_url = item[1]\n title = item[0]\n else:\n download_url = ''\n title = ''\n\n if download_url:\n download_url = download_url.replace('&', '&')\n\n if title:\n title = title.replace(' ', '.')\n\n return title, download_url\n\n def _verify_download(self, file_name=None):\n \"\"\"Validate torrent file.\"\"\"\n if not file_name or not os.path.isfile(file_name):\n return False\n\n try:\n with open(file_name, 'rb') as f:\n # `bencode.bdecode` is monkeypatched in `medusa.init`\n meta_info = bdecode(f.read(), allow_extra_data=True)\n return 'info' in meta_info and meta_info['info']\n except BencodeDecodeError as error:\n log.debug('Failed to validate torrent file: {name}. Error: {error}',\n {'name': file_name, 'error': error})\n\n remove_file_failed(file_name)\n log.debug('{result} is not a valid torrent file',\n {'result': file_name})\n\n return False\n\n def seed_ratio(self):\n \"\"\"Return seed ratio of provider.\"\"\"\n return self.ratio\n\n def _get_pubdate(self, item):\n \"\"\"Return publish date of the item.\n\n If provider doesnt have _get_pubdate function this will be used\n \"\"\"\n if isinstance(item, dict):\n pubdate = item.get('pubdate')\n elif isinstance(item, (list, tuple)) and len(item) > 2:\n pubdate = item[5]\n else:\n pubdate = None\n\n return pubdate\n\n def get_redirect_url(self, url):\n \"\"\"Get the address that the provided URL redirects to.\"\"\"\n log.debug('Retrieving redirect URL for {url}', {'url': url})\n\n response = self.session.get(url, allow_redirects=False)\n if response and response.headers.get('Location'):\n return response.headers['Location']\n\n log.debug('Unable to retrieve redirect URL for {url}', {'url': url})\n return url\n\n def _make_url(self, result):\n \"\"\"Return url if result is a magnet link.\"\"\"\n urls = []\n filename = ''\n\n if not result or not result.url:\n return urls, filename\n\n if result.url.startswith('magnet:'):\n try:\n info_hash = re.findall(r'urn:btih:([\\w]{32,40})', result.url)[0].upper()\n\n try:\n torrent_name = re.findall('dn=([^&]+)', result.url)[0]\n except Exception:\n torrent_name = 'NO_DOWNLOAD_NAME'\n\n if len(info_hash) == 32:\n info_hash = b16encode(b32decode(info_hash)).upper()\n\n if not info_hash:\n log.error('Unable to extract torrent hash from magnet: {0}', result.url)\n return urls, filename\n\n urls = [x.format(info_hash=info_hash, torrent_name=torrent_name) for x in self.bt_cache_urls]\n shuffle(urls)\n except Exception:\n log.error('Unable to extract torrent hash or name from magnet: {0}', result.url)\n return urls, filename\n else:\n # Required for Jackett 
providers that use magnet redirects\n # See: https://github.com/pymedusa/Medusa/issues/3435\n if self.kind() == 'TorznabProvider':\n redirect_url = self.get_redirect_url(result.url)\n if redirect_url != result.url:\n result.url = redirect_url\n return self._make_url(result)\n\n urls = [result.url]\n\n result_name = sanitize_filename(result.name)\n filename = join(self._get_storage_dir(), result_name + '.' + self.provider_type)\n\n return urls, filename\n", "path": "medusa/providers/torrent/torrent_provider.py"}]}
| 2,929 | 210 |
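The golden diff above resolves the final redirect target by letting `requests` follow the redirect chain with a streamed GET instead of reading a single `Location` header. A sketch of the patched helper, reconstructed from the diff (the `self.session` object is the provider's requests session):

```python
def get_redirect_url(self, url):
    """Get the final address that the provided URL redirects to."""
    log.debug('Retrieving redirect URL for {url}', {'url': url})

    # stream=True avoids downloading the (possibly large) torrent body;
    # response.url is the last URL after all redirects have been followed.
    response = self.session.get(url, stream=True)
    if response:
        response.close()
        return response.url

    log.debug('Unable to retrieve redirect URL for {url}', {'url': url})
    return url
```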
gh_patches_debug_57398
|
rasdani/github-patches
|
git_diff
|
translate__pootle-5797
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pootle_fs not expiring cache_keys
When a project uses pootle FS, stats are not updated. We have to call `pootle flush_cache --lru --django-cache` to update them manually.
</issue>
<code>
[start of pootle/apps/pootle_revision/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models.signals import post_save, pre_delete
10 from django.dispatch import receiver
11
12 from pootle.core.delegate import revision_updater
13 from pootle_app.models import Directory
14 from pootle_data.models import StoreData
15 from pootle_store.models import Store
16
17
18 @receiver(post_save, sender=StoreData)
19 def handle_storedata_save(**kwargs):
20 revision_updater.get(Store)(
21 context=kwargs["instance"].store).update(keys=["stats", "checks"])
22
23
24 @receiver(post_save, sender=Directory)
25 def handle_directory_save(**kwargs):
26 if kwargs.get("created"):
27 return
28 revision_updater.get(Directory)(
29 context=kwargs["instance"]).update(keys=["stats", "checks"])
30
31
32 @receiver(pre_delete, sender=Directory)
33 def handle_directory_delete(**kwargs):
34 revision_updater.get(Directory)(
35 context=kwargs["instance"].parent).update(keys=["stats", "checks"])
36
[end of pootle/apps/pootle_revision/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py
--- a/pootle/apps/pootle_revision/receivers.py
+++ b/pootle/apps/pootle_revision/receivers.py
@@ -23,10 +23,12 @@
@receiver(post_save, sender=Directory)
def handle_directory_save(**kwargs):
- if kwargs.get("created"):
- return
+ context = (
+ kwargs["instance"].parent
+ if kwargs.get("created")
+ else kwargs["instance"])
revision_updater.get(Directory)(
- context=kwargs["instance"]).update(keys=["stats", "checks"])
+ context=context).update(keys=["stats", "checks"])
@receiver(pre_delete, sender=Directory)
|
{"golden_diff": "diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py\n--- a/pootle/apps/pootle_revision/receivers.py\n+++ b/pootle/apps/pootle_revision/receivers.py\n@@ -23,10 +23,12 @@\n \n @receiver(post_save, sender=Directory)\n def handle_directory_save(**kwargs):\n- if kwargs.get(\"created\"):\n- return\n+ context = (\n+ kwargs[\"instance\"].parent\n+ if kwargs.get(\"created\")\n+ else kwargs[\"instance\"])\n revision_updater.get(Directory)(\n- context=kwargs[\"instance\"]).update(keys=[\"stats\", \"checks\"])\n+ context=context).update(keys=[\"stats\", \"checks\"])\n \n \n @receiver(pre_delete, sender=Directory)\n", "issue": "pootle_fs not expiring cache_keys\nWhen a project uses pootle FS, stats are not updated. We have to manually call `pootle flush_cache --lru --django-cache` to update it manually.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\n\nfrom pootle.core.delegate import revision_updater\nfrom pootle_app.models import Directory\nfrom pootle_data.models import StoreData\nfrom pootle_store.models import Store\n\n\n@receiver(post_save, sender=StoreData)\ndef handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n\n\n@receiver(post_save, sender=Directory)\ndef handle_directory_save(**kwargs):\n if kwargs.get(\"created\"):\n return\n revision_updater.get(Directory)(\n context=kwargs[\"instance\"]).update(keys=[\"stats\", \"checks\"])\n\n\n@receiver(pre_delete, sender=Directory)\ndef handle_directory_delete(**kwargs):\n revision_updater.get(Directory)(\n context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n", "path": "pootle/apps/pootle_revision/receivers.py"}]}
| 926 | 178 |
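The golden diff above changes the `post_save` receiver for `Directory` so that a newly created directory expires its parent's cached stats instead of being skipped entirely. For reference, a sketch of the receiver as it looks after the patch is applied, using only the names from the `pootle_revision/receivers.py` file shown in the record:

```python
from django.db.models.signals import post_save
from django.dispatch import receiver

from pootle.core.delegate import revision_updater
from pootle_app.models import Directory


@receiver(post_save, sender=Directory)
def handle_directory_save(**kwargs):
    # A freshly created directory carries no stats of its own yet, so bump the
    # parent's revision; for an updated directory, bump its own revision.
    context = (
        kwargs["instance"].parent
        if kwargs.get("created")
        else kwargs["instance"])
    revision_updater.get(Directory)(
        context=context).update(keys=["stats", "checks"])
```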
gh_patches_debug_22625
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleDetection-391
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is model pruning only available for yolov3?
If I want to prune a faster-rcnn model, how should I do it?
Thanks!
</issue>
<code>
[start of slim/prune/eval.py]
1 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18
19 import os
20
21
22 def set_paddle_flags(**kwargs):
23 for key, value in kwargs.items():
24 if os.environ.get(key, None) is None:
25 os.environ[key] = str(value)
26
27
28 # NOTE(paddle-dev): All of these flags should be set before
29 # `import paddle`. Otherwise, it would not take any effect.
30 set_paddle_flags(
31 FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory
32 )
33
34 import paddle.fluid as fluid
35 from paddleslim.prune import Pruner
36 from paddleslim.analysis import flops
37
38 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
39 import ppdet.utils.checkpoint as checkpoint
40 from ppdet.utils.check import check_gpu, check_version
41
42 from ppdet.data.reader import create_reader
43
44 from ppdet.core.workspace import load_config, merge_config, create
45 from ppdet.utils.cli import ArgsParser
46
47 import logging
48 FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
49 logging.basicConfig(level=logging.INFO, format=FORMAT)
50 logger = logging.getLogger(__name__)
51
52
53 def main():
54 """
55 Main evaluate function
56 """
57 cfg = load_config(FLAGS.config)
58 if 'architecture' in cfg:
59 main_arch = cfg.architecture
60 else:
61 raise ValueError("'architecture' not specified in config file.")
62
63 merge_config(FLAGS.opt)
64 # check if set use_gpu=True in paddlepaddle cpu version
65 check_gpu(cfg.use_gpu)
66 # check if paddlepaddle version is satisfied
67 check_version()
68
69 multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
70
71 # define executor
72 place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
73 exe = fluid.Executor(place)
74
75 # build program
76 model = create(main_arch)
77 startup_prog = fluid.Program()
78 eval_prog = fluid.Program()
79 with fluid.program_guard(eval_prog, startup_prog):
80 with fluid.unique_name.guard():
81 inputs_def = cfg['EvalReader']['inputs_def']
82 feed_vars, loader = model.build_inputs(**inputs_def)
83 if multi_scale_test is None:
84 fetches = model.eval(feed_vars)
85 else:
86 fetches = model.eval(feed_vars, multi_scale_test)
87 eval_prog = eval_prog.clone(True)
88
89 reader = create_reader(cfg.EvalReader)
90 loader.set_sample_list_generator(reader, place)
91
92 dataset = cfg['EvalReader']['dataset']
93
94 # eval already exists json file
95 if FLAGS.json_eval:
96 logger.info(
97 "In json_eval mode, PaddleDetection will evaluate json files in "
98 "output_eval directly. And proposal.json, bbox.json and mask.json "
99 "will be detected by default.")
100 json_eval_results(
101 cfg.metric, json_directory=FLAGS.output_eval, dataset=dataset)
102 return
103
104 pruned_params = FLAGS.pruned_params
105 assert (
106 FLAGS.pruned_params is not None
107 ), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option."
108 pruned_params = FLAGS.pruned_params.strip().split(",")
109 logger.info("pruned params: {}".format(pruned_params))
110 pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")]
111 logger.info("pruned ratios: {}".format(pruned_ratios))
112 assert (len(pruned_params) == len(pruned_ratios)
113 ), "The length of pruned params and pruned ratios should be equal."
114 assert (pruned_ratios > [0] * len(pruned_ratios) and
115 pruned_ratios < [1] * len(pruned_ratios)
116 ), "The elements of pruned ratios should be in range (0, 1)."
117
118 base_flops = flops(eval_prog)
119 pruner = Pruner()
120 eval_prog, _, _ = pruner.prune(
121 eval_prog,
122 fluid.global_scope(),
123 params=pruned_params,
124 ratios=pruned_ratios,
125 place=place,
126 only_graph=True)
127 pruned_flops = flops(eval_prog)
128 logger.info("pruned FLOPS: {}".format(
129 float(base_flops - pruned_flops) / base_flops))
130
131 compile_program = fluid.compiler.CompiledProgram(
132 eval_prog).with_data_parallel()
133
134 assert cfg.metric != 'OID', "eval process of OID dataset \
135 is not supported."
136
137 if cfg.metric == "WIDERFACE":
138 raise ValueError("metric type {} does not support in tools/eval.py, "
139 "please use tools/face_eval.py".format(cfg.metric))
140 assert cfg.metric in ['COCO', 'VOC'], \
141 "unknown metric type {}".format(cfg.metric)
142 extra_keys = []
143
144 if cfg.metric == 'COCO':
145 extra_keys = ['im_info', 'im_id', 'im_shape']
146 if cfg.metric == 'VOC':
147 extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']
148
149 keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)
150
151 # whether output bbox is normalized in model output layer
152 is_bbox_normalized = False
153 if hasattr(model, 'is_bbox_normalized') and \
154 callable(model.is_bbox_normalized):
155 is_bbox_normalized = model.is_bbox_normalized()
156
157 sub_eval_prog = None
158 sub_keys = None
159 sub_values = None
160 # build sub-program
161 if 'Mask' in main_arch and multi_scale_test:
162 sub_eval_prog = fluid.Program()
163 with fluid.program_guard(sub_eval_prog, startup_prog):
164 with fluid.unique_name.guard():
165 inputs_def = cfg['EvalReader']['inputs_def']
166 inputs_def['mask_branch'] = True
167 feed_vars, eval_loader = model.build_inputs(**inputs_def)
168 sub_fetches = model.eval(
169 feed_vars, multi_scale_test, mask_branch=True)
170 assert cfg.metric == 'COCO'
171 extra_keys = ['im_id', 'im_shape']
172 sub_keys, sub_values, _ = parse_fetches(sub_fetches, sub_eval_prog,
173 extra_keys)
174 sub_eval_prog = sub_eval_prog.clone(True)
175
176 # load model
177 exe.run(startup_prog)
178 if 'weights' in cfg:
179 checkpoint.load_checkpoint(exe, eval_prog, cfg.weights)
180
181 results = eval_run(exe, compile_program, loader, keys, values, cls, cfg,
182 sub_eval_prog, sub_keys, sub_values)
183
184 # evaluation
185 resolution = None
186 if 'mask' in results[0]:
187 resolution = model.mask_head.resolution
188 # if map_type not set, use default 11point, only use in VOC eval
189 map_type = cfg.map_type if 'map_type' in cfg else '11point'
190 eval_results(
191 results,
192 cfg.metric,
193 cfg.num_classes,
194 resolution,
195 is_bbox_normalized,
196 FLAGS.output_eval,
197 map_type,
198 dataset=dataset)
199
200
201 if __name__ == '__main__':
202 parser = ArgsParser()
203 parser.add_argument(
204 "--json_eval",
205 action='store_true',
206 default=False,
207 help="Whether to re eval with already exists bbox.json or mask.json")
208 parser.add_argument(
209 "-f",
210 "--output_eval",
211 default=None,
212 type=str,
213 help="Evaluation file directory, default is current directory.")
214
215 parser.add_argument(
216 "-p",
217 "--pruned_params",
218 default=None,
219 type=str,
220 help="The parameters to be pruned when calculating sensitivities.")
221 parser.add_argument(
222 "--pruned_ratios",
223 default=None,
224 type=str,
225 help="The ratios pruned iteratively for each parameter when calculating sensitivities."
226 )
227
228 FLAGS = parser.parse_args()
229 main()
230
[end of slim/prune/eval.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/slim/prune/eval.py b/slim/prune/eval.py
--- a/slim/prune/eval.py
+++ b/slim/prune/eval.py
@@ -86,6 +86,7 @@
fetches = model.eval(feed_vars, multi_scale_test)
eval_prog = eval_prog.clone(True)
+ exe.run(startup_prog)
reader = create_reader(cfg.EvalReader)
loader.set_sample_list_generator(reader, place)
@@ -123,7 +124,7 @@
params=pruned_params,
ratios=pruned_ratios,
place=place,
- only_graph=True)
+ only_graph=False)
pruned_flops = flops(eval_prog)
logger.info("pruned FLOPS: {}".format(
float(base_flops - pruned_flops) / base_flops))
@@ -174,7 +175,6 @@
sub_eval_prog = sub_eval_prog.clone(True)
# load model
- exe.run(startup_prog)
if 'weights' in cfg:
checkpoint.load_checkpoint(exe, eval_prog, cfg.weights)
|
{"golden_diff": "diff --git a/slim/prune/eval.py b/slim/prune/eval.py\n--- a/slim/prune/eval.py\n+++ b/slim/prune/eval.py\n@@ -86,6 +86,7 @@\n fetches = model.eval(feed_vars, multi_scale_test)\n eval_prog = eval_prog.clone(True)\n \n+ exe.run(startup_prog)\n reader = create_reader(cfg.EvalReader)\n loader.set_sample_list_generator(reader, place)\n \n@@ -123,7 +124,7 @@\n params=pruned_params,\n ratios=pruned_ratios,\n place=place,\n- only_graph=True)\n+ only_graph=False)\n pruned_flops = flops(eval_prog)\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n@@ -174,7 +175,6 @@\n sub_eval_prog = sub_eval_prog.clone(True)\n \n # load model\n- exe.run(startup_prog)\n if 'weights' in cfg:\n checkpoint.load_checkpoint(exe, eval_prog, cfg.weights)\n", "issue": "\u8bf7\u95ee\u6a21\u578b\u88c1\u526a\u53ea\u80fd\u9488\u5bf9yolov3\u4e48\uff1f\n\u5982\u679c\u60f3\u5bf9faster-rcnn\u7684\u6a21\u578b\u8fdb\u884c\u88c1\u526a\uff0c\u5e94\u8be5\u600e\u4e48\u505a\u5462\uff1f\r\n\r\n\u8c22\u8c22\uff01\n", "before_files": [{"content": "# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n\ndef set_paddle_flags(**kwargs):\n for key, value in kwargs.items():\n if os.environ.get(key, None) is None:\n os.environ[key] = str(value)\n\n\n# NOTE(paddle-dev): All of these flags should be set before\n# `import paddle`. 
Otherwise, it would not take any effect.\nset_paddle_flags(\n FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory\n)\n\nimport paddle.fluid as fluid\nfrom paddleslim.prune import Pruner\nfrom paddleslim.analysis import flops\n\nfrom ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results\nimport ppdet.utils.checkpoint as checkpoint\nfrom ppdet.utils.check import check_gpu, check_version\n\nfrom ppdet.data.reader import create_reader\n\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.utils.cli import ArgsParser\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"\n Main evaluate function\n \"\"\"\n cfg = load_config(FLAGS.config)\n if 'architecture' in cfg:\n main_arch = cfg.architecture\n else:\n raise ValueError(\"'architecture' not specified in config file.\")\n\n merge_config(FLAGS.opt)\n # check if set use_gpu=True in paddlepaddle cpu version\n check_gpu(cfg.use_gpu)\n # check if paddlepaddle version is satisfied\n check_version()\n\n multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)\n\n # define executor\n place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n # build program\n model = create(main_arch)\n startup_prog = fluid.Program()\n eval_prog = fluid.Program()\n with fluid.program_guard(eval_prog, startup_prog):\n with fluid.unique_name.guard():\n inputs_def = cfg['EvalReader']['inputs_def']\n feed_vars, loader = model.build_inputs(**inputs_def)\n if multi_scale_test is None:\n fetches = model.eval(feed_vars)\n else:\n fetches = model.eval(feed_vars, multi_scale_test)\n eval_prog = eval_prog.clone(True)\n\n reader = create_reader(cfg.EvalReader)\n loader.set_sample_list_generator(reader, place)\n\n dataset = cfg['EvalReader']['dataset']\n\n # eval already exists json file\n if FLAGS.json_eval:\n logger.info(\n \"In json_eval mode, PaddleDetection will evaluate json files in \"\n \"output_eval directly. And proposal.json, bbox.json and mask.json \"\n \"will be detected by default.\")\n json_eval_results(\n cfg.metric, json_directory=FLAGS.output_eval, dataset=dataset)\n return\n\n pruned_params = FLAGS.pruned_params\n assert (\n FLAGS.pruned_params is not None\n ), \"FLAGS.pruned_params is empty!!! 
Please set it by '--pruned_params' option.\"\n pruned_params = FLAGS.pruned_params.strip().split(\",\")\n logger.info(\"pruned params: {}\".format(pruned_params))\n pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(\",\")]\n logger.info(\"pruned ratios: {}\".format(pruned_ratios))\n assert (len(pruned_params) == len(pruned_ratios)\n ), \"The length of pruned params and pruned ratios should be equal.\"\n assert (pruned_ratios > [0] * len(pruned_ratios) and\n pruned_ratios < [1] * len(pruned_ratios)\n ), \"The elements of pruned ratios should be in range (0, 1).\"\n\n base_flops = flops(eval_prog)\n pruner = Pruner()\n eval_prog, _, _ = pruner.prune(\n eval_prog,\n fluid.global_scope(),\n params=pruned_params,\n ratios=pruned_ratios,\n place=place,\n only_graph=True)\n pruned_flops = flops(eval_prog)\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n\n compile_program = fluid.compiler.CompiledProgram(\n eval_prog).with_data_parallel()\n\n assert cfg.metric != 'OID', \"eval process of OID dataset \\\n is not supported.\"\n\n if cfg.metric == \"WIDERFACE\":\n raise ValueError(\"metric type {} does not support in tools/eval.py, \"\n \"please use tools/face_eval.py\".format(cfg.metric))\n assert cfg.metric in ['COCO', 'VOC'], \\\n \"unknown metric type {}\".format(cfg.metric)\n extra_keys = []\n\n if cfg.metric == 'COCO':\n extra_keys = ['im_info', 'im_id', 'im_shape']\n if cfg.metric == 'VOC':\n extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']\n\n keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)\n\n # whether output bbox is normalized in model output layer\n is_bbox_normalized = False\n if hasattr(model, 'is_bbox_normalized') and \\\n callable(model.is_bbox_normalized):\n is_bbox_normalized = model.is_bbox_normalized()\n\n sub_eval_prog = None\n sub_keys = None\n sub_values = None\n # build sub-program\n if 'Mask' in main_arch and multi_scale_test:\n sub_eval_prog = fluid.Program()\n with fluid.program_guard(sub_eval_prog, startup_prog):\n with fluid.unique_name.guard():\n inputs_def = cfg['EvalReader']['inputs_def']\n inputs_def['mask_branch'] = True\n feed_vars, eval_loader = model.build_inputs(**inputs_def)\n sub_fetches = model.eval(\n feed_vars, multi_scale_test, mask_branch=True)\n assert cfg.metric == 'COCO'\n extra_keys = ['im_id', 'im_shape']\n sub_keys, sub_values, _ = parse_fetches(sub_fetches, sub_eval_prog,\n extra_keys)\n sub_eval_prog = sub_eval_prog.clone(True)\n\n # load model\n exe.run(startup_prog)\n if 'weights' in cfg:\n checkpoint.load_checkpoint(exe, eval_prog, cfg.weights)\n\n results = eval_run(exe, compile_program, loader, keys, values, cls, cfg,\n sub_eval_prog, sub_keys, sub_values)\n\n # evaluation\n resolution = None\n if 'mask' in results[0]:\n resolution = model.mask_head.resolution\n # if map_type not set, use default 11point, only use in VOC eval\n map_type = cfg.map_type if 'map_type' in cfg else '11point'\n eval_results(\n results,\n cfg.metric,\n cfg.num_classes,\n resolution,\n is_bbox_normalized,\n FLAGS.output_eval,\n map_type,\n dataset=dataset)\n\n\nif __name__ == '__main__':\n parser = ArgsParser()\n parser.add_argument(\n \"--json_eval\",\n action='store_true',\n default=False,\n help=\"Whether to re eval with already exists bbox.json or mask.json\")\n parser.add_argument(\n \"-f\",\n \"--output_eval\",\n default=None,\n type=str,\n help=\"Evaluation file directory, default is current directory.\")\n\n parser.add_argument(\n \"-p\",\n \"--pruned_params\",\n 
default=None,\n type=str,\n help=\"The parameters to be pruned when calculating sensitivities.\")\n parser.add_argument(\n \"--pruned_ratios\",\n default=None,\n type=str,\n help=\"The ratios pruned iteratively for each parameter when calculating sensitivities.\"\n )\n\n FLAGS = parser.parse_args()\n main()\n", "path": "slim/prune/eval.py"}]}
| 2,994 | 250 |
gh_patches_debug_34855
|
rasdani/github-patches
|
git_diff
|
streamlink__streamlink-5869
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.bloomberg: error: unmatched '{' in format spec
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.6.2
### Description
It's quite a strange error. It seems like there has been a change to the JSON data returned from Bloomberg, or the data is corrupted.
### Debug log
```text
$ streamlink --loglevel=debug https://www.bloomberg.com/live/us
[session][debug] Loading plugin: bloomberg
[cli][debug] OS: macOS 10.16
[cli][debug] Python: 3.9.12
[cli][debug] OpenSSL: OpenSSL 1.1.1n 15 Mar 2022
[cli][debug] Streamlink: 6.6.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2021.10.8
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.8.0
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.19.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.27.1
[cli][debug] trio: 0.22.2
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.1.1
[cli][debug] urllib3: 1.26.9
[cli][debug] websocket-client: 1.6.3
[cli][debug] Arguments:
[cli][debug] url=https://www.bloomberg.com/live/us
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin bloomberg for URL https://www.bloomberg.com/live/us
error: unmatched '{' in format spec
```
</issue>
<code>
[start of src/streamlink/plugins/bloomberg.py]
1 """
2 $description America-based television network centred towards business and capital market programming.
3 $url bloomberg.com
4 $type live, vod
5 $metadata title
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, PluginError, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.hls import HLSStream
14
15
16 log = logging.getLogger(__name__)
17
18
19 @pluginmatcher(re.compile(r"""
20 https?://(?:www\.)?bloomberg\.com/
21 (?:
22 (?P<live>live)(?:/(?P<channel>[^/]+))?
23 |
24 news/videos/[^/]+/[^/]+
25 )
26 """, re.VERBOSE))
27 class Bloomberg(Plugin):
28 LIVE_API_URL = "https://cdn.gotraffic.net/projector/latest/assets/config/config.min.json?v=1"
29 VOD_API_URL = "https://www.bloomberg.com/api/embed?id={0}"
30 DEFAULT_CHANNEL = "us"
31
32 def _get_live_streams(self, data, channel):
33 schema_live_ids = validate.Schema(
34 {"live": {"channels": {"byChannelId": {
35 channel: validate.all(
36 {"liveId": str},
37 validate.get("liveId"),
38 ),
39 }}}},
40 validate.get(("live", "channels", "byChannelId", channel)),
41 )
42 try:
43 live_id = schema_live_ids.validate(data)
44 except PluginError:
45 log.error(f"Could not find liveId for channel '{channel}'")
46 return
47
48 log.debug(f"Found liveId: {live_id}")
49 return self.session.http.get(self.LIVE_API_URL, schema=validate.Schema(
50 validate.parse_json(),
51 {"livestreams": {
52 live_id: {
53 validate.optional("cdns"): validate.all(
54 [{"streams": [{
55 "url": validate.url(),
56 }]}],
57 validate.transform(lambda x: [urls["url"] for y in x for urls in y["streams"]]),
58 ),
59 },
60 }},
61 validate.get(("livestreams", live_id, "cdns")),
62 ))
63
64 def _get_vod_streams(self, data):
65 schema_vod_list = validate.Schema(
66 validate.any(
67 validate.all(
68 {"video": {"videoStory": dict}},
69 validate.get(("video", "videoStory")),
70 ),
71 validate.all(
72 {"quicktakeVideo": {"videoStory": dict}},
73 validate.get(("quicktakeVideo", "videoStory")),
74 ),
75 ),
76 {"video": {
77 "bmmrId": str,
78 }},
79 validate.get(("video", "bmmrId")),
80 )
81 schema_url = validate.all(
82 {"url": validate.url()},
83 validate.get("url"),
84 )
85
86 try:
87 video_id = schema_vod_list.validate(data)
88 except PluginError:
89 log.error("Could not find videoId")
90 return
91
92 log.debug(f"Found videoId: {video_id}")
93 vod_url = self.VOD_API_URL.format(video_id)
94 secureStreams, streams, self.title = self.session.http.get(vod_url, schema=validate.Schema(
95 validate.parse_json(),
96 {
97 validate.optional("secureStreams"): [schema_url],
98 validate.optional("streams"): [schema_url],
99 "title": str,
100 },
101 validate.union_get("secureStreams", "streams", "title"),
102 ))
103
104 return secureStreams or streams
105
106 def _get_streams(self):
107 del self.session.http.headers["Accept-Encoding"]
108
109 try:
110 data = self.session.http.get(self.url, schema=validate.Schema(
111 validate.parse_html(),
112 validate.xml_xpath_string(".//script[contains(text(),'window.__PRELOADED_STATE__')][1]/text()"),
113 str,
114 validate.regex(re.compile(r"^\s*window\.__PRELOADED_STATE__\s*=\s*({.+})\s*;?\s*$", re.DOTALL)),
115 validate.get(1),
116 validate.parse_json(),
117 ))
118 except PluginError:
119 log.error("Could not find JSON data. Invalid URL or bot protection...")
120 return
121
122 if self.match.group("live"):
123 streams = self._get_live_streams(data, self.match.group("channel") or self.DEFAULT_CHANNEL)
124 else:
125 streams = self._get_vod_streams(data)
126
127 if streams:
128 # just return the first stream
129 return HLSStream.parse_variant_playlist(self.session, streams[0])
130
131
132 __plugin__ = Bloomberg
133
[end of src/streamlink/plugins/bloomberg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/streamlink/plugins/bloomberg.py b/src/streamlink/plugins/bloomberg.py
--- a/src/streamlink/plugins/bloomberg.py
+++ b/src/streamlink/plugins/bloomberg.py
@@ -16,14 +16,14 @@
log = logging.getLogger(__name__)
-@pluginmatcher(re.compile(r"""
- https?://(?:www\.)?bloomberg\.com/
- (?:
- (?P<live>live)(?:/(?P<channel>[^/]+))?
- |
- news/videos/[^/]+/[^/]+
- )
-""", re.VERBOSE))
+@pluginmatcher(
+ name="live",
+ pattern=re.compile(r"https?://(?:www\.)?bloomberg\.com/live(?:/(?P<channel>[^/]+))?"),
+)
+@pluginmatcher(
+ name="vod",
+ pattern=re.compile(r"https?://(?:www\.)?bloomberg\.com/news/videos/[^/]+/[^/]+"),
+)
class Bloomberg(Plugin):
LIVE_API_URL = "https://cdn.gotraffic.net/projector/latest/assets/config/config.min.json?v=1"
VOD_API_URL = "https://www.bloomberg.com/api/embed?id={0}"
@@ -106,21 +106,23 @@
def _get_streams(self):
del self.session.http.headers["Accept-Encoding"]
- try:
- data = self.session.http.get(self.url, schema=validate.Schema(
- validate.parse_html(),
- validate.xml_xpath_string(".//script[contains(text(),'window.__PRELOADED_STATE__')][1]/text()"),
- str,
- validate.regex(re.compile(r"^\s*window\.__PRELOADED_STATE__\s*=\s*({.+})\s*;?\s*$", re.DOTALL)),
- validate.get(1),
- validate.parse_json(),
- ))
- except PluginError:
+ data = self.session.http.get(self.url, schema=validate.Schema(
+ validate.parse_html(),
+ validate.xml_xpath_string(".//script[contains(text(),'window.__PRELOADED_STATE__')][1]/text()"),
+ validate.none_or_all(
+ re.compile(r"\bwindow\.__PRELOADED_STATE__\s*=\s*(?P<json>{.+?})\s*;(?:\s|$)"),
+ validate.none_or_all(
+ validate.get("json"),
+ validate.parse_json(),
+ ),
+ ),
+ ))
+ if not data:
log.error("Could not find JSON data. Invalid URL or bot protection...")
return
- if self.match.group("live"):
- streams = self._get_live_streams(data, self.match.group("channel") or self.DEFAULT_CHANNEL)
+ if self.matches["live"]:
+ streams = self._get_live_streams(data, self.match["channel"] or self.DEFAULT_CHANNEL)
else:
streams = self._get_vod_streams(data)
|
{"golden_diff": "diff --git a/src/streamlink/plugins/bloomberg.py b/src/streamlink/plugins/bloomberg.py\n--- a/src/streamlink/plugins/bloomberg.py\n+++ b/src/streamlink/plugins/bloomberg.py\n@@ -16,14 +16,14 @@\n log = logging.getLogger(__name__)\n \n \n-@pluginmatcher(re.compile(r\"\"\"\n- https?://(?:www\\.)?bloomberg\\.com/\n- (?:\n- (?P<live>live)(?:/(?P<channel>[^/]+))?\n- |\n- news/videos/[^/]+/[^/]+\n- )\n-\"\"\", re.VERBOSE))\n+@pluginmatcher(\n+ name=\"live\",\n+ pattern=re.compile(r\"https?://(?:www\\.)?bloomberg\\.com/live(?:/(?P<channel>[^/]+))?\"),\n+)\n+@pluginmatcher(\n+ name=\"vod\",\n+ pattern=re.compile(r\"https?://(?:www\\.)?bloomberg\\.com/news/videos/[^/]+/[^/]+\"),\n+)\n class Bloomberg(Plugin):\n LIVE_API_URL = \"https://cdn.gotraffic.net/projector/latest/assets/config/config.min.json?v=1\"\n VOD_API_URL = \"https://www.bloomberg.com/api/embed?id={0}\"\n@@ -106,21 +106,23 @@\n def _get_streams(self):\n del self.session.http.headers[\"Accept-Encoding\"]\n \n- try:\n- data = self.session.http.get(self.url, schema=validate.Schema(\n- validate.parse_html(),\n- validate.xml_xpath_string(\".//script[contains(text(),'window.__PRELOADED_STATE__')][1]/text()\"),\n- str,\n- validate.regex(re.compile(r\"^\\s*window\\.__PRELOADED_STATE__\\s*=\\s*({.+})\\s*;?\\s*$\", re.DOTALL)),\n- validate.get(1),\n- validate.parse_json(),\n- ))\n- except PluginError:\n+ data = self.session.http.get(self.url, schema=validate.Schema(\n+ validate.parse_html(),\n+ validate.xml_xpath_string(\".//script[contains(text(),'window.__PRELOADED_STATE__')][1]/text()\"),\n+ validate.none_or_all(\n+ re.compile(r\"\\bwindow\\.__PRELOADED_STATE__\\s*=\\s*(?P<json>{.+?})\\s*;(?:\\s|$)\"),\n+ validate.none_or_all(\n+ validate.get(\"json\"),\n+ validate.parse_json(),\n+ ),\n+ ),\n+ ))\n+ if not data:\n log.error(\"Could not find JSON data. Invalid URL or bot protection...\")\n return\n \n- if self.match.group(\"live\"):\n- streams = self._get_live_streams(data, self.match.group(\"channel\") or self.DEFAULT_CHANNEL)\n+ if self.matches[\"live\"]:\n+ streams = self._get_live_streams(data, self.match[\"channel\"] or self.DEFAULT_CHANNEL)\n else:\n streams = self._get_vod_streams(data)\n", "issue": "plugins.bloomberg: error: unmatched '{' in format spec\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nstreamlink 6.6.2\n\n### Description\n\nIt's quite a strange error. 
Seems like there is a change to the JSON data return from Bloomberg, or it is corrupted.\n\n### Debug log\n\n```text\n$ streamlink --loglevel=debug https://www.bloomberg.com/live/us\r\n[session][debug] Loading plugin: bloomberg\r\n[cli][debug] OS: macOS 10.16\r\n[cli][debug] Python: 3.9.12\r\n[cli][debug] OpenSSL: OpenSSL 1.1.1n 15 Mar 2022\r\n[cli][debug] Streamlink: 6.6.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2021.10.8\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.8.0\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.19.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.27.1\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.1.1\r\n[cli][debug] urllib3: 1.26.9\r\n[cli][debug] websocket-client: 1.6.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.bloomberg.com/live/us\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin bloomberg for URL https://www.bloomberg.com/live/us\r\nerror: unmatched '{' in format spec\n```\n\n", "before_files": [{"content": "\"\"\"\n$description America-based television network centred towards business and capital market programming.\n$url bloomberg.com\n$type live, vod\n$metadata title\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?bloomberg\\.com/\n (?:\n (?P<live>live)(?:/(?P<channel>[^/]+))?\n |\n news/videos/[^/]+/[^/]+\n )\n\"\"\", re.VERBOSE))\nclass Bloomberg(Plugin):\n LIVE_API_URL = \"https://cdn.gotraffic.net/projector/latest/assets/config/config.min.json?v=1\"\n VOD_API_URL = \"https://www.bloomberg.com/api/embed?id={0}\"\n DEFAULT_CHANNEL = \"us\"\n\n def _get_live_streams(self, data, channel):\n schema_live_ids = validate.Schema(\n {\"live\": {\"channels\": {\"byChannelId\": {\n channel: validate.all(\n {\"liveId\": str},\n validate.get(\"liveId\"),\n ),\n }}}},\n validate.get((\"live\", \"channels\", \"byChannelId\", channel)),\n )\n try:\n live_id = schema_live_ids.validate(data)\n except PluginError:\n log.error(f\"Could not find liveId for channel '{channel}'\")\n return\n\n log.debug(f\"Found liveId: {live_id}\")\n return self.session.http.get(self.LIVE_API_URL, schema=validate.Schema(\n validate.parse_json(),\n {\"livestreams\": {\n live_id: {\n validate.optional(\"cdns\"): validate.all(\n [{\"streams\": [{\n \"url\": validate.url(),\n }]}],\n validate.transform(lambda x: [urls[\"url\"] for y in x for urls in y[\"streams\"]]),\n ),\n },\n }},\n validate.get((\"livestreams\", live_id, \"cdns\")),\n ))\n\n def _get_vod_streams(self, data):\n schema_vod_list = validate.Schema(\n validate.any(\n validate.all(\n {\"video\": {\"videoStory\": dict}},\n validate.get((\"video\", \"videoStory\")),\n ),\n validate.all(\n {\"quicktakeVideo\": {\"videoStory\": dict}},\n validate.get((\"quicktakeVideo\", \"videoStory\")),\n ),\n ),\n {\"video\": {\n \"bmmrId\": str,\n }},\n validate.get((\"video\", \"bmmrId\")),\n )\n schema_url = validate.all(\n {\"url\": validate.url()},\n validate.get(\"url\"),\n )\n\n try:\n video_id = schema_vod_list.validate(data)\n except PluginError:\n log.error(\"Could not find videoId\")\n return\n\n log.debug(f\"Found videoId: {video_id}\")\n vod_url = self.VOD_API_URL.format(video_id)\n secureStreams, streams, self.title = self.session.http.get(vod_url, 
schema=validate.Schema(\n validate.parse_json(),\n {\n validate.optional(\"secureStreams\"): [schema_url],\n validate.optional(\"streams\"): [schema_url],\n \"title\": str,\n },\n validate.union_get(\"secureStreams\", \"streams\", \"title\"),\n ))\n\n return secureStreams or streams\n\n def _get_streams(self):\n del self.session.http.headers[\"Accept-Encoding\"]\n\n try:\n data = self.session.http.get(self.url, schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[contains(text(),'window.__PRELOADED_STATE__')][1]/text()\"),\n str,\n validate.regex(re.compile(r\"^\\s*window\\.__PRELOADED_STATE__\\s*=\\s*({.+})\\s*;?\\s*$\", re.DOTALL)),\n validate.get(1),\n validate.parse_json(),\n ))\n except PluginError:\n log.error(\"Could not find JSON data. Invalid URL or bot protection...\")\n return\n\n if self.match.group(\"live\"):\n streams = self._get_live_streams(data, self.match.group(\"channel\") or self.DEFAULT_CHANNEL)\n else:\n streams = self._get_vod_streams(data)\n\n if streams:\n # just return the first stream\n return HLSStream.parse_variant_playlist(self.session, streams[0])\n\n\n__plugin__ = Bloomberg\n", "path": "src/streamlink/plugins/bloomberg.py"}]}
| 2,378 | 645 |
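The `error: unmatched '{' in format spec` in the Bloomberg record above is a generic Python string-formatting failure: it appears when text that itself contains braces (here, the page's JSON payload) ends up being used as a `str.format` template rather than as a format argument. A hypothetical minimal reproduction of the symptom follows — this is not the plugin's actual call chain, which the golden diff avoids by matching the preloaded-state JSON non-greedily and tolerating a missing match:

```python
payload = '{"live": {"channels": {"byChannelId": {}}'  # JSON-like text with braces

# Safe: the payload is passed as an argument, so its braces are left alone.
print("debug: {}".format(payload))

# Unsafe: the payload is used as the template itself, so '{"live": ...' is
# parsed as a replacement field and raises ValueError (for text like this,
# typically "unmatched '{' in format spec").
print(payload.format())
```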
gh_patches_debug_17276
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-2164
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow no-passphrase private keys
### Describe the bug
The Snowflake connector (at least) requires a passphrase in the profile file to open a private key connection.
### Steps To Reproduce
Create a dbt target like the following:
```
qa:
type: snowflake
account: my_account
user: my_user
role: ANALYST
# Keypair config
private_key_path: "path/to/my/no/passphrase/key"
private_key_passphrase: None
database: DB
warehouse: WH
schema: PUBLIC
threads: 1
client_session_keep_alive: False
```
Attempt to run against said DBT target. DBT will fail because no passphrase is provided. If, instead, a passphrase is provided, the connection will fail because the key is not encrypted.
### Expected behavior
Perhaps emit a warning in the output that unencrypted keys are not the norm, and require an additional override field to be set in the profile. If that's set, go ahead with the unencrypted key.
### System information
**Which database are you using dbt with?**
- [ ] postgres
- [ ] redshift
- [ ] bigquery
- [ x] snowflake
- [ ] other (specify: ____________)
**The output of `dbt --version`:**
```
0.14.2
```
**The operating system you're using:**
OSX
**The output of `python --version`:**
Python 3.7.3
</issue>
<code>
[start of plugins/snowflake/dbt/adapters/snowflake/connections.py]
1 import base64
2 import datetime
3 import pytz
4 import re
5 from contextlib import contextmanager
6 from dataclasses import dataclass
7 from io import StringIO
8 from typing import Optional
9
10 from cryptography.hazmat.backends import default_backend
11 from cryptography.hazmat.primitives import serialization
12 import requests
13 import snowflake.connector
14 import snowflake.connector.errors
15
16 from dbt.exceptions import (
17 InternalException, RuntimeException, FailedToConnectException,
18 DatabaseException, warn_or_error
19 )
20 from dbt.adapters.base import Credentials
21 from dbt.adapters.sql import SQLConnectionManager
22 from dbt.logger import GLOBAL_LOGGER as logger
23
24
25 _TOKEN_REQUEST_URL = 'https://{}.snowflakecomputing.com/oauth/token-request'
26
27
28 @dataclass
29 class SnowflakeCredentials(Credentials):
30 account: str
31 user: str
32 warehouse: Optional[str]
33 role: Optional[str]
34 password: Optional[str]
35 authenticator: Optional[str]
36 private_key_path: Optional[str]
37 private_key_passphrase: Optional[str]
38 token: Optional[str]
39 oauth_client_id: Optional[str]
40 oauth_client_secret: Optional[str]
41 client_session_keep_alive: bool = False
42
43 def __post_init__(self):
44 if (
45 self.authenticator != 'oauth' and
46 (self.oauth_client_secret or self.oauth_client_id or self.token)
47 ):
48 # the user probably forgot to set 'authenticator' like I keep doing
49 warn_or_error(
50 'Authenticator is not set to oauth, but an oauth-only '
51 'parameter is set! Did you mean to set authenticator: oauth?'
52 )
53
54 @property
55 def type(self):
56 return 'snowflake'
57
58 def _connection_keys(self):
59 return (
60 'account', 'user', 'database', 'schema', 'warehouse', 'role',
61 'client_session_keep_alive'
62 )
63
64 def auth_args(self):
65 # Pull all of the optional authentication args for the connector,
66 # let connector handle the actual arg validation
67 result = {}
68 if self.password:
69 result['password'] = self.password
70 if self.authenticator:
71 result['authenticator'] = self.authenticator
72 if self.authenticator == 'oauth':
73 token = self.token
74 # if we have a client ID/client secret, the token is a refresh
75 # token, not an access token
76 if self.oauth_client_id and self.oauth_client_secret:
77 token = self._get_access_token()
78 elif self.oauth_client_id:
79 warn_or_error(
80 'Invalid profile: got an oauth_client_id, but not an '
81 'oauth_client_secret!'
82 )
83 elif self.oauth_client_secret:
84 warn_or_error(
85 'Invalid profile: got an oauth_client_secret, but not '
86 'an oauth_client_id!'
87 )
88
89 result['token'] = token
90 result['private_key'] = self._get_private_key()
91 return result
92
93 def _get_access_token(self) -> str:
94 if self.authenticator != 'oauth':
95 raise InternalException('Can only get access tokens for oauth')
96 missing = any(
97 x is None for x in
98 (self.oauth_client_id, self.oauth_client_secret, self.token)
99 )
100 if missing:
101 raise InternalException(
102 'need a client ID a client secret, and a refresh token to get '
103 'an access token'
104 )
105 # should the full url be a config item?
106 token_url = _TOKEN_REQUEST_URL.format(self.account)
107 # I think this is only used to redirect on success, which we ignore
108 # (it does not have to match the integration's settings in snowflake)
109 redirect_uri = 'http://localhost:9999'
110 data = {
111 'grant_type': 'refresh_token',
112 'refresh_token': self.token,
113 'redirect_uri': redirect_uri
114 }
115
116 auth = base64.b64encode(
117 f'{self.oauth_client_id}:{self.oauth_client_secret}'
118 .encode('ascii')
119 ).decode('ascii')
120 headers = {
121 'Authorization': f'Basic {auth}',
122 'Content-type': 'application/x-www-form-urlencoded;charset=utf-8'
123 }
124 result = requests.post(token_url, headers=headers, data=data)
125 result_json = result.json()
126 if 'access_token' not in result_json:
127 raise DatabaseException(f'Did not get a token: {result_json}')
128 return result_json['access_token']
129
130 def _get_private_key(self):
131 """Get Snowflake private key by path or None."""
132 if not self.private_key_path or self.private_key_passphrase is None:
133 return None
134
135 with open(self.private_key_path, 'rb') as key:
136 p_key = serialization.load_pem_private_key(
137 key.read(),
138 password=self.private_key_passphrase.encode(),
139 backend=default_backend())
140
141 return p_key.private_bytes(
142 encoding=serialization.Encoding.DER,
143 format=serialization.PrivateFormat.PKCS8,
144 encryption_algorithm=serialization.NoEncryption())
145
146
147 class SnowflakeConnectionManager(SQLConnectionManager):
148 TYPE = 'snowflake'
149
150 @contextmanager
151 def exception_handler(self, sql):
152 try:
153 yield
154 except snowflake.connector.errors.ProgrammingError as e:
155 msg = str(e)
156
157 logger.debug('Snowflake error: {}'.format(msg))
158
159 if 'Empty SQL statement' in msg:
160 logger.debug("got empty sql statement, moving on")
161 elif 'This session does not have a current database' in msg:
162 self.release()
163 raise FailedToConnectException(
164 ('{}\n\nThis error sometimes occurs when invalid '
165 'credentials are provided, or when your default role '
166 'does not have access to use the specified database. '
167 'Please double check your profile and try again.')
168 .format(msg))
169 else:
170 self.release()
171 raise DatabaseException(msg)
172 except Exception as e:
173 logger.debug("Error running SQL: {}", sql)
174 logger.debug("Rolling back transaction.")
175 self.release()
176 if isinstance(e, RuntimeException):
177 # during a sql query, an internal to dbt exception was raised.
178 # this sounds a lot like a signal handler and probably has
179 # useful information, so raise it without modification.
180 raise
181 raise RuntimeException(str(e)) from e
182
183 @classmethod
184 def open(cls, connection):
185 if connection.state == 'open':
186 logger.debug('Connection is already open, skipping open.')
187 return connection
188
189 try:
190 creds = connection.credentials
191
192 handle = snowflake.connector.connect(
193 account=creds.account,
194 user=creds.user,
195 database=creds.database,
196 schema=creds.schema,
197 warehouse=creds.warehouse,
198 role=creds.role,
199 autocommit=False,
200 client_session_keep_alive=creds.client_session_keep_alive,
201 application='dbt',
202 **creds.auth_args()
203 )
204
205 connection.handle = handle
206 connection.state = 'open'
207 except snowflake.connector.errors.Error as e:
208 logger.debug("Got an error when attempting to open a snowflake "
209 "connection: '{}'"
210 .format(e))
211
212 connection.handle = None
213 connection.state = 'fail'
214
215 raise FailedToConnectException(str(e))
216
217 def cancel(self, connection):
218 handle = connection.handle
219 sid = handle.session_id
220
221 connection_name = connection.name
222
223 sql = 'select system$abort_session({})'.format(sid)
224
225 logger.debug("Cancelling query '{}' ({})".format(connection_name, sid))
226
227 _, cursor = self.add_query(sql)
228 res = cursor.fetchone()
229
230 logger.debug("Cancel query '{}': {}".format(connection_name, res))
231
232 @classmethod
233 def get_status(cls, cursor):
234 state = cursor.sqlstate
235
236 if state is None:
237 state = 'SUCCESS'
238
239 return "{} {}".format(state, cursor.rowcount)
240
241 @classmethod
242 def _split_queries(cls, sql):
243 "Splits sql statements at semicolons into discrete queries"
244
245 sql_s = str(sql)
246 sql_buf = StringIO(sql_s)
247 split_query = snowflake.connector.util_text.split_statements(sql_buf)
248 return [part[0] for part in split_query]
249
250 @classmethod
251 def process_results(cls, column_names, rows):
252 # Override for Snowflake. The datetime objects returned by
253 # snowflake-connector-python are not pickleable, so we need
254 # to replace them with sane timezones
255 fixed = []
256 for row in rows:
257 fixed_row = []
258 for col in row:
259 if isinstance(col, datetime.datetime) and col.tzinfo:
260 offset = col.utcoffset()
261 offset_seconds = offset.total_seconds()
262 new_timezone = pytz.FixedOffset(offset_seconds // 60)
263 col = col.astimezone(tz=new_timezone)
264 fixed_row.append(col)
265
266 fixed.append(fixed_row)
267
268 return super().process_results(column_names, fixed)
269
270 def add_query(self, sql, auto_begin=True,
271 bindings=None, abridge_sql_log=False):
272
273 connection = None
274 cursor = None
275
276 if bindings:
277 # The snowflake connector is more strict than, eg., psycopg2 -
278 # which allows any iterable thing to be passed as a binding.
279 bindings = tuple(bindings)
280
281 queries = self._split_queries(sql)
282
283 for individual_query in queries:
284 # hack -- after the last ';', remove comments and don't run
285 # empty queries. this avoids using exceptions as flow control,
286 # and also allows us to return the status of the last cursor
287 without_comments = re.sub(
288 re.compile('^.*(--.*)$', re.MULTILINE),
289 '', individual_query).strip()
290
291 if without_comments == "":
292 continue
293
294 connection, cursor = super().add_query(
295 individual_query, auto_begin,
296 bindings=bindings,
297 abridge_sql_log=abridge_sql_log
298 )
299
300 if cursor is None:
301 conn = self.get_thread_connection()
302 if conn is None or conn.name is None:
303 conn_name = '<None>'
304 else:
305 conn_name = conn.name
306
307 raise RuntimeException(
308 "Tried to run an empty query on model '{}'. If you are "
309 "conditionally running\nsql, eg. in a model hook, make "
310 "sure your `else` clause contains valid sql!\n\n"
311 "Provided SQL:\n{}"
312 .format(conn_name, sql)
313 )
314
315 return connection, cursor
316
317 @classmethod
318 def _rollback_handle(cls, connection):
319 """On snowflake, rolling back the handle of an aborted session raises
320 an exception.
321 """
322 logger.debug('initiating rollback')
323 try:
324 connection.handle.rollback()
325 except snowflake.connector.errors.ProgrammingError as e:
326 msg = str(e)
327 if 'Session no longer exists' not in msg:
328 raise
329
[end of plugins/snowflake/dbt/adapters/snowflake/connections.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugins/snowflake/dbt/adapters/snowflake/connections.py b/plugins/snowflake/dbt/adapters/snowflake/connections.py
--- a/plugins/snowflake/dbt/adapters/snowflake/connections.py
+++ b/plugins/snowflake/dbt/adapters/snowflake/connections.py
@@ -129,13 +129,18 @@
def _get_private_key(self):
"""Get Snowflake private key by path or None."""
- if not self.private_key_path or self.private_key_passphrase is None:
+ if not self.private_key_path:
return None
+ if self.private_key_passphrase:
+ encoded_passphrase = self.private_key_passphrase.encode()
+ else:
+ encoded_passphrase = None
+
with open(self.private_key_path, 'rb') as key:
p_key = serialization.load_pem_private_key(
key.read(),
- password=self.private_key_passphrase.encode(),
+ password=encoded_passphrase,
backend=default_backend())
return p_key.private_bytes(
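The patch above boils down to passing `password=None` to the key loader when no passphrase is configured. A rough standalone sketch of that behaviour (the helper name and arguments here are invented for illustration, not the plugin's actual API):

```python
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization

def load_private_key(key_path, passphrase=None):
    # Encode the passphrase only when one is supplied; unencrypted keys
    # must be loaded with password=None or cryptography raises a TypeError.
    password = passphrase.encode() if passphrase else None
    with open(key_path, "rb") as f:
        return serialization.load_pem_private_key(
            f.read(), password=password, backend=default_backend()
        )
```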
|
{"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/connections.py b/plugins/snowflake/dbt/adapters/snowflake/connections.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/connections.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/connections.py\n@@ -129,13 +129,18 @@\n \n def _get_private_key(self):\n \"\"\"Get Snowflake private key by path or None.\"\"\"\n- if not self.private_key_path or self.private_key_passphrase is None:\n+ if not self.private_key_path:\n return None\n \n+ if self.private_key_passphrase:\n+ encoded_passphrase = self.private_key_passphrase.encode()\n+ else:\n+ encoded_passphrase = None\n+\n with open(self.private_key_path, 'rb') as key:\n p_key = serialization.load_pem_private_key(\n key.read(),\n- password=self.private_key_passphrase.encode(),\n+ password=encoded_passphrase,\n backend=default_backend())\n \n return p_key.private_bytes(\n", "issue": "Allow no-passphrase private keys\n### Describe the bug\r\nThe Snowflake connector (at least) requires a passphrase in the profile file to open a private key connection.\r\n\r\n### Steps To Reproduce\r\nCreate a dbt target like the following:\r\n```\r\n qa:\r\n type: snowflake\r\n account: my_account\r\n user: my_user\r\n role: ANALYST\r\n\r\n # Keypair config\r\n private_key_path: \"path/to/my/no/passphrase/key\"\r\n private_key_passphrase: None\r\n\r\n database: DB\r\n warehouse: WH\r\n schema: PUBLIC\r\n threads: 1\r\n client_session_keep_alive: False\r\n```\r\nAttempt to run against said DBT target. DBT will fail because no passphrase is provided. If, instead, a passphrase is provided, the connection will fail because the key is not encrypted.\r\n\r\n### Expected behavior\r\nPerhaps a warning in output that unencrypted keys are not the norm, requiring additional setting of override field in profile. 
If that's set, go ahead with the unencrypted key.\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [ ] redshift\r\n- [ ] bigquery\r\n- [ x] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\n0.14.2\r\n```\r\n\r\n**The operating system you're using:**\r\nOSX\r\n**The output of `python --version`:**\r\nPython 3.7.3\r\n\n", "before_files": [{"content": "import base64\nimport datetime\nimport pytz\nimport re\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom io import StringIO\nfrom typing import Optional\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nimport requests\nimport snowflake.connector\nimport snowflake.connector.errors\n\nfrom dbt.exceptions import (\n InternalException, RuntimeException, FailedToConnectException,\n DatabaseException, warn_or_error\n)\nfrom dbt.adapters.base import Credentials\nfrom dbt.adapters.sql import SQLConnectionManager\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\n\n_TOKEN_REQUEST_URL = 'https://{}.snowflakecomputing.com/oauth/token-request'\n\n\n@dataclass\nclass SnowflakeCredentials(Credentials):\n account: str\n user: str\n warehouse: Optional[str]\n role: Optional[str]\n password: Optional[str]\n authenticator: Optional[str]\n private_key_path: Optional[str]\n private_key_passphrase: Optional[str]\n token: Optional[str]\n oauth_client_id: Optional[str]\n oauth_client_secret: Optional[str]\n client_session_keep_alive: bool = False\n\n def __post_init__(self):\n if (\n self.authenticator != 'oauth' and\n (self.oauth_client_secret or self.oauth_client_id or self.token)\n ):\n # the user probably forgot to set 'authenticator' like I keep doing\n warn_or_error(\n 'Authenticator is not set to oauth, but an oauth-only '\n 'parameter is set! 
Did you mean to set authenticator: oauth?'\n )\n\n @property\n def type(self):\n return 'snowflake'\n\n def _connection_keys(self):\n return (\n 'account', 'user', 'database', 'schema', 'warehouse', 'role',\n 'client_session_keep_alive'\n )\n\n def auth_args(self):\n # Pull all of the optional authentication args for the connector,\n # let connector handle the actual arg validation\n result = {}\n if self.password:\n result['password'] = self.password\n if self.authenticator:\n result['authenticator'] = self.authenticator\n if self.authenticator == 'oauth':\n token = self.token\n # if we have a client ID/client secret, the token is a refresh\n # token, not an access token\n if self.oauth_client_id and self.oauth_client_secret:\n token = self._get_access_token()\n elif self.oauth_client_id:\n warn_or_error(\n 'Invalid profile: got an oauth_client_id, but not an '\n 'oauth_client_secret!'\n )\n elif self.oauth_client_secret:\n warn_or_error(\n 'Invalid profile: got an oauth_client_secret, but not '\n 'an oauth_client_id!'\n )\n\n result['token'] = token\n result['private_key'] = self._get_private_key()\n return result\n\n def _get_access_token(self) -> str:\n if self.authenticator != 'oauth':\n raise InternalException('Can only get access tokens for oauth')\n missing = any(\n x is None for x in\n (self.oauth_client_id, self.oauth_client_secret, self.token)\n )\n if missing:\n raise InternalException(\n 'need a client ID a client secret, and a refresh token to get '\n 'an access token'\n )\n # should the full url be a config item?\n token_url = _TOKEN_REQUEST_URL.format(self.account)\n # I think this is only used to redirect on success, which we ignore\n # (it does not have to match the integration's settings in snowflake)\n redirect_uri = 'http://localhost:9999'\n data = {\n 'grant_type': 'refresh_token',\n 'refresh_token': self.token,\n 'redirect_uri': redirect_uri\n }\n\n auth = base64.b64encode(\n f'{self.oauth_client_id}:{self.oauth_client_secret}'\n .encode('ascii')\n ).decode('ascii')\n headers = {\n 'Authorization': f'Basic {auth}',\n 'Content-type': 'application/x-www-form-urlencoded;charset=utf-8'\n }\n result = requests.post(token_url, headers=headers, data=data)\n result_json = result.json()\n if 'access_token' not in result_json:\n raise DatabaseException(f'Did not get a token: {result_json}')\n return result_json['access_token']\n\n def _get_private_key(self):\n \"\"\"Get Snowflake private key by path or None.\"\"\"\n if not self.private_key_path or self.private_key_passphrase is None:\n return None\n\n with open(self.private_key_path, 'rb') as key:\n p_key = serialization.load_pem_private_key(\n key.read(),\n password=self.private_key_passphrase.encode(),\n backend=default_backend())\n\n return p_key.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption())\n\n\nclass SnowflakeConnectionManager(SQLConnectionManager):\n TYPE = 'snowflake'\n\n @contextmanager\n def exception_handler(self, sql):\n try:\n yield\n except snowflake.connector.errors.ProgrammingError as e:\n msg = str(e)\n\n logger.debug('Snowflake error: {}'.format(msg))\n\n if 'Empty SQL statement' in msg:\n logger.debug(\"got empty sql statement, moving on\")\n elif 'This session does not have a current database' in msg:\n self.release()\n raise FailedToConnectException(\n ('{}\\n\\nThis error sometimes occurs when invalid '\n 'credentials are provided, or when your default role '\n 'does not have access to use the specified 
database. '\n 'Please double check your profile and try again.')\n .format(msg))\n else:\n self.release()\n raise DatabaseException(msg)\n except Exception as e:\n logger.debug(\"Error running SQL: {}\", sql)\n logger.debug(\"Rolling back transaction.\")\n self.release()\n if isinstance(e, RuntimeException):\n # during a sql query, an internal to dbt exception was raised.\n # this sounds a lot like a signal handler and probably has\n # useful information, so raise it without modification.\n raise\n raise RuntimeException(str(e)) from e\n\n @classmethod\n def open(cls, connection):\n if connection.state == 'open':\n logger.debug('Connection is already open, skipping open.')\n return connection\n\n try:\n creds = connection.credentials\n\n handle = snowflake.connector.connect(\n account=creds.account,\n user=creds.user,\n database=creds.database,\n schema=creds.schema,\n warehouse=creds.warehouse,\n role=creds.role,\n autocommit=False,\n client_session_keep_alive=creds.client_session_keep_alive,\n application='dbt',\n **creds.auth_args()\n )\n\n connection.handle = handle\n connection.state = 'open'\n except snowflake.connector.errors.Error as e:\n logger.debug(\"Got an error when attempting to open a snowflake \"\n \"connection: '{}'\"\n .format(e))\n\n connection.handle = None\n connection.state = 'fail'\n\n raise FailedToConnectException(str(e))\n\n def cancel(self, connection):\n handle = connection.handle\n sid = handle.session_id\n\n connection_name = connection.name\n\n sql = 'select system$abort_session({})'.format(sid)\n\n logger.debug(\"Cancelling query '{}' ({})\".format(connection_name, sid))\n\n _, cursor = self.add_query(sql)\n res = cursor.fetchone()\n\n logger.debug(\"Cancel query '{}': {}\".format(connection_name, res))\n\n @classmethod\n def get_status(cls, cursor):\n state = cursor.sqlstate\n\n if state is None:\n state = 'SUCCESS'\n\n return \"{} {}\".format(state, cursor.rowcount)\n\n @classmethod\n def _split_queries(cls, sql):\n \"Splits sql statements at semicolons into discrete queries\"\n\n sql_s = str(sql)\n sql_buf = StringIO(sql_s)\n split_query = snowflake.connector.util_text.split_statements(sql_buf)\n return [part[0] for part in split_query]\n\n @classmethod\n def process_results(cls, column_names, rows):\n # Override for Snowflake. The datetime objects returned by\n # snowflake-connector-python are not pickleable, so we need\n # to replace them with sane timezones\n fixed = []\n for row in rows:\n fixed_row = []\n for col in row:\n if isinstance(col, datetime.datetime) and col.tzinfo:\n offset = col.utcoffset()\n offset_seconds = offset.total_seconds()\n new_timezone = pytz.FixedOffset(offset_seconds // 60)\n col = col.astimezone(tz=new_timezone)\n fixed_row.append(col)\n\n fixed.append(fixed_row)\n\n return super().process_results(column_names, fixed)\n\n def add_query(self, sql, auto_begin=True,\n bindings=None, abridge_sql_log=False):\n\n connection = None\n cursor = None\n\n if bindings:\n # The snowflake connector is more strict than, eg., psycopg2 -\n # which allows any iterable thing to be passed as a binding.\n bindings = tuple(bindings)\n\n queries = self._split_queries(sql)\n\n for individual_query in queries:\n # hack -- after the last ';', remove comments and don't run\n # empty queries. 
this avoids using exceptions as flow control,\n # and also allows us to return the status of the last cursor\n without_comments = re.sub(\n re.compile('^.*(--.*)$', re.MULTILINE),\n '', individual_query).strip()\n\n if without_comments == \"\":\n continue\n\n connection, cursor = super().add_query(\n individual_query, auto_begin,\n bindings=bindings,\n abridge_sql_log=abridge_sql_log\n )\n\n if cursor is None:\n conn = self.get_thread_connection()\n if conn is None or conn.name is None:\n conn_name = '<None>'\n else:\n conn_name = conn.name\n\n raise RuntimeException(\n \"Tried to run an empty query on model '{}'. If you are \"\n \"conditionally running\\nsql, eg. in a model hook, make \"\n \"sure your `else` clause contains valid sql!\\n\\n\"\n \"Provided SQL:\\n{}\"\n .format(conn_name, sql)\n )\n\n return connection, cursor\n\n @classmethod\n def _rollback_handle(cls, connection):\n \"\"\"On snowflake, rolling back the handle of an aborted session raises\n an exception.\n \"\"\"\n logger.debug('initiating rollback')\n try:\n connection.handle.rollback()\n except snowflake.connector.errors.ProgrammingError as e:\n msg = str(e)\n if 'Session no longer exists' not in msg:\n raise\n", "path": "plugins/snowflake/dbt/adapters/snowflake/connections.py"}]}
| 4,078 | 230 |
gh_patches_debug_5639 | rasdani/github-patches | git_diff | mars-project__mars-613 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]The parameter `open_browser` of `new_cluster` doesn't work
**Describe the bug**
In `new_cluster`, we use
```python
open_browser = open_browser or options.deploy.open_browser
```
to decide if we should open the browser after web worker available. When `open_browser` is `False`, it will still fall back to `options.deploy.open_browser` and open the browser.
**To Reproduce**
To help us reproducing this bug, please provide information below:
1. Your Python version: 3.7
2. The version of Mars you use: master
3. Versions of crucial packages, such as numpy, scipy and protobuf
4. Full stack of the error.
5. Minimized code to reproduce the error.
**Expected behavior**
The web browser shouldn't be opened when `open_browser` is `False`.
</issue>
<code>
[start of mars/deploy/local/core.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2018 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 from __future__ import print_function
18
19 import atexit
20 import multiprocessing
21 import os
22 import signal
23 import sys
24 import time
25
26 from ...actors import create_actor_pool
27 from ...compat import six, TimeoutError # pylint: disable=W0622
28 from ...config import options
29 from ...lib import gipc
30 from ...resource import cpu_count
31 from ...scheduler.service import SchedulerService
32 from ...session import new_session
33 from ...utils import get_next_port
34 from ...worker.service import WorkerService
35 from .distributor import gen_distributor
36
37 _local_cluster_clients = dict()
38 atexit.register(lambda: [v.stop() for v in list(_local_cluster_clients.values())])
39
40
41 class LocalDistributedCluster(object):
42
43 # at least 2 process are required by scheduler and worker
44 MIN_SCHEDULER_N_PROCESS = 2
45 MIN_WORKER_N_PROCESS = 2
46
47 def __init__(self, endpoint, n_process=None, scheduler_n_process=None,
48 worker_n_process=None, ignore_avail_mem=True, shared_memory=None):
49 self._endpoint = endpoint
50
51 self._started = False
52 self._stopped = False
53
54 self._pool = None
55 self._scheduler_service = SchedulerService()
56 self._worker_service = WorkerService(ignore_avail_mem=ignore_avail_mem,
57 cache_mem_limit=shared_memory)
58
59 self._scheduler_n_process, self._worker_n_process = \
60 self._calc_scheduler_worker_n_process(n_process,
61 scheduler_n_process,
62 worker_n_process)
63
64 @property
65 def pool(self):
66 return self._pool
67
68 @classmethod
69 def _calc_scheduler_worker_n_process(cls, n_process, scheduler_n_process, worker_n_process,
70 calc_cpu_count=cpu_count):
71 n_scheduler, n_worker = scheduler_n_process, worker_n_process
72
73 if n_scheduler is None and n_worker is None:
74 n_scheduler = cls.MIN_SCHEDULER_N_PROCESS
75 n_process = n_process if n_process is not None else calc_cpu_count() + n_scheduler
76 n_worker = max(n_process - n_scheduler, cls.MIN_WORKER_N_PROCESS)
77 elif n_scheduler is None or n_worker is None:
78 # one of scheduler and worker n_process provided
79 if n_scheduler is None:
80 n_process = n_process if n_process is not None else calc_cpu_count()
81 n_scheduler = max(n_process - n_worker, cls.MIN_SCHEDULER_N_PROCESS)
82 else:
83 assert n_worker is None
84 n_process = n_process if n_process is not None else calc_cpu_count() + n_scheduler
85 n_worker = max(n_process - n_scheduler, cls.MIN_WORKER_N_PROCESS)
86
87 return n_scheduler, n_worker
88
89 def _make_sure_scheduler_ready(self, timeout=120):
90 check_start_time = time.time()
91 while True:
92 workers_meta = self._scheduler_service._resource_ref.get_workers_meta()
93 if not workers_meta:
94 # wait for worker to report status
95 self._pool.sleep(.5)
96 if time.time() - check_start_time > timeout: # pragma: no cover
97 raise TimeoutError('Check worker ready timed out.')
98 else:
99 break
100
101 def start_service(self):
102 if self._started:
103 return
104 self._started = True
105
106 # start plasma
107 self._worker_service.start_plasma()
108
109 # start actor pool
110 n_process = self._scheduler_n_process + self._worker_n_process
111 distributor = gen_distributor(self._scheduler_n_process, self._worker_n_process)
112 self._pool = create_actor_pool(self._endpoint, n_process, distributor=distributor)
113
114 # start scheduler first
115 self._scheduler_service.start(self._endpoint, None, self._pool)
116
117 # start worker next
118 self._worker_service.start(self._endpoint, self._pool, distributed=False,
119 schedulers=[self._endpoint],
120 process_start_index=self._scheduler_n_process)
121
122 # make sure scheduler is ready
123 self._make_sure_scheduler_ready()
124
125 def stop_service(self):
126 if self._stopped:
127 return
128
129 self._stopped = True
130 try:
131 self._scheduler_service.stop(self._pool)
132 self._worker_service.stop()
133 finally:
134 self._pool.stop()
135
136 def serve_forever(self):
137 try:
138 self._pool.join()
139 finally:
140 self.stop_service()
141
142 def __enter__(self):
143 self.start_service()
144 return self
145
146 def __exit__(self, *_):
147 self.stop_service()
148
149
150 def gen_endpoint(address):
151 port = None
152 tries = 5 # retry for 5 times
153
154 for i in range(tries):
155 try:
156 port = get_next_port()
157 break
158 except SystemError:
159 if i < tries - 1:
160 continue
161 raise
162
163 return '{0}:{1}'.format(address, port)
164
165
166 def _start_cluster(endpoint, event, n_process=None, shared_memory=None, **kw):
167 cluster = LocalDistributedCluster(endpoint, n_process=n_process,
168 shared_memory=shared_memory, **kw)
169 cluster.start_service()
170 event.set()
171 try:
172 cluster.serve_forever()
173 finally:
174 cluster.stop_service()
175
176
177 def _start_cluster_process(endpoint, n_process, shared_memory, **kw):
178 event = multiprocessing.Event()
179
180 kw = kw.copy()
181 kw['n_process'] = n_process
182 kw['shared_memory'] = shared_memory or '20%'
183 process = gipc.start_process(_start_cluster, args=(endpoint, event), kwargs=kw)
184
185 while True:
186 event.wait(5)
187 if not event.is_set():
188 # service not started yet
189 continue
190 if not process.is_alive():
191 raise SystemError('New local cluster failed')
192 else:
193 break
194
195 return process
196
197
198 def _start_web(scheduler_address, ui_port, event):
199 import gevent.monkey
200 gevent.monkey.patch_all(thread=False)
201
202 from ...web import MarsWeb
203
204 web = MarsWeb(ui_port, scheduler_address)
205 try:
206 web.start(event=event, block=True)
207 finally:
208 web.stop()
209
210
211 def _start_web_process(scheduler_endpoint, web_endpoint):
212 web_event = multiprocessing.Event()
213 ui_port = int(web_endpoint.rsplit(':', 1)[1])
214 web_process = gipc.start_process(
215 _start_web, args=(scheduler_endpoint, ui_port, web_event), daemon=True)
216
217 while True:
218 web_event.wait(5)
219 if not web_event.is_set():
220 # web not started yet
221 continue
222 if not web_process.is_alive():
223 raise SystemError('New web interface failed')
224 else:
225 break
226
227 return web_process
228
229
230 class LocalDistributedClusterClient(object):
231 def __init__(self, endpoint, web_endpoint, cluster_process, web_process):
232 self._cluster_process = cluster_process
233 self._web_process = web_process
234 self._endpoint = endpoint
235 self._web_endpoint = web_endpoint
236 self._session = new_session(endpoint).as_default()
237
238 @property
239 def endpoint(self):
240 return self._endpoint
241
242 @property
243 def web_endpoint(self):
244 return self._web_endpoint
245
246 @property
247 def session(self):
248 return self._session
249
250 def __enter__(self):
251 return self
252
253 def __exit__(self, *_):
254 self.stop()
255
256 @staticmethod
257 def _ensure_process_finish(proc):
258 if proc is None or not proc.is_alive():
259 return
260 proc.join(3)
261
262 # in case the process does not finish
263 if proc.is_alive(): # pragma: no cover
264 try:
265 import psutil
266 for subproc in psutil.Process(proc.pid).children(recursive=True):
267 try:
268 subproc.kill()
269 except psutil.NoSuchProcess: # pragma: no cover
270 pass
271 except ImportError:
272 pass
273 finally:
274 proc.terminate()
275
276 def stop(self):
277 try:
278 del _local_cluster_clients[id(self)]
279 except KeyError: # pragma: no cover
280 pass
281
282 if self._cluster_process.is_alive():
283 os.kill(self._cluster_process.pid, signal.SIGINT)
284 if self._web_process is not None and self._web_process.is_alive():
285 os.kill(self._web_process.pid, signal.SIGINT)
286
287 self._ensure_process_finish(self._cluster_process)
288 self._ensure_process_finish(self._web_process)
289
290
291 def new_cluster(address='0.0.0.0', web=False, n_process=None, shared_memory=None,
292 open_browser=None, **kw):
293 open_browser = open_browser or options.deploy.open_browser
294 endpoint = gen_endpoint(address)
295 web_endpoint = None
296 if web is True:
297 web_endpoint = gen_endpoint('0.0.0.0')
298 elif isinstance(web, six.string_types):
299 if ':' in web:
300 web_endpoint = web
301 else:
302 web_endpoint = gen_endpoint(web)
303
304 process = _start_cluster_process(endpoint, n_process, shared_memory, **kw)
305
306 web_process = None
307 if web_endpoint:
308 web_process = _start_web_process(endpoint, web_endpoint)
309 print('Web endpoint started at http://%s' % web_endpoint, file=sys.stderr)
310 if open_browser:
311 import webbrowser
312 webbrowser.open_new_tab('http://%s' % web_endpoint)
313
314 client = LocalDistributedClusterClient(endpoint, web_endpoint, process, web_process)
315 _local_cluster_clients[id(client)] = client
316 return client
317
[end of mars/deploy/local/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mars/deploy/local/core.py b/mars/deploy/local/core.py
--- a/mars/deploy/local/core.py
+++ b/mars/deploy/local/core.py
@@ -290,7 +290,8 @@
def new_cluster(address='0.0.0.0', web=False, n_process=None, shared_memory=None,
open_browser=None, **kw):
- open_browser = open_browser or options.deploy.open_browser
+ if open_browser is None:
+ open_browser = options.deploy.open_browser
endpoint = gen_endpoint(address)
web_endpoint = None
if web is True:
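The essential change is distinguishing an explicit `False` from an unset value: `or` collapses both onto the config default. A minimal self-contained sketch of the corrected fallback (the default constant is a stand-in for `options.deploy.open_browser`):

```python
DEFAULT_OPEN_BROWSER = True  # stand-in for options.deploy.open_browser

def resolve_open_browser(open_browser=None):
    # The buggy form `open_browser or DEFAULT_OPEN_BROWSER` would turn an
    # explicit False back into True; testing for None keeps False intact.
    if open_browser is None:
        open_browser = DEFAULT_OPEN_BROWSER
    return open_browser

assert resolve_open_browser(False) is False
assert resolve_open_browser() is True
```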
|
{"golden_diff": "diff --git a/mars/deploy/local/core.py b/mars/deploy/local/core.py\n--- a/mars/deploy/local/core.py\n+++ b/mars/deploy/local/core.py\n@@ -290,7 +290,8 @@\n \n def new_cluster(address='0.0.0.0', web=False, n_process=None, shared_memory=None,\n open_browser=None, **kw):\n- open_browser = open_browser or options.deploy.open_browser\n+ if open_browser is None:\n+ open_browser = options.deploy.open_browser\n endpoint = gen_endpoint(address)\n web_endpoint = None\n if web is True:\n", "issue": "[BUG]The parameter `open_browser` of `new_cluster` doesn't work\n**Describe the bug**\r\n\r\nIn `new_cluster`, we use \r\n\r\n```python\r\nopen_browser = open_browser or options.deploy.open_browser\r\n```\r\n\r\nto decide if we should open the browser after web worker available. When `open_browser` is `False`, it will still fall back to `options.deploy.open_browser` and open the browser.\r\n\r\n**To Reproduce**\r\n\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version: 3.7\r\n2. The version of Mars you use: master\r\n3. Versions of crucial packages, such as numpy, scipy and protobuf\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n**Expected behavior**\r\n\r\nThe web browser shouldn't be opened when `open_browser` is `False`.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport atexit\nimport multiprocessing\nimport os\nimport signal\nimport sys\nimport time\n\nfrom ...actors import create_actor_pool\nfrom ...compat import six, TimeoutError # pylint: disable=W0622\nfrom ...config import options\nfrom ...lib import gipc\nfrom ...resource import cpu_count\nfrom ...scheduler.service import SchedulerService\nfrom ...session import new_session\nfrom ...utils import get_next_port\nfrom ...worker.service import WorkerService\nfrom .distributor import gen_distributor\n\n_local_cluster_clients = dict()\natexit.register(lambda: [v.stop() for v in list(_local_cluster_clients.values())])\n\n\nclass LocalDistributedCluster(object):\n\n # at least 2 process are required by scheduler and worker\n MIN_SCHEDULER_N_PROCESS = 2\n MIN_WORKER_N_PROCESS = 2\n\n def __init__(self, endpoint, n_process=None, scheduler_n_process=None,\n worker_n_process=None, ignore_avail_mem=True, shared_memory=None):\n self._endpoint = endpoint\n\n self._started = False\n self._stopped = False\n\n self._pool = None\n self._scheduler_service = SchedulerService()\n self._worker_service = WorkerService(ignore_avail_mem=ignore_avail_mem,\n cache_mem_limit=shared_memory)\n\n self._scheduler_n_process, self._worker_n_process = \\\n self._calc_scheduler_worker_n_process(n_process,\n scheduler_n_process,\n worker_n_process)\n\n @property\n def pool(self):\n return self._pool\n\n @classmethod\n def _calc_scheduler_worker_n_process(cls, n_process, scheduler_n_process, worker_n_process,\n 
calc_cpu_count=cpu_count):\n n_scheduler, n_worker = scheduler_n_process, worker_n_process\n\n if n_scheduler is None and n_worker is None:\n n_scheduler = cls.MIN_SCHEDULER_N_PROCESS\n n_process = n_process if n_process is not None else calc_cpu_count() + n_scheduler\n n_worker = max(n_process - n_scheduler, cls.MIN_WORKER_N_PROCESS)\n elif n_scheduler is None or n_worker is None:\n # one of scheduler and worker n_process provided\n if n_scheduler is None:\n n_process = n_process if n_process is not None else calc_cpu_count()\n n_scheduler = max(n_process - n_worker, cls.MIN_SCHEDULER_N_PROCESS)\n else:\n assert n_worker is None\n n_process = n_process if n_process is not None else calc_cpu_count() + n_scheduler\n n_worker = max(n_process - n_scheduler, cls.MIN_WORKER_N_PROCESS)\n\n return n_scheduler, n_worker\n\n def _make_sure_scheduler_ready(self, timeout=120):\n check_start_time = time.time()\n while True:\n workers_meta = self._scheduler_service._resource_ref.get_workers_meta()\n if not workers_meta:\n # wait for worker to report status\n self._pool.sleep(.5)\n if time.time() - check_start_time > timeout: # pragma: no cover\n raise TimeoutError('Check worker ready timed out.')\n else:\n break\n\n def start_service(self):\n if self._started:\n return\n self._started = True\n\n # start plasma\n self._worker_service.start_plasma()\n\n # start actor pool\n n_process = self._scheduler_n_process + self._worker_n_process\n distributor = gen_distributor(self._scheduler_n_process, self._worker_n_process)\n self._pool = create_actor_pool(self._endpoint, n_process, distributor=distributor)\n\n # start scheduler first\n self._scheduler_service.start(self._endpoint, None, self._pool)\n\n # start worker next\n self._worker_service.start(self._endpoint, self._pool, distributed=False,\n schedulers=[self._endpoint],\n process_start_index=self._scheduler_n_process)\n\n # make sure scheduler is ready\n self._make_sure_scheduler_ready()\n\n def stop_service(self):\n if self._stopped:\n return\n\n self._stopped = True\n try:\n self._scheduler_service.stop(self._pool)\n self._worker_service.stop()\n finally:\n self._pool.stop()\n\n def serve_forever(self):\n try:\n self._pool.join()\n finally:\n self.stop_service()\n\n def __enter__(self):\n self.start_service()\n return self\n\n def __exit__(self, *_):\n self.stop_service()\n\n\ndef gen_endpoint(address):\n port = None\n tries = 5 # retry for 5 times\n\n for i in range(tries):\n try:\n port = get_next_port()\n break\n except SystemError:\n if i < tries - 1:\n continue\n raise\n\n return '{0}:{1}'.format(address, port)\n\n\ndef _start_cluster(endpoint, event, n_process=None, shared_memory=None, **kw):\n cluster = LocalDistributedCluster(endpoint, n_process=n_process,\n shared_memory=shared_memory, **kw)\n cluster.start_service()\n event.set()\n try:\n cluster.serve_forever()\n finally:\n cluster.stop_service()\n\n\ndef _start_cluster_process(endpoint, n_process, shared_memory, **kw):\n event = multiprocessing.Event()\n\n kw = kw.copy()\n kw['n_process'] = n_process\n kw['shared_memory'] = shared_memory or '20%'\n process = gipc.start_process(_start_cluster, args=(endpoint, event), kwargs=kw)\n\n while True:\n event.wait(5)\n if not event.is_set():\n # service not started yet\n continue\n if not process.is_alive():\n raise SystemError('New local cluster failed')\n else:\n break\n\n return process\n\n\ndef _start_web(scheduler_address, ui_port, event):\n import gevent.monkey\n gevent.monkey.patch_all(thread=False)\n\n from ...web import MarsWeb\n\n web = 
MarsWeb(ui_port, scheduler_address)\n try:\n web.start(event=event, block=True)\n finally:\n web.stop()\n\n\ndef _start_web_process(scheduler_endpoint, web_endpoint):\n web_event = multiprocessing.Event()\n ui_port = int(web_endpoint.rsplit(':', 1)[1])\n web_process = gipc.start_process(\n _start_web, args=(scheduler_endpoint, ui_port, web_event), daemon=True)\n\n while True:\n web_event.wait(5)\n if not web_event.is_set():\n # web not started yet\n continue\n if not web_process.is_alive():\n raise SystemError('New web interface failed')\n else:\n break\n\n return web_process\n\n\nclass LocalDistributedClusterClient(object):\n def __init__(self, endpoint, web_endpoint, cluster_process, web_process):\n self._cluster_process = cluster_process\n self._web_process = web_process\n self._endpoint = endpoint\n self._web_endpoint = web_endpoint\n self._session = new_session(endpoint).as_default()\n\n @property\n def endpoint(self):\n return self._endpoint\n\n @property\n def web_endpoint(self):\n return self._web_endpoint\n\n @property\n def session(self):\n return self._session\n\n def __enter__(self):\n return self\n\n def __exit__(self, *_):\n self.stop()\n\n @staticmethod\n def _ensure_process_finish(proc):\n if proc is None or not proc.is_alive():\n return\n proc.join(3)\n\n # in case the process does not finish\n if proc.is_alive(): # pragma: no cover\n try:\n import psutil\n for subproc in psutil.Process(proc.pid).children(recursive=True):\n try:\n subproc.kill()\n except psutil.NoSuchProcess: # pragma: no cover\n pass\n except ImportError:\n pass\n finally:\n proc.terminate()\n\n def stop(self):\n try:\n del _local_cluster_clients[id(self)]\n except KeyError: # pragma: no cover\n pass\n\n if self._cluster_process.is_alive():\n os.kill(self._cluster_process.pid, signal.SIGINT)\n if self._web_process is not None and self._web_process.is_alive():\n os.kill(self._web_process.pid, signal.SIGINT)\n\n self._ensure_process_finish(self._cluster_process)\n self._ensure_process_finish(self._web_process)\n\n\ndef new_cluster(address='0.0.0.0', web=False, n_process=None, shared_memory=None,\n open_browser=None, **kw):\n open_browser = open_browser or options.deploy.open_browser\n endpoint = gen_endpoint(address)\n web_endpoint = None\n if web is True:\n web_endpoint = gen_endpoint('0.0.0.0')\n elif isinstance(web, six.string_types):\n if ':' in web:\n web_endpoint = web\n else:\n web_endpoint = gen_endpoint(web)\n\n process = _start_cluster_process(endpoint, n_process, shared_memory, **kw)\n\n web_process = None\n if web_endpoint:\n web_process = _start_web_process(endpoint, web_endpoint)\n print('Web endpoint started at http://%s' % web_endpoint, file=sys.stderr)\n if open_browser:\n import webbrowser\n webbrowser.open_new_tab('http://%s' % web_endpoint)\n\n client = LocalDistributedClusterClient(endpoint, web_endpoint, process, web_process)\n _local_cluster_clients[id(client)] = client\n return client\n", "path": "mars/deploy/local/core.py"}]}
| 3,773 | 137 |
gh_patches_debug_37997 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1037 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Inaccurate transaction names for FastAPI sub-applications
**Description**
For requests to endpoints defined in FastAPI sub-applications, the mount path is chosen as the transaction name. I expected the full route of the endpoint.
**To Reproduce**
1. Run the following simple FastAPI app:
```python
import uvicorn
from elasticapm.contrib.starlette import ElasticAPM, make_apm_client
from fastapi import FastAPI
app = FastAPI()
sub = FastAPI()
app.mount("/sub", sub)
apm = make_apm_client(
{
"SERVICE_NAME": "sub-app-test",
}
)
app.add_middleware(ElasticAPM, client=apm)
@sub.get("/hi")
async def hi():
return "hi"
@sub.get("/bye")
async def bye():
return "bye"
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8888)
```
2.
- **Observed behavior**
The transactions of `/sub/hi` and `/sub/bye` are both named `/sub` and grouped.

- **Expected behavior**
The transactions of `/sub/hi` and `/sub/bye` are named according to the full route.
**Environment**
- OS:
- Client: Windows
- Server: Ubuntu
- Python version: 3.7.3
- Framework and version: `fastapi==0.61.2`
- APM Server version: docker image `elasticsearch/elasticsearch:7.10.2`
- Agent version: `elastic-apm==6.0.0`
</issue>
<code>
[start of elasticapm/contrib/starlette/__init__.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31
32 from __future__ import absolute_import
33
34 import starlette
35 from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
36 from starlette.requests import Request
37 from starlette.responses import Response
38 from starlette.routing import Match
39 from starlette.types import ASGIApp
40
41 import elasticapm
42 import elasticapm.instrumentation.control
43 from elasticapm.base import Client
44 from elasticapm.conf import constants
45 from elasticapm.contrib.asyncio.traces import set_context
46 from elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response
47 from elasticapm.utils.disttracing import TraceParent
48 from elasticapm.utils.logging import get_logger
49
50 logger = get_logger("elasticapm.errors.client")
51
52
53 def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:
54 """Builds ElasticAPM client.
55
56 Args:
57 config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.
58 client_cls (Client): Must be Client or its child.
59 **defaults: Additional parameters for Client. See `elasticapm.base.Client`
60
61 Returns:
62 Client
63 """
64 if "framework_name" not in defaults:
65 defaults["framework_name"] = "starlette"
66 defaults["framework_version"] = starlette.__version__
67
68 return client_cls(config, **defaults)
69
70
71 class ElasticAPM(BaseHTTPMiddleware):
72 """
73 Starlette / FastAPI middleware for Elastic APM capturing.
74
75 >>> elasticapm = make_apm_client({
76 >>> 'SERVICE_NAME': 'myapp',
77 >>> 'DEBUG': True,
78 >>> 'SERVER_URL': 'http://localhost:8200',
79 >>> 'CAPTURE_HEADERS': True,
80 >>> 'CAPTURE_BODY': 'all'
81 >>> })
82
83 >>> app.add_middleware(ElasticAPM, client=elasticapm)
84
85 Pass an arbitrary APP_NAME and SECRET_TOKEN::
86
87 >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')
88
89 Pass an explicit client::
90
91 >>> elasticapm = ElasticAPM(app, client=client)
92
93 Automatically configure logging::
94
95 >>> elasticapm = ElasticAPM(app, logging=True)
96
97 Capture an exception::
98
99 >>> try:
100 >>> 1 / 0
101 >>> except ZeroDivisionError:
102 >>> elasticapm.capture_exception()
103
104 Capture a message::
105
106 >>> elasticapm.capture_message('hello, world!')
107 """
108
109 def __init__(self, app: ASGIApp, client: Client):
110 """
111
112 Args:
113 app (ASGIApp): Starlette app
114 client (Client): ElasticAPM Client
115 """
116 self.client = client
117
118 if self.client.config.instrument and self.client.config.enabled:
119 elasticapm.instrumentation.control.instrument()
120
121 super().__init__(app)
122
123 async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:
124 """Processes the whole request APM capturing.
125
126 Args:
127 request (Request)
128 call_next (RequestResponseEndpoint): Next request process in Starlette.
129
130 Returns:
131 Response
132 """
133 await self._request_started(request)
134
135 try:
136 response = await call_next(request)
137 elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)
138 except Exception:
139 await self.capture_exception(
140 context={"request": await get_data_from_request(request, self.client.config, constants.ERROR)}
141 )
142 elasticapm.set_transaction_result("HTTP 5xx", override=False)
143 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)
144 elasticapm.set_context({"status_code": 500}, "response")
145
146 raise
147 else:
148 await self._request_finished(response)
149 finally:
150 self.client.end_transaction()
151
152 return response
153
154 async def capture_exception(self, *args, **kwargs):
155 """Captures your exception.
156
157 Args:
158 *args:
159 **kwargs:
160 """
161 self.client.capture_exception(*args, **kwargs)
162
163 async def capture_message(self, *args, **kwargs):
164 """Captures your message.
165
166 Args:
167 *args: Whatever
168 **kwargs: Whatever
169 """
170 self.client.capture_message(*args, **kwargs)
171
172 async def _request_started(self, request: Request):
173 """Captures the begin of the request processing to APM.
174
175 Args:
176 request (Request)
177 """
178 # When we consume the body, we replace the streaming mechanism with
179 # a mocked version -- this workaround came from
180 # https://github.com/encode/starlette/issues/495#issuecomment-513138055
181 # and we call the workaround here to make sure that regardless of
182 # `capture_body` settings, we will have access to the body if we need it.
183 if self.client.config.capture_body != "off":
184 await get_body(request)
185
186 if not self.client.should_ignore_url(request.url.path):
187 trace_parent = TraceParent.from_headers(dict(request.headers))
188 self.client.begin_transaction("request", trace_parent=trace_parent)
189
190 await set_context(
191 lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), "request"
192 )
193 transaction_name = self.get_route_name(request) or request.url.path
194 elasticapm.set_transaction_name("{} {}".format(request.method, transaction_name), override=False)
195
196 async def _request_finished(self, response: Response):
197 """Captures the end of the request processing to APM.
198
199 Args:
200 response (Response)
201 """
202 await set_context(
203 lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), "response"
204 )
205
206 result = "HTTP {}xx".format(response.status_code // 100)
207 elasticapm.set_transaction_result(result, override=False)
208
209 def get_route_name(self, request: Request) -> str:
210 route_name = None
211 app = request.app
212 scope = request.scope
213 routes = app.routes
214
215 for route in routes:
216 match, _ = route.matches(scope)
217 if match == Match.FULL:
218 route_name = route.path
219 break
220 elif match == Match.PARTIAL and route_name is None:
221 route_name = route.path
222 # Starlette magically redirects requests if the path matches a route name with a trailing slash
223 # appended or removed. To not spam the transaction names list, we do the same here and put these
224 # redirects all in the same "redirect trailing slashes" transaction name
225 if not route_name and app.router.redirect_slashes and scope["path"] != "/":
226 redirect_scope = dict(scope)
227 if scope["path"].endswith("/"):
228 redirect_scope["path"] = scope["path"][:-1]
229 trim = True
230 else:
231 redirect_scope["path"] = scope["path"] + "/"
232 trim = False
233 for route in routes:
234 match, _ = route.matches(redirect_scope)
235 if match != Match.NONE:
236 route_name = route.path + "/" if trim else route.path[:-1]
237 break
238 return route_name
239
[end of elasticapm/contrib/starlette/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py
--- a/elasticapm/contrib/starlette/__init__.py
+++ b/elasticapm/contrib/starlette/__init__.py
@@ -35,7 +35,7 @@
from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint
from starlette.requests import Request
from starlette.responses import Response
-from starlette.routing import Match
+from starlette.routing import Match, Mount
from starlette.types import ASGIApp
import elasticapm
@@ -207,18 +207,11 @@
elasticapm.set_transaction_result(result, override=False)
def get_route_name(self, request: Request) -> str:
- route_name = None
app = request.app
scope = request.scope
routes = app.routes
+ route_name = self._get_route_name(scope, routes)
- for route in routes:
- match, _ = route.matches(scope)
- if match == Match.FULL:
- route_name = route.path
- break
- elif match == Match.PARTIAL and route_name is None:
- route_name = route.path
# Starlette magically redirects requests if the path matches a route name with a trailing slash
# appended or removed. To not spam the transaction names list, we do the same here and put these
# redirects all in the same "redirect trailing slashes" transaction name
@@ -230,9 +223,23 @@
else:
redirect_scope["path"] = scope["path"] + "/"
trim = False
- for route in routes:
- match, _ = route.matches(redirect_scope)
- if match != Match.NONE:
- route_name = route.path + "/" if trim else route.path[:-1]
- break
+
+ route_name = self._get_route_name(redirect_scope, routes)
+ route_name = route_name + "/" if trim else route_name[:-1]
return route_name
+
+ def _get_route_name(self, scope, routes, route_name=None):
+ for route in routes:
+ match, child_scope = route.matches(scope)
+ if match == Match.FULL:
+ route_name = route.path
+ child_scope = {**scope, **child_scope}
+ if isinstance(route, Mount):
+ child_route_name = self._get_route_name(child_scope, route.routes, route_name)
+ if child_route_name is None:
+ route_name = None
+ else:
+ route_name += child_route_name
+ return route_name
+ elif match == Match.PARTIAL and route_name is None:
+ route_name = route.path
|
{"golden_diff": "diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py\n--- a/elasticapm/contrib/starlette/__init__.py\n+++ b/elasticapm/contrib/starlette/__init__.py\n@@ -35,7 +35,7 @@\n from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\n from starlette.requests import Request\n from starlette.responses import Response\n-from starlette.routing import Match\n+from starlette.routing import Match, Mount\n from starlette.types import ASGIApp\n \n import elasticapm\n@@ -207,18 +207,11 @@\n elasticapm.set_transaction_result(result, override=False)\n \n def get_route_name(self, request: Request) -> str:\n- route_name = None\n app = request.app\n scope = request.scope\n routes = app.routes\n+ route_name = self._get_route_name(scope, routes)\n \n- for route in routes:\n- match, _ = route.matches(scope)\n- if match == Match.FULL:\n- route_name = route.path\n- break\n- elif match == Match.PARTIAL and route_name is None:\n- route_name = route.path\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n@@ -230,9 +223,23 @@\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n- for route in routes:\n- match, _ = route.matches(redirect_scope)\n- if match != Match.NONE:\n- route_name = route.path + \"/\" if trim else route.path[:-1]\n- break\n+\n+ route_name = self._get_route_name(redirect_scope, routes)\n+ route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n+\n+ def _get_route_name(self, scope, routes, route_name=None):\n+ for route in routes:\n+ match, child_scope = route.matches(scope)\n+ if match == Match.FULL:\n+ route_name = route.path\n+ child_scope = {**scope, **child_scope}\n+ if isinstance(route, Mount):\n+ child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n+ if child_route_name is None:\n+ route_name = None\n+ else:\n+ route_name += child_route_name\n+ return route_name\n+ elif match == Match.PARTIAL and route_name is None:\n+ route_name = route.path\n", "issue": "Inaccurate transaction names for FastAPI sub-applications\n**Description**\r\nFor requests to endpoints defined in FastAPI sub-applications, the mount path is chosen as the transaction name. I expected the full route of the endpoint.\r\n\r\n**To Reproduce**\r\n\r\n1. Run the following simple FastAPI app:\r\n\r\n```python\r\nimport uvicorn\r\nfrom elasticapm.contrib.starlette import ElasticAPM, make_apm_client\r\nfrom fastapi import FastAPI\r\n\r\napp = FastAPI()\r\nsub = FastAPI()\r\napp.mount(\"/sub\", sub)\r\n\r\napm = make_apm_client(\r\n {\r\n \"SERVICE_NAME\": \"sub-app-test\",\r\n }\r\n)\r\n\r\napp.add_middleware(ElasticAPM, client=apm)\r\n\r\n\r\[email protected](\"/hi\")\r\nasync def hi():\r\n return \"hi\"\r\n\r\n\r\[email protected](\"/bye\")\r\nasync def bye():\r\n return \"bye\"\r\n\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host=\"0.0.0.0\", port=8888)\r\n```\r\n\r\n2. 
\r\n- **Observed behavior**\r\nThe transactions of `/sub/hi` and `/sub/bye` are both named `/sub` and grouped.\r\n\r\n\r\n- **Expected behavior**\r\nThe transactions of `/sub/hi` and `/sub/bye` are named according to the full route.\r\n\r\n\r\n**Environment**\r\n- OS: \r\n - Client: Windows\r\n - Server: Ubuntu\r\n- Python version: 3.7.3\r\n- Framework and version: `fastapi==0.61.2`\r\n- APM Server version: docker image `elasticsearch/elasticsearch:7.10.2` \r\n- Agent version: `elastic-apm==6.0.0`\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. 
See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await self.capture_exception(\n context={\"request\": await get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: 
get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n route_name = None\n app = request.app\n scope = request.scope\n routes = app.routes\n\n for route in routes:\n match, _ = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n break\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n for route in routes:\n match, _ = route.matches(redirect_scope)\n if match != Match.NONE:\n route_name = route.path + \"/\" if trim else route.path[:-1]\n break\n return route_name\n", "path": "elasticapm/contrib/starlette/__init__.py"}]}
| 3,473 | 607 |
gh_patches_debug_61634
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-484
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Metrics] add indexing synthetic sugar
Idea is to improve the current implementation of `Metric` and to be able to do the following:
```
# A custom class ConfusionMatrix
cm = ConfusionMatrix(num_classes=3, output_transform=output_gt_predicted_classes_bg)
# Instead of below lines
# from ignite.metrics import MetricsLambda
# IoU = MetricsLambda(lambda res: res[1:], (cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag())))
# We could have:
IoU = (cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag()))[1:]
mIoU = IoU.mean()
```
cc @zasdfgbnm
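A minimal sketch of how this could be wired up, assuming the same `MetricsLambda` delegation that the existing arithmetic operators on `Metric` already use (the method body below is illustrative, not a final implementation):
```python
# Sketch of indexing support inside ignite.metrics.Metric (illustrative only).
def __getitem__(self, index):
    from ignite.metrics import MetricsLambda
    # Defer the indexing until compute() time, exactly like __add__ and friends
    # defer their arithmetic through MetricsLambda.
    return MetricsLambda(lambda x: x[index], self)
```
With that in place, `(cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag()))[1:]` would itself be a `MetricsLambda`, so `.mean()` should keep working through the existing `__getattr__` forwarding.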
</issue>
<code>
[start of ignite/metrics/metric.py]
1 from abc import ABCMeta, abstractmethod
2 from ignite._six import with_metaclass
3 from ignite.engine import Events
4 import torch
5
6
7 class Metric(with_metaclass(ABCMeta, object)):
8 """
9 Base class for all Metrics.
10
11 Args:
12 output_transform (callable, optional): a callable that is used to transform the
13 :class:`~ignite.engine.Engine`'s `process_function`'s output into the
14 form expected by the metric. This can be useful if, for example, you have a multi-output model and
15 you want to compute the metric with respect to one of the outputs.
16
17 """
18
19 def __init__(self, output_transform=lambda x: x):
20 self._output_transform = output_transform
21 self.reset()
22
23 @abstractmethod
24 def reset(self):
25 """
26 Resets the metric to it's initial state.
27
28 This is called at the start of each epoch.
29 """
30 pass
31
32 @abstractmethod
33 def update(self, output):
34 """
35 Updates the metric's state using the passed batch output.
36
37 This is called once for each batch.
38
39 Args:
40 output: the is the output from the engine's process function.
41 """
42 pass
43
44 @abstractmethod
45 def compute(self):
46 """
47 Computes the metric based on it's accumulated state.
48
49 This is called at the end of each epoch.
50
51 Returns:
52 Any: the actual quantity of interest.
53
54 Raises:
55 NotComputableError: raised when the metric cannot be computed.
56 """
57 pass
58
59 def started(self, engine):
60 self.reset()
61
62 @torch.no_grad()
63 def iteration_completed(self, engine):
64 output = self._output_transform(engine.state.output)
65 self.update(output)
66
67 def completed(self, engine, name):
68 result = self.compute()
69 if torch.is_tensor(result) and len(result.shape) == 0:
70 result = result.item()
71 engine.state.metrics[name] = result
72
73 def attach(self, engine, name):
74 engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)
75 if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):
76 engine.add_event_handler(Events.EPOCH_STARTED, self.started)
77 if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):
78 engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
79
80 def __add__(self, other):
81 from ignite.metrics import MetricsLambda
82 return MetricsLambda(lambda x, y: x + y, self, other)
83
84 def __radd__(self, other):
85 from ignite.metrics import MetricsLambda
86 return MetricsLambda(lambda x, y: x + y, other, self)
87
88 def __sub__(self, other):
89 from ignite.metrics import MetricsLambda
90 return MetricsLambda(lambda x, y: x - y, self, other)
91
92 def __rsub__(self, other):
93 from ignite.metrics import MetricsLambda
94 return MetricsLambda(lambda x, y: x - y, other, self)
95
96 def __mul__(self, other):
97 from ignite.metrics import MetricsLambda
98 return MetricsLambda(lambda x, y: x * y, self, other)
99
100 def __rmul__(self, other):
101 from ignite.metrics import MetricsLambda
102 return MetricsLambda(lambda x, y: x * y, other, self)
103
104 def __pow__(self, other):
105 from ignite.metrics import MetricsLambda
106 return MetricsLambda(lambda x, y: x ** y, self, other)
107
108 def __rpow__(self, other):
109 from ignite.metrics import MetricsLambda
110 return MetricsLambda(lambda x, y: x ** y, other, self)
111
112 def __mod__(self, other):
113 from ignite.metrics import MetricsLambda
114 return MetricsLambda(lambda x, y: x % y, self, other)
115
116 def __div__(self, other):
117 from ignite.metrics import MetricsLambda
118 return MetricsLambda(lambda x, y: x.__div__(y), self, other)
119
120 def __rdiv__(self, other):
121 from ignite.metrics import MetricsLambda
122 return MetricsLambda(lambda x, y: x.__div__(y), other, self)
123
124 def __truediv__(self, other):
125 from ignite.metrics import MetricsLambda
126 return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)
127
128 def __rtruediv__(self, other):
129 from ignite.metrics import MetricsLambda
130 return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)
131
132 def __floordiv__(self, other):
133 from ignite.metrics import MetricsLambda
134 return MetricsLambda(lambda x, y: x // y, self, other)
135
136 def __getattr__(self, attr):
137 from ignite.metrics import MetricsLambda
138
139 def fn(x, *args, **kwargs):
140 return getattr(x, attr)(*args, **kwargs)
141
142 def wrapper(*args, **kwargs):
143 return MetricsLambda(fn, self, *args, **kwargs)
144 return wrapper
145
[end of ignite/metrics/metric.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py
--- a/ignite/metrics/metric.py
+++ b/ignite/metrics/metric.py
@@ -142,3 +142,7 @@
def wrapper(*args, **kwargs):
return MetricsLambda(fn, self, *args, **kwargs)
return wrapper
+
+ def __getitem__(self, index):
+ from ignite.metrics import MetricsLambda
+ return MetricsLambda(lambda x: x[index], self)
|
{"golden_diff": "diff --git a/ignite/metrics/metric.py b/ignite/metrics/metric.py\n--- a/ignite/metrics/metric.py\n+++ b/ignite/metrics/metric.py\n@@ -142,3 +142,7 @@\n def wrapper(*args, **kwargs):\n return MetricsLambda(fn, self, *args, **kwargs)\n return wrapper\n+\n+ def __getitem__(self, index):\n+ from ignite.metrics import MetricsLambda\n+ return MetricsLambda(lambda x: x[index], self)\n", "issue": "[Metrics] add indexing synthetic sugar\nIdea is to improve the current implementation of `Metric` and to be able to do the following:\r\n```\r\n# A custom class ConfusionMatrix\r\ncm = ConfusionMatrix(num_classes=3, output_transform=output_gt_predicted_classes_bg)\r\n\r\n# Instead of below lines\r\n# from ignite.metrics import MetricsLambda\r\n# IoU = MetricsLambda(lambda res: res[1:], (cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag())))\r\n# We could have: \r\nIoU = (cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag()))[1:]\r\nmIoU = IoU.mean()\r\n```\r\n\r\ncc @zasdfgbnm \n", "before_files": [{"content": "from abc import ABCMeta, abstractmethod\nfrom ignite._six import with_metaclass\nfrom ignite.engine import Events\nimport torch\n\n\nclass Metric(with_metaclass(ABCMeta, object)):\n \"\"\"\n Base class for all Metrics.\n\n Args:\n output_transform (callable, optional): a callable that is used to transform the\n :class:`~ignite.engine.Engine`'s `process_function`'s output into the\n form expected by the metric. This can be useful if, for example, you have a multi-output model and\n you want to compute the metric with respect to one of the outputs.\n\n \"\"\"\n\n def __init__(self, output_transform=lambda x: x):\n self._output_transform = output_transform\n self.reset()\n\n @abstractmethod\n def reset(self):\n \"\"\"\n Resets the metric to it's initial state.\n\n This is called at the start of each epoch.\n \"\"\"\n pass\n\n @abstractmethod\n def update(self, output):\n \"\"\"\n Updates the metric's state using the passed batch output.\n\n This is called once for each batch.\n\n Args:\n output: the is the output from the engine's process function.\n \"\"\"\n pass\n\n @abstractmethod\n def compute(self):\n \"\"\"\n Computes the metric based on it's accumulated state.\n\n This is called at the end of each epoch.\n\n Returns:\n Any: the actual quantity of interest.\n\n Raises:\n NotComputableError: raised when the metric cannot be computed.\n \"\"\"\n pass\n\n def started(self, engine):\n self.reset()\n\n @torch.no_grad()\n def iteration_completed(self, engine):\n output = self._output_transform(engine.state.output)\n self.update(output)\n\n def completed(self, engine, name):\n result = self.compute()\n if torch.is_tensor(result) and len(result.shape) == 0:\n result = result.item()\n engine.state.metrics[name] = result\n\n def attach(self, engine, name):\n engine.add_event_handler(Events.EPOCH_COMPLETED, self.completed, name)\n if not engine.has_event_handler(self.started, Events.EPOCH_STARTED):\n engine.add_event_handler(Events.EPOCH_STARTED, self.started)\n if not engine.has_event_handler(self.iteration_completed, Events.ITERATION_COMPLETED):\n engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)\n\n def __add__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x + y, self, other)\n\n def __radd__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x + y, other, self)\n\n def __sub__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda 
x, y: x - y, self, other)\n\n def __rsub__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x - y, other, self)\n\n def __mul__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x * y, self, other)\n\n def __rmul__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x * y, other, self)\n\n def __pow__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x ** y, self, other)\n\n def __rpow__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x ** y, other, self)\n\n def __mod__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x % y, self, other)\n\n def __div__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__div__(y), self, other)\n\n def __rdiv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__div__(y), other, self)\n\n def __truediv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)\n\n def __rtruediv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)\n\n def __floordiv__(self, other):\n from ignite.metrics import MetricsLambda\n return MetricsLambda(lambda x, y: x // y, self, other)\n\n def __getattr__(self, attr):\n from ignite.metrics import MetricsLambda\n\n def fn(x, *args, **kwargs):\n return getattr(x, attr)(*args, **kwargs)\n\n def wrapper(*args, **kwargs):\n return MetricsLambda(fn, self, *args, **kwargs)\n return wrapper\n", "path": "ignite/metrics/metric.py"}]}
| 2,119 | 114 |
gh_patches_debug_27040
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1005
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Imported ratings added as reviews
During a goodreads import, star ratings seem to be added as Reviews, rather than ReviewRatings
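A hedged sketch of the intended branching inside `handle_imported_book` (model and variable names are taken from the surrounding code; treat this as an illustration rather than the final patch):
```python
# Sketch: only build a full Review when there is review text; a bare star
# rating should become a ReviewRating instead of an empty Review.
if item.review:
    models.Review.objects.create(
        user=user, book=item.book, name=review_title,
        content=item.review, rating=item.rating,
        published_date=published_date_guess, privacy=privacy,
    )
else:
    models.ReviewRating.objects.create(
        user=user, book=item.book, rating=item.rating,
        published_date=published_date_guess, privacy=privacy,
    )
```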
</issue>
<code>
[start of bookwyrm/importers/importer.py]
1 """ handle reading a csv from an external service, defaults are from GoodReads """
2 import csv
3 import logging
4
5 from bookwyrm import models
6 from bookwyrm.models import ImportJob, ImportItem
7 from bookwyrm.tasks import app
8
9 logger = logging.getLogger(__name__)
10
11
12 class Importer:
13 """Generic class for csv data import from an outside service"""
14
15 service = "Unknown"
16 delimiter = ","
17 encoding = "UTF-8"
18 mandatory_fields = ["Title", "Author"]
19
20 def create_job(self, user, csv_file, include_reviews, privacy):
21 """check over a csv and creates a database entry for the job"""
22 job = ImportJob.objects.create(
23 user=user, include_reviews=include_reviews, privacy=privacy
24 )
25 for index, entry in enumerate(
26 list(csv.DictReader(csv_file, delimiter=self.delimiter))
27 ):
28 if not all(x in entry for x in self.mandatory_fields):
29 raise ValueError("Author and title must be in data.")
30 entry = self.parse_fields(entry)
31 self.save_item(job, index, entry)
32 return job
33
34 def save_item(self, job, index, data): # pylint: disable=no-self-use
35 """creates and saves an import item"""
36 ImportItem(job=job, index=index, data=data).save()
37
38 def parse_fields(self, entry):
39 """updates csv data with additional info"""
40 entry.update({"import_source": self.service})
41 return entry
42
43 def create_retry_job(self, user, original_job, items):
44 """retry items that didn't import"""
45 job = ImportJob.objects.create(
46 user=user,
47 include_reviews=original_job.include_reviews,
48 privacy=original_job.privacy,
49 retry=True,
50 )
51 for item in items:
52 self.save_item(job, item.index, item.data)
53 return job
54
55 def start_import(self, job):
56 """initalizes a csv import job"""
57 result = import_data.delay(self.service, job.id)
58 job.task_id = result.id
59 job.save()
60
61
62 @app.task
63 def import_data(source, job_id):
64 """does the actual lookup work in a celery task"""
65 job = ImportJob.objects.get(id=job_id)
66 try:
67 for item in job.items.all():
68 try:
69 item.resolve()
70 except Exception as e: # pylint: disable=broad-except
71 logger.exception(e)
72 item.fail_reason = "Error loading book"
73 item.save()
74 continue
75
76 if item.book:
77 item.save()
78
79 # shelves book and handles reviews
80 handle_imported_book(
81 source, job.user, item, job.include_reviews, job.privacy
82 )
83 else:
84 item.fail_reason = "Could not find a match for book"
85 item.save()
86 finally:
87 job.complete = True
88 job.save()
89
90
91 def handle_imported_book(source, user, item, include_reviews, privacy):
92 """process a csv and then post about it"""
93 if isinstance(item.book, models.Work):
94 item.book = item.book.default_edition
95 if not item.book:
96 return
97
98 existing_shelf = models.ShelfBook.objects.filter(book=item.book, user=user).exists()
99
100 # shelve the book if it hasn't been shelved already
101 if item.shelf and not existing_shelf:
102 desired_shelf = models.Shelf.objects.get(identifier=item.shelf, user=user)
103 models.ShelfBook.objects.create(book=item.book, shelf=desired_shelf, user=user)
104
105 for read in item.reads:
106 # check for an existing readthrough with the same dates
107 if models.ReadThrough.objects.filter(
108 user=user,
109 book=item.book,
110 start_date=read.start_date,
111 finish_date=read.finish_date,
112 ).exists():
113 continue
114 read.book = item.book
115 read.user = user
116 read.save()
117
118 if include_reviews and (item.rating or item.review):
119 review_title = (
120 "Review of {!r} on {!r}".format(
121 item.book.title,
122 source,
123 )
124 if item.review
125 else ""
126 )
127
128 # we don't know the publication date of the review,
129 # but "now" is a bad guess
130 published_date_guess = item.date_read or item.date_added
131 models.Review.objects.create(
132 user=user,
133 book=item.book,
134 name=review_title,
135 content=item.review,
136 rating=item.rating,
137 published_date=published_date_guess,
138 privacy=privacy,
139 )
140
[end of bookwyrm/importers/importer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/importers/importer.py b/bookwyrm/importers/importer.py
--- a/bookwyrm/importers/importer.py
+++ b/bookwyrm/importers/importer.py
@@ -116,24 +116,33 @@
read.save()
if include_reviews and (item.rating or item.review):
- review_title = (
- "Review of {!r} on {!r}".format(
- item.book.title,
- source,
- )
- if item.review
- else ""
- )
-
# we don't know the publication date of the review,
# but "now" is a bad guess
published_date_guess = item.date_read or item.date_added
- models.Review.objects.create(
- user=user,
- book=item.book,
- name=review_title,
- content=item.review,
- rating=item.rating,
- published_date=published_date_guess,
- privacy=privacy,
- )
+ if item.review:
+ review_title = (
+ "Review of {!r} on {!r}".format(
+ item.book.title,
+ source,
+ )
+ if item.review
+ else ""
+ )
+ models.Review.objects.create(
+ user=user,
+ book=item.book,
+ name=review_title,
+ content=item.review,
+ rating=item.rating,
+ published_date=published_date_guess,
+ privacy=privacy,
+ )
+ else:
+ # just a rating
+ models.ReviewRating.objects.create(
+ user=user,
+ book=item.book,
+ rating=item.rating,
+ published_date=published_date_guess,
+ privacy=privacy,
+ )
|
{"golden_diff": "diff --git a/bookwyrm/importers/importer.py b/bookwyrm/importers/importer.py\n--- a/bookwyrm/importers/importer.py\n+++ b/bookwyrm/importers/importer.py\n@@ -116,24 +116,33 @@\n read.save()\n \n if include_reviews and (item.rating or item.review):\n- review_title = (\n- \"Review of {!r} on {!r}\".format(\n- item.book.title,\n- source,\n- )\n- if item.review\n- else \"\"\n- )\n-\n # we don't know the publication date of the review,\n # but \"now\" is a bad guess\n published_date_guess = item.date_read or item.date_added\n- models.Review.objects.create(\n- user=user,\n- book=item.book,\n- name=review_title,\n- content=item.review,\n- rating=item.rating,\n- published_date=published_date_guess,\n- privacy=privacy,\n- )\n+ if item.review:\n+ review_title = (\n+ \"Review of {!r} on {!r}\".format(\n+ item.book.title,\n+ source,\n+ )\n+ if item.review\n+ else \"\"\n+ )\n+ models.Review.objects.create(\n+ user=user,\n+ book=item.book,\n+ name=review_title,\n+ content=item.review,\n+ rating=item.rating,\n+ published_date=published_date_guess,\n+ privacy=privacy,\n+ )\n+ else:\n+ # just a rating\n+ models.ReviewRating.objects.create(\n+ user=user,\n+ book=item.book,\n+ rating=item.rating,\n+ published_date=published_date_guess,\n+ privacy=privacy,\n+ )\n", "issue": "Imported ratings added as reviews\nDuring a goodreads import, star ratings seem to be added as Reviews, rather than ReviewRatings\n", "before_files": [{"content": "\"\"\" handle reading a csv from an external service, defaults are from GoodReads \"\"\"\nimport csv\nimport logging\n\nfrom bookwyrm import models\nfrom bookwyrm.models import ImportJob, ImportItem\nfrom bookwyrm.tasks import app\n\nlogger = logging.getLogger(__name__)\n\n\nclass Importer:\n \"\"\"Generic class for csv data import from an outside service\"\"\"\n\n service = \"Unknown\"\n delimiter = \",\"\n encoding = \"UTF-8\"\n mandatory_fields = [\"Title\", \"Author\"]\n\n def create_job(self, user, csv_file, include_reviews, privacy):\n \"\"\"check over a csv and creates a database entry for the job\"\"\"\n job = ImportJob.objects.create(\n user=user, include_reviews=include_reviews, privacy=privacy\n )\n for index, entry in enumerate(\n list(csv.DictReader(csv_file, delimiter=self.delimiter))\n ):\n if not all(x in entry for x in self.mandatory_fields):\n raise ValueError(\"Author and title must be in data.\")\n entry = self.parse_fields(entry)\n self.save_item(job, index, entry)\n return job\n\n def save_item(self, job, index, data): # pylint: disable=no-self-use\n \"\"\"creates and saves an import item\"\"\"\n ImportItem(job=job, index=index, data=data).save()\n\n def parse_fields(self, entry):\n \"\"\"updates csv data with additional info\"\"\"\n entry.update({\"import_source\": self.service})\n return entry\n\n def create_retry_job(self, user, original_job, items):\n \"\"\"retry items that didn't import\"\"\"\n job = ImportJob.objects.create(\n user=user,\n include_reviews=original_job.include_reviews,\n privacy=original_job.privacy,\n retry=True,\n )\n for item in items:\n self.save_item(job, item.index, item.data)\n return job\n\n def start_import(self, job):\n \"\"\"initalizes a csv import job\"\"\"\n result = import_data.delay(self.service, job.id)\n job.task_id = result.id\n job.save()\n\n\[email protected]\ndef import_data(source, job_id):\n \"\"\"does the actual lookup work in a celery task\"\"\"\n job = ImportJob.objects.get(id=job_id)\n try:\n for item in job.items.all():\n try:\n item.resolve()\n except Exception as e: # pylint: 
disable=broad-except\n logger.exception(e)\n item.fail_reason = \"Error loading book\"\n item.save()\n continue\n\n if item.book:\n item.save()\n\n # shelves book and handles reviews\n handle_imported_book(\n source, job.user, item, job.include_reviews, job.privacy\n )\n else:\n item.fail_reason = \"Could not find a match for book\"\n item.save()\n finally:\n job.complete = True\n job.save()\n\n\ndef handle_imported_book(source, user, item, include_reviews, privacy):\n \"\"\"process a csv and then post about it\"\"\"\n if isinstance(item.book, models.Work):\n item.book = item.book.default_edition\n if not item.book:\n return\n\n existing_shelf = models.ShelfBook.objects.filter(book=item.book, user=user).exists()\n\n # shelve the book if it hasn't been shelved already\n if item.shelf and not existing_shelf:\n desired_shelf = models.Shelf.objects.get(identifier=item.shelf, user=user)\n models.ShelfBook.objects.create(book=item.book, shelf=desired_shelf, user=user)\n\n for read in item.reads:\n # check for an existing readthrough with the same dates\n if models.ReadThrough.objects.filter(\n user=user,\n book=item.book,\n start_date=read.start_date,\n finish_date=read.finish_date,\n ).exists():\n continue\n read.book = item.book\n read.user = user\n read.save()\n\n if include_reviews and (item.rating or item.review):\n review_title = (\n \"Review of {!r} on {!r}\".format(\n item.book.title,\n source,\n )\n if item.review\n else \"\"\n )\n\n # we don't know the publication date of the review,\n # but \"now\" is a bad guess\n published_date_guess = item.date_read or item.date_added\n models.Review.objects.create(\n user=user,\n book=item.book,\n name=review_title,\n content=item.review,\n rating=item.rating,\n published_date=published_date_guess,\n privacy=privacy,\n )\n", "path": "bookwyrm/importers/importer.py"}]}
| 1,845 | 380 |
gh_patches_debug_22233
|
rasdani/github-patches
|
git_diff
|
statsmodels__statsmodels-4999
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[MAINT/CLN] remove function explicitly marked as duplicate
In the function docstring:
`duplicate: Skipper added sm.tools.drop_missing`
<b>update</b> The relevant function is not used outside this module; nor is the other function in this module.
</issue>
<code>
[start of statsmodels/tools/wrappers.py]
1 # -*- coding: utf-8 -*-
2 """Convenience Wrappers
3
4 Created on Sat Oct 30 14:56:35 2010
5
6 Author: josef-pktd
7 License: BSD
8 """
9
10 import numpy as np
11 import statsmodels.api as sm
12 from statsmodels import GLS, WLS, OLS
13
14 def remove_nanrows(y, x):
15 '''remove common rows in [y,x] that contain at least one nan
16
17 TODO: this should be made more flexible,
18 arbitrary number of arrays and 1d or 2d arrays
19
20 duplicate: Skipper added sm.tools.drop_missing
21
22 '''
23 mask = ~np.isnan(y)
24 mask *= ~(np.isnan(x).any(-1)) #* or &
25 y = y[mask]
26 x = x[mask]
27 return y, x
28
29
30 def linmod(y, x, weights=None, sigma=None, add_const=True, filter_missing=True,
31 **kwds):
32 '''get linear model with extra options for entry
33
34 dispatches to regular model class and does not wrap the output
35
36 If several options are exclusive, for example sigma and weights, then the
37 chosen class depends on the implementation sequence.
38 '''
39
40 if filter_missing:
41 y, x = remove_nanrows(y, x)
42 #do the same for masked arrays
43
44 if add_const:
45 x = sm.add_constant(x, prepend=True)
46
47 if not sigma is None:
48 return GLS(y, x, sigma=sigma, **kwds)
49 elif not weights is None:
50 return WLS(y, x, weights=weights, **kwds)
51 else:
52 return OLS(y, x, **kwds)
53
[end of statsmodels/tools/wrappers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/statsmodels/tools/wrappers.py b/statsmodels/tools/wrappers.py
deleted file mode 100644
--- a/statsmodels/tools/wrappers.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Convenience Wrappers
-
-Created on Sat Oct 30 14:56:35 2010
-
-Author: josef-pktd
-License: BSD
-"""
-
-import numpy as np
-import statsmodels.api as sm
-from statsmodels import GLS, WLS, OLS
-
-def remove_nanrows(y, x):
- '''remove common rows in [y,x] that contain at least one nan
-
- TODO: this should be made more flexible,
- arbitrary number of arrays and 1d or 2d arrays
-
- duplicate: Skipper added sm.tools.drop_missing
-
- '''
- mask = ~np.isnan(y)
- mask *= ~(np.isnan(x).any(-1)) #* or &
- y = y[mask]
- x = x[mask]
- return y, x
-
-
-def linmod(y, x, weights=None, sigma=None, add_const=True, filter_missing=True,
- **kwds):
- '''get linear model with extra options for entry
-
- dispatches to regular model class and does not wrap the output
-
- If several options are exclusive, for example sigma and weights, then the
- chosen class depends on the implementation sequence.
- '''
-
- if filter_missing:
- y, x = remove_nanrows(y, x)
- #do the same for masked arrays
-
- if add_const:
- x = sm.add_constant(x, prepend=True)
-
- if not sigma is None:
- return GLS(y, x, sigma=sigma, **kwds)
- elif not weights is None:
- return WLS(y, x, weights=weights, **kwds)
- else:
- return OLS(y, x, **kwds)
|
{"golden_diff": "diff --git a/statsmodels/tools/wrappers.py b/statsmodels/tools/wrappers.py\ndeleted file mode 100644\n--- a/statsmodels/tools/wrappers.py\n+++ /dev/null\n@@ -1,52 +0,0 @@\n-# -*- coding: utf-8 -*-\n-\"\"\"Convenience Wrappers\n-\n-Created on Sat Oct 30 14:56:35 2010\n-\n-Author: josef-pktd\n-License: BSD\n-\"\"\"\n-\n-import numpy as np\n-import statsmodels.api as sm\n-from statsmodels import GLS, WLS, OLS\n-\n-def remove_nanrows(y, x):\n- '''remove common rows in [y,x] that contain at least one nan\n-\n- TODO: this should be made more flexible,\n- arbitrary number of arrays and 1d or 2d arrays\n-\n- duplicate: Skipper added sm.tools.drop_missing\n-\n- '''\n- mask = ~np.isnan(y)\n- mask *= ~(np.isnan(x).any(-1)) #* or &\n- y = y[mask]\n- x = x[mask]\n- return y, x\n-\n-\n-def linmod(y, x, weights=None, sigma=None, add_const=True, filter_missing=True,\n- **kwds):\n- '''get linear model with extra options for entry\n-\n- dispatches to regular model class and does not wrap the output\n-\n- If several options are exclusive, for example sigma and weights, then the\n- chosen class depends on the implementation sequence.\n- '''\n-\n- if filter_missing:\n- y, x = remove_nanrows(y, x)\n- #do the same for masked arrays\n-\n- if add_const:\n- x = sm.add_constant(x, prepend=True)\n-\n- if not sigma is None:\n- return GLS(y, x, sigma=sigma, **kwds)\n- elif not weights is None:\n- return WLS(y, x, weights=weights, **kwds)\n- else:\n- return OLS(y, x, **kwds)\n", "issue": "[MAINT/CLN] remove function explicitly marked as duplicate\nIn the function docstring:\r\n`duplicate: Skipper added sm.tools.drop_missing`\r\n\r\n<b>update</b> The relevant function is not used outside this module; nor is the other function in this module.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Convenience Wrappers\n\nCreated on Sat Oct 30 14:56:35 2010\n\nAuthor: josef-pktd\nLicense: BSD\n\"\"\"\n\nimport numpy as np\nimport statsmodels.api as sm\nfrom statsmodels import GLS, WLS, OLS\n\ndef remove_nanrows(y, x):\n '''remove common rows in [y,x] that contain at least one nan\n\n TODO: this should be made more flexible,\n arbitrary number of arrays and 1d or 2d arrays\n\n duplicate: Skipper added sm.tools.drop_missing\n\n '''\n mask = ~np.isnan(y)\n mask *= ~(np.isnan(x).any(-1)) #* or &\n y = y[mask]\n x = x[mask]\n return y, x\n\n\ndef linmod(y, x, weights=None, sigma=None, add_const=True, filter_missing=True,\n **kwds):\n '''get linear model with extra options for entry\n\n dispatches to regular model class and does not wrap the output\n\n If several options are exclusive, for example sigma and weights, then the\n chosen class depends on the implementation sequence.\n '''\n\n if filter_missing:\n y, x = remove_nanrows(y, x)\n #do the same for masked arrays\n\n if add_const:\n x = sm.add_constant(x, prepend=True)\n\n if not sigma is None:\n return GLS(y, x, sigma=sigma, **kwds)\n elif not weights is None:\n return WLS(y, x, weights=weights, **kwds)\n else:\n return OLS(y, x, **kwds)\n", "path": "statsmodels/tools/wrappers.py"}]}
| 1,068 | 464 |
gh_patches_debug_4669
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1441
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change in Python 3.9.5 (and 3.8.10) causes Faker's list_module() to fail
* Faker version: 8.1.2
* OS: macOS 11.3.1
A [regression in Python](https://bugs.python.org/issue44061) breaks Faker, specifically [this line of code in Faker](https://github.com/joke2k/faker/blob/master/faker/utils/loading.py#L35) that calls `pkgutil.iter_modules([path])`.
It's not clear to me from the discussion in that python bug report exactly how they intend to resolve the issue, but I thought I'd flag this here.
### Steps to reproduce
1. Install python 3.9.5 or 3.8.10
1. Install faker
1. `import faker`
### Expected behavior
`import faker` should succeed
### Actual behavior
`import faker` raises an exception
```shell
>>> import faker
>>> import faker
Traceback (most recent call last):
File "/python/3.9/lib/python3.9/pkgutil.py", line 416, in get_importer
importer = sys.path_importer_cache[path_item]
KeyError: PosixPath('/venv/lib/python3.9/site-packages/faker/providers')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/venv/lib/python3.9/site-packages/faker/__init__.py", line 1, in <module>
from faker.factory import Factory
File "/venv/lib/python3.9/site-packages/faker/factory.py", line 7, in <module>
from faker.config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS
File "/venv/lib/python3.9/site-packages/faker/config.py", line 11, in <module>
PROVIDERS = find_available_providers(
File "/venv/lib/python3.9/site-packages/faker/utils/loading.py", line 57, in find_available_providers
for mod in list_module(providers_mod) if mod != '__pycache__'
File "/venv/lib/python3.9/site-packages/faker/utils/loading.py", line 35, in list_module
return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
File "/venv/lib/python3.9/site-packages/faker/utils/loading.py", line 35, in <listcomp>
return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
File "/python/3.9/lib/python3.9/pkgutil.py", line 130, in iter_modules
for i in importers:
File "/python/3.9/lib/python3.9/pkgutil.py", line 420, in get_importer
importer = path_hook(path_item)
File "<frozen importlib._bootstrap_external>", line 1601, in path_hook_for_FileFinder
File "<frozen importlib._bootstrap_external>", line 1476, in __init__
File "<frozen importlib._bootstrap_external>", line 177, in _path_isabs
AttributeError: 'PosixPath' object has no attribute 'startswith'
```
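
### Workaround sketch

A minimal illustration of the failure mode and a likely workaround, using only the standard library: `pkgutil.iter_modules` in these point releases chokes on `pathlib.Path` entries, so converting the path to a plain string first avoids the `startswith` error. The path below is illustrative, not the real install location.

```python
import pkgutil
from pathlib import Path

path = Path("faker") / "providers"   # stand-in for the installed providers directory
# Passing str(path) rather than the Path object sidesteps the regression.
names = [name for _, name, is_pkg in pkgutil.iter_modules([str(path)]) if is_pkg]
print(names)
```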
</issue>
<code>
[start of faker/utils/loading.py]
1 import pkgutil
2 import sys
3
4 from importlib import import_module
5 from pathlib import Path
6 from types import ModuleType
7 from typing import List, Set
8
9
10 def get_path(module: ModuleType) -> str:
11 if getattr(sys, 'frozen', False):
12 # frozen
13
14 if getattr(sys, '_MEIPASS', False):
15 # PyInstaller
16 lib_dir = Path(getattr(sys, '_MEIPASS'))
17 else:
18 # others
19 lib_dir = Path(sys.executable).parent / 'lib'
20
21 path = lib_dir.joinpath(*module.__package__.split("."))
22 else:
23 # unfrozen
24 path = Path(module.__file__).parent
25 return str(path)
26
27
28 def list_module(module: ModuleType) -> List[str]:
29 path = get_path(module)
30
31 if getattr(sys, '_MEIPASS', False):
32 # PyInstaller
33 return [file.parent.name for file in Path(path).glob('*/__init__.py')]
34 else:
35 return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
36
37
38 def find_available_locales(providers: List[str]) -> List[str]:
39 available_locales: Set[str] = set()
40
41 for provider_path in providers:
42
43 provider_module = import_module(provider_path)
44 if getattr(provider_module, 'localized', False):
45 langs = list_module(provider_module)
46 available_locales.update(langs)
47 available_locales: List[str] = sorted(available_locales)
48 return available_locales
49
50
51 def find_available_providers(modules: List[ModuleType]) -> List[str]:
52 available_providers = set()
53 for providers_mod in modules:
54 if providers_mod.__package__:
55 providers = [
56 '.'.join([providers_mod.__package__, mod])
57 for mod in list_module(providers_mod) if mod != '__pycache__'
58 ]
59 available_providers.update(providers)
60 return sorted(available_providers)
61
[end of faker/utils/loading.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/faker/utils/loading.py b/faker/utils/loading.py
--- a/faker/utils/loading.py
+++ b/faker/utils/loading.py
@@ -32,7 +32,7 @@
# PyInstaller
return [file.parent.name for file in Path(path).glob('*/__init__.py')]
else:
- return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]
+ return [name for _, name, is_pkg in pkgutil.iter_modules([str(path)]) if is_pkg]
def find_available_locales(providers: List[str]) -> List[str]:
|
{"golden_diff": "diff --git a/faker/utils/loading.py b/faker/utils/loading.py\n--- a/faker/utils/loading.py\n+++ b/faker/utils/loading.py\n@@ -32,7 +32,7 @@\n # PyInstaller\n return [file.parent.name for file in Path(path).glob('*/__init__.py')]\n else:\n- return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n+ return [name for _, name, is_pkg in pkgutil.iter_modules([str(path)]) if is_pkg]\n \n \n def find_available_locales(providers: List[str]) -> List[str]:\n", "issue": "Change in Python 3.9.5 (and 3.8.10) causes Faker's list_module() to fail\n* Faker version: 8.1.2\r\n* OS: macOS 11.3.1\r\n\r\nA [regression in Python](https://bugs.python.org/issue44061) breaks Faker, specifically [this line of code in Faker](https://github.com/joke2k/faker/blob/master/faker/utils/loading.py#L35) that calls `pkgutil.iter_modules([path])`.\r\n\r\nIt's not clear to me from the discussion in that python bug report exactly how they intend to resolve the issue, but I thought I'd flag this here.\r\n\r\n### Steps to reproduce\r\n\r\n1. Install python 3.9.5 or 3.8.10\r\n1. Install faker\r\n1. `import faker`\r\n\r\n### Expected behavior\r\n\r\n`import faker` should succeed\r\n\r\n### Actual behavior\r\n\r\n`import faker` raises an exception\r\n\r\n```shell\r\n>>> import faker\r\n>>> import faker\r\nTraceback (most recent call last):\r\n File \"/python/3.9/lib/python3.9/pkgutil.py\", line 416, in get_importer\r\n importer = sys.path_importer_cache[path_item]\r\nKeyError: PosixPath('/venv/lib/python3.9/site-packages/faker/providers')\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/venv/lib/python3.9/site-packages/faker/__init__.py\", line 1, in <module>\r\n from faker.factory import Factory\r\n File \"/venv/lib/python3.9/site-packages/faker/factory.py\", line 7, in <module>\r\n from faker.config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS\r\n File \"/venv/lib/python3.9/site-packages/faker/config.py\", line 11, in <module>\r\n PROVIDERS = find_available_providers(\r\n File \"/venv/lib/python3.9/site-packages/faker/utils/loading.py\", line 57, in find_available_providers\r\n for mod in list_module(providers_mod) if mod != '__pycache__'\r\n File \"/venv/lib/python3.9/site-packages/faker/utils/loading.py\", line 35, in list_module\r\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\r\n File \"/venv/lib/python3.9/site-packages/faker/utils/loading.py\", line 35, in <listcomp>\r\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\r\n File \"/python/3.9/lib/python3.9/pkgutil.py\", line 130, in iter_modules\r\n for i in importers:\r\n File \"/python/3.9/lib/python3.9/pkgutil.py\", line 420, in get_importer\r\n importer = path_hook(path_item)\r\n File \"<frozen importlib._bootstrap_external>\", line 1601, in path_hook_for_FileFinder\r\n File \"<frozen importlib._bootstrap_external>\", line 1476, in __init__\r\n File \"<frozen importlib._bootstrap_external>\", line 177, in _path_isabs\r\nAttributeError: 'PosixPath' object has no attribute 'startswith'\r\n```\n", "before_files": [{"content": "import pkgutil\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import List, Set\n\n\ndef get_path(module: ModuleType) -> str:\n if getattr(sys, 'frozen', False):\n # frozen\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n lib_dir = Path(getattr(sys, 
'_MEIPASS'))\n else:\n # others\n lib_dir = Path(sys.executable).parent / 'lib'\n\n path = lib_dir.joinpath(*module.__package__.split(\".\"))\n else:\n # unfrozen\n path = Path(module.__file__).parent\n return str(path)\n\n\ndef list_module(module: ModuleType) -> List[str]:\n path = get_path(module)\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n return [file.parent.name for file in Path(path).glob('*/__init__.py')]\n else:\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n\n\ndef find_available_locales(providers: List[str]) -> List[str]:\n available_locales: Set[str] = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n available_locales: List[str] = sorted(available_locales)\n return available_locales\n\n\ndef find_available_providers(modules: List[ModuleType]) -> List[str]:\n available_providers = set()\n for providers_mod in modules:\n if providers_mod.__package__:\n providers = [\n '.'.join([providers_mod.__package__, mod])\n for mod in list_module(providers_mod) if mod != '__pycache__'\n ]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}]}
| 1,803 | 135 |
gh_patches_debug_14375
|
rasdani/github-patches
|
git_diff
|
mabel-dev__opteryx-1467
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🪲 Column Names not Aliased
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
Example from user
~~~sql
SELECT *
FROM $planets AS P
INNER JOIN $satellites AS S
ON P.id = S.id
~~~
Simplified example
~~~sql
SELECT *
FROM $planets
INNER JOIN $satellites
ON $planets.id = $satellites.id
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
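What the fix needs to do, in miniature: when the projected names collide (both relations expose `id`, `name`, and so on), qualify each output column with the relation it came from. A hedged sketch with illustrative names only:
~~~python
# Sketch: rebuild the output names with their origin when duplicates appear.
names = ["id", "name", "id", "name"]                      # projected twice via SELECT *
origins = ["$planets", "$planets", "$satellites", "$satellites"]

if len(set(names)) != len(names):
    names = [f"{origin}.{name}" for origin, name in zip(origins, names)]

print(names)   # ['$planets.id', '$planets.name', '$satellites.id', '$satellites.name']
~~~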
</issue>
<code>
[start of opteryx/operators/exit_node.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 """
14 Exit Node
15
16 This is a SQL Query Execution Plan Node.
17
18 This does the final preparation before returning results to users.
19
20 This does two things that the projection node doesn't do:
21 - renames columns from the internal names
22 - removes all columns not being returned to the user
23
24 This node doesn't do any calculations, it is a pure Projection.
25 """
26 import time
27 from typing import Generator
28
29 from opteryx.exceptions import AmbiguousIdentifierError
30 from opteryx.exceptions import InvalidInternalStateError
31 from opteryx.models import QueryProperties
32 from opteryx.operators import BasePlanNode
33
34
35 class ExitNode(BasePlanNode):
36 def __init__(self, properties: QueryProperties, **config):
37 super().__init__(properties=properties)
38 self.columns = config.get("columns", [])
39
40 @property
41 def config(self): # pragma: no cover
42 return None
43
44 @property
45 def name(self): # pragma: no cover
46 return "Exit"
47
48 def execute(self) -> Generator:
49 start = time.monotonic_ns()
50 morsels = self._producers[0] # type:ignore
51
52 final_columns = []
53 final_names = []
54 for column in self.columns:
55 final_columns.append(column.schema_column.identity)
56 final_names.append(column.current_name)
57
58 if len(final_columns) != len(set(final_columns)): # pragma: no cover
59 from collections import Counter
60
61 duplicates = [column for column, count in Counter(final_columns).items() if count > 1]
62 matches = {a for a, b in zip(final_names, final_columns) if b in duplicates}
63 raise AmbiguousIdentifierError(
64 message=f"Query result contains multiple instances of the same column(s) - `{'`, `'.join(matches)}`"
65 )
66
67 self.statistics.time_exiting += time.monotonic_ns() - start
68 for morsel in morsels.execute():
69 start = time.monotonic_ns()
70 if not set(final_columns).issubset(morsel.column_names): # pragma: no cover
71 mapping = {name: int_name for name, int_name in zip(final_columns, final_names)}
72 missing_references = {
73 mapping.get(ref): ref for ref in final_columns if ref not in morsel.column_names
74 }
75
76 raise InvalidInternalStateError(
77 f"The following fields were not in the resultset - {', '.join(missing_references.keys())}"
78 )
79
80 morsel = morsel.select(final_columns)
81 morsel = morsel.rename_columns(final_names)
82
83 self.statistics.time_exiting += time.monotonic_ns() - start
84 yield morsel
85 start = time.monotonic_ns()
86
[end of opteryx/operators/exit_node.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opteryx/operators/exit_node.py b/opteryx/operators/exit_node.py
--- a/opteryx/operators/exit_node.py
+++ b/opteryx/operators/exit_node.py
@@ -64,6 +64,14 @@
message=f"Query result contains multiple instances of the same column(s) - `{'`, `'.join(matches)}`"
)
+ if len(set(final_names)) != len(final_names): # we have duplicate names
+ final_names = []
+ for column in self.columns:
+ if column.schema_column.origin:
+ final_names.append(f"{column.schema_column.origin[0]}.{column.current_name}")
+ else:
+ final_names.append(column.qualified_name)
+
self.statistics.time_exiting += time.monotonic_ns() - start
for morsel in morsels.execute():
start = time.monotonic_ns()
|
{"golden_diff": "diff --git a/opteryx/operators/exit_node.py b/opteryx/operators/exit_node.py\n--- a/opteryx/operators/exit_node.py\n+++ b/opteryx/operators/exit_node.py\n@@ -64,6 +64,14 @@\n message=f\"Query result contains multiple instances of the same column(s) - `{'`, `'.join(matches)}`\"\n )\n \n+ if len(set(final_names)) != len(final_names): # we have duplicate names\n+ final_names = []\n+ for column in self.columns:\n+ if column.schema_column.origin:\n+ final_names.append(f\"{column.schema_column.origin[0]}.{column.current_name}\")\n+ else:\n+ final_names.append(column.qualified_name)\n+\n self.statistics.time_exiting += time.monotonic_ns() - start\n for morsel in morsels.execute():\n start = time.monotonic_ns()\n", "issue": "\ud83e\udeb2 Column Names not Aliased\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\nExample from user\r\n~~~sql\r\nSELECT *\r\n FROM $planets AS P\r\n INNER JOIN $satellites AS S\r\n ON P.id = S.id\r\n~~~\r\n\r\nSimplified example\r\n~~~sql\r\nSELECT *\r\n FROM $planets\r\n INNER JOIN $satellites\r\n ON $planets.id = $satellites.id\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nExit Node\n\nThis is a SQL Query Execution Plan Node.\n\nThis does the final preparation before returning results to users.\n\nThis does two things that the projection node doesn't do:\n - renames columns from the internal names\n - removes all columns not being returned to the user\n\nThis node doesn't do any calculations, it is a pure Projection.\n\"\"\"\nimport time\nfrom typing import Generator\n\nfrom opteryx.exceptions import AmbiguousIdentifierError\nfrom opteryx.exceptions import InvalidInternalStateError\nfrom opteryx.models import QueryProperties\nfrom opteryx.operators import BasePlanNode\n\n\nclass ExitNode(BasePlanNode):\n def __init__(self, properties: QueryProperties, **config):\n super().__init__(properties=properties)\n self.columns = config.get(\"columns\", [])\n\n @property\n def config(self): # pragma: no cover\n return None\n\n @property\n def name(self): # pragma: no cover\n return \"Exit\"\n\n def execute(self) -> Generator:\n start = time.monotonic_ns()\n morsels = self._producers[0] # type:ignore\n\n final_columns = []\n final_names = []\n for column in self.columns:\n final_columns.append(column.schema_column.identity)\n final_names.append(column.current_name)\n\n if len(final_columns) != len(set(final_columns)): # pragma: no cover\n from collections import Counter\n\n duplicates = [column for column, count in Counter(final_columns).items() if count > 1]\n matches = {a for a, b in zip(final_names, final_columns) if b in duplicates}\n raise AmbiguousIdentifierError(\n message=f\"Query result contains multiple instances of the same column(s) - 
`{'`, `'.join(matches)}`\"\n )\n\n self.statistics.time_exiting += time.monotonic_ns() - start\n for morsel in morsels.execute():\n start = time.monotonic_ns()\n if not set(final_columns).issubset(morsel.column_names): # pragma: no cover\n mapping = {name: int_name for name, int_name in zip(final_columns, final_names)}\n missing_references = {\n mapping.get(ref): ref for ref in final_columns if ref not in morsel.column_names\n }\n\n raise InvalidInternalStateError(\n f\"The following fields were not in the resultset - {', '.join(missing_references.keys())}\"\n )\n\n morsel = morsel.select(final_columns)\n morsel = morsel.rename_columns(final_names)\n\n self.statistics.time_exiting += time.monotonic_ns() - start\n yield morsel\n start = time.monotonic_ns()\n", "path": "opteryx/operators/exit_node.py"}]}
| 1,560 | 198 |
gh_patches_debug_16442
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-2613
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Warn on OpenSSL 0.9.8?
Starting in 3.5 weeks OpenSSL 0.9.8 will officially be unsupported by the upstream team. It's unclear what this will mean for various downstreams (notable RHEL, CentOS, and OS X), but in practice it means there's likely to be a significantly decreased level of attention, research, and patching that goes into it.
I'd like to suggest that, starting with whatever release comes after January 1st, 2016, we emit a warning if users are linked against OpenSSL 0.9.8, suggesting they upgrade to a newer OpenSSL (or OS I guess?).
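For concreteness, a sketch of the kind of check this would involve, assuming the bindings can read the linked OpenSSL's packed version number (for example via `SSLeay()`). The threshold `0x10000000` is the packed value for 1.0.0, so anything below it is the 0.9.8 line; symbol names here are assumptions, not the project's final API.
```python
import warnings

def _warn_if_openssl_098(version_number: int) -> None:
    # 0.9.8x packs to values below 0x10000000 (the 1.0.0 baseline).
    if version_number < 0x10000000:
        warnings.warn(
            "OpenSSL 0.9.8 is no longer supported by the OpenSSL project; "
            "please upgrade to a newer version of OpenSSL.",
            DeprecationWarning,
        )
```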
</issue>
<code>
[start of src/cryptography/hazmat/bindings/openssl/binding.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import collections
8 import os
9 import threading
10 import types
11
12 from cryptography.exceptions import InternalError
13 from cryptography.hazmat.bindings._openssl import ffi, lib
14 from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
15
16
17 _OpenSSLError = collections.namedtuple("_OpenSSLError",
18 ["code", "lib", "func", "reason"])
19
20
21 def _consume_errors(lib):
22 errors = []
23 while True:
24 code = lib.ERR_get_error()
25 if code == 0:
26 break
27
28 err_lib = lib.ERR_GET_LIB(code)
29 err_func = lib.ERR_GET_FUNC(code)
30 err_reason = lib.ERR_GET_REASON(code)
31
32 errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))
33 return errors
34
35
36 def _openssl_assert(lib, ok):
37 if not ok:
38 errors = _consume_errors(lib)
39 raise InternalError(
40 "Unknown OpenSSL error. Please file an issue at https://github.com"
41 "/pyca/cryptography/issues with information on how to reproduce "
42 "this. ({0!r})".format(errors),
43 errors
44 )
45
46
47 @ffi.callback("int (*)(unsigned char *, int)", error=-1)
48 def _osrandom_rand_bytes(buf, size):
49 signed = ffi.cast("char *", buf)
50 result = os.urandom(size)
51 signed[0:size] = result
52 return 1
53
54
55 @ffi.callback("int (*)(void)")
56 def _osrandom_rand_status():
57 return 1
58
59
60 def build_conditional_library(lib, conditional_names):
61 conditional_lib = types.ModuleType("lib")
62 excluded_names = set()
63 for condition, names in conditional_names.items():
64 if not getattr(lib, condition):
65 excluded_names |= set(names)
66
67 for attr in dir(lib):
68 if attr not in excluded_names:
69 setattr(conditional_lib, attr, getattr(lib, attr))
70
71 return conditional_lib
72
73
74 class Binding(object):
75 """
76 OpenSSL API wrapper.
77 """
78 lib = None
79 ffi = ffi
80 _lib_loaded = False
81 _locks = None
82 _lock_cb_handle = None
83 _init_lock = threading.Lock()
84 _lock_init_lock = threading.Lock()
85
86 _osrandom_engine_id = ffi.new("const char[]", b"osrandom")
87 _osrandom_engine_name = ffi.new("const char[]", b"osrandom_engine")
88 _osrandom_method = ffi.new(
89 "RAND_METHOD *",
90 dict(bytes=_osrandom_rand_bytes, pseudorand=_osrandom_rand_bytes,
91 status=_osrandom_rand_status)
92 )
93
94 def __init__(self):
95 self._ensure_ffi_initialized()
96
97 @classmethod
98 def _register_osrandom_engine(cls):
99 _openssl_assert(cls.lib, cls.lib.ERR_peek_error() == 0)
100
101 engine = cls.lib.ENGINE_new()
102 _openssl_assert(cls.lib, engine != cls.ffi.NULL)
103 try:
104 result = cls.lib.ENGINE_set_id(engine, cls._osrandom_engine_id)
105 _openssl_assert(cls.lib, result == 1)
106 result = cls.lib.ENGINE_set_name(engine, cls._osrandom_engine_name)
107 _openssl_assert(cls.lib, result == 1)
108 result = cls.lib.ENGINE_set_RAND(engine, cls._osrandom_method)
109 _openssl_assert(cls.lib, result == 1)
110 result = cls.lib.ENGINE_add(engine)
111 if result != 1:
112 errors = _consume_errors(cls.lib)
113 _openssl_assert(
114 cls.lib,
115 errors[0].reason == cls.lib.ENGINE_R_CONFLICTING_ENGINE_ID
116 )
117
118 finally:
119 result = cls.lib.ENGINE_free(engine)
120 _openssl_assert(cls.lib, result == 1)
121
122 @classmethod
123 def _ensure_ffi_initialized(cls):
124 with cls._init_lock:
125 if not cls._lib_loaded:
126 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
127 cls._lib_loaded = True
128 # initialize the SSL library
129 cls.lib.SSL_library_init()
130 # adds all ciphers/digests for EVP
131 cls.lib.OpenSSL_add_all_algorithms()
132 # loads error strings for libcrypto and libssl functions
133 cls.lib.SSL_load_error_strings()
134 cls._register_osrandom_engine()
135
136 @classmethod
137 def init_static_locks(cls):
138 with cls._lock_init_lock:
139 cls._ensure_ffi_initialized()
140
141 if not cls._lock_cb_handle:
142 cls._lock_cb_handle = cls.ffi.callback(
143 "void(int, int, const char *, int)",
144 cls._lock_cb
145 )
146
147 # Use Python's implementation if available, importing _ssl triggers
148 # the setup for this.
149 __import__("_ssl")
150
151 if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
152 return
153
154 # If nothing else has setup a locking callback already, we set up
155 # our own
156 num_locks = cls.lib.CRYPTO_num_locks()
157 cls._locks = [threading.Lock() for n in range(num_locks)]
158
159 cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
160
161 @classmethod
162 def _lock_cb(cls, mode, n, file, line):
163 lock = cls._locks[n]
164
165 if mode & cls.lib.CRYPTO_LOCK:
166 lock.acquire()
167 elif mode & cls.lib.CRYPTO_UNLOCK:
168 lock.release()
169 else:
170 raise RuntimeError(
171 "Unknown lock mode {0}: lock={1}, file={2}, line={3}.".format(
172 mode, n, file, line
173 )
174 )
175
176
177 # OpenSSL is not thread safe until the locks are initialized. We call this
178 # method in module scope so that it executes with the import lock. On
179 # Pythons < 3.4 this import lock is a global lock, which can prevent a race
180 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
181 # is per module so this approach will not work.
182 Binding.init_static_locks()
183
[end of src/cryptography/hazmat/bindings/openssl/binding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py
--- a/src/cryptography/hazmat/bindings/openssl/binding.py
+++ b/src/cryptography/hazmat/bindings/openssl/binding.py
@@ -8,6 +8,7 @@
import os
import threading
import types
+import warnings
from cryptography.exceptions import InternalError
from cryptography.hazmat.bindings._openssl import ffi, lib
@@ -180,3 +181,11 @@
# condition registering the OpenSSL locks. On Python 3.4+ the import lock
# is per module so this approach will not work.
Binding.init_static_locks()
+
+if Binding.lib.SSLeay() < 0x10001000:
+ warnings.warn(
+ "OpenSSL versions less than 1.0.1 are no longer supported by the "
+ "OpenSSL project, please upgrade. A future version of cryptography "
+ "will drop support for these versions.",
+ DeprecationWarning
+ )
|
{"golden_diff": "diff --git a/src/cryptography/hazmat/bindings/openssl/binding.py b/src/cryptography/hazmat/bindings/openssl/binding.py\n--- a/src/cryptography/hazmat/bindings/openssl/binding.py\n+++ b/src/cryptography/hazmat/bindings/openssl/binding.py\n@@ -8,6 +8,7 @@\n import os\n import threading\n import types\n+import warnings\n \n from cryptography.exceptions import InternalError\n from cryptography.hazmat.bindings._openssl import ffi, lib\n@@ -180,3 +181,11 @@\n # condition registering the OpenSSL locks. On Python 3.4+ the import lock\n # is per module so this approach will not work.\n Binding.init_static_locks()\n+\n+if Binding.lib.SSLeay() < 0x10001000:\n+ warnings.warn(\n+ \"OpenSSL versions less than 1.0.1 are no longer supported by the \"\n+ \"OpenSSL project, please upgrade. A future version of cryptography \"\n+ \"will drop support for these versions.\",\n+ DeprecationWarning\n+ )\n", "issue": "Warn on OpenSSL 0.9.8?\nStarting in 3.5 weeks OpenSSL 0.9.8 will officially be unsupported by the upstream team. It's unclear what this will mean for various downstreams (notable RHEL, CentOS, and OS X), but in practice it means there's likely to be a significantly decreased level of attention, research, and patching that goes into it.\n\nI'd like to suggest that, starting with whatever release comes after January 1st, 2016, we emit a warning if users are linked against OpenSSL 0.9.8, suggesting they upgrade to a newer OpenSSL (or OS I guess?).\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport collections\nimport os\nimport threading\nimport types\n\nfrom cryptography.exceptions import InternalError\nfrom cryptography.hazmat.bindings._openssl import ffi, lib\nfrom cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES\n\n\n_OpenSSLError = collections.namedtuple(\"_OpenSSLError\",\n [\"code\", \"lib\", \"func\", \"reason\"])\n\n\ndef _consume_errors(lib):\n errors = []\n while True:\n code = lib.ERR_get_error()\n if code == 0:\n break\n\n err_lib = lib.ERR_GET_LIB(code)\n err_func = lib.ERR_GET_FUNC(code)\n err_reason = lib.ERR_GET_REASON(code)\n\n errors.append(_OpenSSLError(code, err_lib, err_func, err_reason))\n return errors\n\n\ndef _openssl_assert(lib, ok):\n if not ok:\n errors = _consume_errors(lib)\n raise InternalError(\n \"Unknown OpenSSL error. Please file an issue at https://github.com\"\n \"/pyca/cryptography/issues with information on how to reproduce \"\n \"this. 
({0!r})\".format(errors),\n errors\n )\n\n\[email protected](\"int (*)(unsigned char *, int)\", error=-1)\ndef _osrandom_rand_bytes(buf, size):\n signed = ffi.cast(\"char *\", buf)\n result = os.urandom(size)\n signed[0:size] = result\n return 1\n\n\[email protected](\"int (*)(void)\")\ndef _osrandom_rand_status():\n return 1\n\n\ndef build_conditional_library(lib, conditional_names):\n conditional_lib = types.ModuleType(\"lib\")\n excluded_names = set()\n for condition, names in conditional_names.items():\n if not getattr(lib, condition):\n excluded_names |= set(names)\n\n for attr in dir(lib):\n if attr not in excluded_names:\n setattr(conditional_lib, attr, getattr(lib, attr))\n\n return conditional_lib\n\n\nclass Binding(object):\n \"\"\"\n OpenSSL API wrapper.\n \"\"\"\n lib = None\n ffi = ffi\n _lib_loaded = False\n _locks = None\n _lock_cb_handle = None\n _init_lock = threading.Lock()\n _lock_init_lock = threading.Lock()\n\n _osrandom_engine_id = ffi.new(\"const char[]\", b\"osrandom\")\n _osrandom_engine_name = ffi.new(\"const char[]\", b\"osrandom_engine\")\n _osrandom_method = ffi.new(\n \"RAND_METHOD *\",\n dict(bytes=_osrandom_rand_bytes, pseudorand=_osrandom_rand_bytes,\n status=_osrandom_rand_status)\n )\n\n def __init__(self):\n self._ensure_ffi_initialized()\n\n @classmethod\n def _register_osrandom_engine(cls):\n _openssl_assert(cls.lib, cls.lib.ERR_peek_error() == 0)\n\n engine = cls.lib.ENGINE_new()\n _openssl_assert(cls.lib, engine != cls.ffi.NULL)\n try:\n result = cls.lib.ENGINE_set_id(engine, cls._osrandom_engine_id)\n _openssl_assert(cls.lib, result == 1)\n result = cls.lib.ENGINE_set_name(engine, cls._osrandom_engine_name)\n _openssl_assert(cls.lib, result == 1)\n result = cls.lib.ENGINE_set_RAND(engine, cls._osrandom_method)\n _openssl_assert(cls.lib, result == 1)\n result = cls.lib.ENGINE_add(engine)\n if result != 1:\n errors = _consume_errors(cls.lib)\n _openssl_assert(\n cls.lib,\n errors[0].reason == cls.lib.ENGINE_R_CONFLICTING_ENGINE_ID\n )\n\n finally:\n result = cls.lib.ENGINE_free(engine)\n _openssl_assert(cls.lib, result == 1)\n\n @classmethod\n def _ensure_ffi_initialized(cls):\n with cls._init_lock:\n if not cls._lib_loaded:\n cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\n cls._lib_loaded = True\n # initialize the SSL library\n cls.lib.SSL_library_init()\n # adds all ciphers/digests for EVP\n cls.lib.OpenSSL_add_all_algorithms()\n # loads error strings for libcrypto and libssl functions\n cls.lib.SSL_load_error_strings()\n cls._register_osrandom_engine()\n\n @classmethod\n def init_static_locks(cls):\n with cls._lock_init_lock:\n cls._ensure_ffi_initialized()\n\n if not cls._lock_cb_handle:\n cls._lock_cb_handle = cls.ffi.callback(\n \"void(int, int, const char *, int)\",\n cls._lock_cb\n )\n\n # Use Python's implementation if available, importing _ssl triggers\n # the setup for this.\n __import__(\"_ssl\")\n\n if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:\n return\n\n # If nothing else has setup a locking callback already, we set up\n # our own\n num_locks = cls.lib.CRYPTO_num_locks()\n cls._locks = [threading.Lock() for n in range(num_locks)]\n\n cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)\n\n @classmethod\n def _lock_cb(cls, mode, n, file, line):\n lock = cls._locks[n]\n\n if mode & cls.lib.CRYPTO_LOCK:\n lock.acquire()\n elif mode & cls.lib.CRYPTO_UNLOCK:\n lock.release()\n else:\n raise RuntimeError(\n \"Unknown lock mode {0}: lock={1}, file={2}, line={3}.\".format(\n mode, n, file, line\n )\n )\n\n\n# 
OpenSSL is not thread safe until the locks are initialized. We call this\n# method in module scope so that it executes with the import lock. On\n# Pythons < 3.4 this import lock is a global lock, which can prevent a race\n# condition registering the OpenSSL locks. On Python 3.4+ the import lock\n# is per module so this approach will not work.\nBinding.init_static_locks()\n", "path": "src/cryptography/hazmat/bindings/openssl/binding.py"}]}
| 2,490 | 243 |
gh_patches_debug_31113
|
rasdani/github-patches
|
git_diff
|
pymeasure__pymeasure-867
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`VISAAdapter` still terminating on default term character in `read_bytes(-1)`
Pretty odd and specific issue, not sure if this belong here or on PyVisa.
When I try to read the complete buffer in a serial connection using the `VISAAdapter`, it still breaks on the byte corresponding to `\n`:
```
def __init__(self, adapter, name="Velleman K8090", timeout=1000, **kwargs):
super().__init__(
adapter,
name=name,
asrl={"baud_rate": 19200},
write_termination="",
read_termination=chr(0x0F),
timeout=timeout,
**kwargs,
)
# ...
def read(self):
response = self.read_bytes(-1)
# `response` will end with "\n", even though there are more bytes in the buffer!
```
Encountered in #859 .
It seems the issue is twofold: in this code any termchar should be ignored, and it's even responding to the wrong termchar.
This is with the `pyvisa-py` backend.
</issue>
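For orientation: the fix adopted later in this record sidesteps `read_raw()` (which pyvisa-py may still terminate at a newline, see pyvisa/pyvisa#728) by pulling single bytes until the instrument times out. A minimal sketch of that loop is below; the helper function and its name are added only for illustration.

```
import pyvisa

def read_all_bytes(connection):
    # Illustrative helper: read until timeout, ignoring any termination character.
    result = bytearray()
    while True:
        try:
            result.extend(connection.read_bytes(1))
        except pyvisa.errors.VisaIOError as exc:
            if exc.error_code == pyvisa.constants.StatusCode.error_timeout:
                return bytes(result)
            raise
```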
<code>
[start of pymeasure/adapters/visa.py]
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2023 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import logging
26 from warnings import warn
27
28 import pyvisa
29 import numpy as np
30
31 from .adapter import Adapter
32 from .protocol import ProtocolAdapter
33
34 log = logging.getLogger(__name__)
35 log.addHandler(logging.NullHandler())
36
37
38 # noinspection PyPep8Naming,PyUnresolvedReferences
39 class VISAAdapter(Adapter):
40 """ Adapter class for the VISA library, using PyVISA to communicate with instruments.
41
42 The workhorse of our library, used by most instruments.
43
44 :param resource_name: A
45 `VISA resource string <https://pyvisa.readthedocs.io/en/latest/introduction/names.html>`__
46 or GPIB address integer that identifies the target of the connection
47 :param visa_library: PyVISA VisaLibrary Instance, path of the VISA library or VisaLibrary spec
48 string (``@py`` or ``@ivi``). If not given, the default for the platform will be used.
49 :param preprocess_reply: An optional callable used to preprocess strings
50 received from the instrument. The callable returns the processed string.
51
52 .. deprecated:: 0.11
53 Implement it in the instrument's `read` method instead.
54
55 :param float query_delay: Time in s to wait after writing and before reading.
56
57 .. deprecated:: 0.11
58 Implement it in the instrument's `wait_for` method instead.
59
60 :param log: Parent logger of the 'Adapter' logger.
61 :param \\**kwargs: Keyword arguments for configuring the PyVISA connection.
62
63 :Kwargs:
64 Keyword arguments are used to configure the connection created by PyVISA. This is
65 complicated by the fact that *which* arguments are valid depends on the interface (e.g.
66 serial, GPIB, TCPI/IP, USB) determined by the current ``resource_name``.
67
68 A flexible process is used to easily define reasonable *default values* for
69 different instrument interfaces, but also enable the instrument user to *override any
70 setting* if their situation demands it.
71
72 A kwarg that names a pyVISA interface type (most commonly ``asrl``, ``gpib``, ``tcpip``, or
73 ``usb``) is a dictionary with keyword arguments defining defaults specific to that
74 interface. Example: ``asrl={'baud_rate': 4200}``.
75
76 All other kwargs are either generally valid (e.g. ``timeout=500``) or override any default
77 settings from the interface-specific entries above. For example, passing
78 ``baud_rate=115200`` when connecting via a resource name ``ASRL1`` would override a
79 default of 4200 defined as above.
80
81 See :ref:`connection_settings` for how to tweak settings when *connecting* to an instrument.
82 See :ref:`default_connection_settings` for how to best define default settings when
83 *implementing an instrument*.
84 """
85
86 def __init__(self, resource_name, visa_library='', preprocess_reply=None,
87 query_delay=0, log=None, **kwargs):
88 super().__init__(preprocess_reply=preprocess_reply, log=log)
89 if query_delay:
90 warn(("Parameter `query_delay` is deprecated. "
91 "Implement in Instrument's `wait_for` instead."),
92 FutureWarning)
93 kwargs.setdefault("query_delay", query_delay)
94 self.query_delay = query_delay
95 if isinstance(resource_name, ProtocolAdapter):
96 self.connection = resource_name
97 self.connection.write_raw = self.connection.write_bytes
98 self.read_bytes = self.connection.read_bytes
99 return
100 elif isinstance(resource_name, VISAAdapter):
101 # Allow to reuse the connection.
102 self.resource_name = getattr(resource_name, "resource_name", None)
103 self.connection = resource_name.connection
104 self.manager = resource_name.manager
105 self.query_delay = resource_name.query_delay
106 return
107 elif isinstance(resource_name, int):
108 resource_name = "GPIB0::%d::INSTR" % resource_name
109
110 self.resource_name = resource_name
111 self.manager = pyvisa.ResourceManager(visa_library)
112
113 # Clean up kwargs considering the interface type matching resource_name
114 if_type = self.manager.resource_info(self.resource_name).interface_type
115 for key in list(kwargs.keys()): # iterate over a copy of the keys as we modify kwargs
116 # Remove all interface-specific kwargs:
117 if key in pyvisa.constants.InterfaceType.__members__:
118 if getattr(pyvisa.constants.InterfaceType, key) is if_type:
119 # For the present interface, dump contents into kwargs first if they are not
120 # present already. This way, it is possible to override default values with
121 # kwargs passed to Instrument.__init__()
122 for k, v in kwargs[key].items():
123 kwargs.setdefault(k, v)
124 del kwargs[key]
125
126 self.connection = self.manager.open_resource(
127 resource_name,
128 **kwargs
129 )
130
131 def close(self):
132 """Close the connection.
133
134 .. note::
135
136 This closes the connection to the resource for all adapters using
137 it currently (e.g. different adapters using the same GPIB line).
138 """
139 super().close()
140 try:
141 self.manager.close()
142 except AttributeError:
143 pass # Closed from another adapter using the same connection.
144
145 def _write(self, command, **kwargs):
146 """Write a string command to the instrument appending `write_termination`.
147
148 :param str command: Command string to be sent to the instrument
149 (without termination).
150 :param \\**kwargs: Keyword arguments for the connection itself.
151 """
152 self.connection.write(command, **kwargs)
153
154 def _write_bytes(self, content, **kwargs):
155 """Write the bytes `content` to the instrument.
156
157 :param bytes content: The bytes to write to the instrument.
158 :param \\**kwargs: Keyword arguments for the connection itself.
159 """
160 self.connection.write_raw(content, **kwargs)
161
162 def _read(self, **kwargs):
163 """Read up to (excluding) `read_termination` or the whole read buffer.
164
165 :param \\**kwargs: Keyword arguments for the connection itself.
166 :returns str: ASCII response of the instrument (excluding read_termination).
167 """
168 return self.connection.read(**kwargs)
169
170 def _read_bytes(self, count, break_on_termchar=False, **kwargs):
171 """Read a certain number of bytes from the instrument.
172
173 :param int count: Number of bytes to read. A value of -1 indicates to
174 read from the whole read buffer.
175 :param bool break_on_termchar: Stop reading at a termination character.
176 :param \\**kwargs: Keyword arguments for the connection itself.
177 :returns bytes: Bytes response of the instrument (including termination).
178 """
179 if count >= 0:
180 return self.connection.read_bytes(count, break_on_termchar=break_on_termchar, **kwargs)
181 elif break_on_termchar:
182 return self.connection.read_raw(None, **kwargs)
183 else:
184 read_termination = self.connection.read_termination
185 self.connection.read_termination = None
186 # Try except allows to set the read_termination even after an error.
187 try:
188 return self.connection.read_raw(**kwargs)
189 finally:
190 self.connection.read_termination = read_termination
191
192 def ask(self, command):
193 """ Writes the command to the instrument and returns the resulting
194 ASCII response
195
196 .. deprecated:: 0.11
197 Call `Instrument.ask` instead.
198
199 :param command: SCPI command string to be sent to the instrument
200 :returns: String ASCII response of the instrument
201 """
202 warn("`Adapter.ask` is deprecated, call `Instrument.ask` instead.", FutureWarning)
203 return self.connection.query(command)
204
205 def ask_values(self, command, **kwargs):
206 """ Writes a command to the instrument and returns a list of formatted
207 values from the result. This leverages the `query_ascii_values` method
208 in PyVISA.
209
210 .. deprecated:: 0.11
211 Call `Instrument.values` instead.
212
213 :param command: SCPI command to be sent to the instrument
214 :param \\**kwargs: Key-word arguments to pass onto `query_ascii_values`
215 :returns: Formatted response of the instrument.
216 """
217 warn("`Adapter.ask_values` is deprecated, call `Instrument.values` instead.",
218 FutureWarning)
219
220 return self.connection.query_ascii_values(command, **kwargs)
221
222 def binary_values(self, command, header_bytes=0, dtype=np.float32):
223 """ Returns a numpy array from a query for binary data
224
225 .. deprecated:: 0.11
226 Call `Instrument.binary_values` instead.
227
228 :param command: SCPI command to be sent to the instrument
229 :param header_bytes: Integer number of bytes to ignore in header
230 :param dtype: The NumPy data type to format the values with
231 :returns: NumPy array of values
232 """
233 warn("`Adapter.binary_values` is deprecated, call `Instrument.binary_values` instead.",
234 FutureWarning)
235 self.connection.write(command)
236 binary = self.connection.read_raw()
237 # header = binary[:header_bytes]
238 data = binary[header_bytes:]
239 return np.fromstring(data, dtype=dtype)
240
241 def wait_for_srq(self, timeout=25, delay=0.1):
242 """ Block until a SRQ, and leave the bit high
243
244 :param timeout: Timeout duration in seconds
245 :param delay: Time delay between checking SRQ in seconds
246 """
247 self.connection.wait_for_srq(timeout * 1000)
248
249 def flush_read_buffer(self):
250 """ Flush and discard the input buffer
251
252 As detailed by pyvisa, discard the read buffer contents and if data was present
253 in the read buffer and no END-indicator was present, read from the device until
254 encountering an END indicator (which causes loss of data).
255 """
256 try:
257 self.connection.flush(pyvisa.constants.BufferOperation.discard_read_buffer)
258 except NotImplementedError:
259 # NotImplementedError is raised when using resource types other than `asrl`
260 # in conjunction with pyvisa-py.
261 # Upstream issue: https://github.com/pyvisa/pyvisa-py/issues/348
262 # fake discarding the read buffer by reading all available messages.
263 timeout = self.connection.timeout
264 self.connection.timeout = 0
265 try:
266 self.read_bytes(-1)
267 except pyvisa.errors.VisaIOError:
268 pass
269 finally:
270 self.connection.timeout = timeout
271
272 def __repr__(self):
273 return "<VISAAdapter(resource='%s')>" % self.connection.resource_name
274
[end of pymeasure/adapters/visa.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pymeasure/adapters/visa.py b/pymeasure/adapters/visa.py
--- a/pymeasure/adapters/visa.py
+++ b/pymeasure/adapters/visa.py
@@ -171,7 +171,7 @@
"""Read a certain number of bytes from the instrument.
:param int count: Number of bytes to read. A value of -1 indicates to
- read from the whole read buffer.
+ read from the whole read buffer until timeout.
:param bool break_on_termchar: Stop reading at a termination character.
:param \\**kwargs: Keyword arguments for the connection itself.
:returns bytes: Bytes response of the instrument (including termination).
@@ -181,13 +181,17 @@
elif break_on_termchar:
return self.connection.read_raw(None, **kwargs)
else:
- read_termination = self.connection.read_termination
- self.connection.read_termination = None
- # Try except allows to set the read_termination even after an error.
- try:
- return self.connection.read_raw(**kwargs)
- finally:
- self.connection.read_termination = read_termination
+ # pyvisa's `read_raw` reads until newline, if no termination_character defined
+ # and if not configured to stop at a termination lane etc.
+ # see https://github.com/pyvisa/pyvisa/issues/728
+ result = bytearray()
+ while True:
+ try:
+ result.extend(self.connection.read_bytes(1))
+ except pyvisa.errors.VisaIOError as exc:
+ if exc.error_code == pyvisa.constants.StatusCode.error_timeout:
+ return bytes(result)
+ raise
def ask(self, command):
""" Writes the command to the instrument and returns the resulting
|
{"golden_diff": "diff --git a/pymeasure/adapters/visa.py b/pymeasure/adapters/visa.py\n--- a/pymeasure/adapters/visa.py\n+++ b/pymeasure/adapters/visa.py\n@@ -171,7 +171,7 @@\n \"\"\"Read a certain number of bytes from the instrument.\n \n :param int count: Number of bytes to read. A value of -1 indicates to\n- read from the whole read buffer.\n+ read from the whole read buffer until timeout.\n :param bool break_on_termchar: Stop reading at a termination character.\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n :returns bytes: Bytes response of the instrument (including termination).\n@@ -181,13 +181,17 @@\n elif break_on_termchar:\n return self.connection.read_raw(None, **kwargs)\n else:\n- read_termination = self.connection.read_termination\n- self.connection.read_termination = None\n- # Try except allows to set the read_termination even after an error.\n- try:\n- return self.connection.read_raw(**kwargs)\n- finally:\n- self.connection.read_termination = read_termination\n+ # pyvisa's `read_raw` reads until newline, if no termination_character defined\n+ # and if not configured to stop at a termination lane etc.\n+ # see https://github.com/pyvisa/pyvisa/issues/728\n+ result = bytearray()\n+ while True:\n+ try:\n+ result.extend(self.connection.read_bytes(1))\n+ except pyvisa.errors.VisaIOError as exc:\n+ if exc.error_code == pyvisa.constants.StatusCode.error_timeout:\n+ return bytes(result)\n+ raise\n \n def ask(self, command):\n \"\"\" Writes the command to the instrument and returns the resulting\n", "issue": "`VISAAdapter` still terminating on default term character in `read_bytes(-1)`\nPretty odd and specific issue, not sure if this belong here or on PyVisa.\r\n\r\nWhen I try to read the complete buffer in a serial connection using the `VISAAdapter`, it still breaks on the byte corresponding to `\\n`:\r\n\r\n```\r\n def __init__(self, adapter, name=\"Velleman K8090\", timeout=1000, **kwargs):\r\n super().__init__(\r\n adapter,\r\n name=name,\r\n asrl={\"baud_rate\": 19200},\r\n write_termination=\"\",\r\n read_termination=chr(0x0F),\r\n timeout=timeout,\r\n **kwargs,\r\n )\r\n \r\n # ...\r\n\r\n def read(self):\r\n response = self.read_bytes(-1)\r\n\r\n # `response` will end with \"\\n\", even though there are more bytes in the buffer!\r\n```\r\n\r\nEncountered in #859 .\r\n\r\nIt seems the issue is two fold: in this code any termchar should be ignored and it's even responding to the wrong termchar.\r\n\r\nThis is with the `pyvisa-py` backend.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\nfrom warnings import warn\n\nimport pyvisa\nimport numpy as np\n\nfrom .adapter import Adapter\nfrom .protocol import ProtocolAdapter\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\n# noinspection PyPep8Naming,PyUnresolvedReferences\nclass VISAAdapter(Adapter):\n \"\"\" Adapter class for the VISA library, using PyVISA to communicate with instruments.\n\n The workhorse of our library, used by most instruments.\n\n :param resource_name: A\n `VISA resource string <https://pyvisa.readthedocs.io/en/latest/introduction/names.html>`__\n or GPIB address integer that identifies the target of the connection\n :param visa_library: PyVISA VisaLibrary Instance, path of the VISA library or VisaLibrary spec\n string (``@py`` or ``@ivi``). If not given, the default for the platform will be used.\n :param preprocess_reply: An optional callable used to preprocess strings\n received from the instrument. The callable returns the processed string.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `read` method instead.\n\n :param float query_delay: Time in s to wait after writing and before reading.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `wait_for` method instead.\n\n :param log: Parent logger of the 'Adapter' logger.\n :param \\\\**kwargs: Keyword arguments for configuring the PyVISA connection.\n\n :Kwargs:\n Keyword arguments are used to configure the connection created by PyVISA. This is\n complicated by the fact that *which* arguments are valid depends on the interface (e.g.\n serial, GPIB, TCPI/IP, USB) determined by the current ``resource_name``.\n\n A flexible process is used to easily define reasonable *default values* for\n different instrument interfaces, but also enable the instrument user to *override any\n setting* if their situation demands it.\n\n A kwarg that names a pyVISA interface type (most commonly ``asrl``, ``gpib``, ``tcpip``, or\n ``usb``) is a dictionary with keyword arguments defining defaults specific to that\n interface. Example: ``asrl={'baud_rate': 4200}``.\n\n All other kwargs are either generally valid (e.g. ``timeout=500``) or override any default\n settings from the interface-specific entries above. For example, passing\n ``baud_rate=115200`` when connecting via a resource name ``ASRL1`` would override a\n default of 4200 defined as above.\n\n See :ref:`connection_settings` for how to tweak settings when *connecting* to an instrument.\n See :ref:`default_connection_settings` for how to best define default settings when\n *implementing an instrument*.\n \"\"\"\n\n def __init__(self, resource_name, visa_library='', preprocess_reply=None,\n query_delay=0, log=None, **kwargs):\n super().__init__(preprocess_reply=preprocess_reply, log=log)\n if query_delay:\n warn((\"Parameter `query_delay` is deprecated. 
\"\n \"Implement in Instrument's `wait_for` instead.\"),\n FutureWarning)\n kwargs.setdefault(\"query_delay\", query_delay)\n self.query_delay = query_delay\n if isinstance(resource_name, ProtocolAdapter):\n self.connection = resource_name\n self.connection.write_raw = self.connection.write_bytes\n self.read_bytes = self.connection.read_bytes\n return\n elif isinstance(resource_name, VISAAdapter):\n # Allow to reuse the connection.\n self.resource_name = getattr(resource_name, \"resource_name\", None)\n self.connection = resource_name.connection\n self.manager = resource_name.manager\n self.query_delay = resource_name.query_delay\n return\n elif isinstance(resource_name, int):\n resource_name = \"GPIB0::%d::INSTR\" % resource_name\n\n self.resource_name = resource_name\n self.manager = pyvisa.ResourceManager(visa_library)\n\n # Clean up kwargs considering the interface type matching resource_name\n if_type = self.manager.resource_info(self.resource_name).interface_type\n for key in list(kwargs.keys()): # iterate over a copy of the keys as we modify kwargs\n # Remove all interface-specific kwargs:\n if key in pyvisa.constants.InterfaceType.__members__:\n if getattr(pyvisa.constants.InterfaceType, key) is if_type:\n # For the present interface, dump contents into kwargs first if they are not\n # present already. This way, it is possible to override default values with\n # kwargs passed to Instrument.__init__()\n for k, v in kwargs[key].items():\n kwargs.setdefault(k, v)\n del kwargs[key]\n\n self.connection = self.manager.open_resource(\n resource_name,\n **kwargs\n )\n\n def close(self):\n \"\"\"Close the connection.\n\n .. note::\n\n This closes the connection to the resource for all adapters using\n it currently (e.g. different adapters using the same GPIB line).\n \"\"\"\n super().close()\n try:\n self.manager.close()\n except AttributeError:\n pass # Closed from another adapter using the same connection.\n\n def _write(self, command, **kwargs):\n \"\"\"Write a string command to the instrument appending `write_termination`.\n\n :param str command: Command string to be sent to the instrument\n (without termination).\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n \"\"\"\n self.connection.write(command, **kwargs)\n\n def _write_bytes(self, content, **kwargs):\n \"\"\"Write the bytes `content` to the instrument.\n\n :param bytes content: The bytes to write to the instrument.\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n \"\"\"\n self.connection.write_raw(content, **kwargs)\n\n def _read(self, **kwargs):\n \"\"\"Read up to (excluding) `read_termination` or the whole read buffer.\n\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n :returns str: ASCII response of the instrument (excluding read_termination).\n \"\"\"\n return self.connection.read(**kwargs)\n\n def _read_bytes(self, count, break_on_termchar=False, **kwargs):\n \"\"\"Read a certain number of bytes from the instrument.\n\n :param int count: Number of bytes to read. 
A value of -1 indicates to\n read from the whole read buffer.\n :param bool break_on_termchar: Stop reading at a termination character.\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n :returns bytes: Bytes response of the instrument (including termination).\n \"\"\"\n if count >= 0:\n return self.connection.read_bytes(count, break_on_termchar=break_on_termchar, **kwargs)\n elif break_on_termchar:\n return self.connection.read_raw(None, **kwargs)\n else:\n read_termination = self.connection.read_termination\n self.connection.read_termination = None\n # Try except allows to set the read_termination even after an error.\n try:\n return self.connection.read_raw(**kwargs)\n finally:\n self.connection.read_termination = read_termination\n\n def ask(self, command):\n \"\"\" Writes the command to the instrument and returns the resulting\n ASCII response\n\n .. deprecated:: 0.11\n Call `Instrument.ask` instead.\n\n :param command: SCPI command string to be sent to the instrument\n :returns: String ASCII response of the instrument\n \"\"\"\n warn(\"`Adapter.ask` is deprecated, call `Instrument.ask` instead.\", FutureWarning)\n return self.connection.query(command)\n\n def ask_values(self, command, **kwargs):\n \"\"\" Writes a command to the instrument and returns a list of formatted\n values from the result. This leverages the `query_ascii_values` method\n in PyVISA.\n\n .. deprecated:: 0.11\n Call `Instrument.values` instead.\n\n :param command: SCPI command to be sent to the instrument\n :param \\\\**kwargs: Key-word arguments to pass onto `query_ascii_values`\n :returns: Formatted response of the instrument.\n \"\"\"\n warn(\"`Adapter.ask_values` is deprecated, call `Instrument.values` instead.\",\n FutureWarning)\n\n return self.connection.query_ascii_values(command, **kwargs)\n\n def binary_values(self, command, header_bytes=0, dtype=np.float32):\n \"\"\" Returns a numpy array from a query for binary data\n\n .. 
deprecated:: 0.11\n Call `Instrument.binary_values` instead.\n\n :param command: SCPI command to be sent to the instrument\n :param header_bytes: Integer number of bytes to ignore in header\n :param dtype: The NumPy data type to format the values with\n :returns: NumPy array of values\n \"\"\"\n warn(\"`Adapter.binary_values` is deprecated, call `Instrument.binary_values` instead.\",\n FutureWarning)\n self.connection.write(command)\n binary = self.connection.read_raw()\n # header = binary[:header_bytes]\n data = binary[header_bytes:]\n return np.fromstring(data, dtype=dtype)\n\n def wait_for_srq(self, timeout=25, delay=0.1):\n \"\"\" Block until a SRQ, and leave the bit high\n\n :param timeout: Timeout duration in seconds\n :param delay: Time delay between checking SRQ in seconds\n \"\"\"\n self.connection.wait_for_srq(timeout * 1000)\n\n def flush_read_buffer(self):\n \"\"\" Flush and discard the input buffer\n\n As detailed by pyvisa, discard the read buffer contents and if data was present\n in the read buffer and no END-indicator was present, read from the device until\n encountering an END indicator (which causes loss of data).\n \"\"\"\n try:\n self.connection.flush(pyvisa.constants.BufferOperation.discard_read_buffer)\n except NotImplementedError:\n # NotImplementedError is raised when using resource types other than `asrl`\n # in conjunction with pyvisa-py.\n # Upstream issue: https://github.com/pyvisa/pyvisa-py/issues/348\n # fake discarding the read buffer by reading all available messages.\n timeout = self.connection.timeout\n self.connection.timeout = 0\n try:\n self.read_bytes(-1)\n except pyvisa.errors.VisaIOError:\n pass\n finally:\n self.connection.timeout = timeout\n\n def __repr__(self):\n return \"<VISAAdapter(resource='%s')>\" % self.connection.resource_name\n", "path": "pymeasure/adapters/visa.py"}]}
| 4,039 | 398 |
gh_patches_debug_487
|
rasdani/github-patches
|
git_diff
|
hylang__hy-343
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Translate foo? -> is_foo
Andddd discuss
</issue>
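The requested translation is a small identifier-mangling rule; a sketch of what it amounts to, mirroring the patch shown later in this record (the standalone function is illustrative only, not part of the hy code base):

```
def mangle(name):
    # Illustrative only: a trailing "?" becomes an "is_" prefix, e.g. foo? -> is_foo.
    if name.endswith("?") and name != "?":
        name = "is_%s" % name[:-1]
    return name

assert mangle("foo?") == "is_foo"
```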
<code>
[start of hy/lex/parser.py]
1 # Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a
4 # copy of this software and associated documentation files (the "Software"),
5 # to deal in the Software without restriction, including without limitation
6 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 # and/or sell copies of the Software, and to permit persons to whom the
8 # Software is furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20
21 import sys
22 from functools import wraps
23
24 from rply import ParserGenerator
25
26 from hy.models.complex import HyComplex
27 from hy.models.dict import HyDict
28 from hy.models.expression import HyExpression
29 from hy.models.float import HyFloat
30 from hy.models.integer import HyInteger
31 from hy.models.keyword import HyKeyword
32 from hy.models.lambdalist import HyLambdaListKeyword
33 from hy.models.list import HyList
34 from hy.models.string import HyString
35 from hy.models.symbol import HySymbol
36
37 from .lexer import lexer
38 from .exceptions import LexException, PrematureEndOfInput
39
40
41 pg = ParserGenerator(
42 [rule.name for rule in lexer.rules] + ['$end'],
43 cache_id="hy_parser"
44 )
45
46
47 def set_boundaries(fun):
48 @wraps(fun)
49 def wrapped(p):
50 start = p[0].source_pos
51 end = p[-1].source_pos
52 ret = fun(p)
53 ret.start_line = start.lineno
54 ret.start_column = start.colno
55 if start is not end:
56 ret.end_line = end.lineno
57 ret.end_column = end.colno
58 else:
59 ret.end_line = start.lineno
60 ret.end_column = start.colno + len(p[0].value)
61 return ret
62 return wrapped
63
64
65 def set_quote_boundaries(fun):
66 @wraps(fun)
67 def wrapped(p):
68 start = p[0].source_pos
69 ret = fun(p)
70 ret.start_line = start.lineno
71 ret.start_column = start.colno
72 ret.end_line = p[-1].end_line
73 ret.end_column = p[-1].end_column
74 return ret
75 return wrapped
76
77
78 @pg.production("main : HASHBANG real_main")
79 def main_hashbang(p):
80 return p[1]
81
82
83 @pg.production("main : real_main")
84 def main(p):
85 return p[0]
86
87
88 @pg.production("real_main : list_contents")
89 def real_main(p):
90 return p[0]
91
92
93 @pg.production("real_main : $end")
94 def real_main_empty(p):
95 return []
96
97
98 @pg.production("paren : LPAREN list_contents RPAREN")
99 @set_boundaries
100 def paren(p):
101 return HyExpression(p[1])
102
103
104 @pg.production("paren : LPAREN RPAREN")
105 @set_boundaries
106 def empty_paren(p):
107 return HyExpression([])
108
109
110 @pg.production("list_contents : term list_contents")
111 def list_contents(p):
112 return [p[0]] + p[1]
113
114
115 @pg.production("list_contents : term")
116 def list_contents_single(p):
117 return [p[0]]
118
119
120 @pg.production("term : identifier")
121 @pg.production("term : paren")
122 @pg.production("term : dict")
123 @pg.production("term : list")
124 @pg.production("term : string")
125 def term(p):
126 return p[0]
127
128
129 @pg.production("term : QUOTE term")
130 @set_quote_boundaries
131 def term_quote(p):
132 return HyExpression([HySymbol("quote"), p[1]])
133
134
135 @pg.production("term : QUASIQUOTE term")
136 @set_quote_boundaries
137 def term_quasiquote(p):
138 return HyExpression([HySymbol("quasiquote"), p[1]])
139
140
141 @pg.production("term : UNQUOTE term")
142 @set_quote_boundaries
143 def term_unquote(p):
144 return HyExpression([HySymbol("unquote"), p[1]])
145
146
147 @pg.production("term : UNQUOTESPLICE term")
148 @set_quote_boundaries
149 def term_unquote_splice(p):
150 return HyExpression([HySymbol("unquote_splice"), p[1]])
151
152
153 @pg.production("dict : LCURLY list_contents RCURLY")
154 @set_boundaries
155 def t_dict(p):
156 return HyDict(p[1])
157
158
159 @pg.production("dict : LCURLY RCURLY")
160 @set_boundaries
161 def empty_dict(p):
162 return HyDict([])
163
164
165 @pg.production("list : LBRACKET list_contents RBRACKET")
166 @set_boundaries
167 def t_list(p):
168 return HyList(p[1])
169
170
171 @pg.production("list : LBRACKET RBRACKET")
172 @set_boundaries
173 def t_empty_list(p):
174 return HyList([])
175
176
177 if sys.version_info[0] >= 3:
178 def uni_hystring(s):
179 return HyString(eval(s))
180 else:
181 def uni_hystring(s):
182 return HyString(eval('u'+s))
183
184
185 @pg.production("string : STRING")
186 @set_boundaries
187 def t_string(p):
188 # remove trailing quote
189 s = p[0].value[:-1]
190 # get the header
191 header, s = s.split('"', 1)
192 # remove unicode marker
193 header = header.replace("u", "")
194 # build python string
195 s = header + '"""' + s + '"""'
196 return uni_hystring(s)
197
198
199 @pg.production("identifier : IDENTIFIER")
200 @set_boundaries
201 def t_identifier(p):
202 obj = p[0].value
203
204 try:
205 return HyInteger(obj)
206 except ValueError:
207 pass
208
209 try:
210 return HyFloat(obj)
211 except ValueError:
212 pass
213
214 if obj != 'j':
215 try:
216 return HyComplex(obj)
217 except ValueError:
218 pass
219
220 table = {
221 "true": "True",
222 "false": "False",
223 "null": "None",
224 }
225
226 if obj in table:
227 return HySymbol(table[obj])
228
229 if obj.startswith(":"):
230 return HyKeyword(obj)
231
232 if obj.startswith("&"):
233 return HyLambdaListKeyword(obj)
234
235 if obj.startswith("*") and obj.endswith("*") and obj not in ("*", "**"):
236 obj = obj[1:-1].upper()
237
238 if "-" in obj and obj != "-":
239 obj = obj.replace("-", "_")
240
241 return HySymbol(obj)
242
243
244 @pg.error
245 def error_handler(token):
246 tokentype = token.gettokentype()
247 if tokentype == '$end':
248 raise PrematureEndOfInput
249 else:
250 raise LexException(
251 "Ran into a %s where it wasn't expected at line %s, column %s" %
252 (tokentype, token.source_pos.lineno, token.source_pos.colno)
253 )
254
255
256 parser = pg.build()
257
[end of hy/lex/parser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hy/lex/parser.py b/hy/lex/parser.py
--- a/hy/lex/parser.py
+++ b/hy/lex/parser.py
@@ -238,6 +238,9 @@
if "-" in obj and obj != "-":
obj = obj.replace("-", "_")
+ if obj.endswith("?") and obj != "?":
+ obj = "is_%s" % (obj[:-1])
+
return HySymbol(obj)
|
{"golden_diff": "diff --git a/hy/lex/parser.py b/hy/lex/parser.py\n--- a/hy/lex/parser.py\n+++ b/hy/lex/parser.py\n@@ -238,6 +238,9 @@\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n \n+ if obj.endswith(\"?\") and obj != \"?\":\n+ obj = \"is_%s\" % (obj[:-1])\n+\n return HySymbol(obj)\n", "issue": "Translate foo? -> is_foo \nAndddd discuss \n\n", "before_files": [{"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport sys\nfrom functools import wraps\n\nfrom rply import ParserGenerator\n\nfrom hy.models.complex import HyComplex\nfrom hy.models.dict import HyDict\nfrom hy.models.expression import HyExpression\nfrom hy.models.float import HyFloat\nfrom hy.models.integer import HyInteger\nfrom hy.models.keyword import HyKeyword\nfrom hy.models.lambdalist import HyLambdaListKeyword\nfrom hy.models.list import HyList\nfrom hy.models.string import HyString\nfrom hy.models.symbol import HySymbol\n\nfrom .lexer import lexer\nfrom .exceptions import LexException, PrematureEndOfInput\n\n\npg = ParserGenerator(\n [rule.name for rule in lexer.rules] + ['$end'],\n cache_id=\"hy_parser\"\n)\n\n\ndef set_boundaries(fun):\n @wraps(fun)\n def wrapped(p):\n start = p[0].source_pos\n end = p[-1].source_pos\n ret = fun(p)\n ret.start_line = start.lineno\n ret.start_column = start.colno\n if start is not end:\n ret.end_line = end.lineno\n ret.end_column = end.colno\n else:\n ret.end_line = start.lineno\n ret.end_column = start.colno + len(p[0].value)\n return ret\n return wrapped\n\n\ndef set_quote_boundaries(fun):\n @wraps(fun)\n def wrapped(p):\n start = p[0].source_pos\n ret = fun(p)\n ret.start_line = start.lineno\n ret.start_column = start.colno\n ret.end_line = p[-1].end_line\n ret.end_column = p[-1].end_column\n return ret\n return wrapped\n\n\[email protected](\"main : HASHBANG real_main\")\ndef main_hashbang(p):\n return p[1]\n\n\[email protected](\"main : real_main\")\ndef main(p):\n return p[0]\n\n\[email protected](\"real_main : list_contents\")\ndef real_main(p):\n return p[0]\n\n\[email protected](\"real_main : $end\")\ndef real_main_empty(p):\n return []\n\n\[email protected](\"paren : LPAREN list_contents RPAREN\")\n@set_boundaries\ndef paren(p):\n return HyExpression(p[1])\n\n\[email protected](\"paren : LPAREN RPAREN\")\n@set_boundaries\ndef empty_paren(p):\n return HyExpression([])\n\n\[email protected](\"list_contents : term list_contents\")\ndef list_contents(p):\n return [p[0]] + 
p[1]\n\n\[email protected](\"list_contents : term\")\ndef list_contents_single(p):\n return [p[0]]\n\n\[email protected](\"term : identifier\")\[email protected](\"term : paren\")\[email protected](\"term : dict\")\[email protected](\"term : list\")\[email protected](\"term : string\")\ndef term(p):\n return p[0]\n\n\[email protected](\"term : QUOTE term\")\n@set_quote_boundaries\ndef term_quote(p):\n return HyExpression([HySymbol(\"quote\"), p[1]])\n\n\[email protected](\"term : QUASIQUOTE term\")\n@set_quote_boundaries\ndef term_quasiquote(p):\n return HyExpression([HySymbol(\"quasiquote\"), p[1]])\n\n\[email protected](\"term : UNQUOTE term\")\n@set_quote_boundaries\ndef term_unquote(p):\n return HyExpression([HySymbol(\"unquote\"), p[1]])\n\n\[email protected](\"term : UNQUOTESPLICE term\")\n@set_quote_boundaries\ndef term_unquote_splice(p):\n return HyExpression([HySymbol(\"unquote_splice\"), p[1]])\n\n\[email protected](\"dict : LCURLY list_contents RCURLY\")\n@set_boundaries\ndef t_dict(p):\n return HyDict(p[1])\n\n\[email protected](\"dict : LCURLY RCURLY\")\n@set_boundaries\ndef empty_dict(p):\n return HyDict([])\n\n\[email protected](\"list : LBRACKET list_contents RBRACKET\")\n@set_boundaries\ndef t_list(p):\n return HyList(p[1])\n\n\[email protected](\"list : LBRACKET RBRACKET\")\n@set_boundaries\ndef t_empty_list(p):\n return HyList([])\n\n\nif sys.version_info[0] >= 3:\n def uni_hystring(s):\n return HyString(eval(s))\nelse:\n def uni_hystring(s):\n return HyString(eval('u'+s))\n\n\[email protected](\"string : STRING\")\n@set_boundaries\ndef t_string(p):\n # remove trailing quote\n s = p[0].value[:-1]\n # get the header\n header, s = s.split('\"', 1)\n # remove unicode marker\n header = header.replace(\"u\", \"\")\n # build python string\n s = header + '\"\"\"' + s + '\"\"\"'\n return uni_hystring(s)\n\n\[email protected](\"identifier : IDENTIFIER\")\n@set_boundaries\ndef t_identifier(p):\n obj = p[0].value\n\n try:\n return HyInteger(obj)\n except ValueError:\n pass\n\n try:\n return HyFloat(obj)\n except ValueError:\n pass\n\n if obj != 'j':\n try:\n return HyComplex(obj)\n except ValueError:\n pass\n\n table = {\n \"true\": \"True\",\n \"false\": \"False\",\n \"null\": \"None\",\n }\n\n if obj in table:\n return HySymbol(table[obj])\n\n if obj.startswith(\":\"):\n return HyKeyword(obj)\n\n if obj.startswith(\"&\"):\n return HyLambdaListKeyword(obj)\n\n if obj.startswith(\"*\") and obj.endswith(\"*\") and obj not in (\"*\", \"**\"):\n obj = obj[1:-1].upper()\n\n if \"-\" in obj and obj != \"-\":\n obj = obj.replace(\"-\", \"_\")\n\n return HySymbol(obj)\n\n\[email protected]\ndef error_handler(token):\n tokentype = token.gettokentype()\n if tokentype == '$end':\n raise PrematureEndOfInput\n else:\n raise LexException(\n \"Ran into a %s where it wasn't expected at line %s, column %s\" %\n (tokentype, token.source_pos.lineno, token.source_pos.colno)\n )\n\n\nparser = pg.build()\n", "path": "hy/lex/parser.py"}]}
| 2,831 | 103 |
gh_patches_debug_10936
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-2934
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Docs] GitHub reference in changelog rendered as email
### Summary
In [the changelog for v59.0.0](https://setuptools.pypa.io/en/latest/history.html#v59-0-0), a `distutils` commit is referenced using `pypa/distutils@f1b0a2b` in the RST source. Although it should link to pypa/distutils@f1b0a2b (`https://github.com/pypa/distutils/commit/f1b0a2b`) as GitHub automatically does, it instead renders the URL as `mailto:pypa/distutils@f1b0a2b`, which is incorrect.
### OS / Environment
N/a. This is in the source code.
### Additional Information
I would solve this myself, but I don't know the best solution. I can see that some things like GitHub issues are automatically linked, but I'm not sure whether this should be added to the linking code in [`docs/conf.py`](https://github.com/pypa/setuptools/blob/4b980ef4072a817aae0da3643d0fa70c30fcb6cf/docs/conf.py) or whether the reference should simply be turned into an explicit link by hand.
### Code of Conduct
- [X] I agree to follow the PSF Code of Conduct
</issue>
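The actual fix is not shown in this (truncated) record. Purely as a hypothetical illustration of the `docs/conf.py` route mentioned above, an extra `rst.linker` rule in the same style as the existing ones might look like the snippet below; the pattern, group name, and URL template are assumptions, not the merged change.

```
# Hypothetical rule for linking pypa/distutils@<sha> commit references;
# it would sit alongside the existing dict(...) entries in link_files.
extra_rule = dict(
    pattern=r'pypa/distutils@(?P<distutils_commit>[0-9a-f]+)',
    url='{GH}/pypa/distutils/commit/{distutils_commit}',
)
```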
<code>
[start of docs/conf.py]
1 import os
2 import sys
3
4 extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']
5
6 master_doc = "index"
7
8 link_files = {
9 '../CHANGES.rst': dict(
10 using=dict(
11 BB='https://bitbucket.org',
12 GH='https://github.com',
13 ),
14 replace=[
15 dict(
16 pattern=r'(Issue )?#(?P<issue>\d+)',
17 url='{package_url}/issues/{issue}',
18 ),
19 dict(
20 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
21 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
22 ),
23 dict(
24 pattern=r'Distribute #(?P<distribute>\d+)',
25 url='{BB}/tarek/distribute/issue/{distribute}',
26 ),
27 dict(
28 pattern=r'Buildout #(?P<buildout>\d+)',
29 url='{GH}/buildout/buildout/issues/{buildout}',
30 ),
31 dict(
32 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
33 url='http://bugs.python.org/setuptools/issue{old_setuptools}',
34 ),
35 dict(
36 pattern=r'Jython #(?P<jython>\d+)',
37 url='http://bugs.jython.org/issue{jython}',
38 ),
39 dict(
40 pattern=r'(Python #|bpo-)(?P<python>\d+)',
41 url='http://bugs.python.org/issue{python}',
42 ),
43 dict(
44 pattern=r'Interop #(?P<interop>\d+)',
45 url='{GH}/pypa/interoperability-peps/issues/{interop}',
46 ),
47 dict(
48 pattern=r'Pip #(?P<pip>\d+)',
49 url='{GH}/pypa/pip/issues/{pip}',
50 ),
51 dict(
52 pattern=r'Packaging #(?P<packaging>\d+)',
53 url='{GH}/pypa/packaging/issues/{packaging}',
54 ),
55 dict(
56 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
57 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
58 ),
59 dict(
60 pattern=r'PEP[- ](?P<pep_number>\d+)',
61 url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
62 ),
63 dict(
64 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
65 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
66 ),
67 dict(
68 pattern=r'pypa/distutils#(?P<distutils>\d+)',
69 url='{GH}/pypa/distutils/issues/{distutils}',
70 ),
71 dict(
72 pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
73 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
74 ),
75 ],
76 ),
77 }
78
79 # Be strict about any broken references:
80 nitpicky = True
81
82 # Include Python intersphinx mapping to prevent failures
83 # jaraco/skeleton#51
84 extensions += ['sphinx.ext.intersphinx']
85 intersphinx_mapping = {
86 'python': ('https://docs.python.org/3', None),
87 }
88
89 intersphinx_mapping.update({
90 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)
91 })
92
93 # Add support for linking usernames
94 github_url = 'https://github.com'
95 github_sponsors_url = f'{github_url}/sponsors'
96 extlinks = {
97 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
98 }
99 extensions += ['sphinx.ext.extlinks']
100
101 # Ref: https://github.com/python-attrs/attrs/pull/571/files\
102 # #diff-85987f48f1258d9ee486e3191495582dR82
103 default_role = 'any'
104
105 # HTML theme
106 html_theme = 'furo'
107 html_logo = "images/logo.svg"
108
109 html_theme_options = {
110 "sidebar_hide_name": True,
111 "light_css_variables": {
112 "color-brand-primary": "#336790", # "blue"
113 "color-brand-content": "#336790",
114 },
115 "dark_css_variables": {
116 "color-brand-primary": "#E5B62F", # "yellow"
117 "color-brand-content": "#E5B62F",
118 },
119 }
120
121 # Add support for inline tabs
122 extensions += ['sphinx_inline_tabs']
123
124 # Support for distutils
125
126 # Ref: https://stackoverflow.com/a/30624034/595220
127 nitpick_ignore = [
128 ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs
129 ('envvar', 'DISTUTILS_DEBUG'), # undocumented
130 ('envvar', 'HOME'), # undocumented
131 ('envvar', 'PLAT'), # undocumented
132 ('py:attr', 'CCompiler.language_map'), # undocumented
133 ('py:attr', 'CCompiler.language_order'), # undocumented
134 ('py:class', 'distutils.dist.Distribution'), # undocumented
135 ('py:class', 'distutils.extension.Extension'), # undocumented
136 ('py:class', 'BorlandCCompiler'), # undocumented
137 ('py:class', 'CCompiler'), # undocumented
138 ('py:class', 'CygwinCCompiler'), # undocumented
139 ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented
140 ('py:class', 'FileList'), # undocumented
141 ('py:class', 'IShellLink'), # ref to MS docs
142 ('py:class', 'MSVCCompiler'), # undocumented
143 ('py:class', 'OptionDummy'), # undocumented
144 ('py:class', 'UnixCCompiler'), # undocumented
145 ('py:exc', 'CompileError'), # undocumented
146 ('py:exc', 'DistutilsExecError'), # undocumented
147 ('py:exc', 'DistutilsFileError'), # undocumented
148 ('py:exc', 'LibError'), # undocumented
149 ('py:exc', 'LinkError'), # undocumented
150 ('py:exc', 'PreprocessError'), # undocumented
151 ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented
152 # undocumented:
153 ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),
154 ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented
155 ('py:func', 'distutils.log.debug'), # undocumented
156 ('py:func', 'distutils.spawn.find_executable'), # undocumented
157 ('py:func', 'distutils.spawn.spawn'), # undocumented
158 # TODO: check https://docutils.rtfd.io in the future
159 ('py:mod', 'docutils'), # there's no Sphinx site documenting this
160 ]
161
162 # Allow linking objects on other Sphinx sites seamlessly:
163 intersphinx_mapping.update(
164 python=('https://docs.python.org/3', None),
165 python2=('https://docs.python.org/2', None),
166 )
167
168 # Add support for the unreleased "next-version" change notes
169 extensions += ['sphinxcontrib.towncrier']
170 # Extension needs a path from here to the towncrier config.
171 towncrier_draft_working_directory = '..'
172 # Avoid an empty section for unpublished changes.
173 towncrier_draft_include_empty = False
174
175 extensions += ['jaraco.tidelift']
176
177 # Add icons (aka "favicons") to documentation
178 sys.path.append(os.path.join(os.path.dirname(__file__), '_ext'))
179 extensions += ['_custom_icons']
180
181 # List of dicts with <link> HTML attributes
182 # as defined in https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link
183 # except that ``file`` gets replaced with the correct ``href``
184 icons = [
185 { # "Catch-all" goes first, otherwise some browsers will overwrite
186 "rel": "icon",
187 "type": "image/svg+xml",
188 "file": "images/logo-symbol-only.svg",
189 "sizes": "any"
190 },
191 { # Version with thicker strokes for better visibility at smaller sizes
192 "rel": "icon",
193 "type": "image/svg+xml",
194 "file": "images/favicon.svg",
195 "sizes": "16x16 24x24 32x32 48x48"
196 },
197 # rel="apple-touch-icon" does not support SVG yet
198 ]
199
200 intersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None
201
[end of docs/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -68,6 +68,10 @@
pattern=r'pypa/distutils#(?P<distutils>\d+)',
url='{GH}/pypa/distutils/issues/{distutils}',
),
+ dict(
+ pattern=r'pypa/distutils@(?P<distutils_commit>[\da-f]+)',
+ url='{GH}/pypa/distutils/commit/{distutils_commit}',
+ ),
dict(
pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -68,6 +68,10 @@\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n+ dict(\n+ pattern=r'pypa/distutils@(?P<distutils_commit>[\\da-f]+)',\n+ url='{GH}/pypa/distutils/commit/{distutils_commit}',\n+ ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n", "issue": "[Docs] GitHub reference in changelog rendered as email\n### Summary\n\nIn [the changelog for v59.0.0](https://setuptools.pypa.io/en/latest/history.html#v59-0-0), a `distutils` commit is referenced using `pypa/distutils@f1b0a2b` in the RST source. Although it should link to pypa/distutils@f1b0a2b (`https://github.com/pypa/distutils/commit/f1b0a2b`) as GitHub automatically does, it instead renders the URL as `mailto:pypa/distutils@f1b0a2b`, which is incorrect.\n\n### OS / Environment\n\nN/a. This is in the source code.\n\n### Additional Information\n\nI would solve this myself, but I don't know the best solution. I can see that some things like GitHub issues are automatically linked, but I'm not sure if it should be added to the linking code in [`docs/conf.py`](https://github.com/pypa/setuptools/blob/4b980ef4072a817aae0da3643d0fa70c30fcb6cf/docs/conf.py) or just manually made a link.\n\n### Code of Conduct\n\n- [X] I agree to follow the PSF Code of Conduct\n", "before_files": [{"content": "import os\nimport sys\n\nextensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Include 
Python intersphinx mapping to prevent failures\n# jaraco/skeleton#51\nextensions += ['sphinx.ext.intersphinx']\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n}\n\nintersphinx_mapping.update({\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n})\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# HTML theme\nhtml_theme = 'furo'\nhtml_logo = \"images/logo.svg\"\n\nhtml_theme_options = {\n \"sidebar_hide_name\": True,\n \"light_css_variables\": {\n \"color-brand-primary\": \"#336790\", # \"blue\"\n \"color-brand-content\": \"#336790\",\n },\n \"dark_css_variables\": {\n \"color-brand-primary\": \"#E5B62F\", # \"yellow\"\n \"color-brand-content\": \"#E5B62F\",\n },\n}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n\n# Support for distutils\n\n# Ref: https://stackoverflow.com/a/30624034/595220\nnitpick_ignore = [\n ('c:func', 'SHGetSpecialFolderPath'), # ref to MS docs\n ('envvar', 'DISTUTILS_DEBUG'), # undocumented\n ('envvar', 'HOME'), # undocumented\n ('envvar', 'PLAT'), # undocumented\n ('py:attr', 'CCompiler.language_map'), # undocumented\n ('py:attr', 'CCompiler.language_order'), # undocumented\n ('py:class', 'distutils.dist.Distribution'), # undocumented\n ('py:class', 'distutils.extension.Extension'), # undocumented\n ('py:class', 'BorlandCCompiler'), # undocumented\n ('py:class', 'CCompiler'), # undocumented\n ('py:class', 'CygwinCCompiler'), # undocumented\n ('py:class', 'distutils.dist.DistributionMetadata'), # undocumented\n ('py:class', 'FileList'), # undocumented\n ('py:class', 'IShellLink'), # ref to MS docs\n ('py:class', 'MSVCCompiler'), # undocumented\n ('py:class', 'OptionDummy'), # undocumented\n ('py:class', 'UnixCCompiler'), # undocumented\n ('py:exc', 'CompileError'), # undocumented\n ('py:exc', 'DistutilsExecError'), # undocumented\n ('py:exc', 'DistutilsFileError'), # undocumented\n ('py:exc', 'LibError'), # undocumented\n ('py:exc', 'LinkError'), # undocumented\n ('py:exc', 'PreprocessError'), # undocumented\n ('py:func', 'distutils.CCompiler.new_compiler'), # undocumented\n # undocumented:\n ('py:func', 'distutils.dist.DistributionMetadata.read_pkg_file'),\n ('py:func', 'distutils.file_util._copy_file_contents'), # undocumented\n ('py:func', 'distutils.log.debug'), # undocumented\n ('py:func', 'distutils.spawn.find_executable'), # undocumented\n ('py:func', 'distutils.spawn.spawn'), # undocumented\n # TODO: check https://docutils.rtfd.io in the future\n ('py:mod', 'docutils'), # there's no Sphinx site documenting this\n]\n\n# Allow linking objects on other Sphinx sites seamlessly:\nintersphinx_mapping.update(\n python=('https://docs.python.org/3', None),\n python2=('https://docs.python.org/2', None),\n)\n\n# Add support for the unreleased \"next-version\" change notes\nextensions += ['sphinxcontrib.towncrier']\n# Extension needs a path from here to the towncrier config.\ntowncrier_draft_working_directory = '..'\n# Avoid an empty section for unpublished changes.\ntowncrier_draft_include_empty = False\n\nextensions += ['jaraco.tidelift']\n\n# Add icons (aka \"favicons\") to documentation\nsys.path.append(os.path.join(os.path.dirname(__file__), '_ext'))\nextensions += 
['_custom_icons']\n\n# List of dicts with <link> HTML attributes\n# as defined in https://developer.mozilla.org/en-US/docs/Web/HTML/Element/link\n# except that ``file`` gets replaced with the correct ``href``\nicons = [\n { # \"Catch-all\" goes first, otherwise some browsers will overwrite\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"file\": \"images/logo-symbol-only.svg\",\n \"sizes\": \"any\"\n },\n { # Version with thicker strokes for better visibility at smaller sizes\n \"rel\": \"icon\",\n \"type\": \"image/svg+xml\",\n \"file\": \"images/favicon.svg\",\n \"sizes\": \"16x16 24x24 32x32 48x48\"\n },\n # rel=\"apple-touch-icon\" does not support SVG yet\n]\n\nintersphinx_mapping['pip'] = 'https://pip.pypa.io/en/latest', None\n", "path": "docs/conf.py"}]}
| 3,263 | 179 |
gh_patches_debug_26167
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-969
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Indent JSON data while exporting it as Python code
I was testing out a web API and used the "Export flow as Python code" feature for the first time as user, and noticed an improvement.
Currently we just export the `flow.request.body` as is (independent of it's content type) but mitmproxy's interface is smart and renders different bodies differently (for eg. it indents JSON)
I think we could add this indent behaviour while exporting things as code too.
</issue>
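As a rough sketch of the suggested behaviour, the exporter could pretty-print request bodies that parse as JSON before embedding them in the generated code. The helper below is an assumption-laden illustration (the function name and the plain content-type argument are not mitmproxy API), not the project's implementation:

```python
import json


def format_request_body(content_type, body):
    """Return an indented JSON string if the body parses as JSON, else the raw body."""
    if "application/json" in (content_type or ""):
        try:
            # separators avoid trailing whitespace in the indented output
            return json.dumps(json.loads(body), indent=4, separators=(",", ": "))
        except ValueError:
            pass  # body claimed to be JSON but isn't; fall through to raw export
    return body
```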
<code>
[start of mitmproxy/flow_export.py]
1 import urllib
2 import netlib.http
3 from textwrap import dedent
4
5
6 def curl_command(flow):
7 data = "curl "
8
9 for k, v in flow.request.headers.fields:
10 data += "-H '%s:%s' " % (k, v)
11
12 if flow.request.method != "GET":
13 data += "-X %s " % flow.request.method
14
15 full_url = flow.request.scheme + "://" + flow.request.host + flow.request.path
16 data += "'%s'" % full_url
17
18 if flow.request.content:
19 data += " --data-binary '%s'" % flow.request.content
20
21 return data
22
23
24 def python_code(flow):
25 code = dedent("""
26 import requests
27
28 url = '{url}'
29 {headers}{params}{data}
30 response = requests.request(
31 method='{method}',
32 url=url,{args}
33 )
34
35 print(response.text)
36 """).strip()
37
38 components = map(lambda x: urllib.quote(x, safe=""), flow.request.path_components)
39 url = flow.request.scheme + "://" + flow.request.host + "/" + "/".join(components)
40
41 args = ""
42 headers = ""
43 if flow.request.headers:
44 lines = [" '%s': '%s',\n" % (k, v) for k, v in flow.request.headers.fields]
45 headers += "\nheaders = {\n%s}\n" % "".join(lines)
46 args += "\n headers=headers,"
47
48 params = ""
49 if flow.request.query:
50 lines = [" '%s': '%s',\n" % (k, v) for k, v in flow.request.query]
51 params = "\nparams = {\n%s}\n" % "".join(lines)
52 args += "\n params=params,"
53
54 data = ""
55 if flow.request.body:
56 data = "\ndata = '''%s'''\n" % flow.request.body
57 args += "\n data=data,"
58
59 code = code.format(
60 url=url,
61 headers=headers,
62 params=params,
63 data=data,
64 method=flow.request.method,
65 args=args,
66 )
67
68 return code
69
70
71 def raw_request(flow):
72 data = netlib.http.http1.assemble_request(flow.request)
73 return data
74
[end of mitmproxy/flow_export.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/flow_export.py b/mitmproxy/flow_export.py
--- a/mitmproxy/flow_export.py
+++ b/mitmproxy/flow_export.py
@@ -1,7 +1,10 @@
+import json
import urllib
-import netlib.http
from textwrap import dedent
+import netlib.http
+from netlib.utils import parse_content_type
+
def curl_command(flow):
data = "curl "
@@ -53,8 +56,16 @@
data = ""
if flow.request.body:
- data = "\ndata = '''%s'''\n" % flow.request.body
- args += "\n data=data,"
+ json_obj = is_json(flow.request.headers, flow.request.body)
+ if json_obj:
+ # Without the separators field json.dumps() produces
+ # trailing white spaces: https://bugs.python.org/issue16333
+ data = json.dumps(json_obj, indent=4, separators=(',', ': '))
+ data = "\njson = %s\n" % data
+ args += "\n json=json,"
+ else:
+ data = "\ndata = '''%s'''\n" % flow.request.body
+ args += "\n data=data,"
code = code.format(
url=url,
@@ -71,3 +82,14 @@
def raw_request(flow):
data = netlib.http.http1.assemble_request(flow.request)
return data
+
+
+def is_json(headers, content):
+ if headers:
+ ct = parse_content_type(headers.get("content-type", ""))
+ if ct and "%s/%s" % (ct[0], ct[1]) == "application/json":
+ try:
+ return json.loads(content)
+ except ValueError:
+ return False
+ return False
|
{"golden_diff": "diff --git a/mitmproxy/flow_export.py b/mitmproxy/flow_export.py\n--- a/mitmproxy/flow_export.py\n+++ b/mitmproxy/flow_export.py\n@@ -1,7 +1,10 @@\n+import json\n import urllib\n-import netlib.http\n from textwrap import dedent\n \n+import netlib.http\n+from netlib.utils import parse_content_type\n+\n \n def curl_command(flow):\n data = \"curl \"\n@@ -53,8 +56,16 @@\n \n data = \"\"\n if flow.request.body:\n- data = \"\\ndata = '''%s'''\\n\" % flow.request.body\n- args += \"\\n data=data,\"\n+ json_obj = is_json(flow.request.headers, flow.request.body)\n+ if json_obj:\n+ # Without the separators field json.dumps() produces\n+ # trailing white spaces: https://bugs.python.org/issue16333\n+ data = json.dumps(json_obj, indent=4, separators=(',', ': '))\n+ data = \"\\njson = %s\\n\" % data\n+ args += \"\\n json=json,\"\n+ else:\n+ data = \"\\ndata = '''%s'''\\n\" % flow.request.body\n+ args += \"\\n data=data,\"\n \n code = code.format(\n url=url,\n@@ -71,3 +82,14 @@\n def raw_request(flow):\n data = netlib.http.http1.assemble_request(flow.request)\n return data\n+\n+\n+def is_json(headers, content):\n+ if headers:\n+ ct = parse_content_type(headers.get(\"content-type\", \"\"))\n+ if ct and \"%s/%s\" % (ct[0], ct[1]) == \"application/json\":\n+ try:\n+ return json.loads(content)\n+ except ValueError:\n+ return False\n+ return False\n", "issue": "Indent JSON data while exporting it as Python code\nI was testing out a web API and used the \"Export flow as Python code\" feature for the first time as user, and noticed an improvement.\n\nCurrently we just export the `flow.request.body` as is (independent of it's content type) but mitmproxy's interface is smart and renders different bodies differently (for eg. it indents JSON)\n\nI think we could add this indent behaviour while exporting things as code too.\n\n", "before_files": [{"content": "import urllib\nimport netlib.http\nfrom textwrap import dedent\n\n\ndef curl_command(flow):\n data = \"curl \"\n\n for k, v in flow.request.headers.fields:\n data += \"-H '%s:%s' \" % (k, v)\n\n if flow.request.method != \"GET\":\n data += \"-X %s \" % flow.request.method\n\n full_url = flow.request.scheme + \"://\" + flow.request.host + flow.request.path\n data += \"'%s'\" % full_url\n\n if flow.request.content:\n data += \" --data-binary '%s'\" % flow.request.content\n\n return data\n\n\ndef python_code(flow):\n code = dedent(\"\"\"\n import requests\n\n url = '{url}'\n {headers}{params}{data}\n response = requests.request(\n method='{method}',\n url=url,{args}\n )\n\n print(response.text)\n \"\"\").strip()\n\n components = map(lambda x: urllib.quote(x, safe=\"\"), flow.request.path_components)\n url = flow.request.scheme + \"://\" + flow.request.host + \"/\" + \"/\".join(components)\n\n args = \"\"\n headers = \"\"\n if flow.request.headers:\n lines = [\" '%s': '%s',\\n\" % (k, v) for k, v in flow.request.headers.fields]\n headers += \"\\nheaders = {\\n%s}\\n\" % \"\".join(lines)\n args += \"\\n headers=headers,\"\n\n params = \"\"\n if flow.request.query:\n lines = [\" '%s': '%s',\\n\" % (k, v) for k, v in flow.request.query]\n params = \"\\nparams = {\\n%s}\\n\" % \"\".join(lines)\n args += \"\\n params=params,\"\n\n data = \"\"\n if flow.request.body:\n data = \"\\ndata = '''%s'''\\n\" % flow.request.body\n args += \"\\n data=data,\"\n\n code = code.format(\n url=url,\n headers=headers,\n params=params,\n data=data,\n method=flow.request.method,\n args=args,\n )\n\n return code\n\n\ndef raw_request(flow):\n data = 
netlib.http.http1.assemble_request(flow.request)\n return data\n", "path": "mitmproxy/flow_export.py"}]}
| 1,260 | 408 |
gh_patches_debug_12170
|
rasdani/github-patches
|
git_diff
|
obspy__obspy-3209
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pop check_compression from argument list for readers?
I wrote a small io plugin for ObsPy events based on zipped files, see https://github.com/trichter/obspycsv. Because ObsPy automatically unpacks zip files, I had some difficulties getting it working.
I found the check_compression argument in the uncompress_file decorator with which it's working fine. I think, however, that it should be popped from the argument list [here](https://github.com/obspy/obspy/blob/master/obspy/core/util/decorator.py#L139). Otherwise:
```py
In [1]: ev = read_events()
In [2]: ev.write('test.xml', 'QUAKEML')
In [3]: read_events('test.xml', check_compression=False)
TypeError: _read_quakeml() got an unexpected keyword argument 'check_compression'
```
Ideally, a plugin could define on its own if the compression check should be skipped, e.g. by setting an additional entry point. I see, however, that this feature needs quite some refactoring of the reader code.
</issue>
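One conceivable way to let a zip-based plugin opt out of the automatic decompression is to tag the archive itself so that the `uncompress_file` decorator can recognise and skip it. The snippet below is only a sketch of that idea — the file name and contents are made up, and the marker string is one possible convention:

```python
import zipfile

# Plugin writer side: embed a marker in the zip comment so ObsPy's
# uncompress_file decorator knows not to extract this archive.
with zipfile.ZipFile("events.csz", "w") as zf:
    zf.writestr("events.csv", "id,time,lat,lon\n")
    zf.comment = b"obspy_no_uncompress"

# Decorator side: check the marker before deciding to uncompress.
with zipfile.ZipFile("events.csz") as zf:
    skip_uncompress = b"obspy_no_uncompress" in zf.comment
print(skip_uncompress)  # True
```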
<code>
[start of obspy/core/util/decorator.py]
1 # -*- coding: utf-8 -*-
2 """
3 Decorator used in ObsPy.
4
5 :copyright:
6 The ObsPy Development Team ([email protected])
7 :license:
8 GNU Lesser General Public License, Version 3
9 (https://www.gnu.org/copyleft/lesser.html)
10 """
11 import functools
12 import inspect
13 from pathlib import Path
14 import re
15 import socket
16 import tarfile
17 import unittest
18 import warnings
19 import zipfile
20
21 import numpy as np
22 from decorator import decorator
23
24 from obspy.core.util import get_example_file
25 from obspy.core.util.base import NamedTemporaryFile
26 from obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning
27
28
29 def deprecated(warning_msg=None):
30 """
31 This is a decorator which can be used to mark functions as deprecated.
32
33 .. note::
34 Actually, this is not a decorator itself but a decorator factory,
35 returning the correct decorator for the specified options. It can be
36 used just like a decorator.
37
38 It will result in a warning being emitted when the function is used.
39 """
40 @decorator
41 def _deprecated(func, *args, **kwargs):
42 if 'deprecated' in str(func.__doc__).lower():
43 msg = func.__doc__
44 elif warning_msg:
45 msg = warning_msg
46 func.__doc__ = warning_msg
47 else:
48 msg = "Call to deprecated function %s." % func.__name__
49 warnings.warn(msg, category=ObsPyDeprecationWarning, stacklevel=3)
50 return func(*args, **kwargs)
51 return _deprecated
52
53
54 def deprecated_keywords(keywords):
55 """
56 Decorator for marking keywords as deprecated.
57
58 .. note::
59 Actually, this is not a decorator itself but a decorator factory,
60 returning the correct decorator for the specified options. It can be
61 used just like a decorator.
62
63 :type keywords: dict
64 :param keywords: old/new keyword names as key/value pairs.
65 """
66 def fdec(func):
67 fname = func.__name__
68 msg = "Deprecated keyword %s in %s() call - please use %s instead."
69 msg2 = "Deprecated keyword %s in %s() call - ignoring."
70 msg3 = ("Conflicting deprecated keywords (%s) in %s() call"
71 " - please use new '%s' keyword instead.")
72
73 @functools.wraps(func)
74 def echo_func(*args, **kwargs):
75 # check if multiple deprecated keywords get mapped to the same new
76 # keyword
77 new_keyword_appearance_counts = dict.fromkeys(keywords.values(), 0)
78 for key, new_key in keywords.items():
79 if key in kwargs:
80 new_keyword_appearance_counts[new_key] += 1
81 for key_ in keywords.values():
82 # ignore `None` as new value, it means that no mapping is
83 # happening..
84 if key_ is None:
85 continue
86 if new_keyword_appearance_counts[key_] > 1:
87 conflicting_keys = ", ".join(
88 [old_key for old_key, new_key in keywords.items()
89 if new_key == key_])
90 raise Exception(msg3 % (conflicting_keys, fname, new_key))
91 # map deprecated keywords to new keywords
92 for kw in list(kwargs):
93 if kw in keywords:
94 nkw = keywords[kw]
95 if nkw is None:
96 warnings.warn(msg2 % (kw, fname),
97 category=ObsPyDeprecationWarning,
98 stacklevel=3)
99 else:
100 warnings.warn(msg % (kw, fname, nkw),
101 category=ObsPyDeprecationWarning,
102 stacklevel=3)
103 kwargs[nkw] = kwargs[kw]
104 del kwargs[kw]
105 return func(*args, **kwargs)
106 return echo_func
107
108 return fdec
109
110
111 @decorator
112 def skip_on_network_error(func, *args, **kwargs):
113 """
114 Decorator for unittest to mark test routines that fail with certain network
115 errors (e.g. timeouts) as "skipped" rather than "Error".
116 """
117 try:
118 return func(*args, **kwargs)
119 ###################################################
120 # add more except clauses like this to add other
121 # network errors that should be skipped
122 except socket.timeout as e:
123 if str(e) == "timed out":
124 raise unittest.SkipTest(str(e))
125 ###################################################
126 except socket.error as e:
127 if str(e) == "[Errno 110] Connection timed out":
128 raise unittest.SkipTest(str(e))
129 # general except to be able to generally reraise
130 except Exception:
131 raise
132
133
134 @decorator
135 def uncompress_file(func, filename, *args, **kwargs):
136 """
137 Decorator used for temporary uncompressing file if .gz or .bz2 archive.
138 """
139 if not kwargs.pop('check_compression', True):
140 return func(filename, *args, **kwargs)
141 if not isinstance(filename, str):
142 return func(filename, *args, **kwargs)
143 elif not Path(filename).exists():
144 msg = "File not found '%s'" % (filename)
145 raise IOError(msg)
146 # check if we got a compressed file or archive
147 obj_list = []
148 if tarfile.is_tarfile(filename):
149 try:
150 # reading with transparent compression
151 with tarfile.open(filename, 'r|*') as tar:
152 for tarinfo in tar:
153 # only handle regular files
154 if not tarinfo.isfile():
155 continue
156 data = tar.extractfile(tarinfo).read()
157 # Skip empty files - we don't need them no matter what
158 # and it guards against rare cases where waveforms files
159 # are also slightly valid tar-files.
160 if not data:
161 continue
162 obj_list.append(data)
163 except Exception:
164 pass
165 elif zipfile.is_zipfile(filename):
166 try:
167 zip = zipfile.ZipFile(filename)
168 obj_list = [zip.read(name) for name in zip.namelist()]
169 except Exception:
170 pass
171 elif filename.endswith('.bz2'):
172 # bz2 module
173 try:
174 import bz2
175 with open(filename, 'rb') as fp:
176 obj_list.append(bz2.decompress(fp.read()))
177 except Exception:
178 pass
179 elif filename.endswith('.gz'):
180 # gzip module
181 try:
182 import gzip
183 with gzip.open(filename, 'rb') as fp:
184 obj_list.append(fp.read())
185 except Exception:
186 pass
187 # handle results
188 if obj_list:
189 # write results to temporary files
190 result = None
191 for obj in obj_list:
192 with NamedTemporaryFile() as tempfile:
193 tempfile._fileobj.write(obj)
194 stream = func(tempfile.name, *args, **kwargs)
195 # just add other stream objects to first stream
196 if result is None:
197 result = stream
198 else:
199 result += stream
200 else:
201 # no compressions
202 result = func(filename, *args, **kwargs)
203 return result
204
205
206 @decorator
207 def raise_if_masked(func, *args, **kwargs):
208 """
209 Raises if the first argument (self in case of methods) is a Trace with
210 masked values or a Stream containing a Trace with masked values.
211 """
212 arrays = []
213 # first arg seems to be a Stream
214 if hasattr(args[0], "traces"):
215 arrays = [tr.data for tr in args[0]]
216 # first arg seems to be a Trace
217 if hasattr(args[0], "data") and isinstance(args[0].data, np.ndarray):
218 arrays = [args[0].data]
219 for arr in arrays:
220 if np.ma.is_masked(arr):
221 msg = "Trace with masked values found. This is not " + \
222 "supported for this operation. Try the split() " + \
223 "method on Trace/Stream to produce a Stream with " + \
224 "unmasked Traces."
225 raise NotImplementedError(msg)
226 return func(*args, **kwargs)
227
228
229 @decorator
230 def skip_if_no_data(func, *args, **kwargs):
231 """
232 Does nothing if the first argument (self in case of methods) is a Trace
233 with no data in it.
234 """
235 if not args[0]:
236 return
237 return func(*args, **kwargs)
238
239
240 def map_example_filename(arg_kwarg_name):
241 """
242 Decorator that replaces "/path/to/filename" patterns in the arg or kwarg
243 of the specified name with the correct file path. If the pattern is not
244 encountered nothing is done.
245
246 .. note::
247 Actually, this is not a decorator itself but a decorator factory,
248 returning the correct decorator for the specified options. It can be
249 used just like a decorator.
250
251 :type arg_kwarg_name: str
252 :param arg_kwarg_name: name of the arg/kwarg that should be (tried) to map
253 """
254 @decorator
255 def _map_example_filename(func, *args, **kwargs):
256 prefix = '/path/to/'
257 # check kwargs
258 if arg_kwarg_name in kwargs:
259 if isinstance(kwargs[arg_kwarg_name], str):
260 if re.match(prefix, kwargs[arg_kwarg_name]):
261 try:
262 kwargs[arg_kwarg_name] = \
263 get_example_file(kwargs[arg_kwarg_name][9:])
264 # file not found by get_example_file:
265 except IOError:
266 pass
267 # check args
268 else:
269 try:
270 inspected_args = [
271 p.name
272 for p in inspect.signature(func).parameters.values()
273 ]
274 except AttributeError:
275 inspected_args = inspect.getargspec(func).args
276 try:
277 ind = inspected_args.index(arg_kwarg_name)
278 except ValueError:
279 pass
280 else:
281 if ind < len(args) and isinstance(args[ind], str):
282 # need to check length of args from inspect
283 if re.match(prefix, args[ind]):
284 try:
285 args = list(args)
286 args[ind] = get_example_file(args[ind][9:])
287 args = tuple(args)
288 # file not found by get_example_file:
289 except IOError:
290 pass
291 return func(*args, **kwargs)
292 return _map_example_filename
293
294
295 if __name__ == '__main__':
296 import doctest
297 doctest.testmod(exclude_empty=True)
298
[end of obspy/core/util/decorator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/obspy/core/util/decorator.py b/obspy/core/util/decorator.py
--- a/obspy/core/util/decorator.py
+++ b/obspy/core/util/decorator.py
@@ -164,8 +164,14 @@
pass
elif zipfile.is_zipfile(filename):
try:
- zip = zipfile.ZipFile(filename)
- obj_list = [zip.read(name) for name in zip.namelist()]
+ with zipfile.ZipFile(filename) as zip:
+ if b'obspy_no_uncompress' in zip.comment:
+ # be nice to plugins based on zip format
+ # do not uncompress the file if tag is present
+ # see issue #3192
+ obj_list = None
+ else:
+ obj_list = [zip.read(name) for name in zip.namelist()]
except Exception:
pass
elif filename.endswith('.bz2'):
|
{"golden_diff": "diff --git a/obspy/core/util/decorator.py b/obspy/core/util/decorator.py\n--- a/obspy/core/util/decorator.py\n+++ b/obspy/core/util/decorator.py\n@@ -164,8 +164,14 @@\n pass\n elif zipfile.is_zipfile(filename):\n try:\n- zip = zipfile.ZipFile(filename)\n- obj_list = [zip.read(name) for name in zip.namelist()]\n+ with zipfile.ZipFile(filename) as zip:\n+ if b'obspy_no_uncompress' in zip.comment:\n+ # be nice to plugins based on zip format\n+ # do not uncompress the file if tag is present\n+ # see issue #3192\n+ obj_list = None\n+ else:\n+ obj_list = [zip.read(name) for name in zip.namelist()]\n except Exception:\n pass\n elif filename.endswith('.bz2'):\n", "issue": "Pop check_compression from argument list for readers?\nI wrote a small io plugin for ObsPy events based on zipped files, see https://github.com/trichter/obspycsv. Because ObsPy automatically unpacks zip files, I had some difficulties to get it working.\r\nI found the check_compression argument in the uncompress_file decorator with which its working fine. I think, however, that it should be popped from the argument list [here](https://github.com/obspy/obspy/blob/master/obspy/core/util/decorator.py#L139). Otherwise:\r\n\r\n```py\r\nIn [1]: ev = read_events()\r\nIn [2]: ev.write('test.xml', 'QUAKEML')\r\nIn [3]: read_events('test.xml', check_compression=False)\r\nTypeError: _read_quakeml() got an unexpected keyword argument 'check_compression'\r\n```\r\n\r\nIdeally, a plugin could define on its own if the compression check should be skipped, e.g. by setting an additional entry point. I see, however, that this feature needs quite some refactoring of the reader code.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDecorator used in ObsPy.\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nimport functools\nimport inspect\nfrom pathlib import Path\nimport re\nimport socket\nimport tarfile\nimport unittest\nimport warnings\nimport zipfile\n\nimport numpy as np\nfrom decorator import decorator\n\nfrom obspy.core.util import get_example_file\nfrom obspy.core.util.base import NamedTemporaryFile\nfrom obspy.core.util.deprecation_helpers import ObsPyDeprecationWarning\n\n\ndef deprecated(warning_msg=None):\n \"\"\"\n This is a decorator which can be used to mark functions as deprecated.\n\n .. note::\n Actually, this is not a decorator itself but a decorator factory,\n returning the correct decorator for the specified options. It can be\n used just like a decorator.\n\n It will result in a warning being emitted when the function is used.\n \"\"\"\n @decorator\n def _deprecated(func, *args, **kwargs):\n if 'deprecated' in str(func.__doc__).lower():\n msg = func.__doc__\n elif warning_msg:\n msg = warning_msg\n func.__doc__ = warning_msg\n else:\n msg = \"Call to deprecated function %s.\" % func.__name__\n warnings.warn(msg, category=ObsPyDeprecationWarning, stacklevel=3)\n return func(*args, **kwargs)\n return _deprecated\n\n\ndef deprecated_keywords(keywords):\n \"\"\"\n Decorator for marking keywords as deprecated.\n\n .. note::\n Actually, this is not a decorator itself but a decorator factory,\n returning the correct decorator for the specified options. 
It can be\n used just like a decorator.\n\n :type keywords: dict\n :param keywords: old/new keyword names as key/value pairs.\n \"\"\"\n def fdec(func):\n fname = func.__name__\n msg = \"Deprecated keyword %s in %s() call - please use %s instead.\"\n msg2 = \"Deprecated keyword %s in %s() call - ignoring.\"\n msg3 = (\"Conflicting deprecated keywords (%s) in %s() call\"\n \" - please use new '%s' keyword instead.\")\n\n @functools.wraps(func)\n def echo_func(*args, **kwargs):\n # check if multiple deprecated keywords get mapped to the same new\n # keyword\n new_keyword_appearance_counts = dict.fromkeys(keywords.values(), 0)\n for key, new_key in keywords.items():\n if key in kwargs:\n new_keyword_appearance_counts[new_key] += 1\n for key_ in keywords.values():\n # ignore `None` as new value, it means that no mapping is\n # happening..\n if key_ is None:\n continue\n if new_keyword_appearance_counts[key_] > 1:\n conflicting_keys = \", \".join(\n [old_key for old_key, new_key in keywords.items()\n if new_key == key_])\n raise Exception(msg3 % (conflicting_keys, fname, new_key))\n # map deprecated keywords to new keywords\n for kw in list(kwargs):\n if kw in keywords:\n nkw = keywords[kw]\n if nkw is None:\n warnings.warn(msg2 % (kw, fname),\n category=ObsPyDeprecationWarning,\n stacklevel=3)\n else:\n warnings.warn(msg % (kw, fname, nkw),\n category=ObsPyDeprecationWarning,\n stacklevel=3)\n kwargs[nkw] = kwargs[kw]\n del kwargs[kw]\n return func(*args, **kwargs)\n return echo_func\n\n return fdec\n\n\n@decorator\ndef skip_on_network_error(func, *args, **kwargs):\n \"\"\"\n Decorator for unittest to mark test routines that fail with certain network\n errors (e.g. timeouts) as \"skipped\" rather than \"Error\".\n \"\"\"\n try:\n return func(*args, **kwargs)\n ###################################################\n # add more except clauses like this to add other\n # network errors that should be skipped\n except socket.timeout as e:\n if str(e) == \"timed out\":\n raise unittest.SkipTest(str(e))\n ###################################################\n except socket.error as e:\n if str(e) == \"[Errno 110] Connection timed out\":\n raise unittest.SkipTest(str(e))\n # general except to be able to generally reraise\n except Exception:\n raise\n\n\n@decorator\ndef uncompress_file(func, filename, *args, **kwargs):\n \"\"\"\n Decorator used for temporary uncompressing file if .gz or .bz2 archive.\n \"\"\"\n if not kwargs.pop('check_compression', True):\n return func(filename, *args, **kwargs)\n if not isinstance(filename, str):\n return func(filename, *args, **kwargs)\n elif not Path(filename).exists():\n msg = \"File not found '%s'\" % (filename)\n raise IOError(msg)\n # check if we got a compressed file or archive\n obj_list = []\n if tarfile.is_tarfile(filename):\n try:\n # reading with transparent compression\n with tarfile.open(filename, 'r|*') as tar:\n for tarinfo in tar:\n # only handle regular files\n if not tarinfo.isfile():\n continue\n data = tar.extractfile(tarinfo).read()\n # Skip empty files - we don't need them no matter what\n # and it guards against rare cases where waveforms files\n # are also slightly valid tar-files.\n if not data:\n continue\n obj_list.append(data)\n except Exception:\n pass\n elif zipfile.is_zipfile(filename):\n try:\n zip = zipfile.ZipFile(filename)\n obj_list = [zip.read(name) for name in zip.namelist()]\n except Exception:\n pass\n elif filename.endswith('.bz2'):\n # bz2 module\n try:\n import bz2\n with open(filename, 'rb') as fp:\n 
obj_list.append(bz2.decompress(fp.read()))\n except Exception:\n pass\n elif filename.endswith('.gz'):\n # gzip module\n try:\n import gzip\n with gzip.open(filename, 'rb') as fp:\n obj_list.append(fp.read())\n except Exception:\n pass\n # handle results\n if obj_list:\n # write results to temporary files\n result = None\n for obj in obj_list:\n with NamedTemporaryFile() as tempfile:\n tempfile._fileobj.write(obj)\n stream = func(tempfile.name, *args, **kwargs)\n # just add other stream objects to first stream\n if result is None:\n result = stream\n else:\n result += stream\n else:\n # no compressions\n result = func(filename, *args, **kwargs)\n return result\n\n\n@decorator\ndef raise_if_masked(func, *args, **kwargs):\n \"\"\"\n Raises if the first argument (self in case of methods) is a Trace with\n masked values or a Stream containing a Trace with masked values.\n \"\"\"\n arrays = []\n # first arg seems to be a Stream\n if hasattr(args[0], \"traces\"):\n arrays = [tr.data for tr in args[0]]\n # first arg seems to be a Trace\n if hasattr(args[0], \"data\") and isinstance(args[0].data, np.ndarray):\n arrays = [args[0].data]\n for arr in arrays:\n if np.ma.is_masked(arr):\n msg = \"Trace with masked values found. This is not \" + \\\n \"supported for this operation. Try the split() \" + \\\n \"method on Trace/Stream to produce a Stream with \" + \\\n \"unmasked Traces.\"\n raise NotImplementedError(msg)\n return func(*args, **kwargs)\n\n\n@decorator\ndef skip_if_no_data(func, *args, **kwargs):\n \"\"\"\n Does nothing if the first argument (self in case of methods) is a Trace\n with no data in it.\n \"\"\"\n if not args[0]:\n return\n return func(*args, **kwargs)\n\n\ndef map_example_filename(arg_kwarg_name):\n \"\"\"\n Decorator that replaces \"/path/to/filename\" patterns in the arg or kwarg\n of the specified name with the correct file path. If the pattern is not\n encountered nothing is done.\n\n .. note::\n Actually, this is not a decorator itself but a decorator factory,\n returning the correct decorator for the specified options. It can be\n used just like a decorator.\n\n :type arg_kwarg_name: str\n :param arg_kwarg_name: name of the arg/kwarg that should be (tried) to map\n \"\"\"\n @decorator\n def _map_example_filename(func, *args, **kwargs):\n prefix = '/path/to/'\n # check kwargs\n if arg_kwarg_name in kwargs:\n if isinstance(kwargs[arg_kwarg_name], str):\n if re.match(prefix, kwargs[arg_kwarg_name]):\n try:\n kwargs[arg_kwarg_name] = \\\n get_example_file(kwargs[arg_kwarg_name][9:])\n # file not found by get_example_file:\n except IOError:\n pass\n # check args\n else:\n try:\n inspected_args = [\n p.name\n for p in inspect.signature(func).parameters.values()\n ]\n except AttributeError:\n inspected_args = inspect.getargspec(func).args\n try:\n ind = inspected_args.index(arg_kwarg_name)\n except ValueError:\n pass\n else:\n if ind < len(args) and isinstance(args[ind], str):\n # need to check length of args from inspect\n if re.match(prefix, args[ind]):\n try:\n args = list(args)\n args[ind] = get_example_file(args[ind][9:])\n args = tuple(args)\n # file not found by get_example_file:\n except IOError:\n pass\n return func(*args, **kwargs)\n return _map_example_filename\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(exclude_empty=True)\n", "path": "obspy/core/util/decorator.py"}]}
| 3,768 | 211 |
gh_patches_debug_39291
|
rasdani/github-patches
|
git_diff
|
mabel-dev__opteryx-1375
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
✨ GCS improvements
Create the client object once and reuse it.

List blobs should only return the name of each blob and not any other details.
</issue>
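As a minimal sketch of both points — construct the client once, and ask the listing API for names only — something like the following could work. The class is a simplified stand-in rather than the actual connector, and `fields="items(name)"` is the partial-response syntax accepted by `google-cloud-storage`'s `list_blobs`:

```python
from google.cloud import storage


class BlobLister:
    def __init__(self):
        # Create the client once and reuse it for every call.
        self.client = storage.Client()

    def list_blob_names(self, bucket_name: str, prefix: str) -> list:
        # Request only blob names so the listing payload stays small.
        blobs = self.client.list_blobs(bucket_name, prefix=prefix, fields="items(name)")
        return [blob.name for blob in blobs]
```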
<code>
[start of opteryx/connectors/gcp_cloudstorage_connector.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import os
14 from typing import Dict
15 from typing import List
16
17 import pyarrow
18 from orso.schema import FlatColumn
19 from orso.schema import RelationSchema
20 from orso.tools import single_item_cache
21 from orso.types import OrsoTypes
22
23 from opteryx.connectors.base.base_connector import BaseConnector
24 from opteryx.connectors.capabilities import Cacheable
25 from opteryx.connectors.capabilities import Partitionable
26 from opteryx.connectors.capabilities import PredicatePushable
27 from opteryx.exceptions import DatasetNotFoundError
28 from opteryx.exceptions import MissingDependencyError
29 from opteryx.exceptions import UnsupportedFileTypeError
30 from opteryx.utils import paths
31 from opteryx.utils.file_decoders import VALID_EXTENSIONS
32 from opteryx.utils.file_decoders import get_decoder
33
34
35 class GcpCloudStorageConnector(BaseConnector, Cacheable, Partitionable, PredicatePushable):
36 __mode__ = "Blob"
37
38 PUSHABLE_OPS: Dict[str, bool] = {
39 "Eq": True,
40 "NotEq": True,
41 "Gt": True,
42 "GtEq": True,
43 "Lt": True,
44 "LtEq": True,
45 }
46
47 PUSHABLE_TYPES = {OrsoTypes.BOOLEAN, OrsoTypes.DOUBLE, OrsoTypes.INTEGER, OrsoTypes.VARCHAR}
48
49 def __init__(self, credentials=None, **kwargs):
50 try:
51 from google.auth.credentials import AnonymousCredentials
52 from google.cloud import storage
53 except ImportError as err:
54 raise MissingDependencyError(err.name) from err
55
56 BaseConnector.__init__(self, **kwargs)
57 Partitionable.__init__(self, **kwargs)
58 Cacheable.__init__(self, **kwargs)
59 PredicatePushable.__init__(self, **kwargs)
60
61 self.dataset = self.dataset.replace(".", "/")
62 self.credentials = credentials
63
64 # we're going to cache the first blob as the schema and dataset reader
65 # sometimes both start here
66 self.cached_first_blob = None
67
68 def _get_storage_client(self):
69 from google.cloud import storage
70
71 if os.environ.get("STORAGE_EMULATOR_HOST"):
72 from google.auth.credentials import AnonymousCredentials
73
74 return storage.Client(credentials=AnonymousCredentials())
75 else: # pragma: no cover
76 return storage.Client()
77
78 def _get_blob(self, bucket: str, blob_name: str):
79 client = self._get_storage_client()
80
81 gcs_bucket = client.get_bucket(bucket)
82 blob = gcs_bucket.get_blob(blob_name)
83 return blob
84
85 def read_blob(self, *, blob_name, **kwargs):
86 bucket, object_path, name, extension = paths.get_parts(blob_name)
87
88 bucket = bucket.replace("va_data", "va-data")
89 bucket = bucket.replace("data_", "data-")
90
91 blob = self._get_blob(
92 bucket=bucket,
93 blob_name=object_path + "/" + name + extension,
94 )
95 return blob.download_as_bytes()
96
97 @single_item_cache
98 def get_list_of_blob_names(self, *, prefix: str) -> List[str]:
99 bucket, object_path, _, _ = paths.get_parts(prefix)
100 bucket = bucket.replace("va_data", "va-data")
101 bucket = bucket.replace("data_", "data-")
102
103 client = self._get_storage_client()
104
105 gcs_bucket = client.get_bucket(bucket)
106 blobs = client.list_blobs(bucket_or_name=gcs_bucket, prefix=object_path)
107 blobs = (bucket + "/" + blob.name for blob in blobs if not blob.name.endswith("/"))
108 return [blob for blob in blobs if ("." + blob.split(".")[-1].lower()) in VALID_EXTENSIONS]
109
110 def read_dataset(
111 self, columns: list = None, predicates: list = None, **kwargs
112 ) -> pyarrow.Table:
113 blob_names = self.partition_scheme.get_blobs_in_partition(
114 start_date=self.start_date,
115 end_date=self.end_date,
116 blob_list_getter=self.get_list_of_blob_names,
117 prefix=self.dataset,
118 )
119
120 # Check if the first blob was cached earlier
121 # if self.cached_first_blob is not None:
122 # yield self.cached_first_blob # Use cached blob
123 # blob_names = blob_names[1:] # Skip first blob
124 # self.cached_first_blob = None
125
126 for blob_name in blob_names:
127 try:
128 decoder = get_decoder(blob_name)
129 blob_bytes = self.read_blob(blob_name=blob_name, statistics=self.statistics)
130 yield decoder(blob_bytes, projection=columns, selection=predicates)
131 except UnsupportedFileTypeError:
132 pass
133
134 def get_dataset_schema(self) -> RelationSchema:
135 # Try to read the schema from the metastore
136 self.schema = self.read_schema_from_metastore()
137 if self.schema:
138 return self.schema
139
140 # Read first blob for schema inference and cache it
141 record = next(self.read_dataset(), None)
142 self.cached_first_blob = record
143
144 if record is None:
145 raise DatasetNotFoundError(dataset=self.dataset)
146
147 arrow_schema = record.schema
148
149 self.schema = RelationSchema(
150 name=self.dataset,
151 columns=[FlatColumn.from_arrow(field) for field in arrow_schema],
152 )
153
154 return self.schema
155
[end of opteryx/connectors/gcp_cloudstorage_connector.py]
[start of opteryx/__version__.py]
1 __build__ = 189
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 12
31 _revision = 2
32 _status = VersionStatus.RELEASE
33
34 __version__ = f"{_major}.{_minor}.{_revision}" + (
35 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
36 )
37
[end of opteryx/__version__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 189
+__build__ = 193
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -28,8 +28,8 @@
_major = 0
_minor = 12
-_revision = 2
-_status = VersionStatus.RELEASE
+_revision = 3
+_status = VersionStatus.BETA
__version__ = f"{_major}.{_minor}.{_revision}" + (
f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
diff --git a/opteryx/connectors/gcp_cloudstorage_connector.py b/opteryx/connectors/gcp_cloudstorage_connector.py
--- a/opteryx/connectors/gcp_cloudstorage_connector.py
+++ b/opteryx/connectors/gcp_cloudstorage_connector.py
@@ -64,6 +64,7 @@
# we're going to cache the first blob as the schema and dataset reader
# sometimes both start here
self.cached_first_blob = None
+ self.client = self._get_storage_client()
def _get_storage_client(self):
from google.cloud import storage
@@ -76,9 +77,7 @@
return storage.Client()
def _get_blob(self, bucket: str, blob_name: str):
- client = self._get_storage_client()
-
- gcs_bucket = client.get_bucket(bucket)
+ gcs_bucket = self.client.get_bucket(bucket)
blob = gcs_bucket.get_blob(blob_name)
return blob
@@ -100,10 +99,8 @@
bucket = bucket.replace("va_data", "va-data")
bucket = bucket.replace("data_", "data-")
- client = self._get_storage_client()
-
- gcs_bucket = client.get_bucket(bucket)
- blobs = client.list_blobs(bucket_or_name=gcs_bucket, prefix=object_path)
+ gcs_bucket = self.client.get_bucket(bucket)
+ blobs = self.client.list_blobs(bucket_or_name=gcs_bucket, prefix=object_path, fields="items(name)")
blobs = (bucket + "/" + blob.name for blob in blobs if not blob.name.endswith("/"))
return [blob for blob in blobs if ("." + blob.split(".")[-1].lower()) in VALID_EXTENSIONS]
@@ -117,12 +114,6 @@
prefix=self.dataset,
)
- # Check if the first blob was cached earlier
- # if self.cached_first_blob is not None:
- # yield self.cached_first_blob # Use cached blob
- # blob_names = blob_names[1:] # Skip first blob
- # self.cached_first_blob = None
-
for blob_name in blob_names:
try:
decoder = get_decoder(blob_name)
|
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 189\n+__build__ = 193\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n@@ -28,8 +28,8 @@\n \n _major = 0\n _minor = 12\n-_revision = 2\n-_status = VersionStatus.RELEASE\n+_revision = 3\n+_status = VersionStatus.BETA\n \n __version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\ndiff --git a/opteryx/connectors/gcp_cloudstorage_connector.py b/opteryx/connectors/gcp_cloudstorage_connector.py\n--- a/opteryx/connectors/gcp_cloudstorage_connector.py\n+++ b/opteryx/connectors/gcp_cloudstorage_connector.py\n@@ -64,6 +64,7 @@\n # we're going to cache the first blob as the schema and dataset reader\n # sometimes both start here\n self.cached_first_blob = None\n+ self.client = self._get_storage_client()\n \n def _get_storage_client(self):\n from google.cloud import storage\n@@ -76,9 +77,7 @@\n return storage.Client()\n \n def _get_blob(self, bucket: str, blob_name: str):\n- client = self._get_storage_client()\n-\n- gcs_bucket = client.get_bucket(bucket)\n+ gcs_bucket = self.client.get_bucket(bucket)\n blob = gcs_bucket.get_blob(blob_name)\n return blob\n \n@@ -100,10 +99,8 @@\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n \n- client = self._get_storage_client()\n-\n- gcs_bucket = client.get_bucket(bucket)\n- blobs = client.list_blobs(bucket_or_name=gcs_bucket, prefix=object_path)\n+ gcs_bucket = self.client.get_bucket(bucket)\n+ blobs = self.client.list_blobs(bucket_or_name=gcs_bucket, prefix=object_path, fields=\"items(name)\")\n blobs = (bucket + \"/\" + blob.name for blob in blobs if not blob.name.endswith(\"/\"))\n return [blob for blob in blobs if (\".\" + blob.split(\".\")[-1].lower()) in VALID_EXTENSIONS]\n \n@@ -117,12 +114,6 @@\n prefix=self.dataset,\n )\n \n- # Check if the first blob was cached earlier\n- # if self.cached_first_blob is not None:\n- # yield self.cached_first_blob # Use cached blob\n- # blob_names = blob_names[1:] # Skip first blob\n- # self.cached_first_blob = None\n-\n for blob_name in blob_names:\n try:\n decoder = get_decoder(blob_name)\n", "issue": "\u2728 GCS improvements\nCreate the client object once and reuse\n\nList blobs should only return the name of the blob and not any other details \n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom typing import Dict\nfrom typing import List\n\nimport pyarrow\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.tools import single_item_cache\nfrom orso.types import OrsoTypes\n\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.connectors.capabilities import Cacheable\nfrom opteryx.connectors.capabilities import Partitionable\nfrom 
opteryx.connectors.capabilities import PredicatePushable\nfrom opteryx.exceptions import DatasetNotFoundError\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnsupportedFileTypeError\nfrom opteryx.utils import paths\nfrom opteryx.utils.file_decoders import VALID_EXTENSIONS\nfrom opteryx.utils.file_decoders import get_decoder\n\n\nclass GcpCloudStorageConnector(BaseConnector, Cacheable, Partitionable, PredicatePushable):\n __mode__ = \"Blob\"\n\n PUSHABLE_OPS: Dict[str, bool] = {\n \"Eq\": True,\n \"NotEq\": True,\n \"Gt\": True,\n \"GtEq\": True,\n \"Lt\": True,\n \"LtEq\": True,\n }\n\n PUSHABLE_TYPES = {OrsoTypes.BOOLEAN, OrsoTypes.DOUBLE, OrsoTypes.INTEGER, OrsoTypes.VARCHAR}\n\n def __init__(self, credentials=None, **kwargs):\n try:\n from google.auth.credentials import AnonymousCredentials\n from google.cloud import storage\n except ImportError as err:\n raise MissingDependencyError(err.name) from err\n\n BaseConnector.__init__(self, **kwargs)\n Partitionable.__init__(self, **kwargs)\n Cacheable.__init__(self, **kwargs)\n PredicatePushable.__init__(self, **kwargs)\n\n self.dataset = self.dataset.replace(\".\", \"/\")\n self.credentials = credentials\n\n # we're going to cache the first blob as the schema and dataset reader\n # sometimes both start here\n self.cached_first_blob = None\n\n def _get_storage_client(self):\n from google.cloud import storage\n\n if os.environ.get(\"STORAGE_EMULATOR_HOST\"):\n from google.auth.credentials import AnonymousCredentials\n\n return storage.Client(credentials=AnonymousCredentials())\n else: # pragma: no cover\n return storage.Client()\n\n def _get_blob(self, bucket: str, blob_name: str):\n client = self._get_storage_client()\n\n gcs_bucket = client.get_bucket(bucket)\n blob = gcs_bucket.get_blob(blob_name)\n return blob\n\n def read_blob(self, *, blob_name, **kwargs):\n bucket, object_path, name, extension = paths.get_parts(blob_name)\n\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n\n blob = self._get_blob(\n bucket=bucket,\n blob_name=object_path + \"/\" + name + extension,\n )\n return blob.download_as_bytes()\n\n @single_item_cache\n def get_list_of_blob_names(self, *, prefix: str) -> List[str]:\n bucket, object_path, _, _ = paths.get_parts(prefix)\n bucket = bucket.replace(\"va_data\", \"va-data\")\n bucket = bucket.replace(\"data_\", \"data-\")\n\n client = self._get_storage_client()\n\n gcs_bucket = client.get_bucket(bucket)\n blobs = client.list_blobs(bucket_or_name=gcs_bucket, prefix=object_path)\n blobs = (bucket + \"/\" + blob.name for blob in blobs if not blob.name.endswith(\"/\"))\n return [blob for blob in blobs if (\".\" + blob.split(\".\")[-1].lower()) in VALID_EXTENSIONS]\n\n def read_dataset(\n self, columns: list = None, predicates: list = None, **kwargs\n ) -> pyarrow.Table:\n blob_names = self.partition_scheme.get_blobs_in_partition(\n start_date=self.start_date,\n end_date=self.end_date,\n blob_list_getter=self.get_list_of_blob_names,\n prefix=self.dataset,\n )\n\n # Check if the first blob was cached earlier\n # if self.cached_first_blob is not None:\n # yield self.cached_first_blob # Use cached blob\n # blob_names = blob_names[1:] # Skip first blob\n # self.cached_first_blob = None\n\n for blob_name in blob_names:\n try:\n decoder = get_decoder(blob_name)\n blob_bytes = self.read_blob(blob_name=blob_name, statistics=self.statistics)\n yield decoder(blob_bytes, projection=columns, selection=predicates)\n except 
UnsupportedFileTypeError:\n pass\n\n def get_dataset_schema(self) -> RelationSchema:\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # Read first blob for schema inference and cache it\n record = next(self.read_dataset(), None)\n self.cached_first_blob = record\n\n if record is None:\n raise DatasetNotFoundError(dataset=self.dataset)\n\n arrow_schema = record.schema\n\n self.schema = RelationSchema(\n name=self.dataset,\n columns=[FlatColumn.from_arrow(field) for field in arrow_schema],\n )\n\n return self.schema\n", "path": "opteryx/connectors/gcp_cloudstorage_connector.py"}, {"content": "__build__ = 189\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 12\n_revision = 2\n_status = VersionStatus.RELEASE\n\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
| 2,541 | 685 |
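The GCS row above asks for two things — build the storage client once and have the listing call return blob names only — and its diff does both by moving client construction into `__init__` and passing `fields="items(name)"` to `list_blobs`. A compressed sketch of that pattern (credentials are assumed to be configured in the environment; bucket and prefix values are placeholders):

```python
# Sketch: reuse a single GCS client and fetch only blob names.
# Assumes google-cloud-storage is installed and credentials are configured.
from google.cloud import storage


class BlobLister:
    def __init__(self):
        # Build the client once and reuse it for every request.
        self.client = storage.Client()

    def list_blob_names(self, bucket_name: str, prefix: str) -> list:
        bucket = self.client.get_bucket(bucket_name)
        # fields="items(name)" asks the API to return blob names only.
        blobs = self.client.list_blobs(bucket_or_name=bucket, prefix=prefix, fields="items(name)")
        return [blob.name for blob in blobs if not blob.name.endswith("/")]
```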
gh_patches_debug_39046
|
rasdani/github-patches
|
git_diff
|
pytorch__ignite-1393
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Automatically generated toctree for methods and classes
## 🚀 Feature
The idea is to replace our manually created toctrees for [metrics](https://github.com/pytorch/ignite/blob/master/docs/source/metrics.rst#complete-list-of-metrics), [handlers](https://github.com/pytorch/ignite/blob/master/docs/source/handlers.rst#complete-list-of-handlers), [regression metrics](https://github.com/pytorch/ignite/blob/master/docs/source/contrib/metrics.rst#regression-metrics), etc.
How to do that:
- check the `.. autosummary::` directive in Sphinx
- add it and configure it for each of the .rst files listed above: metrics.rst, handlers.rst, etc.
Example of usage:
- https://numpy.org/devdocs/reference/arrays.ndarray.html#id1
- https://github.com/numpy/numpy/blob/master/doc/source/reference/arrays.rst (edited)
This issue may or may not be blocked by #1272.

For Hacktoberfest contributors: feel free to ask questions if any details are unclear, and say that you would like to tackle the issue.
Please take a look at the [CONTRIBUTING guide](https://github.com/pytorch/ignite/blob/master/CONTRIBUTING.md).
</issue>
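Stripped of the Sphinx plumbing, the request is to list a module's public classes and functions automatically. A minimal sketch of that collection step — the module name is only an example, and the filtering rules (respect `__all__`, otherwise keep non-underscore names defined in the module) are assumptions — could be:

```python
# Sketch: collect the names a custom autosummary option could list.
from importlib import import_module
from inspect import getmembers, isclass, isfunction


def collect_public_names(module_name: str) -> list:
    """Return the public classes and functions defined in a module."""
    module = import_module(module_name)
    # Prefer the module's declared public API when it exists.
    names = getattr(module, "__all__", None)
    if names is None:
        names = [
            name
            for name, obj in getmembers(module, lambda o: isclass(o) or isfunction(o))
            if obj.__module__ == module_name and not name.startswith("_")
        ]
    return sorted(names)


print(collect_public_names("ignite.metrics"))  # module name is illustrative
```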
<code>
[start of docs/source/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Configuration file for the Sphinx documentation builder.
4 #
5 # This file does only contain a selection of the most common options. For a
6 # full list see the documentation:
7 # http://www.sphinx-doc.org/en/stable/config
8
9 # -- Path setup --------------------------------------------------------------
10
11 # If extensions (or modules to document with autodoc) are in another directory,
12 # add these directories to sys.path here. If the directory is relative to the
13 # documentation root, use os.path.abspath to make it absolute, like shown here.
14 #
15 import os
16 import sys
17
18 sys.path.insert(0, os.path.abspath("../.."))
19 import ignite
20 import pytorch_sphinx_theme
21
22 # -- Project information -----------------------------------------------------
23
24 project = "ignite"
25 copyright = "2020, PyTorch-Ignite Contributors"
26 author = "PyTorch-Ignite Contributors"
27
28 # The short X.Y version
29 try:
30 version = os.environ["code_version"]
31 if "master" in version:
32 version = "master (" + ignite.__version__ + ")"
33 else:
34 version = version.replace("v", "")
35 except KeyError:
36 version = ignite.__version__
37
38 # The full version, including alpha/beta/rc tags
39 release = "master"
40
41
42 # -- General configuration ---------------------------------------------------
43
44 # If your documentation needs a minimal Sphinx version, state it here.
45 #
46 # needs_sphinx = '1.0'
47
48 # Add any Sphinx extension module names here, as strings. They can be
49 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
50 # ones.
51 extensions = [
52 "sphinx.ext.autosummary",
53 "sphinx.ext.doctest",
54 "sphinx.ext.intersphinx",
55 "sphinx.ext.todo",
56 "sphinx.ext.coverage",
57 "sphinx.ext.mathjax",
58 "sphinx.ext.napoleon",
59 "sphinx.ext.viewcode",
60 "sphinx.ext.autosectionlabel",
61 ]
62
63 # Add any paths that contain templates here, relative to this directory.
64 templates_path = ["_templates"]
65
66 # The suffix(es) of source filenames.
67 # You can specify multiple suffix as a list of string:
68 #
69 # source_suffix = ['.rst', '.md']
70 source_suffix = ".rst"
71
72 # The master toctree document.
73 master_doc = "index"
74
75 # The language for content autogenerated by Sphinx. Refer to documentation
76 # for a list of supported languages.
77 #
78 # This is also used if you do content translation via gettext catalogs.
79 # Usually you set "language" from the command line for these cases.
80 language = None
81
82 # List of patterns, relative to source directory, that match files and
83 # directories to ignore when looking for source files.
84 # This pattern also affects html_static_path and html_extra_path .
85 exclude_patterns = []
86
87 # The name of the Pygments (syntax highlighting) style to use.
88 pygments_style = "sphinx"
89
90
91 # -- Options for HTML output -------------------------------------------------
92
93 # The theme to use for HTML and HTML Help pages. See the documentation for
94 # a list of builtin themes.
95 #
96 html_theme = "pytorch_sphinx_theme"
97 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
98
99 html_theme_options = {
100 "canonical_url": "https://pytorch.org/ignite/index.html",
101 "collapse_navigation": False,
102 "display_version": True,
103 "logo_only": True,
104 }
105
106 html_logo = "_static/img/ignite_logo.svg"
107
108 # Theme options are theme-specific and customize the look and feel of a theme
109 # further. For a list of options available for each theme, see the
110 # documentation.
111 #
112 # html_theme_options = {}
113
114 # Add any paths that contain custom static files (such as style sheets) here,
115 # relative to this directory. They are copied after the builtin static files,
116 # so a file named "default.css" will overwrite the builtin "default.css".
117 html_static_path = ["_static", "_templates/_static"]
118
119 html_context = {
120 "css_files": [
121 # 'https://fonts.googleapis.com/css?family=Lato',
122 # '_static/css/pytorch_theme.css'
123 "_static/css/ignite_theme.css"
124 ],
125 }
126
127
128 # -- Options for HTMLHelp output ---------------------------------------------
129
130 # Output file base name for HTML help builder.
131 htmlhelp_basename = "ignitedoc"
132
133
134 # -- Options for LaTeX output ------------------------------------------------
135
136 latex_elements = {
137 # The paper size ('letterpaper' or 'a4paper').
138 #
139 # 'papersize': 'letterpaper',
140 # The font size ('10pt', '11pt' or '12pt').
141 #
142 # 'pointsize': '10pt',
143 # Additional stuff for the LaTeX preamble.
144 #
145 # 'preamble': '',
146 # Latex figure (float) alignment
147 #
148 # 'figure_align': 'htbp',
149 }
150
151 # Grouping the document tree into LaTeX files. List of tuples
152 # (source start file, target name, title,
153 # author, documentclass [howto, manual, or own class]).
154 latex_documents = [
155 (master_doc, "ignite.tex", "ignite Documentation", "Torch Contributors", "manual"),
156 ]
157
158
159 # -- Options for manual page output ------------------------------------------
160
161 # One entry per manual page. List of tuples
162 # (source start file, name, description, authors, manual section).
163 man_pages = [(master_doc, "ignite", "ignite Documentation", [author], 1)]
164
165
166 # -- Options for Texinfo output ----------------------------------------------
167
168 # Grouping the document tree into Texinfo files. List of tuples
169 # (source start file, target name, title, author,
170 # dir menu entry, description, category)
171 texinfo_documents = [
172 (
173 master_doc,
174 "ignite",
175 "ignite Documentation",
176 author,
177 "ignite",
178 "One line description of project.",
179 "Miscellaneous",
180 ),
181 ]
182
183
184 # -- Extension configuration -------------------------------------------------
185
186 # -- Options for intersphinx extension ---------------------------------------
187
188 # Example configuration for intersphinx: refer to the Python standard library.
189 intersphinx_mapping = {"https://docs.python.org/": None}
190
191 # -- Options for todo extension ----------------------------------------------
192
193 # If true, `todo` and `todoList` produce output, else they produce nothing.
194 todo_include_todos = True
195
196 # -- Type hints configs ------------------------------------------------------
197
198 autodoc_typehints = "signature"
199
200 # -- A patch that turns-off cross refs for type annotations ------------------
201
202 import sphinx.domains.python
203 from docutils import nodes
204 from sphinx import addnodes
205
206 # replaces pending_xref node with desc_type for type annotations
207 sphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type("", nodes.Text(t))
208
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -205,3 +205,98 @@
# replaces pending_xref node with desc_type for type annotations
sphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type("", nodes.Text(t))
+
+# -- Autosummary patch to get list of a classes, funcs automatically ----------
+
+from importlib import import_module
+from inspect import getmembers, isclass, isfunction
+import sphinx.ext.autosummary
+from sphinx.ext.autosummary import Autosummary
+from docutils.parsers.rst import directives
+from docutils.statemachine import StringList
+
+
+class BetterAutosummary(Autosummary):
+ """Autosummary with autolisting for modules.
+
+ By default it tries to import all public names (__all__),
+ otherwise import all classes and/or functions in a module.
+
+ Options:
+ - :autolist: option to get list of classes and functions from currentmodule.
+ - :autolist-classes: option to get list of classes from currentmodule.
+ - :autolist-functions: option to get list of functions from currentmodule.
+
+ Example Usage:
+
+ .. currentmodule:: ignite.metrics
+
+ .. autosummary::
+ :nosignatures:
+ :autolist:
+ """
+
+ # Add new option
+ _option_spec = Autosummary.option_spec.copy()
+ _option_spec.update(
+ {
+ "autolist": directives.unchanged,
+ "autolist-classes": directives.unchanged,
+ "autolist-functions": directives.unchanged,
+ }
+ )
+ option_spec = _option_spec
+
+ def run(self):
+ for auto in ("autolist", "autolist-classes", "autolist-functions"):
+ if auto in self.options:
+ # Get current module name
+ module_name = self.env.ref_context.get("py:module")
+ # Import module
+ module = import_module(module_name)
+
+ # Get public names (if possible)
+ try:
+ names = getattr(module, "__all__")
+ except AttributeError:
+ # Get classes defined in the module
+ cls_names = [
+ name[0]
+ for name in getmembers(module, isclass)
+ if name[-1].__module__ == module_name and not (name[0].startswith("_"))
+ ]
+ # Get functions defined in the module
+ fn_names = [
+ name[0]
+ for name in getmembers(module, isfunction)
+ if (name[-1].__module__ == module_name) and not (name[0].startswith("_"))
+ ]
+ names = cls_names + fn_names
+ # It may happen that module doesn't have any defined class or func
+ if not names:
+ names = [name[0] for name in getmembers(module)]
+
+ if auto == "autolist":
+ # Get list of all classes and functions inside module
+ names = [
+ name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))
+ ]
+ else:
+ if auto == "autolist-classes":
+ # Get only classes
+ check = isclass
+ elif auto == "autolist-functions":
+ # Get only functions
+ check = isfunction
+ else:
+ raise NotImplementedError
+
+ names = [name for name in names if check(getattr(module, name))]
+
+ # Update content
+ self.content = StringList(names)
+ return super().run()
+
+
+# Patch original Autosummary
+sphinx.ext.autosummary.Autosummary = BetterAutosummary
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -205,3 +205,98 @@\n \n # replaces pending_xref node with desc_type for type annotations\n sphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type(\"\", nodes.Text(t))\n+\n+# -- Autosummary patch to get list of a classes, funcs automatically ----------\n+\n+from importlib import import_module\n+from inspect import getmembers, isclass, isfunction\n+import sphinx.ext.autosummary\n+from sphinx.ext.autosummary import Autosummary\n+from docutils.parsers.rst import directives\n+from docutils.statemachine import StringList\n+\n+\n+class BetterAutosummary(Autosummary):\n+ \"\"\"Autosummary with autolisting for modules.\n+\n+ By default it tries to import all public names (__all__),\n+ otherwise import all classes and/or functions in a module.\n+\n+ Options:\n+ - :autolist: option to get list of classes and functions from currentmodule.\n+ - :autolist-classes: option to get list of classes from currentmodule.\n+ - :autolist-functions: option to get list of functions from currentmodule.\n+\n+ Example Usage:\n+\n+ .. currentmodule:: ignite.metrics\n+\n+ .. autosummary::\n+ :nosignatures:\n+ :autolist:\n+ \"\"\"\n+\n+ # Add new option\n+ _option_spec = Autosummary.option_spec.copy()\n+ _option_spec.update(\n+ {\n+ \"autolist\": directives.unchanged,\n+ \"autolist-classes\": directives.unchanged,\n+ \"autolist-functions\": directives.unchanged,\n+ }\n+ )\n+ option_spec = _option_spec\n+\n+ def run(self):\n+ for auto in (\"autolist\", \"autolist-classes\", \"autolist-functions\"):\n+ if auto in self.options:\n+ # Get current module name\n+ module_name = self.env.ref_context.get(\"py:module\")\n+ # Import module\n+ module = import_module(module_name)\n+\n+ # Get public names (if possible)\n+ try:\n+ names = getattr(module, \"__all__\")\n+ except AttributeError:\n+ # Get classes defined in the module\n+ cls_names = [\n+ name[0]\n+ for name in getmembers(module, isclass)\n+ if name[-1].__module__ == module_name and not (name[0].startswith(\"_\"))\n+ ]\n+ # Get functions defined in the module\n+ fn_names = [\n+ name[0]\n+ for name in getmembers(module, isfunction)\n+ if (name[-1].__module__ == module_name) and not (name[0].startswith(\"_\"))\n+ ]\n+ names = cls_names + fn_names\n+ # It may happen that module doesn't have any defined class or func\n+ if not names:\n+ names = [name[0] for name in getmembers(module)]\n+\n+ if auto == \"autolist\":\n+ # Get list of all classes and functions inside module\n+ names = [\n+ name for name in names if (isclass(getattr(module, name)) or isfunction(getattr(module, name)))\n+ ]\n+ else:\n+ if auto == \"autolist-classes\":\n+ # Get only classes\n+ check = isclass\n+ elif auto == \"autolist-functions\":\n+ # Get only functions\n+ check = isfunction\n+ else:\n+ raise NotImplementedError\n+\n+ names = [name for name in names if check(getattr(module, name))]\n+\n+ # Update content\n+ self.content = StringList(names)\n+ return super().run()\n+\n+\n+# Patch original Autosummary\n+sphinx.ext.autosummary.Autosummary = BetterAutosummary\n", "issue": "Automatically generated toctree for methods and classes\n## \ud83d\ude80 Feature\r\n\r\nIdea is to replace our manually created toctree for [metrics](https://github.com/pytorch/ignite/blob/master/docs/source/metrics.rst#complete-list-of-metrics), [handlers](https://github.com/pytorch/ignite/blob/master/docs/source/handlers.rst#complete-list-of-handlers), [regression 
metrics](https://github.com/pytorch/ignite/blob/master/docs/source/contrib/metrics.rst#regression-metrics) etc.\r\n\r\nHow to do that : \r\n- check `.. autosummary:: ` tag in Sphinx\r\n- add it and configure for each listed above .rst file : metrics.rst, handlers.rst etc\r\n\r\nExample of usage:\r\n- https://numpy.org/devdocs/reference/arrays.ndarray.html#id1\r\n- https://github.com/numpy/numpy/blob/master/doc/source/reference/arrays.rst (edited) \r\n\r\nThis issue maybe or maybe not blocked by #1272 \r\n\r\n\r\nFor Hacktoberfest contributors, feel free to ask questions for details if any and say that you would like to tackle the issue.\r\nPlease, take a look at [CONTRIBUTING guide](https://github.com/pytorch/ignite/blob/master/CONTRIBUTING.md).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../..\"))\nimport ignite\nimport pytorch_sphinx_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = \"ignite\"\ncopyright = \"2020, PyTorch-Ignite Contributors\"\nauthor = \"PyTorch-Ignite Contributors\"\n\n# The short X.Y version\ntry:\n version = os.environ[\"code_version\"]\n if \"master\" in version:\n version = \"master (\" + ignite.__version__ + \")\"\n else:\n version = version.replace(\"v\", \"\")\nexcept KeyError:\n version = ignite.__version__\n\n# The full version, including alpha/beta/rc tags\nrelease = \"master\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.viewcode\",\n \"sphinx.ext.autosectionlabel\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n \"canonical_url\": \"https://pytorch.org/ignite/index.html\",\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n}\n\nhtml_logo = \"_static/img/ignite_logo.svg\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\", \"_templates/_static\"]\n\nhtml_context = {\n \"css_files\": [\n # 'https://fonts.googleapis.com/css?family=Lato',\n # '_static/css/pytorch_theme.css'\n \"_static/css/ignite_theme.css\"\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ignitedoc\"\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"ignite.tex\", \"ignite Documentation\", \"Torch Contributors\", \"manual\"),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"ignite\", \"ignite Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"ignite\",\n \"ignite Documentation\",\n author,\n \"ignite\",\n \"One line description of project.\",\n \"Miscellaneous\",\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"https://docs.python.org/\": None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Type hints configs ------------------------------------------------------\n\nautodoc_typehints = \"signature\"\n\n# -- A patch that turns-off cross refs for type annotations ------------------\n\nimport sphinx.domains.python\nfrom docutils import nodes\nfrom sphinx import addnodes\n\n# replaces pending_xref node with desc_type for type annotations\nsphinx.domains.python.type_to_xref = lambda t, e=None: addnodes.desc_type(\"\", nodes.Text(t))\n", "path": "docs/source/conf.py"}]}
| 2,748 | 868 |
gh_patches_debug_8960
|
rasdani/github-patches
|
git_diff
|
scverse__scanpy-1691
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
importlib_metadata >= 2.0 breaks scanpy.logging.print_versions()
- [x] I have checked that this issue has not already been reported.
- [x] I have confirmed this bug exists on the latest version of scanpy.
- [x] (optional) I have confirmed this bug exists on the master branch of scanpy.
---
When scanpy gets installed with the latest version of `importlib_metadata` (2.0), the
command `sc.logging.print_versions()` fails with the following error:
```pytb
WARNING: If you miss a compact list, please try `print_header`!
Traceback (most recent call last):
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py", line 195, in sinfo
mod_version = _find_version(mod.__version__)
AttributeError: module 'importlib_metadata' has no attribute '__version__'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/scanpy/logging.py", line 161, in print_versions
sinfo(dependencies=True)
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py", line 198, in sinfo
mod_version = _find_version(mod.version)
File "/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py", line 42, in _find_version
return mod_version_attr()
TypeError: version() missing 1 required positional argument: 'distribution_name'
```
According to the `importlib_metadata` changelog, the `__version__` attribute has been removed from the package:
```
=========================
importlib_metadata NEWS
=========================
v2.0.0
======
* ``importlib_metadata`` no longer presents a
``__version__`` attribute. Consumers wishing to
resolve the version of the package should query it
directly with
``importlib_metadata.version('importlib-metadata')``.
Closes #71.
```
</issue>
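The traceback shows sinfo first reading `mod.__version__` and then calling the bare `version()` helper with no distribution name. A lookup that tolerates the 2.0 change would fall back to the installed distribution's metadata; the helper below is a sketch of that idea (names and fallback order are assumptions), whereas the patch for this row instead excludes `importlib_metadata` from sinfo's dependency report.

```python
# Sketch: resolve a module's version even when it no longer exposes __version__,
# as importlib_metadata 2.0 does. Names and fallback order are illustrative.
from importlib import import_module
from importlib.metadata import PackageNotFoundError, version  # Python 3.8+
from typing import Optional


def find_version(module_name: str, dist_name: Optional[str] = None) -> Optional[str]:
    mod = import_module(module_name)
    ver = getattr(mod, "__version__", None)
    if ver is not None:
        return ver
    try:
        # Fall back to the installed distribution's metadata.
        return version(dist_name or module_name)
    except PackageNotFoundError:
        return None


print(find_version("importlib_metadata", "importlib-metadata"))
```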
<code>
[start of scanpy/logging.py]
1 """Logging and Profiling
2 """
3 import io
4 import logging
5 import sys
6 from functools import update_wrapper, partial
7 from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
8 from datetime import datetime, timedelta, timezone
9 from typing import Optional
10
11 import anndata.logging
12 from sinfo import sinfo
13
14
15 HINT = (INFO + DEBUG) // 2
16 logging.addLevelName(HINT, 'HINT')
17
18
19 class _RootLogger(logging.RootLogger):
20 def __init__(self, level):
21 super().__init__(level)
22 self.propagate = False
23 _RootLogger.manager = logging.Manager(self)
24
25 def log(
26 self,
27 level: int,
28 msg: str,
29 *,
30 extra: Optional[dict] = None,
31 time: datetime = None,
32 deep: Optional[str] = None,
33 ) -> datetime:
34 from . import settings
35
36 now = datetime.now(timezone.utc)
37 time_passed: timedelta = None if time is None else now - time
38 extra = {
39 **(extra or {}),
40 'deep': deep if settings.verbosity.level < level else None,
41 'time_passed': time_passed,
42 }
43 super().log(level, msg, extra=extra)
44 return now
45
46 def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:
47 return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)
48
49 def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:
50 return self.log(ERROR, msg, time=time, deep=deep, extra=extra)
51
52 def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:
53 return self.log(WARNING, msg, time=time, deep=deep, extra=extra)
54
55 def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:
56 return self.log(INFO, msg, time=time, deep=deep, extra=extra)
57
58 def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:
59 return self.log(HINT, msg, time=time, deep=deep, extra=extra)
60
61 def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:
62 return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)
63
64
65 def _set_log_file(settings):
66 file = settings.logfile
67 name = settings.logpath
68 root = settings._root_logger
69 h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)
70 h.setFormatter(_LogFormatter())
71 h.setLevel(root.level)
72 if len(root.handlers) == 1:
73 root.removeHandler(root.handlers[0])
74 elif len(root.handlers) > 1:
75 raise RuntimeError('Scanpy’s root logger somehow got more than one handler')
76 root.addHandler(h)
77
78
79 def _set_log_level(settings, level: int):
80 root = settings._root_logger
81 root.setLevel(level)
82 (h,) = root.handlers # may only be 1
83 h.setLevel(level)
84
85
86 class _LogFormatter(logging.Formatter):
87 def __init__(
88 self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'
89 ):
90 super().__init__(fmt, datefmt, style)
91
92 def format(self, record: logging.LogRecord):
93 format_orig = self._style._fmt
94 if record.levelno == INFO:
95 self._style._fmt = '{message}'
96 elif record.levelno == HINT:
97 self._style._fmt = '--> {message}'
98 elif record.levelno == DEBUG:
99 self._style._fmt = ' {message}'
100 if record.time_passed:
101 # strip microseconds
102 if record.time_passed.microseconds:
103 record.time_passed = timedelta(
104 seconds=int(record.time_passed.total_seconds())
105 )
106 if '{time_passed}' in record.msg:
107 record.msg = record.msg.replace(
108 '{time_passed}', str(record.time_passed)
109 )
110 else:
111 self._style._fmt += ' ({time_passed})'
112 if record.deep:
113 record.msg = f'{record.msg}: {record.deep}'
114 result = logging.Formatter.format(self, record)
115 self._style._fmt = format_orig
116 return result
117
118
119 print_memory_usage = anndata.logging.print_memory_usage
120 get_memory_usage = anndata.logging.get_memory_usage
121
122
123 _DEPENDENCIES_NUMERICS = [
124 'anndata', # anndata actually shouldn't, but as long as it's in development
125 'umap',
126 'numpy',
127 'scipy',
128 'pandas',
129 ('sklearn', 'scikit-learn'),
130 'statsmodels',
131 ('igraph', 'python-igraph'),
132 'louvain',
133 'leidenalg',
134 ]
135
136
137 def _versions_dependencies(dependencies):
138 # this is not the same as the requirements!
139 for mod in dependencies:
140 mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)
141 try:
142 imp = __import__(mod_name)
143 yield dist_name, imp.__version__
144 except (ImportError, AttributeError):
145 pass
146
147
148 def print_header(*, file=None):
149 """\
150 Versions that might influence the numerical results.
151 Matplotlib and Seaborn are excluded from this.
152 """
153
154 modules = ['scanpy'] + _DEPENDENCIES_NUMERICS
155 print(
156 ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),
157 file=file or sys.stdout,
158 )
159
160
161 def print_versions(*, file=None):
162 """Print print versions of imported packages"""
163 if file is None: # Inform people about the behavior change
164 warning('If you miss a compact list, please try `print_header`!')
165 stdout = sys.stdout
166 try:
167 buf = sys.stdout = io.StringIO()
168 sinfo(dependencies=True)
169 finally:
170 sys.stdout = stdout
171 output = buf.getvalue()
172 print(output, file=file)
173
174
175 def print_version_and_date(*, file=None):
176 """\
177 Useful for starting a notebook so you see when you started working.
178 """
179 from . import __version__
180
181 if file is None:
182 file = sys.stdout
183 print(
184 f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',
185 file=file,
186 )
187
188
189 def _copy_docs_and_signature(fn):
190 return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])
191
192
193 def error(
194 msg: str,
195 *,
196 time: datetime = None,
197 deep: Optional[str] = None,
198 extra: Optional[dict] = None,
199 ) -> datetime:
200 """\
201 Log message with specific level and return current time.
202
203 Parameters
204 ==========
205 msg
206 Message to display.
207 time
208 A time in the past. If this is passed, the time difference from then
209 to now is appended to `msg` as ` (HH:MM:SS)`.
210 If `msg` contains `{time_passed}`, the time difference is instead
211 inserted at that position.
212 deep
213 If the current verbosity is higher than the log function’s level,
214 this gets displayed as well
215 extra
216 Additional values you can specify in `msg` like `{time_passed}`.
217 """
218 from ._settings import settings
219
220 return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)
221
222
223 @_copy_docs_and_signature(error)
224 def warning(msg, *, time=None, deep=None, extra=None) -> datetime:
225 from ._settings import settings
226
227 return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)
228
229
230 @_copy_docs_and_signature(error)
231 def info(msg, *, time=None, deep=None, extra=None) -> datetime:
232 from ._settings import settings
233
234 return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)
235
236
237 @_copy_docs_and_signature(error)
238 def hint(msg, *, time=None, deep=None, extra=None) -> datetime:
239 from ._settings import settings
240
241 return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)
242
243
244 @_copy_docs_and_signature(error)
245 def debug(msg, *, time=None, deep=None, extra=None) -> datetime:
246 from ._settings import settings
247
248 return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)
249
[end of scanpy/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scanpy/logging.py b/scanpy/logging.py
--- a/scanpy/logging.py
+++ b/scanpy/logging.py
@@ -165,7 +165,17 @@
stdout = sys.stdout
try:
buf = sys.stdout = io.StringIO()
- sinfo(dependencies=True)
+ sinfo(
+ dependencies=True,
+ excludes=[
+ 'builtins',
+ 'stdlib_list',
+ 'importlib_metadata',
+ # Special module present if test coverage being calculated
+ # https://gitlab.com/joelostblom/sinfo/-/issues/10
+ "$coverage",
+ ],
+ )
finally:
sys.stdout = stdout
output = buf.getvalue()
|
{"golden_diff": "diff --git a/scanpy/logging.py b/scanpy/logging.py\n--- a/scanpy/logging.py\n+++ b/scanpy/logging.py\n@@ -165,7 +165,17 @@\n stdout = sys.stdout\n try:\n buf = sys.stdout = io.StringIO()\n- sinfo(dependencies=True)\n+ sinfo(\n+ dependencies=True,\n+ excludes=[\n+ 'builtins',\n+ 'stdlib_list',\n+ 'importlib_metadata',\n+ # Special module present if test coverage being calculated\n+ # https://gitlab.com/joelostblom/sinfo/-/issues/10\n+ \"$coverage\",\n+ ],\n+ )\n finally:\n sys.stdout = stdout\n output = buf.getvalue()\n", "issue": "importlib_metadata >= 2.0 breaks scanpy.logging.print_versions()\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of scanpy.\r\n- [x] (optional) I have confirmed this bug exists on the master branch of scanpy.\r\n\r\n---\r\n\r\nWhen scanpy gets installed with the latest version of `importlib_metadata` (2.0), the \r\ncommand `sc.logging.print_versions()` fails with the following error: \r\n\r\n```pytb\r\nWARNING: If you miss a compact list, please try `print_header`!\r\nTraceback (most recent call last):\r\n File \"/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py\", line 195, in sinfo\r\n mod_version = _find_version(mod.__version__)\r\nAttributeError: module 'importlib_metadata' has no attribute '__version__'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/scanpy/logging.py\", line 161, in print_versions\r\n sinfo(dependencies=True)\r\n File \"/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py\", line 198, in sinfo\r\n mod_version = _find_version(mod.version)\r\n File \"/home/sturm/anaconda3/envs/scanpy_test/lib/python3.7/site-packages/sinfo/main.py\", line 42, in _find_version\r\n return mod_version_attr()\r\nTypeError: version() missing 1 required positional argument: 'distribution_name'\r\n```\r\n\r\nAccording to the `importlib_metadata` changelog, the `__version__` attribute has been removed from the package: \r\n\r\n```\r\n=========================\r\n importlib_metadata NEWS\r\n=========================\r\n\r\nv2.0.0\r\n======\r\n\r\n* ``importlib_metadata`` no longer presents a\r\n ``__version__`` attribute. Consumers wishing to\r\n resolve the version of the package should query it\r\n directly with\r\n ``importlib_metadata.version('importlib-metadata')``.\r\n Closes #71.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Logging and Profiling\n\"\"\"\nimport io\nimport logging\nimport sys\nfrom functools import update_wrapper, partial\nfrom logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET\nfrom datetime import datetime, timedelta, timezone\nfrom typing import Optional\n\nimport anndata.logging\nfrom sinfo import sinfo\n\n\nHINT = (INFO + DEBUG) // 2\nlogging.addLevelName(HINT, 'HINT')\n\n\nclass _RootLogger(logging.RootLogger):\n def __init__(self, level):\n super().__init__(level)\n self.propagate = False\n _RootLogger.manager = logging.Manager(self)\n\n def log(\n self,\n level: int,\n msg: str,\n *,\n extra: Optional[dict] = None,\n time: datetime = None,\n deep: Optional[str] = None,\n ) -> datetime:\n from . 
import settings\n\n now = datetime.now(timezone.utc)\n time_passed: timedelta = None if time is None else now - time\n extra = {\n **(extra or {}),\n 'deep': deep if settings.verbosity.level < level else None,\n 'time_passed': time_passed,\n }\n super().log(level, msg, extra=extra)\n return now\n\n def critical(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(CRITICAL, msg, time=time, deep=deep, extra=extra)\n\n def error(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(ERROR, msg, time=time, deep=deep, extra=extra)\n\n def warning(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(WARNING, msg, time=time, deep=deep, extra=extra)\n\n def info(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(INFO, msg, time=time, deep=deep, extra=extra)\n\n def hint(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(HINT, msg, time=time, deep=deep, extra=extra)\n\n def debug(self, msg, *, time=None, deep=None, extra=None) -> datetime:\n return self.log(DEBUG, msg, time=time, deep=deep, extra=extra)\n\n\ndef _set_log_file(settings):\n file = settings.logfile\n name = settings.logpath\n root = settings._root_logger\n h = logging.StreamHandler(file) if name is None else logging.FileHandler(name)\n h.setFormatter(_LogFormatter())\n h.setLevel(root.level)\n if len(root.handlers) == 1:\n root.removeHandler(root.handlers[0])\n elif len(root.handlers) > 1:\n raise RuntimeError('Scanpy\u2019s root logger somehow got more than one handler')\n root.addHandler(h)\n\n\ndef _set_log_level(settings, level: int):\n root = settings._root_logger\n root.setLevel(level)\n (h,) = root.handlers # may only be 1\n h.setLevel(level)\n\n\nclass _LogFormatter(logging.Formatter):\n def __init__(\n self, fmt='{levelname}: {message}', datefmt='%Y-%m-%d %H:%M', style='{'\n ):\n super().__init__(fmt, datefmt, style)\n\n def format(self, record: logging.LogRecord):\n format_orig = self._style._fmt\n if record.levelno == INFO:\n self._style._fmt = '{message}'\n elif record.levelno == HINT:\n self._style._fmt = '--> {message}'\n elif record.levelno == DEBUG:\n self._style._fmt = ' {message}'\n if record.time_passed:\n # strip microseconds\n if record.time_passed.microseconds:\n record.time_passed = timedelta(\n seconds=int(record.time_passed.total_seconds())\n )\n if '{time_passed}' in record.msg:\n record.msg = record.msg.replace(\n '{time_passed}', str(record.time_passed)\n )\n else:\n self._style._fmt += ' ({time_passed})'\n if record.deep:\n record.msg = f'{record.msg}: {record.deep}'\n result = logging.Formatter.format(self, record)\n self._style._fmt = format_orig\n return result\n\n\nprint_memory_usage = anndata.logging.print_memory_usage\nget_memory_usage = anndata.logging.get_memory_usage\n\n\n_DEPENDENCIES_NUMERICS = [\n 'anndata', # anndata actually shouldn't, but as long as it's in development\n 'umap',\n 'numpy',\n 'scipy',\n 'pandas',\n ('sklearn', 'scikit-learn'),\n 'statsmodels',\n ('igraph', 'python-igraph'),\n 'louvain',\n 'leidenalg',\n]\n\n\ndef _versions_dependencies(dependencies):\n # this is not the same as the requirements!\n for mod in dependencies:\n mod_name, dist_name = mod if isinstance(mod, tuple) else (mod, mod)\n try:\n imp = __import__(mod_name)\n yield dist_name, imp.__version__\n except (ImportError, AttributeError):\n pass\n\n\ndef print_header(*, file=None):\n \"\"\"\\\n Versions that might influence the numerical results.\n Matplotlib and Seaborn are excluded 
from this.\n \"\"\"\n\n modules = ['scanpy'] + _DEPENDENCIES_NUMERICS\n print(\n ' '.join(f'{mod}=={ver}' for mod, ver in _versions_dependencies(modules)),\n file=file or sys.stdout,\n )\n\n\ndef print_versions(*, file=None):\n \"\"\"Print print versions of imported packages\"\"\"\n if file is None: # Inform people about the behavior change\n warning('If you miss a compact list, please try `print_header`!')\n stdout = sys.stdout\n try:\n buf = sys.stdout = io.StringIO()\n sinfo(dependencies=True)\n finally:\n sys.stdout = stdout\n output = buf.getvalue()\n print(output, file=file)\n\n\ndef print_version_and_date(*, file=None):\n \"\"\"\\\n Useful for starting a notebook so you see when you started working.\n \"\"\"\n from . import __version__\n\n if file is None:\n file = sys.stdout\n print(\n f'Running Scanpy {__version__}, ' f'on {datetime.now():%Y-%m-%d %H:%M}.',\n file=file,\n )\n\n\ndef _copy_docs_and_signature(fn):\n return partial(update_wrapper, wrapped=fn, assigned=['__doc__', '__annotations__'])\n\n\ndef error(\n msg: str,\n *,\n time: datetime = None,\n deep: Optional[str] = None,\n extra: Optional[dict] = None,\n) -> datetime:\n \"\"\"\\\n Log message with specific level and return current time.\n\n Parameters\n ==========\n msg\n Message to display.\n time\n A time in the past. If this is passed, the time difference from then\n to now is appended to `msg` as ` (HH:MM:SS)`.\n If `msg` contains `{time_passed}`, the time difference is instead\n inserted at that position.\n deep\n If the current verbosity is higher than the log function\u2019s level,\n this gets displayed as well\n extra\n Additional values you can specify in `msg` like `{time_passed}`.\n \"\"\"\n from ._settings import settings\n\n return settings._root_logger.error(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef warning(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.warning(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef info(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.info(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef hint(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.hint(msg, time=time, deep=deep, extra=extra)\n\n\n@_copy_docs_and_signature(error)\ndef debug(msg, *, time=None, deep=None, extra=None) -> datetime:\n from ._settings import settings\n\n return settings._root_logger.debug(msg, time=time, deep=deep, extra=extra)\n", "path": "scanpy/logging.py"}]}
| 3,522 | 168 |
gh_patches_debug_11146
|
rasdani/github-patches
|
git_diff
|
dbt-labs__dbt-core-7568
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-2440] `dbt show` throws `Database Error` for models with `sql_header` required for valid query
If a model is configured with a `sql_header` that is necessary to successfully run the query, `dbt show` currently fails because the [`compiled_node.compiled_code` does not include the sql_header SQL](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/task/show.py#L21).
Reproduction case (run against BQ, but not a BQ-specific issue)
```
-- models/my_model.sql
{% call set_sql_header(config) %}
CREATE TEMPORARY FUNCTION yes_no_to_boolean(answer STRING)
RETURNS BOOLEAN AS (
CASE
WHEN LOWER(answer) = 'yes' THEN True
WHEN LOWER(answer) = 'no' THEN False
ELSE NULL
END
);
{%- endcall %}
select yes_no_to_boolean("yes") as column
```
```
dbt show --select my_model --project-dir
19:00:05 Found 1 model, 0 tests, 0 snapshots, 0 analyses, 551 macros, 0 operations, 0 seed files, 0 sources, 0 exposures, 0 metrics, 0 groups
19:00:05
19:00:06 Concurrency: 1 threads (target='dev')
19:00:06
19:00:08 BigQuery adapter: https://console.cloud.google.com/bigquery?project=dbt-test-env&j=bq:US:9802c6ea-f771-4d46-9da3-bf6f521bd1da&page=queryresults
19:00:08 Encountered an error:
Runtime Error
Database Error in model dummydep (models2/dummydep.sql)
Function not found: yes_no_to_boolean at [8:8]
```
**Acceptance criteria:**
Instead of directly executing `compiled_node.compiled_code`, template it into a multi-statement query that includes the `sql_header` (similar approach to the one proposed for https://github.com/dbt-labs/dbt-core/issues/7390)
</issue>
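The acceptance criteria boil down to putting the rendered header in front of the compiled SQL before it is executed, which is also the shape of the accepted patch shown further down. A sketch of that step (the `unrendered_config` key comes from that patch; the helper name and `.get` access are placeholders):

```python
# Sketch: prepend an optional sql_header to the compiled SQL before execution.
# The unrendered_config lookup mirrors the accepted patch; other names are placeholders.
def build_show_query(compiled_node) -> str:
    sql_header = compiled_node.unrendered_config.get("sql_header")
    if sql_header:
        return sql_header + "\n" + compiled_node.compiled_code
    return compiled_node.compiled_code
```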
<code>
[start of core/dbt/task/show.py]
1 import io
2 import threading
3 import time
4
5 from dbt.contracts.graph.nodes import SeedNode
6 from dbt.contracts.results import RunResult, RunStatus
7 from dbt.events.base_types import EventLevel
8 from dbt.events.functions import fire_event
9 from dbt.events.types import ShowNode, Note
10 from dbt.exceptions import DbtRuntimeError
11 from dbt.task.compile import CompileTask, CompileRunner
12 from dbt.task.seed import SeedRunner
13
14
15 class ShowRunner(CompileRunner):
16 def __init__(self, config, adapter, node, node_index, num_nodes):
17 super().__init__(config, adapter, node, node_index, num_nodes)
18 self.run_ephemeral_models = True
19
20 def execute(self, compiled_node, manifest):
21 start_time = time.time()
22
23 # Allow passing in -1 (or any negative number) to get all rows
24 limit = None if self.config.args.limit < 0 else self.config.args.limit
25
26 adapter_response, execute_result = self.adapter.execute(
27 compiled_node.compiled_code, fetch=True, limit=limit
28 )
29 end_time = time.time()
30
31 return RunResult(
32 node=compiled_node,
33 status=RunStatus.Success,
34 timing=[],
35 thread_id=threading.current_thread().name,
36 execution_time=end_time - start_time,
37 message=None,
38 adapter_response=adapter_response.to_dict(),
39 agate_table=execute_result,
40 failures=None,
41 )
42
43
44 class ShowTask(CompileTask):
45 def _runtime_initialize(self):
46 if not (self.args.select or getattr(self.args, "inline", None)):
47 raise DbtRuntimeError("Either --select or --inline must be passed to show")
48 super()._runtime_initialize()
49
50 def get_runner_type(self, node):
51 if isinstance(node, SeedNode):
52 return SeedRunner
53 else:
54 return ShowRunner
55
56 def task_end_messages(self, results):
57 is_inline = bool(getattr(self.args, "inline", None))
58
59 if is_inline:
60 matched_results = [result for result in results if result.node.name == "inline_query"]
61 else:
62 matched_results = []
63 for result in results:
64 if result.node.name in self.selection_arg[0]:
65 matched_results.append(result)
66 else:
67 fire_event(
68 Note(msg=f"Excluded node '{result.node.name}' from results"),
69 EventLevel.DEBUG,
70 )
71
72 for result in matched_results:
73 table = result.agate_table
74
75 # Hack to get Agate table output as string
76 output = io.StringIO()
77 if self.args.output == "json":
78 table.to_json(path=output)
79 else:
80 table.print_table(output=output, max_rows=None)
81
82 node_name = result.node.name
83
84 if hasattr(result.node, "version") and result.node.version:
85 node_name += f".v{result.node.version}"
86
87 fire_event(
88 ShowNode(
89 node_name=node_name,
90 preview=output.getvalue(),
91 is_inline=is_inline,
92 output_format=self.args.output,
93 unique_id=result.node.unique_id,
94 )
95 )
96
97 def _handle_result(self, result):
98 super()._handle_result(result)
99
100 if (
101 result.node.is_ephemeral_model
102 and type(self) is ShowTask
103 and (self.args.select or getattr(self.args, "inline", None))
104 ):
105 self.node_results.append(result)
106
[end of core/dbt/task/show.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/dbt/task/show.py b/core/dbt/task/show.py
--- a/core/dbt/task/show.py
+++ b/core/dbt/task/show.py
@@ -23,6 +23,11 @@
# Allow passing in -1 (or any negative number) to get all rows
limit = None if self.config.args.limit < 0 else self.config.args.limit
+ if "sql_header" in compiled_node.unrendered_config:
+ compiled_node.compiled_code = (
+ compiled_node.unrendered_config["sql_header"] + compiled_node.compiled_code
+ )
+
adapter_response, execute_result = self.adapter.execute(
compiled_node.compiled_code, fetch=True, limit=limit
)
|
{"golden_diff": "diff --git a/core/dbt/task/show.py b/core/dbt/task/show.py\n--- a/core/dbt/task/show.py\n+++ b/core/dbt/task/show.py\n@@ -23,6 +23,11 @@\n # Allow passing in -1 (or any negative number) to get all rows\n limit = None if self.config.args.limit < 0 else self.config.args.limit\n \n+ if \"sql_header\" in compiled_node.unrendered_config:\n+ compiled_node.compiled_code = (\n+ compiled_node.unrendered_config[\"sql_header\"] + compiled_node.compiled_code\n+ )\n+\n adapter_response, execute_result = self.adapter.execute(\n compiled_node.compiled_code, fetch=True, limit=limit\n )\n", "issue": "[CT-2440] `dbt show` throws `Database Error` for models with `sql_header` required for valid query \nIf a model is configured with a `sql_header` that is necessary to successfully run the query, `dbt show` currently fails because the [`compiled_node.compiled_code` does not include the sql_header SQL](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/task/show.py#L21).\r\n\r\nReproduction case (run against BQ, but not a BQ-specific issue)\r\n\r\n```\r\n-- models/my_model.sql\r\n{% call set_sql_header(config) %}\r\n CREATE TEMPORARY FUNCTION yes_no_to_boolean(answer STRING)\r\n RETURNS BOOLEAN AS (\r\n CASE\r\n WHEN LOWER(answer) = 'yes' THEN True\r\n WHEN LOWER(answer) = 'no' THEN False\r\n ELSE NULL\r\n END\r\n );\r\n{%- endcall %}\r\n\r\nselect yes_no_to_boolean(\"yes\") as column\r\n```\r\n\r\n```\r\ndbt show --select my_model --project-dir\r\n19:00:05 Found 1 model, 0 tests, 0 snapshots, 0 analyses, 551 macros, 0 operations, 0 seed files, 0 sources, 0 exposures, 0 metrics, 0 groups\r\n19:00:05 \r\n19:00:06 Concurrency: 1 threads (target='dev')\r\n19:00:06 \r\n19:00:08 BigQuery adapter: https://console.cloud.google.com/bigquery?project=dbt-test-env&j=bq:US:9802c6ea-f771-4d46-9da3-bf6f521bd1da&page=queryresults\r\n19:00:08 Encountered an error:\r\nRuntime Error\r\n Database Error in model dummydep (models2/dummydep.sql)\r\n Function not found: yes_no_to_boolean at [8:8]\r\n```\r\n\r\n**Acceptance criteria:** \r\nInstead of directly executing `compiled_node.compiled_code`, template it into a multi-statement query that includes the `sql_header` (similar approach to the one proposed for https://github.com/dbt-labs/dbt-core/issues/7390)\r\n\n[CT-2440] `dbt show` throws `Database Error` for models with `sql_header` required for valid query \nIf a model is configured with a `sql_header` that is necessary to successfully run the query, `dbt show` currently fails because the [`compiled_node.compiled_code` does not include the sql_header SQL](https://github.com/dbt-labs/dbt-core/blob/main/core/dbt/task/show.py#L21).\r\n\r\nReproduction case (run against BQ, but not a BQ-specific issue)\r\n\r\n```\r\n-- models/my_model.sql\r\n{% call set_sql_header(config) %}\r\n CREATE TEMPORARY FUNCTION yes_no_to_boolean(answer STRING)\r\n RETURNS BOOLEAN AS (\r\n CASE\r\n WHEN LOWER(answer) = 'yes' THEN True\r\n WHEN LOWER(answer) = 'no' THEN False\r\n ELSE NULL\r\n END\r\n );\r\n{%- endcall %}\r\n\r\nselect yes_no_to_boolean(\"yes\") as column\r\n```\r\n\r\n```\r\ndbt show --select my_model --project-dir\r\n19:00:05 Found 1 model, 0 tests, 0 snapshots, 0 analyses, 551 macros, 0 operations, 0 seed files, 0 sources, 0 exposures, 0 metrics, 0 groups\r\n19:00:05 \r\n19:00:06 Concurrency: 1 threads (target='dev')\r\n19:00:06 \r\n19:00:08 BigQuery adapter: https://console.cloud.google.com/bigquery?project=dbt-test-env&j=bq:US:9802c6ea-f771-4d46-9da3-bf6f521bd1da&page=queryresults\r\n19:00:08 Encountered an 
error:\r\nRuntime Error\r\n Database Error in model dummydep (models2/dummydep.sql)\r\n Function not found: yes_no_to_boolean at [8:8]\r\n```\r\n\r\n**Acceptance criteria:** \r\nInstead of directly executing `compiled_node.compiled_code`, template it into a multi-statement query that includes the `sql_header` (similar approach to the one proposed for https://github.com/dbt-labs/dbt-core/issues/7390)\r\n\n", "before_files": [{"content": "import io\nimport threading\nimport time\n\nfrom dbt.contracts.graph.nodes import SeedNode\nfrom dbt.contracts.results import RunResult, RunStatus\nfrom dbt.events.base_types import EventLevel\nfrom dbt.events.functions import fire_event\nfrom dbt.events.types import ShowNode, Note\nfrom dbt.exceptions import DbtRuntimeError\nfrom dbt.task.compile import CompileTask, CompileRunner\nfrom dbt.task.seed import SeedRunner\n\n\nclass ShowRunner(CompileRunner):\n def __init__(self, config, adapter, node, node_index, num_nodes):\n super().__init__(config, adapter, node, node_index, num_nodes)\n self.run_ephemeral_models = True\n\n def execute(self, compiled_node, manifest):\n start_time = time.time()\n\n # Allow passing in -1 (or any negative number) to get all rows\n limit = None if self.config.args.limit < 0 else self.config.args.limit\n\n adapter_response, execute_result = self.adapter.execute(\n compiled_node.compiled_code, fetch=True, limit=limit\n )\n end_time = time.time()\n\n return RunResult(\n node=compiled_node,\n status=RunStatus.Success,\n timing=[],\n thread_id=threading.current_thread().name,\n execution_time=end_time - start_time,\n message=None,\n adapter_response=adapter_response.to_dict(),\n agate_table=execute_result,\n failures=None,\n )\n\n\nclass ShowTask(CompileTask):\n def _runtime_initialize(self):\n if not (self.args.select or getattr(self.args, \"inline\", None)):\n raise DbtRuntimeError(\"Either --select or --inline must be passed to show\")\n super()._runtime_initialize()\n\n def get_runner_type(self, node):\n if isinstance(node, SeedNode):\n return SeedRunner\n else:\n return ShowRunner\n\n def task_end_messages(self, results):\n is_inline = bool(getattr(self.args, \"inline\", None))\n\n if is_inline:\n matched_results = [result for result in results if result.node.name == \"inline_query\"]\n else:\n matched_results = []\n for result in results:\n if result.node.name in self.selection_arg[0]:\n matched_results.append(result)\n else:\n fire_event(\n Note(msg=f\"Excluded node '{result.node.name}' from results\"),\n EventLevel.DEBUG,\n )\n\n for result in matched_results:\n table = result.agate_table\n\n # Hack to get Agate table output as string\n output = io.StringIO()\n if self.args.output == \"json\":\n table.to_json(path=output)\n else:\n table.print_table(output=output, max_rows=None)\n\n node_name = result.node.name\n\n if hasattr(result.node, \"version\") and result.node.version:\n node_name += f\".v{result.node.version}\"\n\n fire_event(\n ShowNode(\n node_name=node_name,\n preview=output.getvalue(),\n is_inline=is_inline,\n output_format=self.args.output,\n unique_id=result.node.unique_id,\n )\n )\n\n def _handle_result(self, result):\n super()._handle_result(result)\n\n if (\n result.node.is_ephemeral_model\n and type(self) is ShowTask\n and (self.args.select or getattr(self.args, \"inline\", None))\n ):\n self.node_results.append(result)\n", "path": "core/dbt/task/show.py"}]}
num_tokens_prompt: 2,452 | num_tokens_diff: 159
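The golden diff for the dbt `show` record above prepends the node's configured `sql_header` to the compiled SQL before executing it. A minimal standalone sketch of that idea, using a plain dict as a stand-in for dbt's compiled node and its `unrendered_config`:

```python
# Sketch only: mirrors the membership check used in the fix, but against a
# plain dict rather than dbt's real compiled-node object.
def prepend_sql_header(compiled_code: str, unrendered_config: dict) -> str:
    """Return the SQL to execute, including any configured sql_header."""
    if "sql_header" in unrendered_config:
        return unrendered_config["sql_header"] + compiled_code
    return compiled_code


config = {
    "sql_header": (
        "CREATE TEMPORARY FUNCTION yes_no_to_boolean(answer STRING) "
        "RETURNS BOOLEAN AS (LOWER(answer) = 'yes');\n"
    )
}
print(prepend_sql_header('select yes_no_to_boolean("yes") as col', config))
```

Running the header and the query together is what allows temporary functions defined in the header to resolve, as in the BigQuery example from the issue.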
problem_id: gh_patches_debug_13045 | source: rasdani/github-patches | task_type: git_diff | in_source_id: aws-cloudformation__cfn-lint-2891
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
E2520 false positive for CloudWatch Alarm with expression
### CloudFormation Lint Version
0.80.3
### What operating system are you using?
MacOS
### Describe the bug
A valid CloudWatch alarm that uses a metrics expression is resulting in an E2520 false positive. The alarm was defined in the CloudWatch console and exported via the "View Source | CloudFormation YAML" capability, so it's definitionally a valid CloudWatch alarm. To confirm that the bug isn't in the console, created a copy of the alarm using the generated definition and neither CloudFormation nor CloudWatch have any complaints.
### Expected behavior
E2520 should not be raised when `Dimensions` is present under `MetricStat.Metric`.
### Reproduction template
```yaml
AWSTemplateFormatVersion: "2010-09-09"
Description: AXIS ALB alarms
Parameters:
pLoadBalancerId:
Type: String
Default: app/private-api-proxy/ced2a65499b104e7
pAlarmPrefix:
Type: String
Default: MySampleApp
Resources:
rAlb5xxPercentage:
Type: AWS::CloudWatch::Alarm
Properties:
AlarmName: !Sub "${pAlarmPrefix}-ALB-5XX-Percentage"
AlarmDescription: >-
This alarm fires when the ALB is returning HTTP 5XX errors. It is
usually due to a misconfiguration of the ALB or not having any
associated targets.
See [runbook](https://google.com) for more details.
ActionsEnabled: true
OKActions: []
AlarmActions: []
InsufficientDataActions: []
Dimensions: []
EvaluationPeriods: 15
DatapointsToAlarm: 3
Threshold: 5
ComparisonOperator: GreaterThanOrEqualToThreshold
TreatMissingData: notBreaching
Metrics:
- Id: e1
Label: ALB 5XX Percentage
ReturnData: true
Expression: (m2/(m1+m2+m3+0.001))*100
- Id: m1
ReturnData: false
MetricStat:
Metric:
Namespace: AWS/ApplicationELB
MetricName: RequestCount
Dimensions:
- Name: LoadBalancer
Value: !Ref pLoadBalancerId
Period: 60
Stat: Sum
- Id: m2
ReturnData: false
MetricStat:
Metric:
Namespace: AWS/ApplicationELB
MetricName: HTTPCode_ELB_5XX_Count
Dimensions:
- Name: LoadBalancer
Value: !Ref pLoadBalancerId
Period: 60
Stat: Sum
- Id: m3
ReturnData: false
MetricStat:
Metric:
Namespace: AWS/ApplicationELB
MetricName: HTTPCode_ELB_4XX_Count
Dimensions:
- Name: LoadBalancer
Value: !Ref pLoadBalancerId
Period: 60
Stat: Sum
```
</issue>
<code>
[start of src/cfnlint/rules/resources/properties/Exclusive.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import cfnlint.helpers
6 from cfnlint.data import AdditionalSpecs
7 from cfnlint.rules import CloudFormationLintRule, RuleMatch
8
9
10 class Exclusive(CloudFormationLintRule):
11 """Check Properties Resource Configuration"""
12
13 id = "E2520"
14 shortdesc = "Check Properties that are mutually exclusive"
15 description = (
16 "Making sure CloudFormation properties that are exclusive are not defined"
17 )
18 source_url = "https://github.com/aws-cloudformation/cfn-python-lint"
19 tags = ["resources"]
20
21 def __init__(self):
22 """Init"""
23 super().__init__()
24 exclusivespec = cfnlint.helpers.load_resource(AdditionalSpecs, "Exclusive.json")
25 self.resource_types_specs = exclusivespec["ResourceTypes"]
26 self.property_types_specs = exclusivespec["PropertyTypes"]
27 for resource_type_spec in self.resource_types_specs:
28 self.resource_property_types.append(resource_type_spec)
29 for property_type_spec in self.property_types_specs:
30 self.resource_sub_property_types.append(property_type_spec)
31
32 def check(self, properties, exclusions, path, cfn):
33 """Check itself"""
34 matches = []
35 for p_value, p_path in properties.items_safe(path[:]):
36 for k, v in exclusions.items():
37 property_sets = cfn.get_object_without_conditions(p_value, [k] + v)
38 for property_set in property_sets:
39 obj = property_set["Object"].clean()
40 for prop in obj:
41 if prop == k:
42 for excl_property in exclusions[prop]:
43 if excl_property in obj:
44 if property_set["Scenario"] is None:
45 message = "Property {0} should NOT exist with {1} for {2}"
46 matches.append(
47 RuleMatch(
48 p_path + [prop],
49 message.format(
50 excl_property,
51 prop,
52 "/".join(map(str, p_path)),
53 ),
54 )
55 )
56 else:
57 scenario_text = " and ".join(
58 [
59 f'when condition "{k}" is {v}'
60 for (k, v) in property_set[
61 "Scenario"
62 ].items()
63 ]
64 )
65 message = "Property {0} should NOT exist with {1} {2} for {3}"
66 matches.append(
67 RuleMatch(
68 p_path + [prop],
69 message.format(
70 excl_property,
71 prop,
72 scenario_text,
73 "/".join(map(str, p_path)),
74 ),
75 )
76 )
77
78 return matches
79
80 def match_resource_sub_properties(self, properties, property_type, path, cfn):
81 """Match for sub properties"""
82 matches = []
83
84 exclusions = self.property_types_specs.get(property_type, {})
85 matches.extend(self.check(properties, exclusions, path, cfn))
86
87 return matches
88
89 def match_resource_properties(self, properties, resource_type, path, cfn):
90 """Check CloudFormation Properties"""
91 matches = []
92
93 exclusions = self.resource_types_specs.get(resource_type, {})
94 matches.extend(self.check(properties, exclusions, path, cfn))
95
96 return matches
97
[end of src/cfnlint/rules/resources/properties/Exclusive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cfnlint/rules/resources/properties/Exclusive.py b/src/cfnlint/rules/resources/properties/Exclusive.py
--- a/src/cfnlint/rules/resources/properties/Exclusive.py
+++ b/src/cfnlint/rules/resources/properties/Exclusive.py
@@ -40,7 +40,7 @@
for prop in obj:
if prop == k:
for excl_property in exclusions[prop]:
- if excl_property in obj:
+ if obj.get(excl_property):
if property_set["Scenario"] is None:
message = "Property {0} should NOT exist with {1} for {2}"
matches.append(
|
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/Exclusive.py b/src/cfnlint/rules/resources/properties/Exclusive.py\n--- a/src/cfnlint/rules/resources/properties/Exclusive.py\n+++ b/src/cfnlint/rules/resources/properties/Exclusive.py\n@@ -40,7 +40,7 @@\n for prop in obj:\n if prop == k:\n for excl_property in exclusions[prop]:\n- if excl_property in obj:\n+ if obj.get(excl_property):\n if property_set[\"Scenario\"] is None:\n message = \"Property {0} should NOT exist with {1} for {2}\"\n matches.append(\n", "issue": "E2520 false positive for CloudWatch Alarm with expression\n### CloudFormation Lint Version\r\n\r\n0.80.3\r\n\r\n### What operating system are you using?\r\n\r\nMacOS\r\n\r\n### Describe the bug\r\n\r\nA valid CloudWatch alarm that uses a metrics expression is resulting in an E2520 false positive. The alarm was defined in the CloudWatch console and exported via the \"View Source | CloudFormation YAML\" capability, so it's definitionally a valid CloudWatch alarm. To confirm that the bug isn't in the console, created a copy of the alarm using the generated definition and neither CloudFormation nor CloudWatch have any complaints.\r\n\r\n### Expected behavior\r\n\r\nE2520 should not be raised when `Dimensions` is present under `MetricStat.Metric`.\r\n\r\n### Reproduction template\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: \"2010-09-09\"\r\n\r\nDescription: AXIS ALB alarms\r\n\r\nParameters:\r\n pLoadBalancerId:\r\n Type: String\r\n Default: app/private-api-proxy/ced2a65499b104e7\r\n\r\n pAlarmPrefix:\r\n Type: String\r\n Default: MySampleApp\r\n\r\nResources:\r\n rAlb5xxPercentage:\r\n Type: AWS::CloudWatch::Alarm\r\n Properties:\r\n AlarmName: !Sub \"${pAlarmPrefix}-ALB-5XX-Percentage\"\r\n AlarmDescription: >-\r\n This alarm fires when the ALB is returning HTTP 5XX errors. It is\r\n usually due to a misconfiguration of the ALB or not having any\r\n associated targets.\r\n\r\n\r\n See [runbook](https://google.com) for more details.\r\n ActionsEnabled: true\r\n OKActions: []\r\n AlarmActions: []\r\n InsufficientDataActions: []\r\n Dimensions: []\r\n EvaluationPeriods: 15\r\n DatapointsToAlarm: 3\r\n Threshold: 5\r\n ComparisonOperator: GreaterThanOrEqualToThreshold\r\n TreatMissingData: notBreaching\r\n Metrics:\r\n - Id: e1\r\n Label: ALB 5XX Percentage\r\n ReturnData: true\r\n Expression: (m2/(m1+m2+m3+0.001))*100\r\n - Id: m1\r\n ReturnData: false\r\n MetricStat:\r\n Metric:\r\n Namespace: AWS/ApplicationELB\r\n MetricName: RequestCount\r\n Dimensions:\r\n - Name: LoadBalancer\r\n Value: !Ref pLoadBalancerId\r\n Period: 60\r\n Stat: Sum\r\n - Id: m2\r\n ReturnData: false\r\n MetricStat:\r\n Metric:\r\n Namespace: AWS/ApplicationELB\r\n MetricName: HTTPCode_ELB_5XX_Count\r\n Dimensions:\r\n - Name: LoadBalancer\r\n Value: !Ref pLoadBalancerId\r\n Period: 60\r\n Stat: Sum\r\n - Id: m3\r\n ReturnData: false\r\n MetricStat:\r\n Metric:\r\n Namespace: AWS/ApplicationELB\r\n MetricName: HTTPCode_ELB_4XX_Count\r\n Dimensions:\r\n - Name: LoadBalancer\r\n Value: !Ref pLoadBalancerId\r\n Period: 60\r\n Stat: Sum\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport cfnlint.helpers\nfrom cfnlint.data import AdditionalSpecs\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Exclusive(CloudFormationLintRule):\n \"\"\"Check Properties Resource Configuration\"\"\"\n\n id = \"E2520\"\n shortdesc = \"Check Properties that are mutually exclusive\"\n description = (\n \"Making sure CloudFormation properties that are exclusive are not defined\"\n )\n source_url = \"https://github.com/aws-cloudformation/cfn-python-lint\"\n tags = [\"resources\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n exclusivespec = cfnlint.helpers.load_resource(AdditionalSpecs, \"Exclusive.json\")\n self.resource_types_specs = exclusivespec[\"ResourceTypes\"]\n self.property_types_specs = exclusivespec[\"PropertyTypes\"]\n for resource_type_spec in self.resource_types_specs:\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in self.property_types_specs:\n self.resource_sub_property_types.append(property_type_spec)\n\n def check(self, properties, exclusions, path, cfn):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for k, v in exclusions.items():\n property_sets = cfn.get_object_without_conditions(p_value, [k] + v)\n for property_set in property_sets:\n obj = property_set[\"Object\"].clean()\n for prop in obj:\n if prop == k:\n for excl_property in exclusions[prop]:\n if excl_property in obj:\n if property_set[\"Scenario\"] is None:\n message = \"Property {0} should NOT exist with {1} for {2}\"\n matches.append(\n RuleMatch(\n p_path + [prop],\n message.format(\n excl_property,\n prop,\n \"/\".join(map(str, p_path)),\n ),\n )\n )\n else:\n scenario_text = \" and \".join(\n [\n f'when condition \"{k}\" is {v}'\n for (k, v) in property_set[\n \"Scenario\"\n ].items()\n ]\n )\n message = \"Property {0} should NOT exist with {1} {2} for {3}\"\n matches.append(\n RuleMatch(\n p_path + [prop],\n message.format(\n excl_property,\n prop,\n scenario_text,\n \"/\".join(map(str, p_path)),\n ),\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n exclusions = self.property_types_specs.get(property_type, {})\n matches.extend(self.check(properties, exclusions, path, cfn))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n exclusions = self.resource_types_specs.get(resource_type, {})\n matches.extend(self.check(properties, exclusions, path, cfn))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Exclusive.py"}]}
num_tokens_prompt: 2,096 | num_tokens_diff: 139
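The E2520 fix above replaces a key-membership test with a truthiness test, so a property that is present but empty — such as the `Dimensions: []` in the reproduction template — no longer counts as a conflicting sibling. A small self-contained sketch of the difference, assuming the exclusivity spec pairs `Metrics` with `Dimensions`:

```python
# Sketch only: simplified stand-ins for the resource properties and the check.
properties = {"Metrics": [{"Id": "e1"}], "Dimensions": []}


def violates_membership(props: dict, prop: str, excluded: str) -> bool:
    # pre-fix behaviour: the excluded key merely being present triggers a match
    return prop in props and excluded in props


def violates_truthiness(props: dict, prop: str, excluded: str) -> bool:
    # post-fix behaviour: only a non-empty value for the excluded key matches
    return prop in props and bool(props.get(excluded))


print(violates_membership(properties, "Metrics", "Dimensions"))  # True  -> false positive
print(violates_truthiness(properties, "Metrics", "Dimensions"))  # False -> fixed
```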
problem_id: gh_patches_debug_44040 | source: rasdani/github-patches | task_type: git_diff | in_source_id: meltano__meltano-7179
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add is_codespaces to telemetry environment context
Just like [we set `is_ci_environment` when the `CI` env var is set](https://github.com/meltano/meltano/blob/main/src/meltano/core/tracking/contexts/environment.py#L57), we should set `is_codespaces` (or something to that effect) when `CODESPACES` is set (see [docs](https://docs.github.com/en/codespaces/developing-in-codespaces/default-environment-variables-for-your-codespace)).
@tayloramurphy It'd be interesting to compare how far people get into the funnel with codespaces vs having to install locally. On the one hand, the barrier is lower so some people that click the button may be less motivated to make it to the end, but on the other hand, it should be easier to just quickly follow the steps and get to "wow". We may run into the issue that we currently consider any usage of less than 5min a bot, and that these codespaces projects may be treated as one-offs instead of being reused to form the company's official Meltano projects, so they'll never turn active. It'll be good to have the option of treating new codespaces projects differently from new local projects in our reporting.
</issue>
<code>
[start of src/meltano/core/tracking/contexts/environment.py]
1 """Environment context for the Snowplow tracker."""
2
3 from __future__ import annotations
4
5 import os
6 import platform
7 import uuid
8 from collections import defaultdict
9 from contextlib import suppress
10 from datetime import datetime
11 from pathlib import Path
12 from typing import Any
13 from warnings import warn
14
15 import psutil
16 from cached_property import cached_property
17 from snowplow_tracker import SelfDescribingJson
18 from structlog.stdlib import get_logger
19
20 import meltano
21 from meltano.core.tracking.schemas import EnvironmentContextSchema
22 from meltano.core.utils import hash_sha256, safe_hasattr
23
24 logger = get_logger(__name__)
25
26 # This file is only ever created in CI when building a release
27 release_marker_path = Path(__file__).parent / ".release_marker"
28
29
30 def _get_parent_context_uuid_str() -> str | None:
31 with suppress(KeyError):
32 uuid_str = os.environ["MELTANO_PARENT_CONTEXT_UUID"]
33 try:
34 return str(uuid.UUID(uuid_str))
35 except ValueError:
36 warn(
37 f"Invalid telemetry parent environment context UUID {uuid_str!r} "
38 "from $MELTANO_PARENT_CONTEXT_UUID - Meltano will continue as if "
39 "$MELTANO_PARENT_CONTEXT_UUID had not been set"
40 )
41 return None
42
43
44 class EnvironmentContext(SelfDescribingJson):
45 """Environment context for the Snowplow tracker."""
46
47 def __init__(self):
48 """Initialize the environment context."""
49 ci_markers = ("GITHUB_ACTIONS", "CI")
50 super().__init__(
51 EnvironmentContextSchema.url,
52 {
53 "context_uuid": str(uuid.uuid4()),
54 "parent_context_uuid": _get_parent_context_uuid_str(),
55 "meltano_version": meltano.__version__,
56 "is_dev_build": not release_marker_path.exists(),
57 "is_ci_environment": any(
58 # True if 'true', 'TRUE', 'True', or '1'
59 os.environ.get(marker, "").lower()[:1] in {"1", "t"}
60 for marker in ci_markers
61 ),
62 "python_version": platform.python_version(),
63 "python_implementation": platform.python_implementation(),
64 **self.system_info,
65 **self.process_info,
66 },
67 )
68
69 @cached_property
70 def system_info(self) -> dict[str, Any]:
71 """Get system information.
72
73 Returns:
74 A dictionary containing system information.
75 """
76 try:
77 freedesktop_data = platform.freedesktop_os_release()
78 except Exception:
79 freedesktop_data = defaultdict(type(None))
80
81 return {
82 "system_name": platform.system() or None,
83 "system_release": platform.release() or None,
84 "system_version": platform.version() or None,
85 "machine": platform.machine() or None,
86 "windows_edition": platform.win32_edition()
87 if safe_hasattr(platform, "win32_edition")
88 else None,
89 "freedesktop_id": freedesktop_data["ID"],
90 "freedesktop_id_like": freedesktop_data.get("ID_LIKE", None),
91 "freedesktop_version_id": freedesktop_data.get("VERSION_ID", None),
92 }
93
94 @staticmethod
95 def get_process_timestamp(process: psutil.Process) -> str:
96 """Obtain the creation time of a process as a ISO 8601 timestamp.
97
98 Args:
99 process: The process to obtain the creation time from.
100
101 Returns:
102 A ISO 8601 timestamp formatted string.
103 """
104 return f"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z"
105
106 @cached_property
107 def process_info(self) -> dict[str, Any]:
108 """Obtain the process information for the current process.
109
110 Returns:
111 A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc
112 """
113 process = psutil.Process()
114 with process.oneshot():
115 return {
116 "num_cpu_cores": psutil.cpu_count(),
117 "num_cpu_cores_available": self.num_available_cores,
118 "process_hierarchy": [
119 {
120 "process_name_hash": hash_sha256(proc.name()),
121 "process_creation_timestamp": self.get_process_timestamp(proc),
122 }
123 for proc in (process, *process.parents())
124 ],
125 }
126
127 @cached_property
128 def num_available_cores(self) -> int:
129 """Obtain the number of available CPU cores.
130
131 Uses sched_getaffinity where available, otherwise falls back to cpu_count().
132
133 Returns:
134 int: The number of available CPU cores.
135 """
136 if safe_hasattr(os, "sched_getaffinity"):
137 return len(os.sched_getaffinity(0))
138 return os.cpu_count()
139
140
141 environment_context = EnvironmentContext()
142
[end of src/meltano/core/tracking/contexts/environment.py]
[start of src/meltano/core/tracking/schemas.py]
1 """Meltano Iglu schemas metadata & utilities."""
2
3 from __future__ import annotations
4
5 from dataclasses import dataclass
6
7 DEFAULT_VENDOR = "com.meltano"
8
9
10 @dataclass
11 class IgluSchema:
12 """Dataclass to store the name, version, vendor, and URL for an Iglu schema."""
13
14 name: str
15 version: str
16 vendor: str = DEFAULT_VENDOR
17
18 @property
19 def url(self) -> str:
20 """Construct an iglu schema URL.
21
22 Returns:
23 The URL to the schema.
24 """
25 return f"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}"
26
27
28 CliContextSchema = IgluSchema("cli_context", "1-1-0")
29 CliEventSchema = IgluSchema("cli_event", "1-0-1")
30 BlockEventSchema = IgluSchema("block_event", "1-0-0")
31 EnvironmentContextSchema = IgluSchema("environment_context", "1-1-0")
32 ExceptionContextSchema = IgluSchema("exception_context", "1-0-0")
33 ExitEventSchema = IgluSchema("exit_event", "1-0-1")
34 PluginsContextSchema = IgluSchema("plugins_context", "1-0-0")
35 ProjectContextSchema = IgluSchema("project_context", "1-1-0")
36 TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0")
37
[end of src/meltano/core/tracking/schemas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/meltano/core/tracking/contexts/environment.py b/src/meltano/core/tracking/contexts/environment.py
--- a/src/meltano/core/tracking/contexts/environment.py
+++ b/src/meltano/core/tracking/contexts/environment.py
@@ -9,7 +9,7 @@
from contextlib import suppress
from datetime import datetime
from pathlib import Path
-from typing import Any
+from typing import Any, Iterable
from warnings import warn
import psutil
@@ -19,7 +19,7 @@
import meltano
from meltano.core.tracking.schemas import EnvironmentContextSchema
-from meltano.core.utils import hash_sha256, safe_hasattr
+from meltano.core.utils import get_boolean_env_var, hash_sha256, safe_hasattr, strtobool
logger = get_logger(__name__)
@@ -44,9 +44,21 @@
class EnvironmentContext(SelfDescribingJson):
"""Environment context for the Snowplow tracker."""
+ ci_markers = {"GITHUB_ACTIONS", "CI"}
+ notable_flag_env_vars = {"CODESPACES", *ci_markers}
+
+ @classmethod
+ def _notable_flag_env_vars(cls) -> Iterable[str]:
+ for env_var_name in cls.notable_flag_env_vars:
+ with suppress(KeyError): # Skip unset env vars
+ env_var_value = os.environ[env_var_name]
+ try:
+ yield env_var_name, strtobool(env_var_value)
+ except ValueError:
+ yield env_var_name, None
+
def __init__(self):
"""Initialize the environment context."""
- ci_markers = ("GITHUB_ACTIONS", "CI")
super().__init__(
EnvironmentContextSchema.url,
{
@@ -55,10 +67,9 @@
"meltano_version": meltano.__version__,
"is_dev_build": not release_marker_path.exists(),
"is_ci_environment": any(
- # True if 'true', 'TRUE', 'True', or '1'
- os.environ.get(marker, "").lower()[:1] in {"1", "t"}
- for marker in ci_markers
+ get_boolean_env_var(marker) for marker in self.ci_markers
),
+ "notable_flag_env_vars": dict(self._notable_flag_env_vars()),
"python_version": platform.python_version(),
"python_implementation": platform.python_implementation(),
**self.system_info,
@@ -108,7 +119,8 @@
"""Obtain the process information for the current process.
Returns:
- A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc
+ A dictionary containing the process information. Such as the hashed
+ process name, pid, core counts, etc
"""
process = psutil.Process()
with process.oneshot():
@@ -128,10 +140,11 @@
def num_available_cores(self) -> int:
"""Obtain the number of available CPU cores.
- Uses sched_getaffinity where available, otherwise falls back to cpu_count().
+ Uses `sched_getaffinity` where available, otherwise falls back to
+ `cpu_count`.
Returns:
- int: The number of available CPU cores.
+ The number of available CPU cores.
"""
if safe_hasattr(os, "sched_getaffinity"):
return len(os.sched_getaffinity(0))
diff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py
--- a/src/meltano/core/tracking/schemas.py
+++ b/src/meltano/core/tracking/schemas.py
@@ -28,7 +28,7 @@
CliContextSchema = IgluSchema("cli_context", "1-1-0")
CliEventSchema = IgluSchema("cli_event", "1-0-1")
BlockEventSchema = IgluSchema("block_event", "1-0-0")
-EnvironmentContextSchema = IgluSchema("environment_context", "1-1-0")
+EnvironmentContextSchema = IgluSchema("environment_context", "1-2-0")
ExceptionContextSchema = IgluSchema("exception_context", "1-0-0")
ExitEventSchema = IgluSchema("exit_event", "1-0-1")
PluginsContextSchema = IgluSchema("plugins_context", "1-0-0")
|
{"golden_diff": "diff --git a/src/meltano/core/tracking/contexts/environment.py b/src/meltano/core/tracking/contexts/environment.py\n--- a/src/meltano/core/tracking/contexts/environment.py\n+++ b/src/meltano/core/tracking/contexts/environment.py\n@@ -9,7 +9,7 @@\n from contextlib import suppress\n from datetime import datetime\n from pathlib import Path\n-from typing import Any\n+from typing import Any, Iterable\n from warnings import warn\n \n import psutil\n@@ -19,7 +19,7 @@\n \n import meltano\n from meltano.core.tracking.schemas import EnvironmentContextSchema\n-from meltano.core.utils import hash_sha256, safe_hasattr\n+from meltano.core.utils import get_boolean_env_var, hash_sha256, safe_hasattr, strtobool\n \n logger = get_logger(__name__)\n \n@@ -44,9 +44,21 @@\n class EnvironmentContext(SelfDescribingJson):\n \"\"\"Environment context for the Snowplow tracker.\"\"\"\n \n+ ci_markers = {\"GITHUB_ACTIONS\", \"CI\"}\n+ notable_flag_env_vars = {\"CODESPACES\", *ci_markers}\n+\n+ @classmethod\n+ def _notable_flag_env_vars(cls) -> Iterable[str]:\n+ for env_var_name in cls.notable_flag_env_vars:\n+ with suppress(KeyError): # Skip unset env vars\n+ env_var_value = os.environ[env_var_name]\n+ try:\n+ yield env_var_name, strtobool(env_var_value)\n+ except ValueError:\n+ yield env_var_name, None\n+\n def __init__(self):\n \"\"\"Initialize the environment context.\"\"\"\n- ci_markers = (\"GITHUB_ACTIONS\", \"CI\")\n super().__init__(\n EnvironmentContextSchema.url,\n {\n@@ -55,10 +67,9 @@\n \"meltano_version\": meltano.__version__,\n \"is_dev_build\": not release_marker_path.exists(),\n \"is_ci_environment\": any(\n- # True if 'true', 'TRUE', 'True', or '1'\n- os.environ.get(marker, \"\").lower()[:1] in {\"1\", \"t\"}\n- for marker in ci_markers\n+ get_boolean_env_var(marker) for marker in self.ci_markers\n ),\n+ \"notable_flag_env_vars\": dict(self._notable_flag_env_vars()),\n \"python_version\": platform.python_version(),\n \"python_implementation\": platform.python_implementation(),\n **self.system_info,\n@@ -108,7 +119,8 @@\n \"\"\"Obtain the process information for the current process.\n \n Returns:\n- A dictionary containing the process information. Such as the hashed process name, pid, core counts, etc\n+ A dictionary containing the process information. 
Such as the hashed\n+ process name, pid, core counts, etc\n \"\"\"\n process = psutil.Process()\n with process.oneshot():\n@@ -128,10 +140,11 @@\n def num_available_cores(self) -> int:\n \"\"\"Obtain the number of available CPU cores.\n \n- Uses sched_getaffinity where available, otherwise falls back to cpu_count().\n+ Uses `sched_getaffinity` where available, otherwise falls back to\n+ `cpu_count`.\n \n Returns:\n- int: The number of available CPU cores.\n+ The number of available CPU cores.\n \"\"\"\n if safe_hasattr(os, \"sched_getaffinity\"):\n return len(os.sched_getaffinity(0))\ndiff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py\n--- a/src/meltano/core/tracking/schemas.py\n+++ b/src/meltano/core/tracking/schemas.py\n@@ -28,7 +28,7 @@\n CliContextSchema = IgluSchema(\"cli_context\", \"1-1-0\")\n CliEventSchema = IgluSchema(\"cli_event\", \"1-0-1\")\n BlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\n-EnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-1-0\")\n+EnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-2-0\")\n ExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\n ExitEventSchema = IgluSchema(\"exit_event\", \"1-0-1\")\n PluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\n", "issue": "Add is_codespaces to telemetry environment context\nJust like [we set `is_ci_environment` when the `CI` env var is set](https://github.com/meltano/meltano/blob/main/src/meltano/core/tracking/contexts/environment.py#L57), we should set `is_codespaces` (or something to that effect) when `CODESPACES` is set (see [docs](https://docs.github.com/en/codespaces/developing-in-codespaces/default-environment-variables-for-your-codespace)).\r\n\r\n@tayloramurphy It'd be interesting to compare how far people get into the funnel with codespaces vs having to install locally. On the one hand, the barrier is lower so some people that click the button may be less motivated to make it to the end, but on the other hand, it should be easier to just quickly follow the steps and get to \"wow\". We may run into the issue that we currently consider any usage of less than 5min a bot, and that these codespaces projects may be treated as one-offs instead of being reused to form the company's official Meltano projects, so they'll never turn active. 
It'll be good to have the option of treating new codespaces projects differently from new local projects in our reporting.\n", "before_files": [{"content": "\"\"\"Environment context for the Snowplow tracker.\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport platform\nimport uuid\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Any\nfrom warnings import warn\n\nimport psutil\nfrom cached_property import cached_property\nfrom snowplow_tracker import SelfDescribingJson\nfrom structlog.stdlib import get_logger\n\nimport meltano\nfrom meltano.core.tracking.schemas import EnvironmentContextSchema\nfrom meltano.core.utils import hash_sha256, safe_hasattr\n\nlogger = get_logger(__name__)\n\n# This file is only ever created in CI when building a release\nrelease_marker_path = Path(__file__).parent / \".release_marker\"\n\n\ndef _get_parent_context_uuid_str() -> str | None:\n with suppress(KeyError):\n uuid_str = os.environ[\"MELTANO_PARENT_CONTEXT_UUID\"]\n try:\n return str(uuid.UUID(uuid_str))\n except ValueError:\n warn(\n f\"Invalid telemetry parent environment context UUID {uuid_str!r} \"\n \"from $MELTANO_PARENT_CONTEXT_UUID - Meltano will continue as if \"\n \"$MELTANO_PARENT_CONTEXT_UUID had not been set\"\n )\n return None\n\n\nclass EnvironmentContext(SelfDescribingJson):\n \"\"\"Environment context for the Snowplow tracker.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the environment context.\"\"\"\n ci_markers = (\"GITHUB_ACTIONS\", \"CI\")\n super().__init__(\n EnvironmentContextSchema.url,\n {\n \"context_uuid\": str(uuid.uuid4()),\n \"parent_context_uuid\": _get_parent_context_uuid_str(),\n \"meltano_version\": meltano.__version__,\n \"is_dev_build\": not release_marker_path.exists(),\n \"is_ci_environment\": any(\n # True if 'true', 'TRUE', 'True', or '1'\n os.environ.get(marker, \"\").lower()[:1] in {\"1\", \"t\"}\n for marker in ci_markers\n ),\n \"python_version\": platform.python_version(),\n \"python_implementation\": platform.python_implementation(),\n **self.system_info,\n **self.process_info,\n },\n )\n\n @cached_property\n def system_info(self) -> dict[str, Any]:\n \"\"\"Get system information.\n\n Returns:\n A dictionary containing system information.\n \"\"\"\n try:\n freedesktop_data = platform.freedesktop_os_release()\n except Exception:\n freedesktop_data = defaultdict(type(None))\n\n return {\n \"system_name\": platform.system() or None,\n \"system_release\": platform.release() or None,\n \"system_version\": platform.version() or None,\n \"machine\": platform.machine() or None,\n \"windows_edition\": platform.win32_edition()\n if safe_hasattr(platform, \"win32_edition\")\n else None,\n \"freedesktop_id\": freedesktop_data[\"ID\"],\n \"freedesktop_id_like\": freedesktop_data.get(\"ID_LIKE\", None),\n \"freedesktop_version_id\": freedesktop_data.get(\"VERSION_ID\", None),\n }\n\n @staticmethod\n def get_process_timestamp(process: psutil.Process) -> str:\n \"\"\"Obtain the creation time of a process as a ISO 8601 timestamp.\n\n Args:\n process: The process to obtain the creation time from.\n\n Returns:\n A ISO 8601 timestamp formatted string.\n \"\"\"\n return f\"{datetime.utcfromtimestamp(process.create_time()).isoformat()}Z\"\n\n @cached_property\n def process_info(self) -> dict[str, Any]:\n \"\"\"Obtain the process information for the current process.\n\n Returns:\n A dictionary containing the process information. 
Such as the hashed process name, pid, core counts, etc\n \"\"\"\n process = psutil.Process()\n with process.oneshot():\n return {\n \"num_cpu_cores\": psutil.cpu_count(),\n \"num_cpu_cores_available\": self.num_available_cores,\n \"process_hierarchy\": [\n {\n \"process_name_hash\": hash_sha256(proc.name()),\n \"process_creation_timestamp\": self.get_process_timestamp(proc),\n }\n for proc in (process, *process.parents())\n ],\n }\n\n @cached_property\n def num_available_cores(self) -> int:\n \"\"\"Obtain the number of available CPU cores.\n\n Uses sched_getaffinity where available, otherwise falls back to cpu_count().\n\n Returns:\n int: The number of available CPU cores.\n \"\"\"\n if safe_hasattr(os, \"sched_getaffinity\"):\n return len(os.sched_getaffinity(0))\n return os.cpu_count()\n\n\nenvironment_context = EnvironmentContext()\n", "path": "src/meltano/core/tracking/contexts/environment.py"}, {"content": "\"\"\"Meltano Iglu schemas metadata & utilities.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nDEFAULT_VENDOR = \"com.meltano\"\n\n\n@dataclass\nclass IgluSchema:\n \"\"\"Dataclass to store the name, version, vendor, and URL for an Iglu schema.\"\"\"\n\n name: str\n version: str\n vendor: str = DEFAULT_VENDOR\n\n @property\n def url(self) -> str:\n \"\"\"Construct an iglu schema URL.\n\n Returns:\n The URL to the schema.\n \"\"\"\n return f\"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}\"\n\n\nCliContextSchema = IgluSchema(\"cli_context\", \"1-1-0\")\nCliEventSchema = IgluSchema(\"cli_event\", \"1-0-1\")\nBlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\nEnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-1-0\")\nExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\nExitEventSchema = IgluSchema(\"exit_event\", \"1-0-1\")\nPluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\nProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\nTelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "path": "src/meltano/core/tracking/schemas.py"}]}
num_tokens_prompt: 2,549 | num_tokens_diff: 974
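The telemetry change above adds `CODESPACES` to the CI markers as a "notable flag" environment variable and records each flag's parsed boolean value (or `None` when the value is set but unparseable). A self-contained sketch of that collection logic; `strtobool` here is a local stand-in rather than the helper imported from `meltano.core.utils` in the real patch:

```python
import os

# Assumed set of flags to watch; the real change derives it from the CI
# markers plus CODESPACES.
NOTABLE_FLAG_ENV_VARS = {"CI", "GITHUB_ACTIONS", "CODESPACES"}


def strtobool(value: str) -> bool:
    value = value.strip().lower()
    if value in {"1", "t", "true", "y", "yes", "on"}:
        return True
    if value in {"0", "f", "false", "n", "no", "off"}:
        return False
    raise ValueError(f"invalid truth value {value!r}")


def notable_flag_env_vars() -> dict:
    flags = {}
    for name in NOTABLE_FLAG_ENV_VARS:
        if name in os.environ:
            try:
                flags[name] = strtobool(os.environ[name])
            except ValueError:
                flags[name] = None  # set, but not a recognisable boolean
    return flags


print(notable_flag_env_vars())  # e.g. {'CODESPACES': True} inside a codespace
```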
problem_id: gh_patches_debug_13303 | source: rasdani/github-patches | task_type: git_diff | in_source_id: tornadoweb__tornado-2395
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError when closing IOLoop
This started showing up in Dask's test suite recently:
```python-traceback
distributed/utils_test.py:144: in pristine_loop
loop.close(all_fds=True)
../../Software/anaconda/envs/test-environment/lib/python3.6/site-packages/tornado/platform/asyncio.py:223: in close
super(AsyncIOLoop, self).close(all_fds=all_fds)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <tornado.platform.asyncio.AsyncIOLoop object at 0x7f5751d46eb8>, all_fds = True
def close(self, all_fds=False):
self.closing = True
for fd in list(self.handlers):
fileobj, handler_func = self.handlers[fd]
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
self.asyncio_loop.close()
> del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
E KeyError: <_UnixSelectorEventLoop running=False closed=True debug=False>
```
This is likely due to some change in upstream dependencies. It looks like Tornado hasn't had a release during the time when this arose, so it's likely something else. Still, I thought I'd raise the issue.
</issue>
<code>
[start of tornado/platform/asyncio.py]
1 """Bridges between the `asyncio` module and Tornado IOLoop.
2
3 .. versionadded:: 3.2
4
5 This module integrates Tornado with the ``asyncio`` module introduced
6 in Python 3.4. This makes it possible to combine the two libraries on
7 the same event loop.
8
9 .. deprecated:: 5.0
10
11 While the code in this module is still used, it is now enabled
12 automatically when `asyncio` is available, so applications should
13 no longer need to refer to this module directly.
14
15 .. note::
16
17 Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of
18 methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on
19 Windows. Use the `~asyncio.SelectorEventLoop` instead.
20 """
21
22 from __future__ import absolute_import, division, print_function
23 import functools
24
25 from tornado.gen import convert_yielded
26 from tornado.ioloop import IOLoop
27 from tornado import stack_context
28
29 import asyncio
30
31
32 class BaseAsyncIOLoop(IOLoop):
33 def initialize(self, asyncio_loop, **kwargs):
34 self.asyncio_loop = asyncio_loop
35 # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)
36 self.handlers = {}
37 # Set of fds listening for reads/writes
38 self.readers = set()
39 self.writers = set()
40 self.closing = False
41 # If an asyncio loop was closed through an asyncio interface
42 # instead of IOLoop.close(), we'd never hear about it and may
43 # have left a dangling reference in our map. In case an
44 # application (or, more likely, a test suite) creates and
45 # destroys a lot of event loops in this way, check here to
46 # ensure that we don't have a lot of dead loops building up in
47 # the map.
48 #
49 # TODO(bdarnell): consider making self.asyncio_loop a weakref
50 # for AsyncIOMainLoop and make _ioloop_for_asyncio a
51 # WeakKeyDictionary.
52 for loop in list(IOLoop._ioloop_for_asyncio):
53 if loop.is_closed():
54 del IOLoop._ioloop_for_asyncio[loop]
55 IOLoop._ioloop_for_asyncio[asyncio_loop] = self
56 super(BaseAsyncIOLoop, self).initialize(**kwargs)
57
58 def close(self, all_fds=False):
59 self.closing = True
60 for fd in list(self.handlers):
61 fileobj, handler_func = self.handlers[fd]
62 self.remove_handler(fd)
63 if all_fds:
64 self.close_fd(fileobj)
65 self.asyncio_loop.close()
66 del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
67
68 def add_handler(self, fd, handler, events):
69 fd, fileobj = self.split_fd(fd)
70 if fd in self.handlers:
71 raise ValueError("fd %s added twice" % fd)
72 self.handlers[fd] = (fileobj, stack_context.wrap(handler))
73 if events & IOLoop.READ:
74 self.asyncio_loop.add_reader(
75 fd, self._handle_events, fd, IOLoop.READ)
76 self.readers.add(fd)
77 if events & IOLoop.WRITE:
78 self.asyncio_loop.add_writer(
79 fd, self._handle_events, fd, IOLoop.WRITE)
80 self.writers.add(fd)
81
82 def update_handler(self, fd, events):
83 fd, fileobj = self.split_fd(fd)
84 if events & IOLoop.READ:
85 if fd not in self.readers:
86 self.asyncio_loop.add_reader(
87 fd, self._handle_events, fd, IOLoop.READ)
88 self.readers.add(fd)
89 else:
90 if fd in self.readers:
91 self.asyncio_loop.remove_reader(fd)
92 self.readers.remove(fd)
93 if events & IOLoop.WRITE:
94 if fd not in self.writers:
95 self.asyncio_loop.add_writer(
96 fd, self._handle_events, fd, IOLoop.WRITE)
97 self.writers.add(fd)
98 else:
99 if fd in self.writers:
100 self.asyncio_loop.remove_writer(fd)
101 self.writers.remove(fd)
102
103 def remove_handler(self, fd):
104 fd, fileobj = self.split_fd(fd)
105 if fd not in self.handlers:
106 return
107 if fd in self.readers:
108 self.asyncio_loop.remove_reader(fd)
109 self.readers.remove(fd)
110 if fd in self.writers:
111 self.asyncio_loop.remove_writer(fd)
112 self.writers.remove(fd)
113 del self.handlers[fd]
114
115 def _handle_events(self, fd, events):
116 fileobj, handler_func = self.handlers[fd]
117 handler_func(fileobj, events)
118
119 def start(self):
120 try:
121 old_loop = asyncio.get_event_loop()
122 except (RuntimeError, AssertionError):
123 old_loop = None
124 try:
125 self._setup_logging()
126 asyncio.set_event_loop(self.asyncio_loop)
127 self.asyncio_loop.run_forever()
128 finally:
129 asyncio.set_event_loop(old_loop)
130
131 def stop(self):
132 self.asyncio_loop.stop()
133
134 def call_at(self, when, callback, *args, **kwargs):
135 # asyncio.call_at supports *args but not **kwargs, so bind them here.
136 # We do not synchronize self.time and asyncio_loop.time, so
137 # convert from absolute to relative.
138 return self.asyncio_loop.call_later(
139 max(0, when - self.time()), self._run_callback,
140 functools.partial(stack_context.wrap(callback), *args, **kwargs))
141
142 def remove_timeout(self, timeout):
143 timeout.cancel()
144
145 def add_callback(self, callback, *args, **kwargs):
146 try:
147 self.asyncio_loop.call_soon_threadsafe(
148 self._run_callback,
149 functools.partial(stack_context.wrap(callback), *args, **kwargs))
150 except RuntimeError:
151 # "Event loop is closed". Swallow the exception for
152 # consistency with PollIOLoop (and logical consistency
153 # with the fact that we can't guarantee that an
154 # add_callback that completes without error will
155 # eventually execute).
156 pass
157
158 add_callback_from_signal = add_callback
159
160 def run_in_executor(self, executor, func, *args):
161 return self.asyncio_loop.run_in_executor(executor, func, *args)
162
163 def set_default_executor(self, executor):
164 return self.asyncio_loop.set_default_executor(executor)
165
166
167 class AsyncIOMainLoop(BaseAsyncIOLoop):
168 """``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the
169 current ``asyncio`` event loop (i.e. the one returned by
170 ``asyncio.get_event_loop()``).
171
172 .. deprecated:: 5.0
173
174 Now used automatically when appropriate; it is no longer necessary
175 to refer to this class directly.
176
177 .. versionchanged:: 5.0
178
179 Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.
180 """
181 def initialize(self, **kwargs):
182 super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs)
183
184 def make_current(self):
185 # AsyncIOMainLoop already refers to the current asyncio loop so
186 # nothing to do here.
187 pass
188
189
190 class AsyncIOLoop(BaseAsyncIOLoop):
191 """``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.
192 This class follows the usual Tornado semantics for creating new
193 ``IOLoops``; these loops are not necessarily related to the
194 ``asyncio`` default event loop.
195
196 Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object
197 can be accessed with the ``asyncio_loop`` attribute.
198
199 .. versionchanged:: 5.0
200
201 When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets
202 the current `asyncio` event loop.
203
204 .. deprecated:: 5.0
205
206 Now used automatically when appropriate; it is no longer necessary
207 to refer to this class directly.
208 """
209 def initialize(self, **kwargs):
210 self.is_current = False
211 loop = asyncio.new_event_loop()
212 try:
213 super(AsyncIOLoop, self).initialize(loop, **kwargs)
214 except Exception:
215 # If initialize() does not succeed (taking ownership of the loop),
216 # we have to close it.
217 loop.close()
218 raise
219
220 def close(self, all_fds=False):
221 if self.is_current:
222 self.clear_current()
223 super(AsyncIOLoop, self).close(all_fds=all_fds)
224
225 def make_current(self):
226 if not self.is_current:
227 try:
228 self.old_asyncio = asyncio.get_event_loop()
229 except (RuntimeError, AssertionError):
230 self.old_asyncio = None
231 self.is_current = True
232 asyncio.set_event_loop(self.asyncio_loop)
233
234 def _clear_current_hook(self):
235 if self.is_current:
236 asyncio.set_event_loop(self.old_asyncio)
237 self.is_current = False
238
239
240 def to_tornado_future(asyncio_future):
241 """Convert an `asyncio.Future` to a `tornado.concurrent.Future`.
242
243 .. versionadded:: 4.1
244
245 .. deprecated:: 5.0
246 Tornado ``Futures`` have been merged with `asyncio.Future`,
247 so this method is now a no-op.
248 """
249 return asyncio_future
250
251
252 def to_asyncio_future(tornado_future):
253 """Convert a Tornado yieldable object to an `asyncio.Future`.
254
255 .. versionadded:: 4.1
256
257 .. versionchanged:: 4.3
258 Now accepts any yieldable object, not just
259 `tornado.concurrent.Future`.
260
261 .. deprecated:: 5.0
262 Tornado ``Futures`` have been merged with `asyncio.Future`,
263 so this method is now equivalent to `tornado.gen.convert_yielded`.
264 """
265 return convert_yielded(tornado_future)
266
267
268 class AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy):
269 """Event loop policy that allows loop creation on any thread.
270
271 The default `asyncio` event loop policy only automatically creates
272 event loops in the main threads. Other threads must create event
273 loops explicitly or `asyncio.get_event_loop` (and therefore
274 `.IOLoop.current`) will fail. Installing this policy allows event
275 loops to be created automatically on any thread, matching the
276 behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
277
278 Usage::
279
280 asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
281
282 .. versionadded:: 5.0
283
284 """
285 def get_event_loop(self):
286 try:
287 return super().get_event_loop()
288 except (RuntimeError, AssertionError):
289 # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
290 # and changed to a RuntimeError in 3.4.3.
291 # "There is no current event loop in thread %r"
292 loop = self.new_event_loop()
293 self.set_event_loop(loop)
294 return loop
295
[end of tornado/platform/asyncio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/tornado/platform/asyncio.py b/tornado/platform/asyncio.py
--- a/tornado/platform/asyncio.py
+++ b/tornado/platform/asyncio.py
@@ -62,8 +62,13 @@
self.remove_handler(fd)
if all_fds:
self.close_fd(fileobj)
- self.asyncio_loop.close()
+ # Remove the mapping before closing the asyncio loop. If this
+ # happened in the other order, we could race against another
+ # initialize() call which would see the closed asyncio loop,
+ # assume it was closed from the asyncio side, and do this
+ # cleanup for us, leading to a KeyError.
del IOLoop._ioloop_for_asyncio[self.asyncio_loop]
+ self.asyncio_loop.close()
def add_handler(self, fd, handler, events):
fd, fileobj = self.split_fd(fd)
|
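The fix rendered above removes the loop from `IOLoop._ioloop_for_asyncio` before calling `asyncio_loop.close()`. The ordering matters because `initialize()` prunes already-closed loops from that shared map; if the loop is closed first, another `initialize()` call can delete the entry and the subsequent `del` raises the reported `KeyError`. A minimal sketch of the safe ordering, using stand-in objects rather than real event loops:

```python
# Sketch only: FakeAsyncioLoop and the module-level dict are hypothetical
# stand-ins for asyncio event loops and IOLoop._ioloop_for_asyncio.
_ioloop_for_asyncio = {}


class FakeAsyncioLoop:
    def __init__(self):
        self._closed = False

    def is_closed(self):
        return self._closed

    def close(self):
        self._closed = True


def close_ioloop(asyncio_loop):
    # drop the shared mapping first; only then mark the loop as closed so that
    # cleanup code scanning for closed loops cannot race this deletion
    del _ioloop_for_asyncio[asyncio_loop]
    asyncio_loop.close()


loop = FakeAsyncioLoop()
_ioloop_for_asyncio[loop] = "ioloop instance"
close_ioloop(loop)
print(loop.is_closed(), loop in _ioloop_for_asyncio)  # True False
```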
{"golden_diff": "diff --git a/tornado/platform/asyncio.py b/tornado/platform/asyncio.py\n--- a/tornado/platform/asyncio.py\n+++ b/tornado/platform/asyncio.py\n@@ -62,8 +62,13 @@\n self.remove_handler(fd)\n if all_fds:\n self.close_fd(fileobj)\n- self.asyncio_loop.close()\n+ # Remove the mapping before closing the asyncio loop. If this\n+ # happened in the other order, we could race against another\n+ # initialize() call which would see the closed asyncio loop,\n+ # assume it was closed from the asyncio side, and do this\n+ # cleanup for us, leading to a KeyError.\n del IOLoop._ioloop_for_asyncio[self.asyncio_loop]\n+ self.asyncio_loop.close()\n \n def add_handler(self, fd, handler, events):\n fd, fileobj = self.split_fd(fd)\n", "issue": "KeyError when closing IOLoop\nThis started showing up in Dask's test suite recently:\r\n\r\n```python-traceback\r\ndistributed/utils_test.py:144: in pristine_loop\r\n loop.close(all_fds=True)\r\n../../Software/anaconda/envs/test-environment/lib/python3.6/site-packages/tornado/platform/asyncio.py:223: in close\r\n super(AsyncIOLoop, self).close(all_fds=all_fds)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\nself = <tornado.platform.asyncio.AsyncIOLoop object at 0x7f5751d46eb8>, all_fds = True\r\n\r\n def close(self, all_fds=False):\r\n self.closing = True\r\n for fd in list(self.handlers):\r\n fileobj, handler_func = self.handlers[fd]\r\n self.remove_handler(fd)\r\n if all_fds:\r\n self.close_fd(fileobj)\r\n self.asyncio_loop.close()\r\n> del IOLoop._ioloop_for_asyncio[self.asyncio_loop]\r\nE KeyError: <_UnixSelectorEventLoop running=False closed=True debug=False>\r\n```\r\n\r\nThis is likely due to some change in upstream dependencies. It looks like Tornado hasn't had a release during the time when this arose, so it's likely something else. Still, I thought I'd raise the issue.\n", "before_files": [{"content": "\"\"\"Bridges between the `asyncio` module and Tornado IOLoop.\n\n.. versionadded:: 3.2\n\nThis module integrates Tornado with the ``asyncio`` module introduced\nin Python 3.4. This makes it possible to combine the two libraries on\nthe same event loop.\n\n.. deprecated:: 5.0\n\n While the code in this module is still used, it is now enabled\n automatically when `asyncio` is available, so applications should\n no longer need to refer to this module directly.\n\n.. note::\n\n Tornado requires the `~asyncio.AbstractEventLoop.add_reader` family of\n methods, so it is not compatible with the `~asyncio.ProactorEventLoop` on\n Windows. Use the `~asyncio.SelectorEventLoop` instead.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nimport functools\n\nfrom tornado.gen import convert_yielded\nfrom tornado.ioloop import IOLoop\nfrom tornado import stack_context\n\nimport asyncio\n\n\nclass BaseAsyncIOLoop(IOLoop):\n def initialize(self, asyncio_loop, **kwargs):\n self.asyncio_loop = asyncio_loop\n # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler)\n self.handlers = {}\n # Set of fds listening for reads/writes\n self.readers = set()\n self.writers = set()\n self.closing = False\n # If an asyncio loop was closed through an asyncio interface\n # instead of IOLoop.close(), we'd never hear about it and may\n # have left a dangling reference in our map. 
In case an\n # application (or, more likely, a test suite) creates and\n # destroys a lot of event loops in this way, check here to\n # ensure that we don't have a lot of dead loops building up in\n # the map.\n #\n # TODO(bdarnell): consider making self.asyncio_loop a weakref\n # for AsyncIOMainLoop and make _ioloop_for_asyncio a\n # WeakKeyDictionary.\n for loop in list(IOLoop._ioloop_for_asyncio):\n if loop.is_closed():\n del IOLoop._ioloop_for_asyncio[loop]\n IOLoop._ioloop_for_asyncio[asyncio_loop] = self\n super(BaseAsyncIOLoop, self).initialize(**kwargs)\n\n def close(self, all_fds=False):\n self.closing = True\n for fd in list(self.handlers):\n fileobj, handler_func = self.handlers[fd]\n self.remove_handler(fd)\n if all_fds:\n self.close_fd(fileobj)\n self.asyncio_loop.close()\n del IOLoop._ioloop_for_asyncio[self.asyncio_loop]\n\n def add_handler(self, fd, handler, events):\n fd, fileobj = self.split_fd(fd)\n if fd in self.handlers:\n raise ValueError(\"fd %s added twice\" % fd)\n self.handlers[fd] = (fileobj, stack_context.wrap(handler))\n if events & IOLoop.READ:\n self.asyncio_loop.add_reader(\n fd, self._handle_events, fd, IOLoop.READ)\n self.readers.add(fd)\n if events & IOLoop.WRITE:\n self.asyncio_loop.add_writer(\n fd, self._handle_events, fd, IOLoop.WRITE)\n self.writers.add(fd)\n\n def update_handler(self, fd, events):\n fd, fileobj = self.split_fd(fd)\n if events & IOLoop.READ:\n if fd not in self.readers:\n self.asyncio_loop.add_reader(\n fd, self._handle_events, fd, IOLoop.READ)\n self.readers.add(fd)\n else:\n if fd in self.readers:\n self.asyncio_loop.remove_reader(fd)\n self.readers.remove(fd)\n if events & IOLoop.WRITE:\n if fd not in self.writers:\n self.asyncio_loop.add_writer(\n fd, self._handle_events, fd, IOLoop.WRITE)\n self.writers.add(fd)\n else:\n if fd in self.writers:\n self.asyncio_loop.remove_writer(fd)\n self.writers.remove(fd)\n\n def remove_handler(self, fd):\n fd, fileobj = self.split_fd(fd)\n if fd not in self.handlers:\n return\n if fd in self.readers:\n self.asyncio_loop.remove_reader(fd)\n self.readers.remove(fd)\n if fd in self.writers:\n self.asyncio_loop.remove_writer(fd)\n self.writers.remove(fd)\n del self.handlers[fd]\n\n def _handle_events(self, fd, events):\n fileobj, handler_func = self.handlers[fd]\n handler_func(fileobj, events)\n\n def start(self):\n try:\n old_loop = asyncio.get_event_loop()\n except (RuntimeError, AssertionError):\n old_loop = None\n try:\n self._setup_logging()\n asyncio.set_event_loop(self.asyncio_loop)\n self.asyncio_loop.run_forever()\n finally:\n asyncio.set_event_loop(old_loop)\n\n def stop(self):\n self.asyncio_loop.stop()\n\n def call_at(self, when, callback, *args, **kwargs):\n # asyncio.call_at supports *args but not **kwargs, so bind them here.\n # We do not synchronize self.time and asyncio_loop.time, so\n # convert from absolute to relative.\n return self.asyncio_loop.call_later(\n max(0, when - self.time()), self._run_callback,\n functools.partial(stack_context.wrap(callback), *args, **kwargs))\n\n def remove_timeout(self, timeout):\n timeout.cancel()\n\n def add_callback(self, callback, *args, **kwargs):\n try:\n self.asyncio_loop.call_soon_threadsafe(\n self._run_callback,\n functools.partial(stack_context.wrap(callback), *args, **kwargs))\n except RuntimeError:\n # \"Event loop is closed\". 
Swallow the exception for\n # consistency with PollIOLoop (and logical consistency\n # with the fact that we can't guarantee that an\n # add_callback that completes without error will\n # eventually execute).\n pass\n\n add_callback_from_signal = add_callback\n\n def run_in_executor(self, executor, func, *args):\n return self.asyncio_loop.run_in_executor(executor, func, *args)\n\n def set_default_executor(self, executor):\n return self.asyncio_loop.set_default_executor(executor)\n\n\nclass AsyncIOMainLoop(BaseAsyncIOLoop):\n \"\"\"``AsyncIOMainLoop`` creates an `.IOLoop` that corresponds to the\n current ``asyncio`` event loop (i.e. the one returned by\n ``asyncio.get_event_loop()``).\n\n .. deprecated:: 5.0\n\n Now used automatically when appropriate; it is no longer necessary\n to refer to this class directly.\n\n .. versionchanged:: 5.0\n\n Closing an `AsyncIOMainLoop` now closes the underlying asyncio loop.\n \"\"\"\n def initialize(self, **kwargs):\n super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), **kwargs)\n\n def make_current(self):\n # AsyncIOMainLoop already refers to the current asyncio loop so\n # nothing to do here.\n pass\n\n\nclass AsyncIOLoop(BaseAsyncIOLoop):\n \"\"\"``AsyncIOLoop`` is an `.IOLoop` that runs on an ``asyncio`` event loop.\n This class follows the usual Tornado semantics for creating new\n ``IOLoops``; these loops are not necessarily related to the\n ``asyncio`` default event loop.\n\n Each ``AsyncIOLoop`` creates a new ``asyncio.EventLoop``; this object\n can be accessed with the ``asyncio_loop`` attribute.\n\n .. versionchanged:: 5.0\n\n When an ``AsyncIOLoop`` becomes the current `.IOLoop`, it also sets\n the current `asyncio` event loop.\n\n .. deprecated:: 5.0\n\n Now used automatically when appropriate; it is no longer necessary\n to refer to this class directly.\n \"\"\"\n def initialize(self, **kwargs):\n self.is_current = False\n loop = asyncio.new_event_loop()\n try:\n super(AsyncIOLoop, self).initialize(loop, **kwargs)\n except Exception:\n # If initialize() does not succeed (taking ownership of the loop),\n # we have to close it.\n loop.close()\n raise\n\n def close(self, all_fds=False):\n if self.is_current:\n self.clear_current()\n super(AsyncIOLoop, self).close(all_fds=all_fds)\n\n def make_current(self):\n if not self.is_current:\n try:\n self.old_asyncio = asyncio.get_event_loop()\n except (RuntimeError, AssertionError):\n self.old_asyncio = None\n self.is_current = True\n asyncio.set_event_loop(self.asyncio_loop)\n\n def _clear_current_hook(self):\n if self.is_current:\n asyncio.set_event_loop(self.old_asyncio)\n self.is_current = False\n\n\ndef to_tornado_future(asyncio_future):\n \"\"\"Convert an `asyncio.Future` to a `tornado.concurrent.Future`.\n\n .. versionadded:: 4.1\n\n .. deprecated:: 5.0\n Tornado ``Futures`` have been merged with `asyncio.Future`,\n so this method is now a no-op.\n \"\"\"\n return asyncio_future\n\n\ndef to_asyncio_future(tornado_future):\n \"\"\"Convert a Tornado yieldable object to an `asyncio.Future`.\n\n .. versionadded:: 4.1\n\n .. versionchanged:: 4.3\n Now accepts any yieldable object, not just\n `tornado.concurrent.Future`.\n\n .. 
deprecated:: 5.0\n Tornado ``Futures`` have been merged with `asyncio.Future`,\n so this method is now equivalent to `tornado.gen.convert_yielded`.\n \"\"\"\n return convert_yielded(tornado_future)\n\n\nclass AnyThreadEventLoopPolicy(asyncio.DefaultEventLoopPolicy):\n \"\"\"Event loop policy that allows loop creation on any thread.\n\n The default `asyncio` event loop policy only automatically creates\n event loops in the main threads. Other threads must create event\n loops explicitly or `asyncio.get_event_loop` (and therefore\n `.IOLoop.current`) will fail. Installing this policy allows event\n loops to be created automatically on any thread, matching the\n behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).\n\n Usage::\n\n asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())\n\n .. versionadded:: 5.0\n\n \"\"\"\n def get_event_loop(self):\n try:\n return super().get_event_loop()\n except (RuntimeError, AssertionError):\n # This was an AssertionError in python 3.4.2 (which ships with debian jessie)\n # and changed to a RuntimeError in 3.4.3.\n # \"There is no current event loop in thread %r\"\n loop = self.new_event_loop()\n self.set_event_loop(loop)\n return loop\n", "path": "tornado/platform/asyncio.py"}]}
| 4,089 | 200 |
gh_patches_debug_252
|
rasdani/github-patches
|
git_diff
|
google-deepmind__dm-haiku-48
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jax version upgrade (AttributeError: CallPrimitive)
Using the current version of master 66f9c69 of Haiku, I am getting the following error on Colab
```
AttributeError Traceback (most recent call last)
<ipython-input-3-3a9e6adbfff5> in <module>()
----> 1 import haiku as hk
/usr/local/lib/python3.6/dist-packages/haiku/__init__.py in <module>()
17
18 from haiku import data_structures
---> 19 from haiku import experimental
20 from haiku import initializers
21 from haiku import nets
/usr/local/lib/python3.6/dist-packages/haiku/experimental.py in <module>()
22 from haiku._src.base import custom_getter
23 from haiku._src.base import ParamContext
---> 24 from haiku._src.dot import to_dot
25 from haiku._src.lift import lift
26 from haiku._src.module import profiler_name_scopes
/usr/local/lib/python3.6/dist-packages/haiku/_src/dot.py in <module>()
23
24 from haiku._src import data_structures
---> 25 from haiku._src import module
26 from haiku._src import utils
27 import jax
/usr/local/lib/python3.6/dist-packages/haiku/_src/module.py in <module>()
26 from haiku._src import base
27 from haiku._src import data_structures
---> 28 from haiku._src import named_call
29 from haiku._src import utils
30 import jax.numpy as jnp
/usr/local/lib/python3.6/dist-packages/haiku/_src/named_call.py in <module>()
29
30 # Registering named call as a primitive
---> 31 named_call_p = core.CallPrimitive('named_call')
32 # named_call is implemented as a plain core.call and only diverges
33 # under compilation (see named_call_translation_rule)
AttributeError: module 'jax.core' has no attribute 'CallPrimitive'
```
I believe that's because Haiku now requires `jax>=0.1.71`, while the version by default on Colab is `jax==0.1.69`. `CallPrimitive` was introduced in jax 0.1.71.
https://github.com/google/jax/blob/1545a29e6d69a7b3c7fdf9a49b38004759a9fbfa/jax/core.py#L1106-L1115
To reproduce (inside a Colab):
```python
import jax
print(jax.__version__) # 0.1.69
!pip install -q git+https://github.com/deepmind/dm-haiku
import haiku as hk
```
Run `!pip install -q --upgrade jax jaxlib` first in your Colab to fix this issue.
</issue>
<code>
[start of setup.py]
1 # Lint as: python3
2 # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 # ==============================================================================
16 """Setup for pip package."""
17
18 from setuptools import find_namespace_packages
19 from setuptools import setup
20
21
22 def _get_version():
23 with open('haiku/__init__.py') as fp:
24 for line in fp:
25 if line.startswith('__version__'):
26 g = {}
27 exec(line, g) # pylint: disable=exec-used
28 return g['__version__']
29 raise ValueError('`__version__` not defined in `haiku/__init__.py`')
30
31
32 def _parse_requirements(requirements_txt_path):
33 with open(requirements_txt_path) as fp:
34 return fp.read().splitlines()
35
36
37 _VERSION = _get_version()
38
39 EXTRA_PACKAGES = {
40 'jax': ['jax>=0.1.55'],
41 'jaxlib': ['jaxlib>=0.1.37'],
42 }
43
44 setup(
45 name='dm-haiku',
46 version=_VERSION,
47 url='https://github.com/deepmind/dm-haiku',
48 license='Apache 2.0',
49 author='DeepMind',
50 description='Haiku is a library for building neural networks in JAX.',
51 long_description=open('README.md').read(),
52 long_description_content_type='text/markdown',
53 author_email='[email protected]',
54 # Contained modules and scripts.
55 packages=find_namespace_packages(exclude=['*_test.py']),
56 install_requires=_parse_requirements('requirements.txt'),
57 extras_require=EXTRA_PACKAGES,
58 tests_require=_parse_requirements('requirements-test.txt'),
59 requires_python='>=3.6',
60 include_package_data=True,
61 zip_safe=False,
62 # PyPI package information.
63 classifiers=[
64 'Development Status :: 4 - Beta',
65 'Intended Audience :: Developers',
66 'Intended Audience :: Education',
67 'Intended Audience :: Science/Research',
68 'License :: OSI Approved :: Apache Software License',
69 'Programming Language :: Python :: 3',
70 'Programming Language :: Python :: 3.6',
71 'Programming Language :: Python :: 3.7',
72 'Topic :: Scientific/Engineering :: Mathematics',
73 'Topic :: Software Development :: Libraries :: Python Modules',
74 'Topic :: Software Development :: Libraries',
75 ],
76 )
77
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,8 +37,8 @@
_VERSION = _get_version()
EXTRA_PACKAGES = {
- 'jax': ['jax>=0.1.55'],
- 'jaxlib': ['jaxlib>=0.1.37'],
+ 'jax': ['jax>=0.1.71'],
+ 'jaxlib': ['jaxlib>=0.1.49'],
}
setup(
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,8 +37,8 @@\n _VERSION = _get_version()\n \n EXTRA_PACKAGES = {\n- 'jax': ['jax>=0.1.55'],\n- 'jaxlib': ['jaxlib>=0.1.37'],\n+ 'jax': ['jax>=0.1.71'],\n+ 'jaxlib': ['jaxlib>=0.1.49'],\n }\n \n setup(\n", "issue": "Jax version upgrade (AttributeError: CallPrimitive)\nUsing the current version of master 66f9c69 of Haiku, I am getting the following error on Colab\r\n```\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-3-3a9e6adbfff5> in <module>()\r\n----> 1 import haiku as hk\r\n\r\n/usr/local/lib/python3.6/dist-packages/haiku/__init__.py in <module>()\r\n 17 \r\n 18 from haiku import data_structures\r\n---> 19 from haiku import experimental\r\n 20 from haiku import initializers\r\n 21 from haiku import nets\r\n\r\n/usr/local/lib/python3.6/dist-packages/haiku/experimental.py in <module>()\r\n 22 from haiku._src.base import custom_getter\r\n 23 from haiku._src.base import ParamContext\r\n---> 24 from haiku._src.dot import to_dot\r\n 25 from haiku._src.lift import lift\r\n 26 from haiku._src.module import profiler_name_scopes\r\n\r\n/usr/local/lib/python3.6/dist-packages/haiku/_src/dot.py in <module>()\r\n 23 \r\n 24 from haiku._src import data_structures\r\n---> 25 from haiku._src import module\r\n 26 from haiku._src import utils\r\n 27 import jax\r\n\r\n/usr/local/lib/python3.6/dist-packages/haiku/_src/module.py in <module>()\r\n 26 from haiku._src import base\r\n 27 from haiku._src import data_structures\r\n---> 28 from haiku._src import named_call\r\n 29 from haiku._src import utils\r\n 30 import jax.numpy as jnp\r\n\r\n/usr/local/lib/python3.6/dist-packages/haiku/_src/named_call.py in <module>()\r\n 29 \r\n 30 # Registering named call as a primitive\r\n---> 31 named_call_p = core.CallPrimitive('named_call')\r\n 32 # named_call is implemented as a plain core.call and only diverges\r\n 33 # under compilation (see named_call_translation_rule)\r\n\r\nAttributeError: module 'jax.core' has no attribute 'CallPrimitive'\r\n```\r\n\r\nI believe that's because Haiku now requires `jax>=0.1.71`, while the version by default on Colab is `jax==0.1.69`. `CallPrimitive` was introduced in jax 0.1.71.\r\nhttps://github.com/google/jax/blob/1545a29e6d69a7b3c7fdf9a49b38004759a9fbfa/jax/core.py#L1106-L1115\r\n\r\nTo reproduce (inside a Colab):\r\n```python\r\nimport jax\r\nprint(jax.__version__) # 0.1.69\r\n\r\n!pip install -q git+https://github.com/deepmind/dm-haiku\r\nimport haiku as hk\r\n```\r\n\r\nRun `!pip install -q --upgrade jax jaxlib` first in your Colab to fix this issue.\n", "before_files": [{"content": "# Lint as: python3\n# Copyright 2019 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Setup for pip package.\"\"\"\n\nfrom setuptools import find_namespace_packages\nfrom setuptools import setup\n\n\ndef _get_version():\n with open('haiku/__init__.py') as fp:\n for line in fp:\n if line.startswith('__version__'):\n g = {}\n exec(line, g) # pylint: disable=exec-used\n return g['__version__']\n raise ValueError('`__version__` not defined in `haiku/__init__.py`')\n\n\ndef _parse_requirements(requirements_txt_path):\n with open(requirements_txt_path) as fp:\n return fp.read().splitlines()\n\n\n_VERSION = _get_version()\n\nEXTRA_PACKAGES = {\n 'jax': ['jax>=0.1.55'],\n 'jaxlib': ['jaxlib>=0.1.37'],\n}\n\nsetup(\n name='dm-haiku',\n version=_VERSION,\n url='https://github.com/deepmind/dm-haiku',\n license='Apache 2.0',\n author='DeepMind',\n description='Haiku is a library for building neural networks in JAX.',\n long_description=open('README.md').read(),\n long_description_content_type='text/markdown',\n author_email='[email protected]',\n # Contained modules and scripts.\n packages=find_namespace_packages(exclude=['*_test.py']),\n install_requires=_parse_requirements('requirements.txt'),\n extras_require=EXTRA_PACKAGES,\n tests_require=_parse_requirements('requirements-test.txt'),\n requires_python='>=3.6',\n include_package_data=True,\n zip_safe=False,\n # PyPI package information.\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]}
| 2,000 | 113 |
gh_patches_debug_3065
|
rasdani/github-patches
|
git_diff
|
coala__coala-3348
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wrong doc string syntax in coalib.bearlib.aspects.Root
The doc string of the `Root` aspectclass has a formatting issue at https://github.com/coala/coala/blob/master/coalib/bearlib/aspects/__init__.py#L61
You can see the wrongly rendered result at https://api.coala.io/en/latest/coalib.bearlib.aspects.html#module-coalib.bearlib.aspects
</issue>
<code>
[start of coalib/bearlib/aspects/__init__.py]
1 from .base import aspectbase
2 from .meta import aspectclass
3 from .taste import Taste, TasteError
4
5 __all__ = ['Root', 'Taste', 'TasteError', 'aspectclass']
6
7
8 class Root(aspectbase, metaclass=aspectclass):
9 """
10 The root aspectclass.
11
12 Define sub-aspectclasses with class-bound ``.subaspect`` decorator.
13 Definition string is taken from doc-string of decorated class.
14 Remaining docs are taken from a nested ``docs`` class.
15 Tastes are defined as class attributes that are instances of
16 :class:`coalib.bearlib.aspectclasses.Taste`.
17
18 >>> @Root.subaspect
19 ... class Formatting:
20 ... \"""
21 ... A parent aspect for code formatting aspects...
22 ... \"""
23
24 We can now create subaspects like this:
25
26 >>> @Formatting.subaspect
27 ... class LineLength:
28 ... \"""
29 ... This aspect controls the length of a line...
30 ... \"""
31 ... class docs:
32 ... example = "..."
33 ... example_language = "..."
34 ... importance_reason = "..."
35 ... fix_suggestions = "..."
36 ...
37 ... max_line_length = Taste[int](
38 ... "Maximum length allowed for a line.",
39 ... (80, 90, 120), default=80)
40
41 The representation will show the full "path" to the leaf of the tree:
42
43 >>> Root.Formatting.LineLength
44 <aspectclass 'Root.Formatting.LineLength'>
45
46 We can see, which settings are availables:
47
48 >>> Formatting.tastes
49 {}
50 >>> LineLength.tastes
51 {'max_line_length': <....Taste[int] object at ...>}
52
53 And instantiate the aspect with the values, they will be automatically
54 converted:
55
56 >>> Formatting('Python')
57 <coalib.bearlib.aspects.Root.Formatting object at 0x...>
58 >>> LineLength('Python', max_line_length="100").tastes
59 {'max_line_length': 100}
60
61 If no settings are given, the defaults will be taken>
62 >>> LineLength('Python').tastes
63 {'max_line_length': 80}
64
65 Tastes can also be made available for only specific languages:
66
67 >>> from coalib.bearlib.languages import Language
68 >>> @Language
69 ... class GreaterTrumpScript:
70 ... pass
71
72 >>> @Formatting.subaspect
73 ... class Greatness:
74 ... \"""
75 ... This aspect controls the greatness of a file...
76 ... \"""
77 ...
78 ... min_greatness = Taste[int](
79 ... "Minimum greatness factor needed for a TrumpScript file. "
80 ... "This is fact.",
81 ... (1000000, 1000000000, 1000000000000), default=1000000,
82 ... languages=('GreaterTrumpScript' ,))
83
84 >>> Greatness.tastes
85 {'min_greatness': <....Taste[int] object at ...>}
86 >>> Greatness('GreaterTrumpScript').tastes
87 {'min_greatness': 1000000}
88 >>> Greatness('GreaterTrumpScript', min_greatness=1000000000000).tastes
89 {'min_greatness': 1000000000000}
90
91 >>> Greatness('Python').tastes
92 {}
93
94 >>> Greatness('Python', min_greatness=1000000000)
95 ... # doctest: +NORMALIZE_WHITESPACE
96 Traceback (most recent call last):
97 ...
98 coalib.bearlib.aspects.taste.TasteError:
99 Root.Formatting.Greatness.min_greatness is not available ...
100
101 >>> Greatness('Python').min_greatness
102 ... # doctest: +NORMALIZE_WHITESPACE
103 Traceback (most recent call last):
104 ...
105 coalib.bearlib.aspects.taste.TasteError:
106 Root.Formatting.Greatness.min_greatness is not available ...
107 """
108 parent = None
109
110 _tastes = {}
111
[end of coalib/bearlib/aspects/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/coalib/bearlib/aspects/__init__.py b/coalib/bearlib/aspects/__init__.py
--- a/coalib/bearlib/aspects/__init__.py
+++ b/coalib/bearlib/aspects/__init__.py
@@ -58,7 +58,8 @@
>>> LineLength('Python', max_line_length="100").tastes
{'max_line_length': 100}
- If no settings are given, the defaults will be taken>
+ If no settings are given, the defaults will be taken:
+
>>> LineLength('Python').tastes
{'max_line_length': 80}
|
{"golden_diff": "diff --git a/coalib/bearlib/aspects/__init__.py b/coalib/bearlib/aspects/__init__.py\n--- a/coalib/bearlib/aspects/__init__.py\n+++ b/coalib/bearlib/aspects/__init__.py\n@@ -58,7 +58,8 @@\n >>> LineLength('Python', max_line_length=\"100\").tastes\n {'max_line_length': 100}\n \n- If no settings are given, the defaults will be taken>\n+ If no settings are given, the defaults will be taken:\n+\n >>> LineLength('Python').tastes\n {'max_line_length': 80}\n", "issue": "Wrong doc string syntax in coalib.bearlib.aspects.Root\nThe doc string of the `Root` aspectclass has a formatting issue at https://github.com/coala/coala/blob/master/coalib/bearlib/aspects/__init__.py#L61\r\n\r\nYou can see the wrongly rendered result at https://api.coala.io/en/latest/coalib.bearlib.aspects.html#module-coalib.bearlib.aspects\n", "before_files": [{"content": "from .base import aspectbase\nfrom .meta import aspectclass\nfrom .taste import Taste, TasteError\n\n__all__ = ['Root', 'Taste', 'TasteError', 'aspectclass']\n\n\nclass Root(aspectbase, metaclass=aspectclass):\n \"\"\"\n The root aspectclass.\n\n Define sub-aspectclasses with class-bound ``.subaspect`` decorator.\n Definition string is taken from doc-string of decorated class.\n Remaining docs are taken from a nested ``docs`` class.\n Tastes are defined as class attributes that are instances of\n :class:`coalib.bearlib.aspectclasses.Taste`.\n\n >>> @Root.subaspect\n ... class Formatting:\n ... \\\"\"\"\n ... A parent aspect for code formatting aspects...\n ... \\\"\"\"\n\n We can now create subaspects like this:\n\n >>> @Formatting.subaspect\n ... class LineLength:\n ... \\\"\"\"\n ... This aspect controls the length of a line...\n ... \\\"\"\"\n ... class docs:\n ... example = \"...\"\n ... example_language = \"...\"\n ... importance_reason = \"...\"\n ... fix_suggestions = \"...\"\n ...\n ... max_line_length = Taste[int](\n ... \"Maximum length allowed for a line.\",\n ... (80, 90, 120), default=80)\n\n The representation will show the full \"path\" to the leaf of the tree:\n\n >>> Root.Formatting.LineLength\n <aspectclass 'Root.Formatting.LineLength'>\n\n We can see, which settings are availables:\n\n >>> Formatting.tastes\n {}\n >>> LineLength.tastes\n {'max_line_length': <....Taste[int] object at ...>}\n\n And instantiate the aspect with the values, they will be automatically\n converted:\n\n >>> Formatting('Python')\n <coalib.bearlib.aspects.Root.Formatting object at 0x...>\n >>> LineLength('Python', max_line_length=\"100\").tastes\n {'max_line_length': 100}\n\n If no settings are given, the defaults will be taken>\n >>> LineLength('Python').tastes\n {'max_line_length': 80}\n\n Tastes can also be made available for only specific languages:\n\n >>> from coalib.bearlib.languages import Language\n >>> @Language\n ... class GreaterTrumpScript:\n ... pass\n\n >>> @Formatting.subaspect\n ... class Greatness:\n ... \\\"\"\"\n ... This aspect controls the greatness of a file...\n ... \\\"\"\"\n ...\n ... min_greatness = Taste[int](\n ... \"Minimum greatness factor needed for a TrumpScript file. \"\n ... \"This is fact.\",\n ... (1000000, 1000000000, 1000000000000), default=1000000,\n ... 
languages=('GreaterTrumpScript' ,))\n\n >>> Greatness.tastes\n {'min_greatness': <....Taste[int] object at ...>}\n >>> Greatness('GreaterTrumpScript').tastes\n {'min_greatness': 1000000}\n >>> Greatness('GreaterTrumpScript', min_greatness=1000000000000).tastes\n {'min_greatness': 1000000000000}\n\n >>> Greatness('Python').tastes\n {}\n\n >>> Greatness('Python', min_greatness=1000000000)\n ... # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n coalib.bearlib.aspects.taste.TasteError:\n Root.Formatting.Greatness.min_greatness is not available ...\n\n >>> Greatness('Python').min_greatness\n ... # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n coalib.bearlib.aspects.taste.TasteError:\n Root.Formatting.Greatness.min_greatness is not available ...\n \"\"\"\n parent = None\n\n _tastes = {}\n", "path": "coalib/bearlib/aspects/__init__.py"}]}
| 1,800 | 151 |
gh_patches_debug_2693
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-7665
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Python] jsonschema included twice in setup.py requires list.
<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->
### What is the problem?
`jsonschema` is included twice in the Python package [setup.py `requires` list](https://github.com/ray-project/ray/blob/master/python/setup.py#L176-L183). This is causing the usage of the Ray Python library within Bazel to fail during the analysis phase due to label duplication in the generated `py_library` target's `'deps'`:
```
ERROR: .../external/requirements_py3_pypi__ray_0_9_0_dev0/BUILD:6:1: Label '@requirements_py3_pypi__jsonschema_3_2_0//:pkg' is duplicated in the 'deps' attribute of rule 'pkg'
```
This bug was introduced in the [cluster json schema validator PR](https://github.com/ray-project/ray/pull/7261/files#diff-8cf6167d58ce775a08acafcfe6f40966).
*Ray version and other system information (Python version, TensorFlow version, OS):*
Ray master commit 90b553ed058a546e036374cd0919e00604892514 (most recent commit as of this issue filing)
### Reproduction (REQUIRED)
- [x] I have verified my script runs in a clean environment and reproduces the issue.
- [x] I have verified the issue also occurs with the [latest wheels](https://ray.readthedocs.io/en/latest/installation.html).
</issue>
<code>
[start of python/setup.py]
1 from itertools import chain
2 import os
3 import re
4 import shutil
5 import subprocess
6 import sys
7
8 from setuptools import setup, find_packages, Distribution
9 import setuptools.command.build_ext as _build_ext
10
11 # Ideally, we could include these files by putting them in a
12 # MANIFEST.in or using the package_data argument to setup, but the
13 # MANIFEST.in gets applied at the very beginning when setup.py runs
14 # before these files have been created, so we have to move the files
15 # manually.
16
17 # NOTE: The lists below must be kept in sync with ray/BUILD.bazel.
18 ray_files = [
19 "ray/core/src/ray/thirdparty/redis/src/redis-server",
20 "ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
21 "ray/core/src/plasma/plasma_store_server",
22 "ray/_raylet.so",
23 "ray/core/src/ray/raylet/raylet_monitor",
24 "ray/core/src/ray/gcs/gcs_server",
25 "ray/core/src/ray/raylet/raylet",
26 "ray/dashboard/dashboard.py",
27 "ray/streaming/_streaming.so",
28 ]
29
30 build_java = os.getenv("RAY_INSTALL_JAVA") == "1"
31 if build_java:
32 ray_files.append("ray/jars/ray_dist.jar")
33
34 # These are the directories where automatically generated Python protobuf
35 # bindings are created.
36 generated_python_directories = [
37 "ray/core/generated",
38 "ray/streaming/generated",
39 ]
40
41 optional_ray_files = []
42
43 ray_autoscaler_files = [
44 "ray/autoscaler/aws/example-full.yaml",
45 "ray/autoscaler/azure/example-full.yaml",
46 "ray/autoscaler/gcp/example-full.yaml",
47 "ray/autoscaler/local/example-full.yaml",
48 "ray/autoscaler/kubernetes/example-full.yaml",
49 "ray/autoscaler/kubernetes/kubectl-rsync.sh",
50 "ray/autoscaler/ray-schema.json"
51 ]
52
53 ray_project_files = [
54 "ray/projects/schema.json", "ray/projects/templates/cluster_template.yaml",
55 "ray/projects/templates/project_template.yaml",
56 "ray/projects/templates/requirements.txt"
57 ]
58
59 ray_dashboard_files = [
60 os.path.join(dirpath, filename)
61 for dirpath, dirnames, filenames in os.walk("ray/dashboard/client/build")
62 for filename in filenames
63 ]
64
65 optional_ray_files += ray_autoscaler_files
66 optional_ray_files += ray_project_files
67 optional_ray_files += ray_dashboard_files
68
69 if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
70 ray_files += [
71 "ray/core/src/credis/build/src/libmember.so",
72 "ray/core/src/credis/build/src/libmaster.so",
73 "ray/core/src/credis/redis/src/redis-server"
74 ]
75
76 extras = {
77 "debug": [],
78 "dashboard": [],
79 "serve": ["uvicorn", "pygments", "werkzeug", "flask", "pandas", "blist"],
80 "tune": ["tabulate", "tensorboardX"]
81 }
82
83 extras["rllib"] = extras["tune"] + [
84 "atari_py",
85 "dm_tree",
86 "gym[atari]",
87 "lz4",
88 "opencv-python-headless",
89 "pyyaml",
90 "scipy",
91 ]
92
93 extras["streaming"] = ["msgpack >= 0.6.2"]
94
95 extras["all"] = list(set(chain.from_iterable(extras.values())))
96
97
98 class build_ext(_build_ext.build_ext):
99 def run(self):
100 # Note: We are passing in sys.executable so that we use the same
101 # version of Python to build packages inside the build.sh script. Note
102 # that certain flags will not be passed along such as --user or sudo.
103 # TODO(rkn): Fix this.
104 command = ["../build.sh", "-p", sys.executable]
105 if build_java:
106 # Also build binaries for Java if the above env variable exists.
107 command += ["-l", "python,java"]
108 subprocess.check_call(command)
109
110 # We also need to install pickle5 along with Ray, so make sure that the
111 # relevant non-Python pickle5 files get copied.
112 pickle5_files = self.walk_directory("./ray/pickle5_files/pickle5")
113
114 thirdparty_files = self.walk_directory("./ray/thirdparty_files")
115
116 files_to_include = ray_files + pickle5_files + thirdparty_files
117
118 # Copy over the autogenerated protobuf Python bindings.
119 for directory in generated_python_directories:
120 for filename in os.listdir(directory):
121 if filename[-3:] == ".py":
122 files_to_include.append(os.path.join(directory, filename))
123
124 for filename in files_to_include:
125 self.move_file(filename)
126
127 # Try to copy over the optional files.
128 for filename in optional_ray_files:
129 try:
130 self.move_file(filename)
131 except Exception:
132 print("Failed to copy optional file {}. This is ok."
133 .format(filename))
134
135 def walk_directory(self, directory):
136 file_list = []
137 for (root, dirs, filenames) in os.walk(directory):
138 for name in filenames:
139 file_list.append(os.path.join(root, name))
140 return file_list
141
142 def move_file(self, filename):
143 # TODO(rkn): This feels very brittle. It may not handle all cases. See
144 # https://github.com/apache/arrow/blob/master/python/setup.py for an
145 # example.
146 source = filename
147 destination = os.path.join(self.build_lib, filename)
148 # Create the target directory if it doesn't already exist.
149 parent_directory = os.path.dirname(destination)
150 if not os.path.exists(parent_directory):
151 os.makedirs(parent_directory)
152 if not os.path.exists(destination):
153 print("Copying {} to {}.".format(source, destination))
154 shutil.copy(source, destination, follow_symlinks=True)
155
156
157 class BinaryDistribution(Distribution):
158 def has_ext_modules(self):
159 return True
160
161
162 def find_version(*filepath):
163 # Extract version information from filepath
164 here = os.path.abspath(os.path.dirname(__file__))
165 with open(os.path.join(here, *filepath)) as fp:
166 version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
167 fp.read(), re.M)
168 if version_match:
169 return version_match.group(1)
170 raise RuntimeError("Unable to find version string.")
171
172
173 requires = [
174 "numpy >= 1.16",
175 "filelock",
176 "jsonschema",
177 "funcsigs",
178 "click",
179 "colorama",
180 "packaging",
181 "pytest",
182 "pyyaml",
183 "jsonschema",
184 "redis>=3.3.2",
185 # NOTE: Don't upgrade the version of six! Doing so causes installation
186 # problems. See https://github.com/ray-project/ray/issues/4169.
187 "six >= 1.0.0",
188 "faulthandler;python_version<'3.3'",
189 "protobuf >= 3.8.0",
190 "cloudpickle",
191 "py-spy >= 0.2.0",
192 "aiohttp",
193 "google",
194 "grpcio"
195 ]
196
197 setup(
198 name="ray",
199 version=find_version("ray", "__init__.py"),
200 author="Ray Team",
201 author_email="[email protected]",
202 description=("A system for parallel and distributed Python that unifies "
203 "the ML ecosystem."),
204 long_description=open("../README.rst").read(),
205 url="https://github.com/ray-project/ray",
206 keywords=("ray distributed parallel machine-learning "
207 "reinforcement-learning deep-learning python"),
208 packages=find_packages(),
209 cmdclass={"build_ext": build_ext},
210 # The BinaryDistribution argument triggers build_ext.
211 distclass=BinaryDistribution,
212 install_requires=requires,
213 setup_requires=["cython >= 0.29"],
214 extras_require=extras,
215 entry_points={
216 "console_scripts": [
217 "ray=ray.scripts.scripts:main",
218 "rllib=ray.rllib.scripts:cli [rllib]", "tune=ray.tune.scripts:cli"
219 ]
220 },
221 include_package_data=True,
222 zip_safe=False,
223 license="Apache 2.0")
224
[end of python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -180,7 +180,6 @@
"packaging",
"pytest",
"pyyaml",
- "jsonschema",
"redis>=3.3.2",
# NOTE: Don't upgrade the version of six! Doing so causes installation
# problems. See https://github.com/ray-project/ray/issues/4169.
|
{"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -180,7 +180,6 @@\n \"packaging\",\n \"pytest\",\n \"pyyaml\",\n- \"jsonschema\",\n \"redis>=3.3.2\",\n # NOTE: Don't upgrade the version of six! Doing so causes installation\n # problems. See https://github.com/ray-project/ray/issues/4169.\n", "issue": "[Python] jsonschema included twice in setup.py requires list.\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem?\r\n\r\n`jsonschema` is included twice in the Python package [setup.py `requires` list](https://github.com/ray-project/ray/blob/master/python/setup.py#L176-L183). This is causing the usage of the Ray Python library within Bazel to fail during the analysis phase due to label duplication in the generated `py_library` target's `'deps'`:\r\n\r\n```\r\nERROR: .../external/requirements_py3_pypi__ray_0_9_0_dev0/BUILD:6:1: Label '@requirements_py3_pypi__jsonschema_3_2_0//:pkg' is duplicated in the 'deps' attribute of rule 'pkg'\r\n```\r\n\r\nThis bug was introduced in the [cluster json schema validator PR](https://github.com/ray-project/ray/pull/7261/files#diff-8cf6167d58ce775a08acafcfe6f40966).\r\n\r\n*Ray version and other system information (Python version, TensorFlow version, OS):*\r\n\r\nRay master commit 90b553ed058a546e036374cd0919e00604892514 (most recent commit as of this issue filing)\r\n\r\n\r\n### Reproduction (REQUIRED)\r\n\r\n\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [x] I have verified the issue also occurs with the [latest wheels](https://ray.readthedocs.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "from itertools import chain\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\n\n# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.\nray_files = [\n \"ray/core/src/ray/thirdparty/redis/src/redis-server\",\n \"ray/core/src/ray/gcs/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store_server\",\n \"ray/_raylet.so\",\n \"ray/core/src/ray/raylet/raylet_monitor\",\n \"ray/core/src/ray/gcs/gcs_server\",\n \"ray/core/src/ray/raylet/raylet\",\n \"ray/dashboard/dashboard.py\",\n \"ray/streaming/_streaming.so\",\n]\n\nbuild_java = os.getenv(\"RAY_INSTALL_JAVA\") == \"1\"\nif build_java:\n ray_files.append(\"ray/jars/ray_dist.jar\")\n\n# These are the directories where automatically generated Python protobuf\n# bindings are created.\ngenerated_python_directories = [\n \"ray/core/generated\",\n \"ray/streaming/generated\",\n]\n\noptional_ray_files = []\n\nray_autoscaler_files = [\n \"ray/autoscaler/aws/example-full.yaml\",\n \"ray/autoscaler/azure/example-full.yaml\",\n \"ray/autoscaler/gcp/example-full.yaml\",\n \"ray/autoscaler/local/example-full.yaml\",\n \"ray/autoscaler/kubernetes/example-full.yaml\",\n \"ray/autoscaler/kubernetes/kubectl-rsync.sh\",\n \"ray/autoscaler/ray-schema.json\"\n]\n\nray_project_files = [\n \"ray/projects/schema.json\", \"ray/projects/templates/cluster_template.yaml\",\n 
\"ray/projects/templates/project_template.yaml\",\n \"ray/projects/templates/requirements.txt\"\n]\n\nray_dashboard_files = [\n os.path.join(dirpath, filename)\n for dirpath, dirnames, filenames in os.walk(\"ray/dashboard/client/build\")\n for filename in filenames\n]\n\noptional_ray_files += ray_autoscaler_files\noptional_ray_files += ray_project_files\noptional_ray_files += ray_dashboard_files\n\nif \"RAY_USE_NEW_GCS\" in os.environ and os.environ[\"RAY_USE_NEW_GCS\"] == \"on\":\n ray_files += [\n \"ray/core/src/credis/build/src/libmember.so\",\n \"ray/core/src/credis/build/src/libmaster.so\",\n \"ray/core/src/credis/redis/src/redis-server\"\n ]\n\nextras = {\n \"debug\": [],\n \"dashboard\": [],\n \"serve\": [\"uvicorn\", \"pygments\", \"werkzeug\", \"flask\", \"pandas\", \"blist\"],\n \"tune\": [\"tabulate\", \"tensorboardX\"]\n}\n\nextras[\"rllib\"] = extras[\"tune\"] + [\n \"atari_py\",\n \"dm_tree\",\n \"gym[atari]\",\n \"lz4\",\n \"opencv-python-headless\",\n \"pyyaml\",\n \"scipy\",\n]\n\nextras[\"streaming\"] = [\"msgpack >= 0.6.2\"]\n\nextras[\"all\"] = list(set(chain.from_iterable(extras.values())))\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build packages inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n command = [\"../build.sh\", \"-p\", sys.executable]\n if build_java:\n # Also build binaries for Java if the above env variable exists.\n command += [\"-l\", \"python,java\"]\n subprocess.check_call(command)\n\n # We also need to install pickle5 along with Ray, so make sure that the\n # relevant non-Python pickle5 files get copied.\n pickle5_files = self.walk_directory(\"./ray/pickle5_files/pickle5\")\n\n thirdparty_files = self.walk_directory(\"./ray/thirdparty_files\")\n\n files_to_include = ray_files + pickle5_files + thirdparty_files\n\n # Copy over the autogenerated protobuf Python bindings.\n for directory in generated_python_directories:\n for filename in os.listdir(directory):\n if filename[-3:] == \".py\":\n files_to_include.append(os.path.join(directory, filename))\n\n for filename in files_to_include:\n self.move_file(filename)\n\n # Try to copy over the optional files.\n for filename in optional_ray_files:\n try:\n self.move_file(filename)\n except Exception:\n print(\"Failed to copy optional file {}. This is ok.\"\n .format(filename))\n\n def walk_directory(self, directory):\n file_list = []\n for (root, dirs, filenames) in os.walk(directory):\n for name in filenames:\n file_list.append(os.path.join(root, name))\n return file_list\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n if not os.path.exists(destination):\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination, follow_symlinks=True)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\ndef find_version(*filepath):\n # Extract version information from filepath\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *filepath)) as fp:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\nrequires = [\n \"numpy >= 1.16\",\n \"filelock\",\n \"jsonschema\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"packaging\",\n \"pytest\",\n \"pyyaml\",\n \"jsonschema\",\n \"redis>=3.3.2\",\n # NOTE: Don't upgrade the version of six! Doing so causes installation\n # problems. See https://github.com/ray-project/ray/issues/4169.\n \"six >= 1.0.0\",\n \"faulthandler;python_version<'3.3'\",\n \"protobuf >= 3.8.0\",\n \"cloudpickle\",\n \"py-spy >= 0.2.0\",\n \"aiohttp\",\n \"google\",\n \"grpcio\"\n]\n\nsetup(\n name=\"ray\",\n version=find_version(\"ray\", \"__init__.py\"),\n author=\"Ray Team\",\n author_email=\"[email protected]\",\n description=(\"A system for parallel and distributed Python that unifies \"\n \"the ML ecosystem.\"),\n long_description=open(\"../README.rst\").read(),\n url=\"https://github.com/ray-project/ray\",\n keywords=(\"ray distributed parallel machine-learning \"\n \"reinforcement-learning deep-learning python\"),\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=requires,\n setup_requires=[\"cython >= 0.29\"],\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"ray=ray.scripts.scripts:main\",\n \"rllib=ray.rllib.scripts:cli [rllib]\", \"tune=ray.tune.scripts:cli\"\n ]\n },\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]}
| 3,250 | 106 |
gh_patches_debug_17047
|
rasdani/github-patches
|
git_diff
|
open-telemetry__opentelemetry-python-2079
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Skipped Baggage entries in propagation still count against max entries
The decrement operation should be moved after the last continue block if the over-long entry is truly skipped, otherwise this behavior should probably be documented/tested for.
https://github.com/open-telemetry/opentelemetry-python/blob/4250078e43ddb24c88e19270c7af01ae63336fb9/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py#L57-L65
</issue>
<code>
[start of opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import typing
16 from urllib.parse import quote_plus, unquote_plus
17
18 from opentelemetry.baggage import get_all, set_baggage
19 from opentelemetry.context import get_current
20 from opentelemetry.context.context import Context
21 from opentelemetry.propagators import textmap
22
23
24 class W3CBaggagePropagator(textmap.TextMapPropagator):
25 """Extracts and injects Baggage which is used to annotate telemetry."""
26
27 _MAX_HEADER_LENGTH = 8192
28 _MAX_PAIR_LENGTH = 4096
29 _MAX_PAIRS = 180
30 _BAGGAGE_HEADER_NAME = "baggage"
31
32 def extract(
33 self,
34 carrier: textmap.CarrierT,
35 context: typing.Optional[Context] = None,
36 getter: textmap.Getter = textmap.default_getter,
37 ) -> Context:
38 """Extract Baggage from the carrier.
39
40 See
41 `opentelemetry.propagators.textmap.TextMapPropagator.extract`
42 """
43
44 if context is None:
45 context = get_current()
46
47 header = _extract_first_element(
48 getter.get(carrier, self._BAGGAGE_HEADER_NAME)
49 )
50
51 if not header or len(header) > self._MAX_HEADER_LENGTH:
52 return context
53
54 baggage_entries = header.split(",")
55 total_baggage_entries = self._MAX_PAIRS
56 for entry in baggage_entries:
57 if total_baggage_entries <= 0:
58 return context
59 total_baggage_entries -= 1
60 if len(entry) > self._MAX_PAIR_LENGTH:
61 continue
62 try:
63 name, value = entry.split("=", 1)
64 except Exception: # pylint: disable=broad-except
65 continue
66 context = set_baggage(
67 unquote_plus(name).strip(),
68 unquote_plus(value).strip(),
69 context=context,
70 )
71
72 return context
73
74 def inject(
75 self,
76 carrier: textmap.CarrierT,
77 context: typing.Optional[Context] = None,
78 setter: textmap.Setter = textmap.default_setter,
79 ) -> None:
80 """Injects Baggage into the carrier.
81
82 See
83 `opentelemetry.propagators.textmap.TextMapPropagator.inject`
84 """
85 baggage_entries = get_all(context=context)
86 if not baggage_entries:
87 return
88
89 baggage_string = _format_baggage(baggage_entries)
90 setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)
91
92 @property
93 def fields(self) -> typing.Set[str]:
94 """Returns a set with the fields set in `inject`."""
95 return {self._BAGGAGE_HEADER_NAME}
96
97
98 def _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:
99 return ",".join(
100 quote_plus(str(key)) + "=" + quote_plus(str(value))
101 for key, value in baggage_entries.items()
102 )
103
104
105 def _extract_first_element(
106 items: typing.Optional[typing.Iterable[textmap.CarrierT]],
107 ) -> typing.Optional[textmap.CarrierT]:
108 if items is None:
109 return None
110 return next(iter(items), None)
111
[end of opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py
@@ -54,9 +54,6 @@
baggage_entries = header.split(",")
total_baggage_entries = self._MAX_PAIRS
for entry in baggage_entries:
- if total_baggage_entries <= 0:
- return context
- total_baggage_entries -= 1
if len(entry) > self._MAX_PAIR_LENGTH:
continue
try:
@@ -68,6 +65,9 @@
unquote_plus(value).strip(),
context=context,
)
+ total_baggage_entries -= 1
+ if total_baggage_entries == 0:
+ break
return context
|
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py\n@@ -54,9 +54,6 @@\n baggage_entries = header.split(\",\")\n total_baggage_entries = self._MAX_PAIRS\n for entry in baggage_entries:\n- if total_baggage_entries <= 0:\n- return context\n- total_baggage_entries -= 1\n if len(entry) > self._MAX_PAIR_LENGTH:\n continue\n try:\n@@ -68,6 +65,9 @@\n unquote_plus(value).strip(),\n context=context,\n )\n+ total_baggage_entries -= 1\n+ if total_baggage_entries == 0:\n+ break\n \n return context\n", "issue": "Skipped Baggage entries in propagation still count against max entries\nThe decrement operation should be moved after the last continue block if the over-long entry is truly skipped, otherwise this behavior should probably be documented/tested for.\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/4250078e43ddb24c88e19270c7af01ae63336fb9/opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py#L57-L65\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport typing\nfrom urllib.parse import quote_plus, unquote_plus\n\nfrom opentelemetry.baggage import get_all, set_baggage\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators import textmap\n\n\nclass W3CBaggagePropagator(textmap.TextMapPropagator):\n \"\"\"Extracts and injects Baggage which is used to annotate telemetry.\"\"\"\n\n _MAX_HEADER_LENGTH = 8192\n _MAX_PAIR_LENGTH = 4096\n _MAX_PAIRS = 180\n _BAGGAGE_HEADER_NAME = \"baggage\"\n\n def extract(\n self,\n carrier: textmap.CarrierT,\n context: typing.Optional[Context] = None,\n getter: textmap.Getter = textmap.default_getter,\n ) -> Context:\n \"\"\"Extract Baggage from the carrier.\n\n See\n `opentelemetry.propagators.textmap.TextMapPropagator.extract`\n \"\"\"\n\n if context is None:\n context = get_current()\n\n header = _extract_first_element(\n getter.get(carrier, self._BAGGAGE_HEADER_NAME)\n )\n\n if not header or len(header) > self._MAX_HEADER_LENGTH:\n return context\n\n baggage_entries = header.split(\",\")\n total_baggage_entries = self._MAX_PAIRS\n for entry in baggage_entries:\n if total_baggage_entries <= 0:\n return context\n total_baggage_entries -= 1\n if len(entry) > self._MAX_PAIR_LENGTH:\n continue\n try:\n name, value = entry.split(\"=\", 1)\n except Exception: # pylint: disable=broad-except\n continue\n context = set_baggage(\n unquote_plus(name).strip(),\n unquote_plus(value).strip(),\n context=context,\n )\n\n return context\n\n def inject(\n self,\n carrier: textmap.CarrierT,\n context: typing.Optional[Context] = None,\n setter: textmap.Setter = textmap.default_setter,\n ) -> None:\n \"\"\"Injects Baggage into the carrier.\n\n 
See\n `opentelemetry.propagators.textmap.TextMapPropagator.inject`\n \"\"\"\n baggage_entries = get_all(context=context)\n if not baggage_entries:\n return\n\n baggage_string = _format_baggage(baggage_entries)\n setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)\n\n @property\n def fields(self) -> typing.Set[str]:\n \"\"\"Returns a set with the fields set in `inject`.\"\"\"\n return {self._BAGGAGE_HEADER_NAME}\n\n\ndef _format_baggage(baggage_entries: typing.Mapping[str, object]) -> str:\n return \",\".join(\n quote_plus(str(key)) + \"=\" + quote_plus(str(value))\n for key, value in baggage_entries.items()\n )\n\n\ndef _extract_first_element(\n items: typing.Optional[typing.Iterable[textmap.CarrierT]],\n) -> typing.Optional[textmap.CarrierT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-api/src/opentelemetry/baggage/propagation/__init__.py"}]}
| 1,718 | 221 |
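The record above concerns the W3C baggage propagator's entry budget: the fix moves the counter decrement so that oversized or unparsable pairs no longer consume the per-header limit. Below is a minimal, standalone sketch of that corrected loop; the constants and the function name are illustrative only, not the actual OpenTelemetry API.

```python
from urllib.parse import unquote_plus

MAX_PAIRS = 180          # illustrative limits, mirroring the record above
MAX_PAIR_LENGTH = 4096

def parse_baggage(header: str) -> dict:
    """Parse a baggage header, counting only entries that actually parse."""
    remaining = MAX_PAIRS
    entries = {}
    for entry in header.split(","):
        if len(entry) > MAX_PAIR_LENGTH:
            continue  # skipped pairs no longer consume the budget
        try:
            name, value = entry.split("=", 1)
        except ValueError:
            continue
        entries[unquote_plus(name).strip()] = unquote_plus(value).strip()
        remaining -= 1  # decrement only after a successful parse
        if remaining == 0:
            break
    return entries
```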
gh_patches_debug_30793
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-28849
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[TVer] Can't download Fuji TV video
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.04.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->
- [x] I'm reporting a broken site support
- [x] I've verified that I'm running youtube-dl version **2021.04.07**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar issues including closed ones
## Verbose log
```
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['-f', 'best', 'https://tver.jp/corner/f0072083', '-o', 'D:\\video\\download\\a.mp4', '-v']
[debug] Encodings: locale cp932, fs mbcs, out cp932, pref cp932
[debug] youtube-dl version 2021.04.07
[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041
[debug] exe versions: ffmpeg 4.2, ffprobe 4.2
[debug] Proxy map: {}
[TVer] Downloading JSON metadata
[TVer] f0072083: Downloading JSON metadata
[FujiTVFODPlus7] 6191645753001: Downloading m3u8 information
ERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
```
## Description
[TVer](tver.jp) is a Japanese video site on which several TV stations post their programmes.
I can no longer download videos from the station Fuji TV. The cause appears to be a specification change: Fuji TV's delivery has become the same as that of the other stations (https://tver.jp/info/notice/3137.html).
Could you please add support for the new specification?
Thanks. 
</issue>
<code>
[start of youtube_dl/extractor/tver.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..compat import compat_str
8 from ..utils import (
9 int_or_none,
10 remove_start,
11 smuggle_url,
12 strip_or_none,
13 try_get,
14 )
15
16
17 class TVerIE(InfoExtractor):
18 _VALID_URL = r'https?://(?:www\.)?tver\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\d+))'
19 # videos are only available for 7 days
20 _TESTS = [{
21 'url': 'https://tver.jp/corner/f0062178',
22 'only_matching': True,
23 }, {
24 'url': 'https://tver.jp/feature/f0062413',
25 'only_matching': True,
26 }, {
27 'url': 'https://tver.jp/episode/79622438',
28 'only_matching': True,
29 }, {
30 # subtitle = ' '
31 'url': 'https://tver.jp/corner/f0068870',
32 'only_matching': True,
33 }]
34 _TOKEN = None
35 BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
36
37 def _real_initialize(self):
38 self._TOKEN = self._download_json(
39 'https://tver.jp/api/access_token.php', None)['token']
40
41 def _real_extract(self, url):
42 path, video_id = re.match(self._VALID_URL, url).groups()
43 main = self._download_json(
44 'https://api.tver.jp/v4/' + path, video_id,
45 query={'token': self._TOKEN})['main']
46 p_id = main['publisher_id']
47 service = remove_start(main['service'], 'ts_')
48 info = {
49 '_type': 'url_transparent',
50 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
51 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
52 }
53
54 if service == 'cx':
55 title = main['title']
56 subtitle = strip_or_none(main.get('subtitle'))
57 if subtitle:
58 title += ' - ' + subtitle
59 info.update({
60 'title': title,
61 'url': 'https://i.fod.fujitv.co.jp/plus7/web/%s/%s.html' % (p_id[:4], p_id),
62 'ie_key': 'FujiTVFODPlus7',
63 })
64 else:
65 r_id = main['reference_id']
66 if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
67 r_id = 'ref:' + r_id
68 bc_url = smuggle_url(
69 self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),
70 {'geo_countries': ['JP']})
71 info.update({
72 'url': bc_url,
73 'ie_key': 'BrightcoveNew',
74 })
75
76 return info
77
[end of youtube_dl/extractor/tver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/youtube_dl/extractor/tver.py b/youtube_dl/extractor/tver.py
--- a/youtube_dl/extractor/tver.py
+++ b/youtube_dl/extractor/tver.py
@@ -9,7 +9,6 @@
int_or_none,
remove_start,
smuggle_url,
- strip_or_none,
try_get,
)
@@ -45,32 +44,18 @@
query={'token': self._TOKEN})['main']
p_id = main['publisher_id']
service = remove_start(main['service'], 'ts_')
- info = {
+
+ r_id = main['reference_id']
+ if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
+ r_id = 'ref:' + r_id
+ bc_url = smuggle_url(
+ self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),
+ {'geo_countries': ['JP']})
+
+ return {
'_type': 'url_transparent',
'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
+ 'url': bc_url,
+ 'ie_key': 'BrightcoveNew',
}
-
- if service == 'cx':
- title = main['title']
- subtitle = strip_or_none(main.get('subtitle'))
- if subtitle:
- title += ' - ' + subtitle
- info.update({
- 'title': title,
- 'url': 'https://i.fod.fujitv.co.jp/plus7/web/%s/%s.html' % (p_id[:4], p_id),
- 'ie_key': 'FujiTVFODPlus7',
- })
- else:
- r_id = main['reference_id']
- if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
- r_id = 'ref:' + r_id
- bc_url = smuggle_url(
- self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),
- {'geo_countries': ['JP']})
- info.update({
- 'url': bc_url,
- 'ie_key': 'BrightcoveNew',
- })
-
- return info
|
{"golden_diff": "diff --git a/youtube_dl/extractor/tver.py b/youtube_dl/extractor/tver.py\n--- a/youtube_dl/extractor/tver.py\n+++ b/youtube_dl/extractor/tver.py\n@@ -9,7 +9,6 @@\n int_or_none,\n remove_start,\n smuggle_url,\n- strip_or_none,\n try_get,\n )\n \n@@ -45,32 +44,18 @@\n query={'token': self._TOKEN})['main']\n p_id = main['publisher_id']\n service = remove_start(main['service'], 'ts_')\n- info = {\n+\n+ r_id = main['reference_id']\n+ if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):\n+ r_id = 'ref:' + r_id\n+ bc_url = smuggle_url(\n+ self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),\n+ {'geo_countries': ['JP']})\n+\n+ return {\n '_type': 'url_transparent',\n 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),\n 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),\n+ 'url': bc_url,\n+ 'ie_key': 'BrightcoveNew',\n }\n-\n- if service == 'cx':\n- title = main['title']\n- subtitle = strip_or_none(main.get('subtitle'))\n- if subtitle:\n- title += ' - ' + subtitle\n- info.update({\n- 'title': title,\n- 'url': 'https://i.fod.fujitv.co.jp/plus7/web/%s/%s.html' % (p_id[:4], p_id),\n- 'ie_key': 'FujiTVFODPlus7',\n- })\n- else:\n- r_id = main['reference_id']\n- if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):\n- r_id = 'ref:' + r_id\n- bc_url = smuggle_url(\n- self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),\n- {'geo_countries': ['JP']})\n- info.update({\n- 'url': bc_url,\n- 'ie_key': 'BrightcoveNew',\n- })\n-\n- return info\n", "issue": "[Tver] Can`t download Fuji TV video \n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:\r\n- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.04.07. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.\r\n- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.\r\n- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.\r\n- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. 
DO NOT post duplicates.\r\n- Finally, put x into all relevant boxes (like this [x])\r\n-->\r\n\r\n- [x] I'm reporting a broken site support\r\n- [x] I've verified that I'm running youtube-dl version **2021.04.07**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar issues including closed ones\r\n\r\n\r\n## Verbose log\r\n\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['-f', 'best', 'https://tver.jp/corner/f0072083', '-o', 'D:\\\\video\\\\download\\\\a.mp4', '-v']\r\n[debug] Encodings: locale cp932, fs mbcs, out cp932, pref cp932\r\n[debug] youtube-dl version 2021.04.07\r\n[debug] Python version 3.4.4 (CPython) - Windows-10-10.0.19041\r\n[debug] exe versions: ffmpeg 4.2, ffprobe 4.2\r\n[debug] Proxy map: {}\r\n[TVer] Downloading JSON metadata\r\n[TVer] f0072083: Downloading JSON metadata\r\n[FujiTVFODPlus7] 6191645753001: Downloading m3u8 information\r\nERROR: No video formats found; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\r\n```\r\n\r\n## Description\r\n\r\n[TVer](tver.jp) is Japanese video site. Some TV stations are on this site posting a video.\r\n\r\nI can no longer download videos from a TV station called Fuji TV. I think the cause is a specification change. it become the same as any other TV station. (https://tver.jp/info/notice/3137.html) \r\nCan you please support a new specification.\r\nThanks. \n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n int_or_none,\n remove_start,\n smuggle_url,\n strip_or_none,\n try_get,\n)\n\n\nclass TVerIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?tver\\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\\d+))'\n # videos are only available for 7 days\n _TESTS = [{\n 'url': 'https://tver.jp/corner/f0062178',\n 'only_matching': True,\n }, {\n 'url': 'https://tver.jp/feature/f0062413',\n 'only_matching': True,\n }, {\n 'url': 'https://tver.jp/episode/79622438',\n 'only_matching': True,\n }, {\n # subtitle = ' '\n 'url': 'https://tver.jp/corner/f0068870',\n 'only_matching': True,\n }]\n _TOKEN = None\n BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'\n\n def _real_initialize(self):\n self._TOKEN = self._download_json(\n 'https://tver.jp/api/access_token.php', None)['token']\n\n def _real_extract(self, url):\n path, video_id = re.match(self._VALID_URL, url).groups()\n main = self._download_json(\n 'https://api.tver.jp/v4/' + path, video_id,\n query={'token': self._TOKEN})['main']\n p_id = main['publisher_id']\n service = remove_start(main['service'], 'ts_')\n info = {\n '_type': 'url_transparent',\n 'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),\n 'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),\n }\n\n if service == 'cx':\n title = main['title']\n subtitle = strip_or_none(main.get('subtitle'))\n if subtitle:\n title += ' - ' + subtitle\n info.update({\n 'title': title,\n 'url': 'https://i.fod.fujitv.co.jp/plus7/web/%s/%s.html' % (p_id[:4], p_id),\n 'ie_key': 
'FujiTVFODPlus7',\n })\n else:\n r_id = main['reference_id']\n if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):\n r_id = 'ref:' + r_id\n bc_url = smuggle_url(\n self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),\n {'geo_countries': ['JP']})\n info.update({\n 'url': bc_url,\n 'ie_key': 'BrightcoveNew',\n })\n\n return info\n", "path": "youtube_dl/extractor/tver.py"}]}
| 2,090 | 545 |
gh_patches_debug_3979
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-1246
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Need binding to void GENERAL_NAMES_free(GENERAL_NAMES *)
Calls to the d2i methods for the subjectAltName extension return a dynamically allocated object that the caller must free, so a binding for GENERAL_NAMES_free should be exposed from hazmat so that higher-level code can avoid memory leaks. I am not sure which module should expose the binding, but I used the x509v3.py module in the proposed solution https://github.com/crc32a/cryptography/commit/24df02646de1e5c1773c9048076b5d67d4c5c0fa
This affects pyOpenSSL issue https://github.com/pyca/pyopenssl/issues/139, and an example of using the binding to avoid memory leaks is
https://github.com/rackerlabs/pyopenssl/commit/a479a74820619da13dfab8925cf49c4f766b6536
</issue>
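For illustration only, a rough sketch of how higher-level code could release the allocation once the requested binding exists; the helper name is an assumption, and this is not taken from pyOpenSSL's actual fix.

```python
# Sketch only: assumes GENERAL_NAMES_free has been added to the FUNCTIONS
# block of x509v3.py as requested in this issue.
from cryptography.hazmat.bindings.openssl.binding import Binding

binding = Binding()

def free_general_names(names):
    """Release a GENERAL_NAMES stack returned by a d2i call (hypothetical helper)."""
    if names != binding.ffi.NULL:
        binding.lib.GENERAL_NAMES_free(names)
```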
<code>
[start of cryptography/hazmat/bindings/openssl/x509v3.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 INCLUDES = """
17 #include <openssl/x509v3.h>
18 """
19
20 TYPES = """
21 typedef struct {
22 X509 *issuer_cert;
23 X509 *subject_cert;
24 ...;
25 } X509V3_CTX;
26
27 typedef void * (*X509V3_EXT_D2I)(void *, const unsigned char **, long);
28
29 typedef struct {
30 ASN1_ITEM_EXP *it;
31 X509V3_EXT_D2I d2i;
32 ...;
33 } X509V3_EXT_METHOD;
34
35 static const int GEN_OTHERNAME;
36 static const int GEN_EMAIL;
37 static const int GEN_X400;
38 static const int GEN_DNS;
39 static const int GEN_URI;
40 static const int GEN_DIRNAME;
41 static const int GEN_EDIPARTY;
42 static const int GEN_IPADD;
43 static const int GEN_RID;
44
45 typedef struct {
46 ...;
47 } OTHERNAME;
48
49 typedef struct {
50 ...;
51 } EDIPARTYNAME;
52
53 typedef struct {
54 int type;
55 union {
56 char *ptr;
57 OTHERNAME *otherName; /* otherName */
58 ASN1_IA5STRING *rfc822Name;
59 ASN1_IA5STRING *dNSName;
60 ASN1_TYPE *x400Address;
61 X509_NAME *directoryName;
62 EDIPARTYNAME *ediPartyName;
63 ASN1_IA5STRING *uniformResourceIdentifier;
64 ASN1_OCTET_STRING *iPAddress;
65 ASN1_OBJECT *registeredID;
66
67 /* Old names */
68 ASN1_OCTET_STRING *ip; /* iPAddress */
69 X509_NAME *dirn; /* dirn */
70 ASN1_IA5STRING *ia5; /* rfc822Name, dNSName, */
71 /* uniformResourceIdentifier */
72 ASN1_OBJECT *rid; /* registeredID */
73 ASN1_TYPE *other; /* x400Address */
74 } d;
75 ...;
76 } GENERAL_NAME;
77
78 typedef struct stack_st_GENERAL_NAME GENERAL_NAMES;
79 """
80
81 FUNCTIONS = """
82 void X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);
83 X509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);
84 int GENERAL_NAME_print(BIO *, GENERAL_NAME *);
85 """
86
87 MACROS = """
88 void *X509V3_set_ctx_nodb(X509V3_CTX *);
89 int sk_GENERAL_NAME_num(struct stack_st_GENERAL_NAME *);
90 int sk_GENERAL_NAME_push(struct stack_st_GENERAL_NAME *, GENERAL_NAME *);
91 GENERAL_NAME *sk_GENERAL_NAME_value(struct stack_st_GENERAL_NAME *, int);
92
93 /* These aren't macros these functions are all const X on openssl > 1.0.x */
94 const X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *);
95 const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int);
96 """
97
98 CUSTOMIZATIONS = """
99 """
100
101 CONDITIONAL_NAMES = {}
102
[end of cryptography/hazmat/bindings/openssl/x509v3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cryptography/hazmat/bindings/openssl/x509v3.py b/cryptography/hazmat/bindings/openssl/x509v3.py
--- a/cryptography/hazmat/bindings/openssl/x509v3.py
+++ b/cryptography/hazmat/bindings/openssl/x509v3.py
@@ -82,6 +82,7 @@
void X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);
X509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);
int GENERAL_NAME_print(BIO *, GENERAL_NAME *);
+void GENERAL_NAMES_free(GENERAL_NAMES *);
"""
MACROS = """
|
{"golden_diff": "diff --git a/cryptography/hazmat/bindings/openssl/x509v3.py b/cryptography/hazmat/bindings/openssl/x509v3.py\n--- a/cryptography/hazmat/bindings/openssl/x509v3.py\n+++ b/cryptography/hazmat/bindings/openssl/x509v3.py\n@@ -82,6 +82,7 @@\n void X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);\n X509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);\n int GENERAL_NAME_print(BIO *, GENERAL_NAME *);\n+void GENERAL_NAMES_free(GENERAL_NAMES *);\n \"\"\"\n \n MACROS = \"\"\"\n", "issue": "Need binding to void GENERAL_NAMES_free(GENERAL_NAMES *)\nthe function call to d2i methods on the altSubjectName extension returned a dynamicly allocated memory object that must be garbage collected so binding for GENERAL_NAMES_free should be exposed from hazmat so that higher level code can avoid memory leaks. Not sure which module should expose the binding but I used x509v3.py module in the Proposed solution https://github.com/crc32a/cryptography/commit/24df02646de1e5c1773c9048076b5d67d4c5c0fa\n\nthis effects issue https://github.com/pyca/pyopenssl/issues/139 of pyopenssl and an example of its usage to avoid memory leaks is\nhttps://github.com/rackerlabs/pyopenssl/commit/a479a74820619da13dfab8925cf49c4f766b6536\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <openssl/x509v3.h>\n\"\"\"\n\nTYPES = \"\"\"\ntypedef struct {\n X509 *issuer_cert;\n X509 *subject_cert;\n ...;\n} X509V3_CTX;\n\ntypedef void * (*X509V3_EXT_D2I)(void *, const unsigned char **, long);\n\ntypedef struct {\n ASN1_ITEM_EXP *it;\n X509V3_EXT_D2I d2i;\n ...;\n} X509V3_EXT_METHOD;\n\nstatic const int GEN_OTHERNAME;\nstatic const int GEN_EMAIL;\nstatic const int GEN_X400;\nstatic const int GEN_DNS;\nstatic const int GEN_URI;\nstatic const int GEN_DIRNAME;\nstatic const int GEN_EDIPARTY;\nstatic const int GEN_IPADD;\nstatic const int GEN_RID;\n\ntypedef struct {\n ...;\n} OTHERNAME;\n\ntypedef struct {\n ...;\n} EDIPARTYNAME;\n\ntypedef struct {\n int type;\n union {\n char *ptr;\n OTHERNAME *otherName; /* otherName */\n ASN1_IA5STRING *rfc822Name;\n ASN1_IA5STRING *dNSName;\n ASN1_TYPE *x400Address;\n X509_NAME *directoryName;\n EDIPARTYNAME *ediPartyName;\n ASN1_IA5STRING *uniformResourceIdentifier;\n ASN1_OCTET_STRING *iPAddress;\n ASN1_OBJECT *registeredID;\n\n /* Old names */\n ASN1_OCTET_STRING *ip; /* iPAddress */\n X509_NAME *dirn; /* dirn */\n ASN1_IA5STRING *ia5; /* rfc822Name, dNSName, */\n /* uniformResourceIdentifier */\n ASN1_OBJECT *rid; /* registeredID */\n ASN1_TYPE *other; /* x400Address */\n } d;\n ...;\n} GENERAL_NAME;\n\ntypedef struct stack_st_GENERAL_NAME GENERAL_NAMES;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nvoid X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);\nX509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);\nint GENERAL_NAME_print(BIO *, GENERAL_NAME *);\n\"\"\"\n\nMACROS = \"\"\"\nvoid 
*X509V3_set_ctx_nodb(X509V3_CTX *);\nint sk_GENERAL_NAME_num(struct stack_st_GENERAL_NAME *);\nint sk_GENERAL_NAME_push(struct stack_st_GENERAL_NAME *, GENERAL_NAME *);\nGENERAL_NAME *sk_GENERAL_NAME_value(struct stack_st_GENERAL_NAME *, int);\n\n/* These aren't macros these functions are all const X on openssl > 1.0.x */\nconst X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *);\nconst X509V3_EXT_METHOD *X509V3_EXT_get_nid(int);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n\nCONDITIONAL_NAMES = {}\n", "path": "cryptography/hazmat/bindings/openssl/x509v3.py"}]}
| 1,778 | 186 |
gh_patches_debug_5324
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-968
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEAT][CV] Add conditions to checks missing conditions
Some checks are missing conditions:
- [x] Heatmap
- [x] Image Drift
- [x] Train Test Drift
- [x] Robustness
</issue>
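For context, a condition is attached by chaining one of the check's add_condition_* helpers when the suite is assembled. A hedged example of the pattern, mirroring what the default suite already does for ImagePropertyDrift:

```python
from deepchecks.vision.checks import TrainTestPredictionDrift

# Illustrative: same chaining style as the existing default_suites.py entries.
check = TrainTestPredictionDrift().add_condition_drift_score_not_greater_than()
```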
<code>
[start of deepchecks/vision/suites/default_suites.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Functions for loading the default (built-in) vision suites for various validation stages.
12
13 Each function returns a new suite that is initialized with a list of checks and default conditions.
14 It is possible to customize these suites by editing the checks and conditions inside it after the suites' creation.
15 """
16 from deepchecks.vision.checks import ClassPerformance, TrainTestLabelDrift, MeanAveragePrecisionReport, \
17 MeanAverageRecallReport, ImagePropertyDrift, ImageDatasetDrift, SimpleModelComparison, ConfusionMatrixReport, \
18 RobustnessReport, TrainTestPredictionDrift
19 from deepchecks.vision import Suite
20
21
22 __all__ = ['train_test_validation', 'model_evaluation', 'full_suite']
23
24 from deepchecks.vision.checks.distribution import HeatmapComparison
25
26
27 def train_test_validation() -> Suite:
28 """Create a suite that is meant to validate correctness of train-test split, including integrity, \
29 distribution and leakage checks."""
30 return Suite(
31 'Train Test Validation Suite',
32 HeatmapComparison(),
33 TrainTestLabelDrift(),
34 TrainTestPredictionDrift(),
35 ImagePropertyDrift().add_condition_drift_score_not_greater_than(),
36 ImageDatasetDrift()
37 )
38
39
40 def model_evaluation() -> Suite:
41 """Create a suite that is meant to test model performance and overfit."""
42 return Suite(
43 'Model Evaluation Suite',
44 ClassPerformance(),
45 MeanAveragePrecisionReport(),
46 MeanAverageRecallReport(),
47 SimpleModelComparison(),
48 ConfusionMatrixReport(),
49 RobustnessReport().add_condition_degradation_not_greater_than()
50 )
51
52
53 def full_suite() -> Suite:
54 """Create a suite that includes many of the implemented checks, for a quick overview of your model and data."""
55 return Suite(
56 'Full Suite',
57 model_evaluation(),
58 train_test_validation(),
59 )
60
[end of deepchecks/vision/suites/default_suites.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/vision/suites/default_suites.py b/deepchecks/vision/suites/default_suites.py
--- a/deepchecks/vision/suites/default_suites.py
+++ b/deepchecks/vision/suites/default_suites.py
@@ -31,7 +31,7 @@
'Train Test Validation Suite',
HeatmapComparison(),
TrainTestLabelDrift(),
- TrainTestPredictionDrift(),
+ TrainTestPredictionDrift().add_condition_drift_score_not_greater_than(),
ImagePropertyDrift().add_condition_drift_score_not_greater_than(),
ImageDatasetDrift()
)
|
{"golden_diff": "diff --git a/deepchecks/vision/suites/default_suites.py b/deepchecks/vision/suites/default_suites.py\n--- a/deepchecks/vision/suites/default_suites.py\n+++ b/deepchecks/vision/suites/default_suites.py\n@@ -31,7 +31,7 @@\n 'Train Test Validation Suite',\n HeatmapComparison(),\n TrainTestLabelDrift(),\n- TrainTestPredictionDrift(),\n+ TrainTestPredictionDrift().add_condition_drift_score_not_greater_than(),\n ImagePropertyDrift().add_condition_drift_score_not_greater_than(),\n ImageDatasetDrift()\n )\n", "issue": "[FEAT][CV] Add conditions to checks missing conditions\nSome checks are missing conditions:\r\n\r\n- [x] Heatmap\r\n- [x] Image Drift\r\n- [x] Train Test Drift\r\n- [x] Robustness \n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Functions for loading the default (built-in) vision suites for various validation stages.\n\nEach function returns a new suite that is initialized with a list of checks and default conditions.\nIt is possible to customize these suites by editing the checks and conditions inside it after the suites' creation.\n\"\"\"\nfrom deepchecks.vision.checks import ClassPerformance, TrainTestLabelDrift, MeanAveragePrecisionReport, \\\n MeanAverageRecallReport, ImagePropertyDrift, ImageDatasetDrift, SimpleModelComparison, ConfusionMatrixReport, \\\n RobustnessReport, TrainTestPredictionDrift\nfrom deepchecks.vision import Suite\n\n\n__all__ = ['train_test_validation', 'model_evaluation', 'full_suite']\n\nfrom deepchecks.vision.checks.distribution import HeatmapComparison\n\n\ndef train_test_validation() -> Suite:\n \"\"\"Create a suite that is meant to validate correctness of train-test split, including integrity, \\\n distribution and leakage checks.\"\"\"\n return Suite(\n 'Train Test Validation Suite',\n HeatmapComparison(),\n TrainTestLabelDrift(),\n TrainTestPredictionDrift(),\n ImagePropertyDrift().add_condition_drift_score_not_greater_than(),\n ImageDatasetDrift()\n )\n\n\ndef model_evaluation() -> Suite:\n \"\"\"Create a suite that is meant to test model performance and overfit.\"\"\"\n return Suite(\n 'Model Evaluation Suite',\n ClassPerformance(),\n MeanAveragePrecisionReport(),\n MeanAverageRecallReport(),\n SimpleModelComparison(),\n ConfusionMatrixReport(),\n RobustnessReport().add_condition_degradation_not_greater_than()\n )\n\n\ndef full_suite() -> Suite:\n \"\"\"Create a suite that includes many of the implemented checks, for a quick overview of your model and data.\"\"\"\n return Suite(\n 'Full Suite',\n model_evaluation(),\n train_test_validation(),\n )\n", "path": "deepchecks/vision/suites/default_suites.py"}]}
| 1,191 | 143 |
gh_patches_debug_35089
|
rasdani/github-patches
|
git_diff
|
aio-libs__aiohttp-2237
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'NoneType' object has no attribute 'errno'
## Long story short
Trying to resolve a domain that is an alias for another domain which itself has no A or CNAME record raises AttributeError: 'NoneType' object has no attribute 'errno'.
## Expected behaviour
An error should be raised properly, most likely socket.gaierror.
## Actual behaviour
```
Traceback (most recent call last):
File "xtest.py", line 16, in <module>
process()
File "/usr/lib/python3.6/asyncio/base_events.py", line 449, in run_until_complete
return future.result()
File "/usr/lib/python3.6/asyncio/tasks.py", line 239, in _step
result = coro.send(None)
File "/myenv/lib/python3.6/site-packages/aiohttp/helpers.py", line 72, in send
return self._coro.send(arg)
File "/myenv/lib/python3.6/site-packages/aiohttp/client.py", line 233, in _request
conn = yield from self._connector.connect(req)
File "/myenv/lib/python3.6/site-packages/aiohttp/connector.py", line 378, in connect
proto = yield from self._create_connection(req)
File "/myenv/lib/python3.6/site-packages/aiohttp/connector.py", line 687, in _create_connection
_, proto = yield from self._create_direct_connection(req)
File "/myenv/lib/python3.6/site-packages/aiohttp/connector.py", line 735, in _create_direct_connection
exc.errno,
AttributeError: 'NoneType' object has no attribute 'errno'
```
## Steps to reproduce
This script will reproduce the error.
```
import asyncio
import aiohttp
from aiohttp.resolver import AsyncResolver
def process():
url = 'http://esly.win/'
resolver = AsyncResolver()
conn = aiohttp.TCPConnector(resolver=resolver, verify_ssl=False)
session = aiohttp.ClientSession(connector=conn)
return session.get(url)
loop = asyncio.get_event_loop()
loop.run_until_complete(
process()
)
```
If I use the session without setting the connector it first raises a socket.gaierror but then
> During handling of the above exception, another exception occurred...
And the same traceback appears.
## Your environment
Python 3.6.0b2
Ubuntu 10.10
aiohttp==2.2,5
Also happens with aiohttp==2.3.0a0 (installed from git on 29/Aug/2017)
</issue>
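The fix recorded for this report translates the aiodns exception into an OSError before aiohttp's connector ever touches .errno. A minimal sketch of that pattern, written with async/await rather than the yield from style of the code below, and with an assumed helper name:

```python
import socket

import aiodns

async def resolve_host(resolver, host, port=0, family=socket.AF_INET):
    """Wrap aiodns lookups so failures surface as OSError, not DNSError."""
    try:
        resp = await resolver.gethostbyname(host, family)
    except aiodns.error.DNSError as exc:
        msg = exc.args[1] if len(exc.args) > 1 else "DNS lookup failed"
        raise OSError(msg) from exc
    if not resp.addresses:
        raise OSError("DNS lookup failed")
    return [
        {"hostname": host, "host": address, "port": port,
         "family": family, "proto": 0, "flags": socket.AI_NUMERICHOST}
        for address in resp.addresses
    ]
```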
<code>
[start of aiohttp/resolver.py]
1 import asyncio
2 import socket
3
4 from .abc import AbstractResolver
5
6
7 __all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')
8
9 try:
10 import aiodns
11 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
12 except ImportError: # pragma: no cover
13 aiodns = None
14
15 aiodns_default = False
16
17
18 class ThreadedResolver(AbstractResolver):
19 """Use Executor for synchronous getaddrinfo() calls, which defaults to
20 concurrent.futures.ThreadPoolExecutor.
21 """
22
23 def __init__(self, loop=None):
24 if loop is None:
25 loop = asyncio.get_event_loop()
26 self._loop = loop
27
28 @asyncio.coroutine
29 def resolve(self, host, port=0, family=socket.AF_INET):
30 infos = yield from self._loop.getaddrinfo(
31 host, port, type=socket.SOCK_STREAM, family=family)
32
33 hosts = []
34 for family, _, proto, _, address in infos:
35 hosts.append(
36 {'hostname': host,
37 'host': address[0], 'port': address[1],
38 'family': family, 'proto': proto,
39 'flags': socket.AI_NUMERICHOST})
40
41 return hosts
42
43 @asyncio.coroutine
44 def close(self):
45 pass
46
47
48 class AsyncResolver(AbstractResolver):
49 """Use the `aiodns` package to make asynchronous DNS lookups"""
50
51 def __init__(self, loop=None, *args, **kwargs):
52 if loop is None:
53 loop = asyncio.get_event_loop()
54
55 if aiodns is None:
56 raise RuntimeError("Resolver requires aiodns library")
57
58 self._loop = loop
59 self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)
60
61 if not hasattr(self._resolver, 'gethostbyname'):
62 # aiodns 1.1 is not available, fallback to DNSResolver.query
63 self.resolve = self.resolve_with_query
64
65 @asyncio.coroutine
66 def resolve(self, host, port=0, family=socket.AF_INET):
67 hosts = []
68 resp = yield from self._resolver.gethostbyname(host, family)
69
70 for address in resp.addresses:
71 hosts.append(
72 {'hostname': host,
73 'host': address, 'port': port,
74 'family': family, 'proto': 0,
75 'flags': socket.AI_NUMERICHOST})
76 return hosts
77
78 @asyncio.coroutine
79 def resolve_with_query(self, host, port=0, family=socket.AF_INET):
80 if family == socket.AF_INET6:
81 qtype = 'AAAA'
82 else:
83 qtype = 'A'
84
85 hosts = []
86 resp = yield from self._resolver.query(host, qtype)
87
88 for rr in resp:
89 hosts.append(
90 {'hostname': host,
91 'host': rr.host, 'port': port,
92 'family': family, 'proto': 0,
93 'flags': socket.AI_NUMERICHOST})
94
95 return hosts
96
97 @asyncio.coroutine
98 def close(self):
99 return self._resolver.cancel()
100
101
102 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
103
[end of aiohttp/resolver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py
--- a/aiohttp/resolver.py
+++ b/aiohttp/resolver.py
@@ -60,31 +60,42 @@
if not hasattr(self._resolver, 'gethostbyname'):
# aiodns 1.1 is not available, fallback to DNSResolver.query
- self.resolve = self.resolve_with_query
+ self.resolve = self._resolve_with_query
@asyncio.coroutine
def resolve(self, host, port=0, family=socket.AF_INET):
+ try:
+ resp = yield from self._resolver.gethostbyname(host, family)
+ except aiodns.error.DNSError as exc:
+ msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
+ raise OSError(msg) from exc
hosts = []
- resp = yield from self._resolver.gethostbyname(host, family)
-
for address in resp.addresses:
hosts.append(
{'hostname': host,
'host': address, 'port': port,
'family': family, 'proto': 0,
'flags': socket.AI_NUMERICHOST})
+
+ if not hosts:
+ raise OSError("DNS lookup failed")
+
return hosts
@asyncio.coroutine
- def resolve_with_query(self, host, port=0, family=socket.AF_INET):
+ def _resolve_with_query(self, host, port=0, family=socket.AF_INET):
if family == socket.AF_INET6:
qtype = 'AAAA'
else:
qtype = 'A'
- hosts = []
- resp = yield from self._resolver.query(host, qtype)
+ try:
+ resp = yield from self._resolver.query(host, qtype)
+ except aiodns.error.DNSError as exc:
+ msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
+ raise OSError(msg) from exc
+ hosts = []
for rr in resp:
hosts.append(
{'hostname': host,
@@ -92,6 +103,9 @@
'family': family, 'proto': 0,
'flags': socket.AI_NUMERICHOST})
+ if not hosts:
+ raise OSError("DNS lookup failed")
+
return hosts
@asyncio.coroutine
|
{"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -60,31 +60,42 @@\n \n if not hasattr(self._resolver, 'gethostbyname'):\n # aiodns 1.1 is not available, fallback to DNSResolver.query\n- self.resolve = self.resolve_with_query\n+ self.resolve = self._resolve_with_query\n \n @asyncio.coroutine\n def resolve(self, host, port=0, family=socket.AF_INET):\n+ try:\n+ resp = yield from self._resolver.gethostbyname(host, family)\n+ except aiodns.error.DNSError as exc:\n+ msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n+ raise OSError(msg) from exc\n hosts = []\n- resp = yield from self._resolver.gethostbyname(host, family)\n-\n for address in resp.addresses:\n hosts.append(\n {'hostname': host,\n 'host': address, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n+\n+ if not hosts:\n+ raise OSError(\"DNS lookup failed\")\n+\n return hosts\n \n @asyncio.coroutine\n- def resolve_with_query(self, host, port=0, family=socket.AF_INET):\n+ def _resolve_with_query(self, host, port=0, family=socket.AF_INET):\n if family == socket.AF_INET6:\n qtype = 'AAAA'\n else:\n qtype = 'A'\n \n- hosts = []\n- resp = yield from self._resolver.query(host, qtype)\n+ try:\n+ resp = yield from self._resolver.query(host, qtype)\n+ except aiodns.error.DNSError as exc:\n+ msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n+ raise OSError(msg) from exc\n \n+ hosts = []\n for rr in resp:\n hosts.append(\n {'hostname': host,\n@@ -92,6 +103,9 @@\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n \n+ if not hosts:\n+ raise OSError(\"DNS lookup failed\")\n+\n return hosts\n \n @asyncio.coroutine\n", "issue": "AttributeError: 'NoneType' object has no attribute 'errno'\n## Long story short\r\n\r\nTrying to resolve a domain which is an alias for another one, which does not have an A or CNAME record, raises AttributeError: 'NoneType' object has no attribute 'errno'\r\n\r\n## Expected behaviour\r\n\r\nRaise an error correctly, socket.gaierror probably.\r\n\r\n## Actual behaviour\r\n\r\n```Traceback (most recent call last):\r\n File \"xtest.py\", line 16, in <module>\r\n process()\r\n File \"/usr/lib/python3.6/asyncio/base_events.py\", line 449, in run_until_complete\r\n return future.result()\r\n File \"/usr/lib/python3.6/asyncio/tasks.py\", line 239, in _step\r\n result = coro.send(None)\r\n File \"/myenv/lib/python3.6/site-packages/aiohttp/helpers.py\", line 72, in send\r\n return self._coro.send(arg)\r\n File \"/myenv/lib/python3.6/site-packages/aiohttp/client.py\", line 233, in _request\r\n conn = yield from self._connector.connect(req)\r\n File \"/myenv/lib/python3.6/site-packages/aiohttp/connector.py\", line 378, in connect\r\n proto = yield from self._create_connection(req)\r\n File \"/myenv/lib/python3.6/site-packages/aiohttp/connector.py\", line 687, in _create_connection\r\n _, proto = yield from self._create_direct_connection(req)\r\n File \"/myenv/lib/python3.6/site-packages/aiohttp/connector.py\", line 735, in _create_direct_connection\r\n exc.errno,\r\nAttributeError: 'NoneType' object has no attribute 'errno'\r\n```\r\n\r\n## Steps to reproduce\r\n\r\nThis script will reproduce the error.\r\n\r\n```\r\nimport asyncio\r\nimport aiohttp\r\nfrom aiohttp.resolver import AsyncResolver\r\n\r\ndef process():\r\n url = 'http://esly.win/'\r\n resolver = AsyncResolver()\r\n conn = aiohttp.TCPConnector(resolver=resolver, verify_ssl=False)\r\n session = 
aiohttp.ClientSession(connector=conn)\r\n return session.get(url)\r\n\r\nloop = asyncio.get_event_loop()\r\nloop.run_until_complete(\r\n process()\r\n)\r\n```\r\n\r\nIf I use the session without setting the connector it first raises a socket.gaierror but then \r\n> During handling of the above exception, another exception occurred...\r\n\r\nAnd the same traceback appears.\r\n\r\n## Your environment\r\nPython 3.6.0b2\r\nUbuntu 10.10\r\naiohttp==2.2,5 \r\nAlso happens with aiohttp==2.3.0a0 (installed from git on 29/Aug/2017)\n", "before_files": [{"content": "import asyncio\nimport socket\n\nfrom .abc import AbstractResolver\n\n\n__all__ = ('ThreadedResolver', 'AsyncResolver', 'DefaultResolver')\n\ntry:\n import aiodns\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self, loop=None):\n if loop is None:\n loop = asyncio.get_event_loop()\n self._loop = loop\n\n @asyncio.coroutine\n def resolve(self, host, port=0, family=socket.AF_INET):\n infos = yield from self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family)\n\n hosts = []\n for family, _, proto, _, address in infos:\n hosts.append(\n {'hostname': host,\n 'host': address[0], 'port': address[1],\n 'family': family, 'proto': proto,\n 'flags': socket.AI_NUMERICHOST})\n\n return hosts\n\n @asyncio.coroutine\n def close(self):\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(self, loop=None, *args, **kwargs):\n if loop is None:\n loop = asyncio.get_event_loop()\n\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = loop\n self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)\n\n if not hasattr(self._resolver, 'gethostbyname'):\n # aiodns 1.1 is not available, fallback to DNSResolver.query\n self.resolve = self.resolve_with_query\n\n @asyncio.coroutine\n def resolve(self, host, port=0, family=socket.AF_INET):\n hosts = []\n resp = yield from self._resolver.gethostbyname(host, family)\n\n for address in resp.addresses:\n hosts.append(\n {'hostname': host,\n 'host': address, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n return hosts\n\n @asyncio.coroutine\n def resolve_with_query(self, host, port=0, family=socket.AF_INET):\n if family == socket.AF_INET6:\n qtype = 'AAAA'\n else:\n qtype = 'A'\n\n hosts = []\n resp = yield from self._resolver.query(host, qtype)\n\n for rr in resp:\n hosts.append(\n {'hostname': host,\n 'host': rr.host, 'port': port,\n 'family': family, 'proto': 0,\n 'flags': socket.AI_NUMERICHOST})\n\n return hosts\n\n @asyncio.coroutine\n def close(self):\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]}
| 2,015 | 525 |
gh_patches_debug_2967
|
rasdani/github-patches
|
git_diff
|
canonical__cloud-init-4422
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
package-update-upgrade-install does not work on Gentoo
This bug was originally filed in Launchpad as [LP: #1799544](https://bugs.launchpad.net/cloud-init/+bug/1799544)
<details>
<summary>Launchpad details</summary>
<pre>
affected_projects = []
assignee = holmanb
assignee_name = Brett Holman
date_closed = 2022-07-21T15:16:56.010973+00:00
date_created = 2018-10-23T17:34:36.633424+00:00
date_fix_committed = 2022-07-21T15:16:56.010973+00:00
date_fix_released = 2022-07-21T15:16:56.010973+00:00
id = 1799544
importance = medium
is_complete = True
lp_url = https://bugs.launchpad.net/cloud-init/+bug/1799544
milestone = 22.2
owner = gilles-dartiguelongue
owner_name = Gilles Dartiguelongue
private = False
status = fix_released
submitter = gilles-dartiguelongue
submitter_name = Gilles Dartiguelongue
tags = ['gentoo']
duplicates = []
</pre>
</details>
_Launchpad user **Gilles Dartiguelongue(gilles-dartiguelongue)** wrote on 2018-10-23T17:34:36.633424+00:00_
I'm testing cloud-init in a NoCloud setup. I'm trying to install packages using the appropriate module, and after fixing some issues in Gentoo packaging I hit a runtime error because cmd = list('emerge') is interpreted as ['e', 'm', 'e', ...] when ['emerge'] was intended.
</issue>
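The core of the bug is plain Python semantics, independent of cloud-init: list() over a string iterates character by character, so the command list must be written as a list literal. A short illustration:

```python
# Plain-Python illustration of the bug described above.
assert list("emerge") == ["e", "m", "e", "r", "g", "e"]  # what the buggy code produced
assert ["emerge"] == ["emerge"]                          # the intended one-element list
cmd = ["emerge"]  # the fix: build the command as a list literal
```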
<code>
[start of cloudinit/distros/gentoo.py]
1 # Copyright (C) 2014 Rackspace, US Inc.
2 # Copyright (C) 2016 Matthew Thode.
3 #
4 # Author: Nate House <[email protected]>
5 # Author: Matthew Thode <[email protected]>
6 #
7 # This file is part of cloud-init. See LICENSE file for license information.
8
9 from cloudinit import distros, helpers
10 from cloudinit import log as logging
11 from cloudinit import subp, util
12 from cloudinit.distros import net_util
13 from cloudinit.distros.parsers.hostname import HostnameConf
14 from cloudinit.settings import PER_INSTANCE
15
16 LOG = logging.getLogger(__name__)
17
18
19 class Distro(distros.Distro):
20 locale_conf_fn = "/etc/env.d/02locale"
21 locale_gen_fn = "/etc/locale.gen"
22 network_conf_fn = "/etc/conf.d/net"
23 hostname_conf_fn = "/etc/conf.d/hostname"
24 init_cmd = ["rc-service"] # init scripts
25 default_locale = "en_US.UTF-8"
26
27 # C.UTF8 makes sense to generate, but is not selected
28 # Add /etc/locale.gen entries to this list to support more locales
29 locales = ["C.UTF8 UTF-8", "en_US.UTF-8 UTF-8"]
30
31 def __init__(self, name, cfg, paths):
32 distros.Distro.__init__(self, name, cfg, paths)
33 # This will be used to restrict certain
34 # calls from repeatly happening (when they
35 # should only happen say once per instance...)
36 self._runner = helpers.Runners(paths)
37 self.osfamily = "gentoo"
38 # Fix sshd restarts
39 cfg["ssh_svcname"] = "/etc/init.d/sshd"
40 if distros.uses_systemd():
41 LOG.error("Cloud-init does not support systemd with gentoo")
42
43 def apply_locale(self, _, out_fn=None):
44 """rc-only - not compatible with systemd
45
46 Locales need to be added to /etc/locale.gen and generated prior
47 to selection. Default to en_US.UTF-8 for simplicity.
48 """
49 util.write_file(self.locale_gen_fn, "\n".join(self.locales), mode=644)
50
51 # generate locales
52 subp.subp(["locale-gen"], capture=False)
53
54 # select locale
55 subp.subp(
56 ["eselect", "locale", "set", self.default_locale], capture=False
57 )
58
59 def install_packages(self, pkglist):
60 self.update_package_sources()
61 self.package_command("", pkgs=pkglist)
62
63 def _write_network(self, settings):
64 entries = net_util.translate_network(settings)
65 LOG.debug(
66 "Translated ubuntu style network settings %s into %s",
67 settings,
68 entries,
69 )
70 dev_names = entries.keys()
71 nameservers = []
72
73 for (dev, info) in entries.items():
74 if "dns-nameservers" in info:
75 nameservers.extend(info["dns-nameservers"])
76 if dev == "lo":
77 continue
78 net_fn = self.network_conf_fn + "." + dev
79 dns_nameservers = info.get("dns-nameservers")
80 if isinstance(dns_nameservers, (list, tuple)):
81 dns_nameservers = str(tuple(dns_nameservers)).replace(",", "")
82 # eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'}
83 # lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True}
84 results = ""
85 if info.get("bootproto") == "dhcp":
86 results += 'config_{name}="dhcp"'.format(name=dev)
87 else:
88 results += (
89 'config_{name}="{ip_address} netmask {netmask}"\n'
90 'mac_{name}="{hwaddr}"\n'
91 ).format(
92 name=dev,
93 ip_address=info.get("address"),
94 netmask=info.get("netmask"),
95 hwaddr=info.get("hwaddress"),
96 )
97 results += 'routes_{name}="default via {gateway}"\n'.format(
98 name=dev, gateway=info.get("gateway")
99 )
100 if info.get("dns-nameservers"):
101 results += 'dns_servers_{name}="{dnsservers}"\n'.format(
102 name=dev, dnsservers=dns_nameservers
103 )
104 util.write_file(net_fn, results)
105 self._create_network_symlink(dev)
106 if info.get("auto"):
107 cmd = [
108 "rc-update",
109 "add",
110 "net.{name}".format(name=dev),
111 "default",
112 ]
113 try:
114 (_out, err) = subp.subp(cmd)
115 if len(err):
116 LOG.warning(
117 "Running %s resulted in stderr output: %s",
118 cmd,
119 err,
120 )
121 except subp.ProcessExecutionError:
122 util.logexc(
123 LOG, "Running interface command %s failed", cmd
124 )
125
126 if nameservers:
127 util.write_file(
128 self.resolve_conf_fn, convert_resolv_conf(nameservers)
129 )
130
131 return dev_names
132
133 @staticmethod
134 def _create_network_symlink(interface_name):
135 file_path = "/etc/init.d/net.{name}".format(name=interface_name)
136 if not util.is_link(file_path):
137 util.sym_link("/etc/init.d/net.lo", file_path)
138
139 def _bring_up_interface(self, device_name):
140 cmd = ["/etc/init.d/net.%s" % device_name, "restart"]
141 LOG.debug(
142 "Attempting to run bring up interface %s using command %s",
143 device_name,
144 cmd,
145 )
146 try:
147 (_out, err) = subp.subp(cmd)
148 if len(err):
149 LOG.warning(
150 "Running %s resulted in stderr output: %s", cmd, err
151 )
152 return True
153 except subp.ProcessExecutionError:
154 util.logexc(LOG, "Running interface command %s failed", cmd)
155 return False
156
157 def _bring_up_interfaces(self, device_names):
158 use_all = False
159 for d in device_names:
160 if d == "all":
161 use_all = True
162 if use_all:
163 # Grab device names from init scripts
164 cmd = ["ls", "/etc/init.d/net.*"]
165 try:
166 (_out, err) = subp.subp(cmd)
167 if len(err):
168 LOG.warning(
169 "Running %s resulted in stderr output: %s", cmd, err
170 )
171 except subp.ProcessExecutionError:
172 util.logexc(LOG, "Running interface command %s failed", cmd)
173 return False
174 devices = [x.split(".")[2] for x in _out.split(" ")]
175 return distros.Distro._bring_up_interfaces(self, devices)
176 else:
177 return distros.Distro._bring_up_interfaces(self, device_names)
178
179 def _write_hostname(self, hostname, filename):
180 conf = None
181 try:
182 # Try to update the previous one
183 # so lets see if we can read it first.
184 conf = self._read_hostname_conf(filename)
185 except IOError:
186 pass
187 if not conf:
188 conf = HostnameConf("")
189
190 # Many distro's format is the hostname by itself, and that is the
191 # way HostnameConf works but gentoo expects it to be in
192 # hostname="the-actual-hostname"
193 conf.set_hostname('hostname="%s"' % hostname)
194 util.write_file(filename, str(conf), 0o644)
195
196 def _read_system_hostname(self):
197 sys_hostname = self._read_hostname(self.hostname_conf_fn)
198 return self.hostname_conf_fn, sys_hostname
199
200 @staticmethod
201 def _read_hostname_conf(filename):
202 conf = HostnameConf(util.load_file(filename))
203 conf.parse()
204 return conf
205
206 def _read_hostname(self, filename, default=None):
207 hostname = None
208 try:
209 conf = self._read_hostname_conf(filename)
210 hostname = conf.hostname
211 except IOError:
212 pass
213 if not hostname:
214 return default
215 return hostname
216
217 def set_timezone(self, tz):
218 distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
219
220 def package_command(self, command, args=None, pkgs=None):
221 cmd = list("emerge")
222 # Redirect output
223 cmd.append("--quiet")
224
225 if command == "upgrade":
226 cmd.extend(["--update", "world"])
227 else:
228 if pkgs is None:
229 pkgs = []
230
231 if args and isinstance(args, str):
232 cmd.append(args)
233 elif args and isinstance(args, list):
234 cmd.extend(args)
235
236 if command:
237 cmd.append(command)
238
239 pkglist = util.expand_package_list("%s-%s", pkgs)
240 cmd.extend(pkglist)
241
242 # Allow the output of this to flow outwards (ie not be captured)
243 subp.subp(cmd, capture=False)
244
245 def update_package_sources(self):
246 self._runner.run(
247 "update-sources",
248 self.package_command,
249 ["--sync"],
250 freq=PER_INSTANCE,
251 )
252
253
254 def convert_resolv_conf(settings):
255 """Returns a settings string formatted for resolv.conf."""
256 result = ""
257 if isinstance(settings, list):
258 for ns in settings:
259 result += "nameserver %s\n" % ns
260 return result
261
262
263 # vi: ts=4 expandtab
264
[end of cloudinit/distros/gentoo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -218,7 +218,7 @@
distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))
def package_command(self, command, args=None, pkgs=None):
- cmd = list("emerge")
+ cmd = ["emerge"]
# Redirect output
cmd.append("--quiet")
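The one-line fix matters because `list()` applied to a string iterates its characters, so the original call built an argv of single letters rather than the `emerge` command. A standalone sketch of the difference (plain Python 3, no cloud-init imports required):

```python
# list() over a string splits it into characters, not what an argv needs.
broken = list("emerge")
fixed = ["emerge"]

print(broken)   # ['e', 'm', 'e', 'r', 'g', 'e']
print(fixed)    # ['emerge']

# package_command() keeps appending options to this list, so the broken
# form would end up trying to execute the program 'e' with junk arguments.
broken.append("--quiet")
fixed.append("--quiet")
print(broken)   # ['e', 'm', 'e', 'r', 'g', 'e', '--quiet']
print(fixed)    # ['emerge', '--quiet']
```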
|
{"golden_diff": "diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py\n--- a/cloudinit/distros/gentoo.py\n+++ b/cloudinit/distros/gentoo.py\n@@ -218,7 +218,7 @@\n distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))\n \n def package_command(self, command, args=None, pkgs=None):\n- cmd = list(\"emerge\")\n+ cmd = [\"emerge\"]\n # Redirect output\n cmd.append(\"--quiet\")\n", "issue": "package-update-upgrade-install does not work on Gentoo\nThis bug was originally filed in Launchpad as [LP: #1799544](https://bugs.launchpad.net/cloud-init/+bug/1799544)\n<details>\n<summary>Launchpad details</summary>\n<pre>\naffected_projects = []\nassignee = holmanb\nassignee_name = Brett Holman\ndate_closed = 2022-07-21T15:16:56.010973+00:00\ndate_created = 2018-10-23T17:34:36.633424+00:00\ndate_fix_committed = 2022-07-21T15:16:56.010973+00:00\ndate_fix_released = 2022-07-21T15:16:56.010973+00:00\nid = 1799544\nimportance = medium\nis_complete = True\nlp_url = https://bugs.launchpad.net/cloud-init/+bug/1799544\nmilestone = 22.2\nowner = gilles-dartiguelongue\nowner_name = Gilles Dartiguelongue\nprivate = False\nstatus = fix_released\nsubmitter = gilles-dartiguelongue\nsubmitter_name = Gilles Dartiguelongue\ntags = ['gentoo']\nduplicates = []\n</pre>\n</details>\n\n_Launchpad user **Gilles Dartiguelongue(gilles-dartiguelongue)** wrote on 2018-10-23T17:34:36.633424+00:00_\n\nI'm testing cloud-init in a nocloud setup. I'm trying to perform installation of packages using the appropriate module and after fixing some issues in Gentoo packaging, I hit an error in execution due to cmd = list('emerge') being interpreted as ['e', 'm', 'e', ...] while it was meant as ['emerge'].\n", "before_files": [{"content": "# Copyright (C) 2014 Rackspace, US Inc.\n# Copyright (C) 2016 Matthew Thode.\n#\n# Author: Nate House <[email protected]>\n# Author: Matthew Thode <[email protected]>\n#\n# This file is part of cloud-init. See LICENSE file for license information.\n\nfrom cloudinit import distros, helpers\nfrom cloudinit import log as logging\nfrom cloudinit import subp, util\nfrom cloudinit.distros import net_util\nfrom cloudinit.distros.parsers.hostname import HostnameConf\nfrom cloudinit.settings import PER_INSTANCE\n\nLOG = logging.getLogger(__name__)\n\n\nclass Distro(distros.Distro):\n locale_conf_fn = \"/etc/env.d/02locale\"\n locale_gen_fn = \"/etc/locale.gen\"\n network_conf_fn = \"/etc/conf.d/net\"\n hostname_conf_fn = \"/etc/conf.d/hostname\"\n init_cmd = [\"rc-service\"] # init scripts\n default_locale = \"en_US.UTF-8\"\n\n # C.UTF8 makes sense to generate, but is not selected\n # Add /etc/locale.gen entries to this list to support more locales\n locales = [\"C.UTF8 UTF-8\", \"en_US.UTF-8 UTF-8\"]\n\n def __init__(self, name, cfg, paths):\n distros.Distro.__init__(self, name, cfg, paths)\n # This will be used to restrict certain\n # calls from repeatly happening (when they\n # should only happen say once per instance...)\n self._runner = helpers.Runners(paths)\n self.osfamily = \"gentoo\"\n # Fix sshd restarts\n cfg[\"ssh_svcname\"] = \"/etc/init.d/sshd\"\n if distros.uses_systemd():\n LOG.error(\"Cloud-init does not support systemd with gentoo\")\n\n def apply_locale(self, _, out_fn=None):\n \"\"\"rc-only - not compatible with systemd\n\n Locales need to be added to /etc/locale.gen and generated prior\n to selection. 
Default to en_US.UTF-8 for simplicity.\n \"\"\"\n util.write_file(self.locale_gen_fn, \"\\n\".join(self.locales), mode=644)\n\n # generate locales\n subp.subp([\"locale-gen\"], capture=False)\n\n # select locale\n subp.subp(\n [\"eselect\", \"locale\", \"set\", self.default_locale], capture=False\n )\n\n def install_packages(self, pkglist):\n self.update_package_sources()\n self.package_command(\"\", pkgs=pkglist)\n\n def _write_network(self, settings):\n entries = net_util.translate_network(settings)\n LOG.debug(\n \"Translated ubuntu style network settings %s into %s\",\n settings,\n entries,\n )\n dev_names = entries.keys()\n nameservers = []\n\n for (dev, info) in entries.items():\n if \"dns-nameservers\" in info:\n nameservers.extend(info[\"dns-nameservers\"])\n if dev == \"lo\":\n continue\n net_fn = self.network_conf_fn + \".\" + dev\n dns_nameservers = info.get(\"dns-nameservers\")\n if isinstance(dns_nameservers, (list, tuple)):\n dns_nameservers = str(tuple(dns_nameservers)).replace(\",\", \"\")\n # eth0, {'auto': True, 'ipv6': {}, 'bootproto': 'dhcp'}\n # lo, {'dns-nameservers': ['10.0.1.3'], 'ipv6': {}, 'auto': True}\n results = \"\"\n if info.get(\"bootproto\") == \"dhcp\":\n results += 'config_{name}=\"dhcp\"'.format(name=dev)\n else:\n results += (\n 'config_{name}=\"{ip_address} netmask {netmask}\"\\n'\n 'mac_{name}=\"{hwaddr}\"\\n'\n ).format(\n name=dev,\n ip_address=info.get(\"address\"),\n netmask=info.get(\"netmask\"),\n hwaddr=info.get(\"hwaddress\"),\n )\n results += 'routes_{name}=\"default via {gateway}\"\\n'.format(\n name=dev, gateway=info.get(\"gateway\")\n )\n if info.get(\"dns-nameservers\"):\n results += 'dns_servers_{name}=\"{dnsservers}\"\\n'.format(\n name=dev, dnsservers=dns_nameservers\n )\n util.write_file(net_fn, results)\n self._create_network_symlink(dev)\n if info.get(\"auto\"):\n cmd = [\n \"rc-update\",\n \"add\",\n \"net.{name}\".format(name=dev),\n \"default\",\n ]\n try:\n (_out, err) = subp.subp(cmd)\n if len(err):\n LOG.warning(\n \"Running %s resulted in stderr output: %s\",\n cmd,\n err,\n )\n except subp.ProcessExecutionError:\n util.logexc(\n LOG, \"Running interface command %s failed\", cmd\n )\n\n if nameservers:\n util.write_file(\n self.resolve_conf_fn, convert_resolv_conf(nameservers)\n )\n\n return dev_names\n\n @staticmethod\n def _create_network_symlink(interface_name):\n file_path = \"/etc/init.d/net.{name}\".format(name=interface_name)\n if not util.is_link(file_path):\n util.sym_link(\"/etc/init.d/net.lo\", file_path)\n\n def _bring_up_interface(self, device_name):\n cmd = [\"/etc/init.d/net.%s\" % device_name, \"restart\"]\n LOG.debug(\n \"Attempting to run bring up interface %s using command %s\",\n device_name,\n cmd,\n )\n try:\n (_out, err) = subp.subp(cmd)\n if len(err):\n LOG.warning(\n \"Running %s resulted in stderr output: %s\", cmd, err\n )\n return True\n except subp.ProcessExecutionError:\n util.logexc(LOG, \"Running interface command %s failed\", cmd)\n return False\n\n def _bring_up_interfaces(self, device_names):\n use_all = False\n for d in device_names:\n if d == \"all\":\n use_all = True\n if use_all:\n # Grab device names from init scripts\n cmd = [\"ls\", \"/etc/init.d/net.*\"]\n try:\n (_out, err) = subp.subp(cmd)\n if len(err):\n LOG.warning(\n \"Running %s resulted in stderr output: %s\", cmd, err\n )\n except subp.ProcessExecutionError:\n util.logexc(LOG, \"Running interface command %s failed\", cmd)\n return False\n devices = [x.split(\".\")[2] for x in _out.split(\" \")]\n return 
distros.Distro._bring_up_interfaces(self, devices)\n else:\n return distros.Distro._bring_up_interfaces(self, device_names)\n\n def _write_hostname(self, hostname, filename):\n conf = None\n try:\n # Try to update the previous one\n # so lets see if we can read it first.\n conf = self._read_hostname_conf(filename)\n except IOError:\n pass\n if not conf:\n conf = HostnameConf(\"\")\n\n # Many distro's format is the hostname by itself, and that is the\n # way HostnameConf works but gentoo expects it to be in\n # hostname=\"the-actual-hostname\"\n conf.set_hostname('hostname=\"%s\"' % hostname)\n util.write_file(filename, str(conf), 0o644)\n\n def _read_system_hostname(self):\n sys_hostname = self._read_hostname(self.hostname_conf_fn)\n return self.hostname_conf_fn, sys_hostname\n\n @staticmethod\n def _read_hostname_conf(filename):\n conf = HostnameConf(util.load_file(filename))\n conf.parse()\n return conf\n\n def _read_hostname(self, filename, default=None):\n hostname = None\n try:\n conf = self._read_hostname_conf(filename)\n hostname = conf.hostname\n except IOError:\n pass\n if not hostname:\n return default\n return hostname\n\n def set_timezone(self, tz):\n distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz))\n\n def package_command(self, command, args=None, pkgs=None):\n cmd = list(\"emerge\")\n # Redirect output\n cmd.append(\"--quiet\")\n\n if command == \"upgrade\":\n cmd.extend([\"--update\", \"world\"])\n else:\n if pkgs is None:\n pkgs = []\n\n if args and isinstance(args, str):\n cmd.append(args)\n elif args and isinstance(args, list):\n cmd.extend(args)\n\n if command:\n cmd.append(command)\n\n pkglist = util.expand_package_list(\"%s-%s\", pkgs)\n cmd.extend(pkglist)\n\n # Allow the output of this to flow outwards (ie not be captured)\n subp.subp(cmd, capture=False)\n\n def update_package_sources(self):\n self._runner.run(\n \"update-sources\",\n self.package_command,\n [\"--sync\"],\n freq=PER_INSTANCE,\n )\n\n\ndef convert_resolv_conf(settings):\n \"\"\"Returns a settings string formatted for resolv.conf.\"\"\"\n result = \"\"\n if isinstance(settings, list):\n for ns in settings:\n result += \"nameserver %s\\n\" % ns\n return result\n\n\n# vi: ts=4 expandtab\n", "path": "cloudinit/distros/gentoo.py"}]}
| 3,761 | 125 |
gh_patches_debug_8326
|
rasdani/github-patches
|
git_diff
|
google__clusterfuzz-1163
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Command field empty in OSS-Fuzz testcases
See https://oss-fuzz.com/testcase-detail/5204819744915456 for example.
</issue>
<code>
[start of src/python/bot/untrusted_runner/tasks_impl.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Tasks RPC implementations."""
15 from __future__ import absolute_import
16
17 from google.protobuf import wrappers_pb2
18 from google.protobuf.any_pb2 import Any
19 import six
20
21 from . import protobuf_utils
22
23 from bot import testcase_manager
24 from bot.fuzzers import engine
25 from bot.tasks import corpus_pruning_task
26 from bot.tasks import fuzz_task
27 from bot.tasks import minimize_task
28 from datastore import data_types
29 from protos import untrusted_runner_pb2
30 from system import environment
31
32
33 def _proto_to_fuzz_target(proto):
34 """Convert protobuf to FuzzTarget."""
35 return data_types.FuzzTarget(
36 engine=proto.engine, project=proto.project, binary=proto.binary)
37
38
39 def _proto_to_cross_pollinate_fuzzer(proto):
40 """Convert protobuf to CrossPollinateFuzzer."""
41 return corpus_pruning_task.CrossPollinateFuzzer(
42 fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),
43 backup_bucket_name=proto.backup_bucket_name,
44 corpus_engine_name=proto.corpus_engine_name)
45
46
47 def prune_corpus(request, _):
48 """Prune corpus."""
49 context = corpus_pruning_task.Context(
50 _proto_to_fuzz_target(request.fuzz_target), [
51 _proto_to_cross_pollinate_fuzzer(proto)
52 for proto in request.cross_pollinate_fuzzers
53 ], environment.get_value('USE_MINIJAIL'))
54
55 result = corpus_pruning_task.do_corpus_pruning(
56 context, request.last_execution_failed, request.revision)
57
58 # Intentionally skip edge and function coverage values as those would come
59 # from fuzzer coverage cron task (see src/go/server/cron/coverage.go).
60 coverage_info = untrusted_runner_pb2.CoverageInfo(
61 corpus_size_units=result.coverage_info.corpus_size_units,
62 corpus_size_bytes=result.coverage_info.corpus_size_bytes,
63 corpus_location=result.coverage_info.corpus_location,
64 corpus_backup_location=result.coverage_info.corpus_backup_location,
65 quarantine_size_units=result.coverage_info.quarantine_size_units,
66 quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,
67 quarantine_location=result.coverage_info.quarantine_location)
68
69 crashes = [
70 untrusted_runner_pb2.CorpusCrash(
71 crash_state=crash.crash_state,
72 crash_type=crash.crash_type,
73 crash_address=crash.crash_address,
74 crash_stacktrace=protobuf_utils.encode_utf8_if_unicode(
75 crash.crash_stacktrace),
76 unit_path=crash.unit_path,
77 security_flag=crash.security_flag,
78 ) for crash in result.crashes
79 ]
80
81 return untrusted_runner_pb2.PruneCorpusResponse(
82 coverage_info=coverage_info,
83 crashes=crashes,
84 fuzzer_binary_name=result.fuzzer_binary_name,
85 revision=result.revision)
86
87
88 def process_testcase(request, _):
89 """Process testcase."""
90 tool_name_map = {
91 untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',
92 untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',
93 }
94
95 # TODO(ochang): Support other engines.
96 assert request.engine == 'libFuzzer'
97 assert request.operation in tool_name_map
98
99 result = minimize_task.run_libfuzzer_engine(
100 tool_name_map[request.operation], request.target_name, request.arguments,
101 request.testcase_path, request.output_path, request.timeout)
102
103 return untrusted_runner_pb2.EngineReproduceResult(
104 return_code=result.return_code,
105 time_executed=result.time_executed,
106 output=result.output)
107
108
109 def engine_fuzz(request, _):
110 """Run engine fuzzer."""
111 engine_impl = engine.get(request.engine)
112 result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(
113 engine_impl, request.target_name, request.sync_corpus_directory,
114 request.testcase_directory)
115
116 crashes = [
117 untrusted_runner_pb2.EngineCrash(
118 input_path=crash.input_path,
119 stacktrace=protobuf_utils.encode_utf8_if_unicode(crash.stacktrace),
120 reproduce_args=crash.reproduce_args,
121 crash_time=crash.crash_time) for crash in result.crashes
122 ]
123
124 packed_stats = {}
125 for key, value in six.iteritems(result.stats):
126 packed_value = Any()
127 if isinstance(value, float):
128 packed_value.Pack(wrappers_pb2.DoubleValue(value=value))
129 elif isinstance(value, int):
130 packed_value.Pack(wrappers_pb2.Int32Value(value=value))
131 elif isinstance(value, six.string_types):
132 packed_value.Pack(wrappers_pb2.StringValue(value=value))
133 else:
134 raise ValueError('Unknown stat type for ' + key)
135
136 packed_stats[key] = packed_value
137
138 return untrusted_runner_pb2.EngineFuzzResponse(
139 logs=protobuf_utils.encode_utf8_if_unicode(result.logs),
140 command=result.command,
141 crashes=crashes,
142 stats=packed_stats,
143 time_executed=result.time_executed,
144 fuzzer_metadata=fuzzer_metadata)
145
146
147 def engine_reproduce(request, _):
148 """Run engine reproduce."""
149 engine_impl = engine.get(request.engine)
150 result = testcase_manager.engine_reproduce(engine_impl, request.target_name,
151 request.testcase_path,
152 request.arguments, request.timeout)
153 return untrusted_runner_pb2.EngineReproduceResult(
154 return_code=result.return_code,
155 time_executed=result.time_executed,
156 output=result.output)
157
[end of src/python/bot/untrusted_runner/tasks_impl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/bot/untrusted_runner/tasks_impl.py b/src/python/bot/untrusted_runner/tasks_impl.py
--- a/src/python/bot/untrusted_runner/tasks_impl.py
+++ b/src/python/bot/untrusted_runner/tasks_impl.py
@@ -151,6 +151,7 @@
request.testcase_path,
request.arguments, request.timeout)
return untrusted_runner_pb2.EngineReproduceResult(
+ command=result.command,
return_code=result.return_code,
time_executed=result.time_executed,
output=result.output)
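The patch only threads one more field through, but it is the field the testcase page renders. A rough before/after illustration of the response shape, using a hypothetical `FakeResult` stand-in for the object returned by `testcase_manager.engine_reproduce()` and plain dicts instead of the real protobuf, to keep it self-contained:

```python
from collections import namedtuple

# Hypothetical stand-in: only the fields referenced in the diff above.
FakeResult = namedtuple(
    "FakeResult", ["command", "return_code", "time_executed", "output"])

result = FakeResult(command="./fuzz_target -runs=100 testcase",
                    return_code=1,
                    time_executed=12.5,
                    output="...crash stacktrace...")

# Before: the command never made it into the response, so the testcase
# page showed an empty Command field.
before = dict(return_code=result.return_code,
              time_executed=result.time_executed,
              output=result.output)

# After: command is copied across like the other fields.
after = dict(command=result.command,
             return_code=result.return_code,
             time_executed=result.time_executed,
             output=result.output)

assert "command" not in before
assert after["command"] == result.command
```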
|
{"golden_diff": "diff --git a/src/python/bot/untrusted_runner/tasks_impl.py b/src/python/bot/untrusted_runner/tasks_impl.py\n--- a/src/python/bot/untrusted_runner/tasks_impl.py\n+++ b/src/python/bot/untrusted_runner/tasks_impl.py\n@@ -151,6 +151,7 @@\n request.testcase_path,\n request.arguments, request.timeout)\n return untrusted_runner_pb2.EngineReproduceResult(\n+ command=result.command,\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n", "issue": "Command field empty in OSS-Fuzz testcases\nSee https://oss-fuzz.com/testcase-detail/5204819744915456 for example.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tasks RPC implementations.\"\"\"\nfrom __future__ import absolute_import\n\nfrom google.protobuf import wrappers_pb2\nfrom google.protobuf.any_pb2 import Any\nimport six\n\nfrom . import protobuf_utils\n\nfrom bot import testcase_manager\nfrom bot.fuzzers import engine\nfrom bot.tasks import corpus_pruning_task\nfrom bot.tasks import fuzz_task\nfrom bot.tasks import minimize_task\nfrom datastore import data_types\nfrom protos import untrusted_runner_pb2\nfrom system import environment\n\n\ndef _proto_to_fuzz_target(proto):\n \"\"\"Convert protobuf to FuzzTarget.\"\"\"\n return data_types.FuzzTarget(\n engine=proto.engine, project=proto.project, binary=proto.binary)\n\n\ndef _proto_to_cross_pollinate_fuzzer(proto):\n \"\"\"Convert protobuf to CrossPollinateFuzzer.\"\"\"\n return corpus_pruning_task.CrossPollinateFuzzer(\n fuzz_target=_proto_to_fuzz_target(proto.fuzz_target),\n backup_bucket_name=proto.backup_bucket_name,\n corpus_engine_name=proto.corpus_engine_name)\n\n\ndef prune_corpus(request, _):\n \"\"\"Prune corpus.\"\"\"\n context = corpus_pruning_task.Context(\n _proto_to_fuzz_target(request.fuzz_target), [\n _proto_to_cross_pollinate_fuzzer(proto)\n for proto in request.cross_pollinate_fuzzers\n ], environment.get_value('USE_MINIJAIL'))\n\n result = corpus_pruning_task.do_corpus_pruning(\n context, request.last_execution_failed, request.revision)\n\n # Intentionally skip edge and function coverage values as those would come\n # from fuzzer coverage cron task (see src/go/server/cron/coverage.go).\n coverage_info = untrusted_runner_pb2.CoverageInfo(\n corpus_size_units=result.coverage_info.corpus_size_units,\n corpus_size_bytes=result.coverage_info.corpus_size_bytes,\n corpus_location=result.coverage_info.corpus_location,\n corpus_backup_location=result.coverage_info.corpus_backup_location,\n quarantine_size_units=result.coverage_info.quarantine_size_units,\n quarantine_size_bytes=result.coverage_info.quarantine_size_bytes,\n quarantine_location=result.coverage_info.quarantine_location)\n\n crashes = [\n untrusted_runner_pb2.CorpusCrash(\n crash_state=crash.crash_state,\n crash_type=crash.crash_type,\n crash_address=crash.crash_address,\n crash_stacktrace=protobuf_utils.encode_utf8_if_unicode(\n crash.crash_stacktrace),\n unit_path=crash.unit_path,\n 
security_flag=crash.security_flag,\n ) for crash in result.crashes\n ]\n\n return untrusted_runner_pb2.PruneCorpusResponse(\n coverage_info=coverage_info,\n crashes=crashes,\n fuzzer_binary_name=result.fuzzer_binary_name,\n revision=result.revision)\n\n\ndef process_testcase(request, _):\n \"\"\"Process testcase.\"\"\"\n tool_name_map = {\n untrusted_runner_pb2.ProcessTestcaseRequest.MINIMIZE: 'minimize',\n untrusted_runner_pb2.ProcessTestcaseRequest.CLEANSE: 'cleanse',\n }\n\n # TODO(ochang): Support other engines.\n assert request.engine == 'libFuzzer'\n assert request.operation in tool_name_map\n\n result = minimize_task.run_libfuzzer_engine(\n tool_name_map[request.operation], request.target_name, request.arguments,\n request.testcase_path, request.output_path, request.timeout)\n\n return untrusted_runner_pb2.EngineReproduceResult(\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n\n\ndef engine_fuzz(request, _):\n \"\"\"Run engine fuzzer.\"\"\"\n engine_impl = engine.get(request.engine)\n result, fuzzer_metadata = fuzz_task.run_engine_fuzzer(\n engine_impl, request.target_name, request.sync_corpus_directory,\n request.testcase_directory)\n\n crashes = [\n untrusted_runner_pb2.EngineCrash(\n input_path=crash.input_path,\n stacktrace=protobuf_utils.encode_utf8_if_unicode(crash.stacktrace),\n reproduce_args=crash.reproduce_args,\n crash_time=crash.crash_time) for crash in result.crashes\n ]\n\n packed_stats = {}\n for key, value in six.iteritems(result.stats):\n packed_value = Any()\n if isinstance(value, float):\n packed_value.Pack(wrappers_pb2.DoubleValue(value=value))\n elif isinstance(value, int):\n packed_value.Pack(wrappers_pb2.Int32Value(value=value))\n elif isinstance(value, six.string_types):\n packed_value.Pack(wrappers_pb2.StringValue(value=value))\n else:\n raise ValueError('Unknown stat type for ' + key)\n\n packed_stats[key] = packed_value\n\n return untrusted_runner_pb2.EngineFuzzResponse(\n logs=protobuf_utils.encode_utf8_if_unicode(result.logs),\n command=result.command,\n crashes=crashes,\n stats=packed_stats,\n time_executed=result.time_executed,\n fuzzer_metadata=fuzzer_metadata)\n\n\ndef engine_reproduce(request, _):\n \"\"\"Run engine reproduce.\"\"\"\n engine_impl = engine.get(request.engine)\n result = testcase_manager.engine_reproduce(engine_impl, request.target_name,\n request.testcase_path,\n request.arguments, request.timeout)\n return untrusted_runner_pb2.EngineReproduceResult(\n return_code=result.return_code,\n time_executed=result.time_executed,\n output=result.output)\n", "path": "src/python/bot/untrusted_runner/tasks_impl.py"}]}
| 2,223 | 118 |
gh_patches_debug_3051
|
rasdani/github-patches
|
git_diff
|
googleapis__google-cloud-python-2533
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pubsub message getting wrong attribute for publishTime
According to the [REST docs](https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage), a `PubsubMessage` has the field `publishTime`.
In [message.py](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/pubsub/google/cloud/pubsub/message.py), `from_api_repr` is getting the field `publishTimestamp` below:
```
instance._service_timestamp = api_repr.get('publishTimestamp')
```
The current tests don't catch this, since they also set up the api_repr with `publishTimestamp`.
A quick fix seems to adjust the following:
**message.py**
``` python
    @classmethod
    def from_api_repr(cls, api_repr):
        """Factory: construct message from API representation.

        :type api_repr: dict or None
        :param api_repr: The API representation of the message

        :rtype: :class:`Message`
        :returns: The message created from the response.
        """
        data = base64.b64decode(api_repr.get('data', b''))
        instance = cls(
            data=data, message_id=api_repr['messageId'],
            attributes=api_repr.get('attributes'))
        instance._service_timestamp = api_repr.get('publishTime')
        return instance
```
**test_message.py**
``` python
    def test_from_api_repr_no_attributes(self):
        from base64 import b64encode as b64
        DATA = b'DEADBEEF'
        B64_DATA = b64(DATA)
        MESSAGE_ID = '12345'
        TIMESTAMP = '2016-03-18-19:38:22.001393427Z'
        api_repr = {
            'data': B64_DATA,
            'messageId': MESSAGE_ID,
            'publishTime': TIMESTAMP,
        }
        message = self._getTargetClass().from_api_repr(api_repr)
        self.assertEqual(message.data, DATA)
        self.assertEqual(message.message_id, MESSAGE_ID)
        self.assertEqual(message.attributes, {})
        self.assertEqual(message.service_timestamp, TIMESTAMP)

    def test_from_api_repr_w_attributes(self):
        from base64 import b64encode as b64
        DATA = b'DEADBEEF'
        B64_DATA = b64(DATA)
        MESSAGE_ID = '12345'
        ATTRS = {'a': 'b'}
        TIMESTAMP = '2016-03-18-19:38:22.001393427Z'
        api_repr = {
            'data': B64_DATA,
            'messageId': MESSAGE_ID,
            'publishTime': TIMESTAMP,
            'attributes': ATTRS,
        }
        message = self._getTargetClass().from_api_repr(api_repr)
        self.assertEqual(message.data, DATA)
        self.assertEqual(message.message_id, MESSAGE_ID)
        self.assertEqual(message.service_timestamp, TIMESTAMP)
        self.assertEqual(message.attributes, ATTRS)
```
I don't currently have a contributor license signed, but will work on that. In the meantime, hoping that someone can pick this up.
</issue>
<code>
[start of pubsub/google/cloud/pubsub/message.py]
1 # Copyright 2015 Google Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Define API Topics."""
16
17 import base64
18
19 from google.cloud._helpers import _rfc3339_to_datetime
20
21
22 class Message(object):
23 """Messages can be published to a topic and received by subscribers.
24
25 See:
26 https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
27
28 :type data: bytes
29 :param data: the payload of the message.
30
31 :type message_id: string
32 :param message_id: An ID assigned to the message by the API.
33
34 :type attributes: dict or None
35 :param attributes: Extra metadata associated by the publisher with the
36 message.
37 """
38 _service_timestamp = None
39
40 def __init__(self, data, message_id, attributes=None):
41 self.data = data
42 self.message_id = message_id
43 self._attributes = attributes
44
45 @property
46 def attributes(self):
47 """Lazily-constructed attribute dictionary."""
48 if self._attributes is None:
49 self._attributes = {}
50 return self._attributes
51
52 @property
53 def timestamp(self):
54 """Return sortable timestamp from attributes, if passed.
55
56 Allows sorting messages in publication order (assuming consistent
57 clocks across all publishers).
58
59 :rtype: :class:`datetime.datetime`
60 :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp
61 :raises: ValueError if timestamp not in ``attributes``, or if it does
62 not match the RFC 3339 format.
63 """
64 stamp = self.attributes.get('timestamp')
65 if stamp is None:
66 raise ValueError('No timestamp')
67 return _rfc3339_to_datetime(stamp)
68
69 @property
70 def service_timestamp(self):
71 """Return server-set timestamp.
72
73 :rtype: string
74 :returns: timestamp (in UTC timezone) in RFC 3339 format
75 """
76 return self._service_timestamp
77
78 @classmethod
79 def from_api_repr(cls, api_repr):
80 """Factory: construct message from API representation.
81
82 :type api_repr: dict or None
83 :param api_repr: The API representation of the message
84
85 :rtype: :class:`Message`
86 :returns: The message created from the response.
87 """
88 data = base64.b64decode(api_repr.get('data', b''))
89 instance = cls(
90 data=data, message_id=api_repr['messageId'],
91 attributes=api_repr.get('attributes'))
92 instance._service_timestamp = api_repr.get('publishTimestamp')
93 return instance
94
[end of pubsub/google/cloud/pubsub/message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pubsub/google/cloud/pubsub/message.py b/pubsub/google/cloud/pubsub/message.py
--- a/pubsub/google/cloud/pubsub/message.py
+++ b/pubsub/google/cloud/pubsub/message.py
@@ -89,5 +89,5 @@
instance = cls(
data=data, message_id=api_repr['messageId'],
attributes=api_repr.get('attributes'))
- instance._service_timestamp = api_repr.get('publishTimestamp')
+ instance._service_timestamp = api_repr.get('publishTime')
return instance
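The mismatch is silent because `dict.get()` returns `None` for a missing key instead of raising, so `service_timestamp` simply stayed unset. A two-line illustration against a representative payload (the field values here are made up):

```python
# Shape of a PubsubMessage as the v1 REST API returns it (values invented).
api_repr = {
    "data": "REVBREJFRUY=",
    "messageId": "12345",
    "publishTime": "2016-03-18T19:38:22.001393427Z",
}

print(api_repr.get("publishTimestamp"))  # None: the old, wrong key misses
print(api_repr.get("publishTime"))       # '2016-03-18T19:38:22.001393427Z'
```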
|
{"golden_diff": "diff --git a/pubsub/google/cloud/pubsub/message.py b/pubsub/google/cloud/pubsub/message.py\n--- a/pubsub/google/cloud/pubsub/message.py\n+++ b/pubsub/google/cloud/pubsub/message.py\n@@ -89,5 +89,5 @@\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n- instance._service_timestamp = api_repr.get('publishTimestamp')\n+ instance._service_timestamp = api_repr.get('publishTime')\n return instance\n", "issue": "Pubsub message getting wrong attribute for publishTime\nAccording the [REST docs](https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage), a `PubsubMessage` has the field `publishTime`\n\nIn [message.py](https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/pubsub/google/cloud/pubsub/message.py), `from_api_repr` is getting the field `publishTimestamp` below:\n\n```\ninstance._service_timestamp = api_repr.get('publishTimestamp')\n```\n\nThe current tests are self-confirming of this issue as they simply set up the api_repr with `publishTimestamp`\n\nA quick fix seems to adjust the following:\n**message.py**\n\n``` python\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTime')\n return instance\n```\n\n**test_message.py**\n\n``` python\n def test_from_api_repr_no_attributes(self):\n from base64 import b64encode as b64\n DATA = b'DEADBEEF'\n B64_DATA = b64(DATA)\n MESSAGE_ID = '12345'\n TIMESTAMP = '2016-03-18-19:38:22.001393427Z'\n api_repr = {\n 'data': B64_DATA,\n 'messageId': MESSAGE_ID,\n 'publishTime': TIMESTAMP,\n }\n message = self._getTargetClass().from_api_repr(api_repr)\n self.assertEqual(message.data, DATA)\n self.assertEqual(message.message_id, MESSAGE_ID)\n self.assertEqual(message.attributes, {})\n self.assertEqual(message.service_timestamp, TIMESTAMP)\n\n def test_from_api_repr_w_attributes(self):\n from base64 import b64encode as b64\n DATA = b'DEADBEEF'\n B64_DATA = b64(DATA)\n MESSAGE_ID = '12345'\n ATTRS = {'a': 'b'}\n TIMESTAMP = '2016-03-18-19:38:22.001393427Z'\n api_repr = {\n 'data': B64_DATA,\n 'messageId': MESSAGE_ID,\n 'publishTime': TIMESTAMP,\n 'attributes': ATTRS,\n }\n message = self._getTargetClass().from_api_repr(api_repr)\n self.assertEqual(message.data, DATA)\n self.assertEqual(message.message_id, MESSAGE_ID)\n self.assertEqual(message.service_timestamp, TIMESTAMP)\n self.assertEqual(message.attributes, ATTRS)\n```\n\nI don't currently have a contributor license signed, but will work on that. 
In the meantime, hoping that someone can pick this up.\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom google.cloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message.\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n _service_timestamp = None\n\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary.\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @property\n def service_timestamp(self):\n \"\"\"Return server-set timestamp.\n\n :rtype: string\n :returns: timestamp (in UTC timezone) in RFC 3339 format\n \"\"\"\n return self._service_timestamp\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n\n :rtype: :class:`Message`\n :returns: The message created from the response.\n \"\"\"\n data = base64.b64decode(api_repr.get('data', b''))\n instance = cls(\n data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n instance._service_timestamp = api_repr.get('publishTimestamp')\n return instance\n", "path": "pubsub/google/cloud/pubsub/message.py"}]}
| 2,084 | 115 |
gh_patches_debug_18674
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-512
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
protoc segfaults on macOS 12.0.1
## 🐛 Bug
The version of protoc used by the CompilerGym build segfaults on macOS 12.0.1:
```
$ bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/protoc
[1] 82656 segmentation fault bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/protoc
```
Thanks @mostafaelhoushi for discovering this!
## To Reproduce
Steps to reproduce the behavior:
1. Update to macOS 12.0.1.
1. Start from a clean build: `make distclean`.
1. Attempt to build CompilerGym:
```
$ make install BAZEL_BUILD_OPTS='--sandbox_debug'
...
/usr/bin/sandbox-exec -f /private/var/tmp/_bazel_cummins/c3f286fbbefcd6317d9b13e427e86632/sandbox/darwin-sandbox/3008/sandbox.sb /var/tmp/_bazel_cummins/install/97cf8d40e3de7fca7ef885fa763bde13/process-wrapper '--timeout=0' '--kill_delay=15' bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/protoc '--python_out=bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/python' -Iexternal/com_github_protocolbuffers_protobuf/python -Ibazel-out/host/bin/external/com_github_protocolbuffers_protobuf/python bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/python/google/protobuf/timestamp.proto) sandbox-exec failed: error executing command
...
```
## Environment
Please fill in this checklist:
- CompilerGym: 0.2.1
- How you installed CompilerGym (conda, pip, source): source
- OS: macOS 12.0.1
- Python version: 3.8
- GCC/clang version (if compiling from source): Apple clang 12.0.5
- Bazel version (if compiling from source):
- Versions of any other relevant libraries:
You may use the PyTorch
[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)
to generate most of this information. You can get the script and run it with:
```sh
wget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py
# For security purposes, please check the contents of collect_env.py before running it.
python collect_env.py
```
</issue>
<code>
[start of compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py]
1 # Copyright (c) Facebook, Inc. and its affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5 """Extract a list of passes form the LLVM source tree.
6
7 Usage:
8
9 $ extract_passes_from_llvm_source_tree /path/to/llvm/source/root
10
11 Optionally accepts a list of specific files to examine:
12
13 $ extract_passes_from_llvm_source_tree /path/to/llvm/source/root /path/to/llvm/source/file
14
15 Implementation notes
16 --------------------
17
18 This implements a not-very-good parser for the INITIALIZE_PASS() family of
19 macros, which are used in the LLVM sources to declare a pass using it's name,
20 flag, and docstring. Parsing known macros like this is fragile and likely to
21 break as the LLVM sources evolve. Currently only tested on LLVM 10.0.
22
23 A more robust solution would be to parse the C++ sources and extract all classes
24 which inherit from ModulePass etc.
25 """
26 import codecs
27 import csv
28 import logging
29 import os
30 import re
31 import subprocess
32 import sys
33 from pathlib import Path
34 from typing import Dict, Iterable, List, Optional, Tuple
35
36 from common import Pass
37 from config import CREATE_PASS_NAME_MAP
38
39 logger = logging.getLogger(__name__)
40
41 # A regular expression to match the start of an invocation of one of the
42 # InitializePass helper macros.
43 INITIALIZE_PASS_RE = r"(INITIALIZE_PASS|INITIALIZE_PASS_BEGIN|INITIALIZE_PASS_WITH_OPTIONS|INITIALIZE_PASS_WITH_OPTIONS_BEGIN)\("
44 # A regular expression to match static const string definitions.
45 CONST_CHAR_RE = r'^\s*static\s+const\s+char(\s+(?P<name>[a-zA-Z_]+)\s*\[\s*\]|\s*\*\s*(?P<ptr_name>[a-zA-Z_]+))\s*=\s*(?P<value>".+")\s*;'
46
47
48 class ParseError(ValueError):
49 def __init__(self, message: str, source: str, components: List[str]):
50 self.message = message
51 self.source = source
52 self.components = components
53
54
55 def parse_initialize_pass(
56 source_path: Path, header: Optional[str], input_source: str, defines: Dict[str, str]
57 ) -> Iterable[Pass]:
58 """A shitty parser for INITIALIZE_PASS() macro invocations.."""
59 # Squish down to a single line.
60 source = re.sub(r"\n\s*", " ", input_source, re.MULTILINE)
61 # Contract multi-spaces to single space.
62 source = re.sub(r",", ", ", source)
63 source = re.sub(r"\s+", " ", source)
64 source = re.sub(r"\(\s+", "(", source)
65 source = re.sub(r"\)\s+", ")", source)
66
67 # Strip the INITIALIZE_PASS(...) macro.
68 match = re.match(rf"^\s*{INITIALIZE_PASS_RE}(?P<args>.+)\)", source)
69 if not match:
70 raise ParseError("Failed to match INITIALIZE_PASS regex", source, [])
71 source = match.group("args")
72
73 components = []
74 start = 0
75 in_quotes = False
76 in_comment = False
77 for i in range(len(source)):
78 if (
79 not in_comment
80 and source[i] == "/"
81 and i < len(source) - 1
82 and source[i + 1] == "*"
83 ):
84 in_comment = True
85 if (
86 in_comment
87 and source[i] == "*"
88 and i < len(source) - 1
89 and source[i + 1] == "/"
90 ):
91 in_comment = False
92 start = i + 2
93 if source[i] == '"':
94 in_quotes = not in_quotes
95 if not in_quotes and source[i] == ",":
96 components.append(source[start:i].strip())
97 start = i + 2
98 components.append(source[start:].strip())
99 if len(components) != 5:
100 raise ParseError(
101 f"Expected 5 components, found {len(components)}", source, components
102 )
103
104 pass_name, arg, name, cfg, analysis = components
105 # Strip quotation marks in arg and name.
106 if not arg:
107 raise ParseError(f"Empty arg: `{arg}`", source, components)
108 if not name:
109 raise ParseError(f"Empty name: `{name}`", source, components)
110
111 while arg in defines:
112 arg = defines[arg]
113 while name in defines:
114 name = defines[name]
115
116 if not (arg[0] == '"' and arg[-1] == '"'):
117 raise ParseError(f"Could not interpret arg `{arg}`", source, components)
118 arg = arg[1:-1]
119 if not (name[0] == '"' and name[-1] == '"'):
120 raise ParseError(f"Could not interpret name `{name}`", source, components)
121 name = name[1:-1]
122
123 # Convert cfg and analysis to bool.
124 if cfg not in {"true", "false"}:
125 raise ParseError(
126 f"Could not interpret bool cfg argument `{cfg}`", source, components
127 )
128 if analysis not in {"true", "false"}:
129 raise ParseError(
130 f"Could not interpret bool analysis argument `{analysis}`",
131 source,
132 components,
133 )
134 cfg = cfg == "true"
135 analysis = analysis == "true"
136
137 opts = {
138 "source": source_path,
139 "header": header,
140 "name": pass_name,
141 "flag": f"-{arg}",
142 "description": name,
143 "cfg": cfg,
144 "is_analysis": analysis,
145 }
146
147 pass_name_or_list = CREATE_PASS_NAME_MAP.get(pass_name, pass_name)
148
149 if isinstance(pass_name_or_list, str):
150 opts["name"] = pass_name_or_list
151 yield Pass(**opts)
152 else:
153 for name in pass_name_or_list:
154 opts["name"] = name
155 yield Pass(**opts)
156
157
158 def build_defines(source: str) -> Dict[str, str]:
159 """A quick-and-dirty technique to build a translation table from #defines
160 and string literals to their values."""
161 defines = {}
162 lines = source.split("\n")
163 for i in range(len(lines)):
164 line = lines[i].strip()
165 if line.startswith("#define"):
166 # Match #define strings.
167 components = line[len("#define ") :].split()
168 name = components[0]
169 value = " ".join(components[1:]).strip()
170 if value == "\\":
171 value = lines[i + 1].strip()
172 defines[name] = value
173 else:
174 # Match string literals.
175 match = re.match(CONST_CHAR_RE, line)
176 if match:
177 defines[match.group("name") or match.group("ptr_name")] = match.group(
178 "value"
179 )
180 return defines
181
182
183 def handle_file(source_path: Path) -> Tuple[Path, List[Pass]]:
184 """Parse the passes declared in a file."""
185 assert str(source_path).endswith(".cpp"), f"Unexpected file type: {source_path}"
186
187 header = Path("include/llvm/" + str(source_path)[len("lib") : -len("cpp")] + "h")
188 if not header.is_file():
189 header = ""
190
191 with codecs.open(source_path, "r", "utf-8") as f:
192 source = f.read()
193
194 defines = build_defines(source)
195
196 passes: List[Pass] = []
197
198 for match in re.finditer(INITIALIZE_PASS_RE, source):
199 start = match.start()
200 first_bracket = source.find("(", start)
201 bracket_depth = 1
202 end = first_bracket
203 for end in range(first_bracket + 1, len(source)):
204 if source[end] == "(":
205 bracket_depth += 1
206 elif source[end] == ")":
207 bracket_depth -= 1
208 if not bracket_depth:
209 break
210
211 try:
212 passes += list(
213 parse_initialize_pass(
214 source_path, header, source[start : end + 1], defines
215 )
216 )
217 except ParseError as e:
218 print(f"Parsing error: {e.message}", file=sys.stderr)
219 print(f"Parsed components: {e.components}", file=sys.stderr)
220 print(f"In line: {e.source}", file=sys.stderr)
221 print(f"In file: {source_path}", file=sys.stderr)
222 print("Fatal error. Aborting now.", file=sys.stderr)
223 sys.exit(1)
224
225 if passes:
226 logger.debug(
227 f"Extracted {len(passes)} {'passes' if len(passes) - 1 else 'pass'} from {source_path}",
228 )
229 else:
230 logger.debug(f"Found no passes in {source_path}")
231
232 return passes
233
234
235 def main(argv):
236 root = Path(argv[1])
237 assert root.is_dir(), f"Not a directory: {root}"
238 os.chdir(root)
239
240 if len(argv) > 2:
241 paths = [Path(path) for path in argv[2:]]
242 else:
243 # Get the names of all files which contain a pass definition.
244 matching_paths = []
245 grep = subprocess.check_output(
246 ["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
247 universal_newlines=True,
248 )
249 matching_paths += grep.strip().split("\n")
250 logger.debug("Processing %s files ...", len(matching_paths))
251 paths = [Path(path) for path in matching_paths]
252
253 # Build a list of pass entries.
254 rows = []
255 for path in sorted(paths):
256 passes = handle_file(path)
257 if passes:
258 rows += passes
259
260 writer = csv.writer(sys.stdout, delimiter=",", quotechar='"')
261 writer.writerow(Pass._fields)
262 writer.writerows(sorted(rows, key=lambda r: r.name))
263
264
265 if __name__ == "__main__":
266 main(sys.argv)
267
[end of compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py b/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py
--- a/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py
+++ b/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py
@@ -242,10 +242,17 @@
else:
# Get the names of all files which contain a pass definition.
matching_paths = []
- grep = subprocess.check_output(
- ["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
- universal_newlines=True,
- )
+ try:
+ grep = subprocess.check_output(
+ ["grep", "-l", "-E", rf"^\s*{INITIALIZE_PASS_RE}", "-R", "lib/"],
+ universal_newlines=True,
+ )
+ except subprocess.CalledProcessError:
+ print(
+ f"fatal: Failed to find any LLVM pass declarations in {root}",
+ file=sys.stderr,
+ )
+ sys.exit(1)
matching_paths += grep.strip().split("\n")
logger.debug("Processing %s files ...", len(matching_paths))
paths = [Path(path) for path in matching_paths]
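The condition the added `try/except` guards against is easy to reproduce in isolation: `grep -l` exits with status 1 when it finds no matches, and `subprocess.check_output()` converts any non-zero exit status into `CalledProcessError`. A small sketch, independent of the LLVM tree:

```python
import subprocess
import tempfile

with tempfile.TemporaryDirectory() as empty_dir:
    try:
        # No file in an empty directory can match, so grep exits with 1.
        subprocess.check_output(
            ["grep", "-l", "-R", "INITIALIZE_PASS(", empty_dir],
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as e:
        # Without the patch this exception escaped as a raw traceback;
        # with it, the script prints a fatal error and exits cleanly.
        print(f"grep matched nothing (exit status {e.returncode})")
```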
|
{"golden_diff": "diff --git a/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py b/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py\n--- a/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py\n+++ b/compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py\n@@ -242,10 +242,17 @@\n else:\n # Get the names of all files which contain a pass definition.\n matching_paths = []\n- grep = subprocess.check_output(\n- [\"grep\", \"-l\", \"-E\", rf\"^\\s*{INITIALIZE_PASS_RE}\", \"-R\", \"lib/\"],\n- universal_newlines=True,\n- )\n+ try:\n+ grep = subprocess.check_output(\n+ [\"grep\", \"-l\", \"-E\", rf\"^\\s*{INITIALIZE_PASS_RE}\", \"-R\", \"lib/\"],\n+ universal_newlines=True,\n+ )\n+ except subprocess.CalledProcessError:\n+ print(\n+ f\"fatal: Failed to find any LLVM pass declarations in {root}\",\n+ file=sys.stderr,\n+ )\n+ sys.exit(1)\n matching_paths += grep.strip().split(\"\\n\")\n logger.debug(\"Processing %s files ...\", len(matching_paths))\n paths = [Path(path) for path in matching_paths]\n", "issue": "protoc segfaults on macOS 12.0.1\n## \ud83d\udc1b Bug\r\n\r\nThe version of protoc used by the CompilerGym build segfaults on macOS 12.0.1:\r\n\r\n```\r\n$ bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/protoc\r\n[1] 82656 segmentation fault bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/protoc\r\n```\r\n\r\nThanks @mostafaelhoushi for discovering this!\r\n\r\n## To Reproduce\r\n\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Update to macOS 12.0.1.\r\n1. Start from a clean build: `make distclean`.\r\n1. Attempt to build CompilerGym:\r\n\r\n```\r\n$ make install BAZEL_BUILD_OPTS='--sandbox_debug'\r\n...\r\n /usr/bin/sandbox-exec -f /private/var/tmp/_bazel_cummins/c3f286fbbefcd6317d9b13e427e86632/sandbox/darwin-sandbox/3008/sandbox.sb /var/tmp/_bazel_cummins/install/97cf8d40e3de7fca7ef885fa763bde13/process-wrapper '--timeout=0' '--kill_delay=15' bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/protoc '--python_out=bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/python' -Iexternal/com_github_protocolbuffers_protobuf/python -Ibazel-out/host/bin/external/com_github_protocolbuffers_protobuf/python bazel-out/host/bin/external/com_github_protocolbuffers_protobuf/python/google/protobuf/timestamp.proto) sandbox-exec failed: error executing command\r\n...\r\n```\r\n\r\n## Environment\r\n\r\nPlease fill in this checklist:\r\n\r\n- CompilerGym: 0.2.1\r\n- How you installed CompilerGym (conda, pip, source): source\r\n- OS: macOS 12.0.1\r\n- Python version: 3.8\r\n- GCC/clang version (if compiling from source): Apple clang 12.0.5\r\n- Bazel version (if compiling from source): \r\n- Versions of any other relevant libraries:\r\n\r\nYou may use the PyTorch\r\n[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py)\r\nto generate most of this information. You can get the script and run it with:\r\n\r\n```sh\r\nwget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py\r\n# For security purposes, please check the contents of collect_env.py before running it.\r\npython collect_env.py\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. 
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Extract a list of passes form the LLVM source tree.\n\nUsage:\n\n $ extract_passes_from_llvm_source_tree /path/to/llvm/source/root\n\nOptionally accepts a list of specific files to examine:\n\n $ extract_passes_from_llvm_source_tree /path/to/llvm/source/root /path/to/llvm/source/file\n\nImplementation notes\n--------------------\n\nThis implements a not-very-good parser for the INITIALIZE_PASS() family of\nmacros, which are used in the LLVM sources to declare a pass using it's name,\nflag, and docstring. Parsing known macros like this is fragile and likely to\nbreak as the LLVM sources evolve. Currently only tested on LLVM 10.0.\n\nA more robust solution would be to parse the C++ sources and extract all classes\nwhich inherit from ModulePass etc.\n\"\"\"\nimport codecs\nimport csv\nimport logging\nimport os\nimport re\nimport subprocess\nimport sys\nfrom pathlib import Path\nfrom typing import Dict, Iterable, List, Optional, Tuple\n\nfrom common import Pass\nfrom config import CREATE_PASS_NAME_MAP\n\nlogger = logging.getLogger(__name__)\n\n# A regular expression to match the start of an invocation of one of the\n# InitializePass helper macros.\nINITIALIZE_PASS_RE = r\"(INITIALIZE_PASS|INITIALIZE_PASS_BEGIN|INITIALIZE_PASS_WITH_OPTIONS|INITIALIZE_PASS_WITH_OPTIONS_BEGIN)\\(\"\n# A regular expression to match static const string definitions.\nCONST_CHAR_RE = r'^\\s*static\\s+const\\s+char(\\s+(?P<name>[a-zA-Z_]+)\\s*\\[\\s*\\]|\\s*\\*\\s*(?P<ptr_name>[a-zA-Z_]+))\\s*=\\s*(?P<value>\".+\")\\s*;'\n\n\nclass ParseError(ValueError):\n def __init__(self, message: str, source: str, components: List[str]):\n self.message = message\n self.source = source\n self.components = components\n\n\ndef parse_initialize_pass(\n source_path: Path, header: Optional[str], input_source: str, defines: Dict[str, str]\n) -> Iterable[Pass]:\n \"\"\"A shitty parser for INITIALIZE_PASS() macro invocations..\"\"\"\n # Squish down to a single line.\n source = re.sub(r\"\\n\\s*\", \" \", input_source, re.MULTILINE)\n # Contract multi-spaces to single space.\n source = re.sub(r\",\", \", \", source)\n source = re.sub(r\"\\s+\", \" \", source)\n source = re.sub(r\"\\(\\s+\", \"(\", source)\n source = re.sub(r\"\\)\\s+\", \")\", source)\n\n # Strip the INITIALIZE_PASS(...) 
macro.\n match = re.match(rf\"^\\s*{INITIALIZE_PASS_RE}(?P<args>.+)\\)\", source)\n if not match:\n raise ParseError(\"Failed to match INITIALIZE_PASS regex\", source, [])\n source = match.group(\"args\")\n\n components = []\n start = 0\n in_quotes = False\n in_comment = False\n for i in range(len(source)):\n if (\n not in_comment\n and source[i] == \"/\"\n and i < len(source) - 1\n and source[i + 1] == \"*\"\n ):\n in_comment = True\n if (\n in_comment\n and source[i] == \"*\"\n and i < len(source) - 1\n and source[i + 1] == \"/\"\n ):\n in_comment = False\n start = i + 2\n if source[i] == '\"':\n in_quotes = not in_quotes\n if not in_quotes and source[i] == \",\":\n components.append(source[start:i].strip())\n start = i + 2\n components.append(source[start:].strip())\n if len(components) != 5:\n raise ParseError(\n f\"Expected 5 components, found {len(components)}\", source, components\n )\n\n pass_name, arg, name, cfg, analysis = components\n # Strip quotation marks in arg and name.\n if not arg:\n raise ParseError(f\"Empty arg: `{arg}`\", source, components)\n if not name:\n raise ParseError(f\"Empty name: `{name}`\", source, components)\n\n while arg in defines:\n arg = defines[arg]\n while name in defines:\n name = defines[name]\n\n if not (arg[0] == '\"' and arg[-1] == '\"'):\n raise ParseError(f\"Could not interpret arg `{arg}`\", source, components)\n arg = arg[1:-1]\n if not (name[0] == '\"' and name[-1] == '\"'):\n raise ParseError(f\"Could not interpret name `{name}`\", source, components)\n name = name[1:-1]\n\n # Convert cfg and analysis to bool.\n if cfg not in {\"true\", \"false\"}:\n raise ParseError(\n f\"Could not interpret bool cfg argument `{cfg}`\", source, components\n )\n if analysis not in {\"true\", \"false\"}:\n raise ParseError(\n f\"Could not interpret bool analysis argument `{analysis}`\",\n source,\n components,\n )\n cfg = cfg == \"true\"\n analysis = analysis == \"true\"\n\n opts = {\n \"source\": source_path,\n \"header\": header,\n \"name\": pass_name,\n \"flag\": f\"-{arg}\",\n \"description\": name,\n \"cfg\": cfg,\n \"is_analysis\": analysis,\n }\n\n pass_name_or_list = CREATE_PASS_NAME_MAP.get(pass_name, pass_name)\n\n if isinstance(pass_name_or_list, str):\n opts[\"name\"] = pass_name_or_list\n yield Pass(**opts)\n else:\n for name in pass_name_or_list:\n opts[\"name\"] = name\n yield Pass(**opts)\n\n\ndef build_defines(source: str) -> Dict[str, str]:\n \"\"\"A quick-and-dirty technique to build a translation table from #defines\n and string literals to their values.\"\"\"\n defines = {}\n lines = source.split(\"\\n\")\n for i in range(len(lines)):\n line = lines[i].strip()\n if line.startswith(\"#define\"):\n # Match #define strings.\n components = line[len(\"#define \") :].split()\n name = components[0]\n value = \" \".join(components[1:]).strip()\n if value == \"\\\\\":\n value = lines[i + 1].strip()\n defines[name] = value\n else:\n # Match string literals.\n match = re.match(CONST_CHAR_RE, line)\n if match:\n defines[match.group(\"name\") or match.group(\"ptr_name\")] = match.group(\n \"value\"\n )\n return defines\n\n\ndef handle_file(source_path: Path) -> Tuple[Path, List[Pass]]:\n \"\"\"Parse the passes declared in a file.\"\"\"\n assert str(source_path).endswith(\".cpp\"), f\"Unexpected file type: {source_path}\"\n\n header = Path(\"include/llvm/\" + str(source_path)[len(\"lib\") : -len(\"cpp\")] + \"h\")\n if not header.is_file():\n header = \"\"\n\n with codecs.open(source_path, \"r\", \"utf-8\") as f:\n source = f.read()\n\n defines = 
build_defines(source)\n\n passes: List[Pass] = []\n\n for match in re.finditer(INITIALIZE_PASS_RE, source):\n start = match.start()\n first_bracket = source.find(\"(\", start)\n bracket_depth = 1\n end = first_bracket\n for end in range(first_bracket + 1, len(source)):\n if source[end] == \"(\":\n bracket_depth += 1\n elif source[end] == \")\":\n bracket_depth -= 1\n if not bracket_depth:\n break\n\n try:\n passes += list(\n parse_initialize_pass(\n source_path, header, source[start : end + 1], defines\n )\n )\n except ParseError as e:\n print(f\"Parsing error: {e.message}\", file=sys.stderr)\n print(f\"Parsed components: {e.components}\", file=sys.stderr)\n print(f\"In line: {e.source}\", file=sys.stderr)\n print(f\"In file: {source_path}\", file=sys.stderr)\n print(\"Fatal error. Aborting now.\", file=sys.stderr)\n sys.exit(1)\n\n if passes:\n logger.debug(\n f\"Extracted {len(passes)} {'passes' if len(passes) - 1 else 'pass'} from {source_path}\",\n )\n else:\n logger.debug(f\"Found no passes in {source_path}\")\n\n return passes\n\n\ndef main(argv):\n root = Path(argv[1])\n assert root.is_dir(), f\"Not a directory: {root}\"\n os.chdir(root)\n\n if len(argv) > 2:\n paths = [Path(path) for path in argv[2:]]\n else:\n # Get the names of all files which contain a pass definition.\n matching_paths = []\n grep = subprocess.check_output(\n [\"grep\", \"-l\", \"-E\", rf\"^\\s*{INITIALIZE_PASS_RE}\", \"-R\", \"lib/\"],\n universal_newlines=True,\n )\n matching_paths += grep.strip().split(\"\\n\")\n logger.debug(\"Processing %s files ...\", len(matching_paths))\n paths = [Path(path) for path in matching_paths]\n\n # Build a list of pass entries.\n rows = []\n for path in sorted(paths):\n passes = handle_file(path)\n if passes:\n rows += passes\n\n writer = csv.writer(sys.stdout, delimiter=\",\", quotechar='\"')\n writer.writerow(Pass._fields)\n writer.writerows(sorted(rows, key=lambda r: r.name))\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "compiler_gym/envs/llvm/service/passes/extract_passes_from_llvm_source_tree.py"}]}
| 3,994 | 315 |
gh_patches_debug_13455
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-3811
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gcp serverless runtime error on implicit boto dependency
Reported in Gitter: GCP functions should not need to depend on boto3; it looks like some of the Security Hub work caused an implicit dependency on boto3.
```
textPayload: "ModuleNotFoundError: No module named 'boto3'" - Getting this error for the cloud function to stop a instance in GCP
instance-off
qte7iow5dhzi
Traceback (most recent call last): File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 346, in run_http_function result = _function_handler.invoke_user_function(flask.request) File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 217, in invoke_user_function return call_user_function(request_or_event) File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 210, in call_user_function return self._user_function(request_or_event) File "/user_code/main.py", line 21, in run from c7n_gcp.handler import run File "/user_code/c7n_gcp/handler.py", line 24, in <module> from c7n_gcp.entry import initialize_gcp File "/user_code/c7n_gcp/entry.py", line 18, in <module> import c7n_gcp.resources.bigquery File "/user_code/c7n_gcp/resources/bigquery.py", line 16, in <module> from c7n_gcp.query import QueryResourceManager, TypeInfo File "/user_code/c7n_gcp/query.py", line 23, in <module> from c7n.filters import FilterRegistry File "/user_code/c7n/filters/init.py", line 32, in <module> from .securityhub import SecurityHubFindingFilter File "/user_code/c7n/filters/securityhub.py", line 19, in <module> from c7n.resources import aws File "/user_code/c7n/resources/aws.py", line 31, in <module> import boto3 ModuleNotFoundError: No module named 'boto3
```
</issue>
<code>
[start of c7n/filters/securityhub.py]
1 # Copyright 2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 from c7n.utils import local_session, type_schema
17 from .core import Filter
18 from c7n.manager import resources
19 from c7n.resources import aws
20
21
22 class SecurityHubFindingFilter(Filter):
23 """Check if there are Security Hub Findings related to the resources
24 """
25 schema = type_schema(
26 'finding',
27 # Many folks do an aggregator region, allow them to use that
28 # for filtering.
29 region={'type': 'string'},
30 query={'type': 'object'})
31
32 permissions = ('securityhub:GetFindings',)
33 annotation_key = 'c7n:finding-filter'
34 query_shape = 'AwsSecurityFindingFilters'
35
36 def validate(self):
37 query = self.data.get('query')
38 if query:
39 aws.shape_validate(query, self.query_shape, 'securityhub')
40
41 def process(self, resources, event=None):
42 client = local_session(
43 self.manager.session_factory).client(
44 'securityhub', region_name=self.data.get('region'))
45 found = []
46 params = dict(self.data.get('query', {}))
47
48 for r_arn, resource in zip(self.manager.get_arns(resources), resources):
49 params['ResourceId'] = [{"Value": r_arn, "Comparison": "EQUALS"}]
50 findings = client.get_findings(Filters=params).get("Findings")
51 if len(findings) > 0:
52 resource[self.annotation_key] = findings
53 found.append(resource)
54 return found
55
56 @classmethod
57 def register_resources(klass, registry, resource_class):
58 """ meta model subscriber on resource registration.
59
60 SecurityHub Findings Filter
61 """
62 for rtype, resource_manager in registry.items():
63 if not resource_manager.has_arn():
64 continue
65 if 'post-finding' in resource_manager.action_registry:
66 continue
67 resource_class.filter_registry.register('finding', klass)
68
69
70 resources.subscribe(resources.EVENT_REGISTER, SecurityHubFindingFilter.register_resources)
71
[end of c7n/filters/securityhub.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/c7n/filters/securityhub.py b/c7n/filters/securityhub.py
--- a/c7n/filters/securityhub.py
+++ b/c7n/filters/securityhub.py
@@ -16,7 +16,6 @@
from c7n.utils import local_session, type_schema
from .core import Filter
from c7n.manager import resources
-from c7n.resources import aws
class SecurityHubFindingFilter(Filter):
@@ -36,6 +35,7 @@
def validate(self):
query = self.data.get('query')
if query:
+ from c7n.resources import aws
aws.shape_validate(query, self.query_shape, 'securityhub')
def process(self, resources, event=None):
|
{"golden_diff": "diff --git a/c7n/filters/securityhub.py b/c7n/filters/securityhub.py\n--- a/c7n/filters/securityhub.py\n+++ b/c7n/filters/securityhub.py\n@@ -16,7 +16,6 @@\n from c7n.utils import local_session, type_schema\n from .core import Filter\n from c7n.manager import resources\n-from c7n.resources import aws\n \n \n class SecurityHubFindingFilter(Filter):\n@@ -36,6 +35,7 @@\n def validate(self):\n query = self.data.get('query')\n if query:\n+ from c7n.resources import aws\n aws.shape_validate(query, self.query_shape, 'securityhub')\n \n def process(self, resources, event=None):\n", "issue": "gcp serverless runtime error on implicit boto dependency\nreported in gitter, gcp functions should not need to depend on boto3, looks like some of the securityhub work caused an implicit dependency on boto3.\r\n\r\n```\r\ntextPayload: \"ModuleNotFoundError: No module named 'boto3'\" - Getting this error for the cloud function to stop a instance in GCP\r\ninstance-off\r\nqte7iow5dhzi\r\nTraceback (most recent call last): File \"/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py\", line 346, in run_http_function result = _function_handler.invoke_user_function(flask.request) File \"/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py\", line 217, in invoke_user_function return call_user_function(request_or_event) File \"/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py\", line 210, in call_user_function return self._user_function(request_or_event) File \"/user_code/main.py\", line 21, in run from c7n_gcp.handler import run File \"/user_code/c7n_gcp/handler.py\", line 24, in <module> from c7n_gcp.entry import initialize_gcp File \"/user_code/c7n_gcp/entry.py\", line 18, in <module> import c7n_gcp.resources.bigquery File \"/user_code/c7n_gcp/resources/bigquery.py\", line 16, in <module> from c7n_gcp.query import QueryResourceManager, TypeInfo File \"/user_code/c7n_gcp/query.py\", line 23, in <module> from c7n.filters import FilterRegistry File \"/user_code/c7n/filters/init.py\", line 32, in <module> from .securityhub import SecurityHubFindingFilter File \"/user_code/c7n/filters/securityhub.py\", line 19, in <module> from c7n.resources import aws File \"/user_code/c7n/resources/aws.py\", line 31, in <module> import boto3 ModuleNotFoundError: No module named 'boto3\r\n```\n", "before_files": [{"content": "# Copyright 2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom c7n.utils import local_session, type_schema\nfrom .core import Filter\nfrom c7n.manager import resources\nfrom c7n.resources import aws\n\n\nclass SecurityHubFindingFilter(Filter):\n \"\"\"Check if there are Security Hub Findings related to the resources\n \"\"\"\n schema = type_schema(\n 'finding',\n # Many folks do an aggregator region, allow them to use that\n # for filtering.\n region={'type': 'string'},\n query={'type': 'object'})\n\n permissions = 
('securityhub:GetFindings',)\n annotation_key = 'c7n:finding-filter'\n query_shape = 'AwsSecurityFindingFilters'\n\n def validate(self):\n query = self.data.get('query')\n if query:\n aws.shape_validate(query, self.query_shape, 'securityhub')\n\n def process(self, resources, event=None):\n client = local_session(\n self.manager.session_factory).client(\n 'securityhub', region_name=self.data.get('region'))\n found = []\n params = dict(self.data.get('query', {}))\n\n for r_arn, resource in zip(self.manager.get_arns(resources), resources):\n params['ResourceId'] = [{\"Value\": r_arn, \"Comparison\": \"EQUALS\"}]\n findings = client.get_findings(Filters=params).get(\"Findings\")\n if len(findings) > 0:\n resource[self.annotation_key] = findings\n found.append(resource)\n return found\n\n @classmethod\n def register_resources(klass, registry, resource_class):\n \"\"\" meta model subscriber on resource registration.\n\n SecurityHub Findings Filter\n \"\"\"\n for rtype, resource_manager in registry.items():\n if not resource_manager.has_arn():\n continue\n if 'post-finding' in resource_manager.action_registry:\n continue\n resource_class.filter_registry.register('finding', klass)\n\n\nresources.subscribe(resources.EVENT_REGISTER, SecurityHubFindingFilter.register_resources)\n", "path": "c7n/filters/securityhub.py"}]}
| 1,690 | 163 |
gh_patches_debug_20277
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-1080
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reduce detail level of timestamp on posts
**Is your feature request related to a problem? Please describe.**
I think the time when a post was posted is a tad too detailed. For posts in the last 24h, it changes every time you refresh.

**Describe the solution you'd like**
I think the first unit alone would be enough.
Also, after a few days (I suggest 3), the date (Apr 28) rather than "2 weeks(, 4 days in the current version)" seems a bit more helpful. After 1 year, the date could be shown as "Apr 2021".
This is subjective of course, but imho Bookwyrm is a platform where the "when" doesn't really matter (in comparison to e.g. Mastodon where many are posting news and other stuff where the temporal context is more important).
**Describe alternatives you've considered**
Hovering over the time could show the exact time as a tooltip. I think of this rather as an addition than an alternative and think both would complement each other.
</issue>
<code>
[start of bookwyrm/templatetags/status_display.py]
1 """ template filters """
2 from django import template
3
4 from bookwyrm import models
5 from bookwyrm.templatetags.utilities import get_user_identifier
6
7
8 register = template.Library()
9
10
11 @register.filter(name="mentions")
12 def get_mentions(status, user):
13 """people to @ in a reply: the parent and all mentions"""
14 mentions = set([status.user] + list(status.mention_users.all()))
15 return (
16 " ".join("@" + get_user_identifier(m) for m in mentions if not m == user) + " "
17 )
18
19
20 @register.filter(name="replies")
21 def get_replies(status):
22 """get all direct replies to a status"""
23 # TODO: this limit could cause problems
24 return models.Status.objects.filter(
25 reply_parent=status,
26 deleted=False,
27 ).select_subclasses()[:10]
28
29
30 @register.filter(name="parent")
31 def get_parent(status):
32 """get the reply parent for a status"""
33 return (
34 models.Status.objects.filter(id=status.reply_parent_id)
35 .select_subclasses()
36 .get()
37 )
38
39
40 @register.filter(name="boosted_status")
41 def get_boosted(boost):
42 """load a boosted status. have to do this or it won't get foreign keys"""
43 return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)
44
[end of bookwyrm/templatetags/status_display.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/templatetags/status_display.py b/bookwyrm/templatetags/status_display.py
--- a/bookwyrm/templatetags/status_display.py
+++ b/bookwyrm/templatetags/status_display.py
@@ -1,6 +1,8 @@
""" template filters """
+from dateutil.relativedelta import relativedelta
from django import template
-
+from django.contrib.humanize.templatetags.humanize import naturaltime, naturalday
+from django.utils import timezone
from bookwyrm import models
from bookwyrm.templatetags.utilities import get_user_identifier
@@ -41,3 +43,17 @@
def get_boosted(boost):
"""load a boosted status. have to do this or it won't get foreign keys"""
return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)
+
+
[email protected](name="published_date")
+def get_published_date(date):
+ """less verbose combo of humanize filters"""
+ if not date:
+ return ""
+ now = timezone.now()
+ delta = relativedelta(now, date)
+ if delta.years:
+ return naturalday(date)
+ if delta.days:
+ return naturalday(date, "M j")
+ return naturaltime(date)
|
{"golden_diff": "diff --git a/bookwyrm/templatetags/status_display.py b/bookwyrm/templatetags/status_display.py\n--- a/bookwyrm/templatetags/status_display.py\n+++ b/bookwyrm/templatetags/status_display.py\n@@ -1,6 +1,8 @@\n \"\"\" template filters \"\"\"\n+from dateutil.relativedelta import relativedelta\n from django import template\n-\n+from django.contrib.humanize.templatetags.humanize import naturaltime, naturalday\n+from django.utils import timezone\n from bookwyrm import models\n from bookwyrm.templatetags.utilities import get_user_identifier\n \n@@ -41,3 +43,17 @@\n def get_boosted(boost):\n \"\"\"load a boosted status. have to do this or it won't get foreign keys\"\"\"\n return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)\n+\n+\[email protected](name=\"published_date\")\n+def get_published_date(date):\n+ \"\"\"less verbose combo of humanize filters\"\"\"\n+ if not date:\n+ return \"\"\n+ now = timezone.now()\n+ delta = relativedelta(now, date)\n+ if delta.years:\n+ return naturalday(date)\n+ if delta.days:\n+ return naturalday(date, \"M j\")\n+ return naturaltime(date)\n", "issue": "Reduce detail level of timestamp on posts\n**Is your feature request related to a problem? Please describe.**\r\nI think the time when a post was posted is a tad too detailed. For posts in the last 24h, it changes every time you refresh.\r\n\r\n\r\n**Describe the solution you'd like**\r\nI think the firstmost unit would be enough.\r\n\r\nAlso, after a few days (I suggest 3), the date (Apr 28) rather than \"2 weeks(, 4 days in the current version)\" seems a bit more helpful. After 1 year, the date could be shown in \"Apr 2021\",\r\n\r\nThis is subjective of course, but imho Bookwyrm is a platform where the \"when\" doesn't really matter (in comparison to e.g. Mastodon where many are posting news and other stuff where the temporal context is more important). \r\n\r\n**Describe alternatives you've considered**\r\nHovering over the time could show the exact time as a tooltip. I think of this rather as an addition than an alternative and think both would complement each other.\n", "before_files": [{"content": "\"\"\" template filters \"\"\"\nfrom django import template\n\nfrom bookwyrm import models\nfrom bookwyrm.templatetags.utilities import get_user_identifier\n\n\nregister = template.Library()\n\n\[email protected](name=\"mentions\")\ndef get_mentions(status, user):\n \"\"\"people to @ in a reply: the parent and all mentions\"\"\"\n mentions = set([status.user] + list(status.mention_users.all()))\n return (\n \" \".join(\"@\" + get_user_identifier(m) for m in mentions if not m == user) + \" \"\n )\n\n\[email protected](name=\"replies\")\ndef get_replies(status):\n \"\"\"get all direct replies to a status\"\"\"\n # TODO: this limit could cause problems\n return models.Status.objects.filter(\n reply_parent=status,\n deleted=False,\n ).select_subclasses()[:10]\n\n\[email protected](name=\"parent\")\ndef get_parent(status):\n \"\"\"get the reply parent for a status\"\"\"\n return (\n models.Status.objects.filter(id=status.reply_parent_id)\n .select_subclasses()\n .get()\n )\n\n\[email protected](name=\"boosted_status\")\ndef get_boosted(boost):\n \"\"\"load a boosted status. have to do this or it won't get foreign keys\"\"\"\n return models.Status.objects.select_subclasses().get(id=boost.boosted_status.id)\n", "path": "bookwyrm/templatetags/status_display.py"}]}
| 1,192 | 291 |
gh_patches_debug_35071
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-53
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auto release on PyPi on tags
General interest in that? Should be pretty easy with GitHub Actions; you only have to set a PyPI API key on your end.
Example: https://github.com/microsoft/playwright-python/new/master?filename=.github%2Fworkflows%2Fpython-publish.yml&workflow_template=python-publish
</issue>
<code>
[start of upload_package.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import subprocess
16
17 subprocess.run("python -m twine upload dist/*", shell=True)
18
[end of upload_package.py]
[start of setup.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import setuptools
16
17 with open("README.md", "r", encoding="utf-8") as fh:
18 long_description = fh.read()
19
20 setuptools.setup(
21 name="playwright",
22 version="0.0.3",
23 author="Microsoft Corporation",
24 author_email="",
25 description="A high-level API to automate web browsers",
26 long_description=long_description,
27 long_description_content_type="text/markdown",
28 url="https://github.com/Microsoft/playwright-python",
29 packages=setuptools.find_packages(),
30 include_package_data=True,
31 install_requires=["pyee", "typing-extensions",],
32 classifiers=[
33 "Topic :: Software Development :: Testing",
34 "Topic :: Internet :: WWW/HTTP :: Browsers",
35 "Intended Audience :: Developers",
36 "Programming Language :: Python :: 3",
37 "Programming Language :: Python :: 3.7",
38 "Programming Language :: Python :: 3.8",
39 "License :: OSI Approved :: Apache Software License",
40 "Operating System :: OS Independent",
41 ],
42 python_requires=">=3.7",
43 )
44
[end of setup.py]
[start of playwright/__init__.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from playwright.main import playwright_object
16 import playwright.helper as helper
17
18 chromium = playwright_object.chromium
19 firefox = playwright_object.firefox
20 webkit = playwright_object.webkit
21 devices = playwright_object.devices
22 browser_types = playwright_object.browser_types
23 Error = helper.Error
24 TimeoutError = helper.TimeoutError
25
26 __all__ = [
27 "browser_types",
28 "chromium",
29 "firefox",
30 "webkit",
31 "devices",
32 "Error",
33 "TimeoutError",
34 ]
35
[end of playwright/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/playwright/__init__.py b/playwright/__init__.py
--- a/playwright/__init__.py
+++ b/playwright/__init__.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from playwright._repo_version import version as __version__ # noqa:F401
from playwright.main import playwright_object
import playwright.helper as helper
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,16 +19,15 @@
setuptools.setup(
name="playwright",
- version="0.0.3",
author="Microsoft Corporation",
author_email="",
description="A high-level API to automate web browsers",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Microsoft/playwright-python",
- packages=setuptools.find_packages(),
+ packages=["playwright"],
include_package_data=True,
- install_requires=["pyee", "typing-extensions",],
+ install_requires=["pyee", "typing-extensions"],
classifiers=[
"Topic :: Software Development :: Testing",
"Topic :: Internet :: WWW/HTTP :: Browsers",
@@ -40,4 +39,10 @@
"Operating System :: OS Independent",
],
python_requires=">=3.7",
+ use_scm_version={
+ "version_scheme": "post-release",
+ "write_to": "playwright/_repo_version.py",
+ "write_to_template": 'version = "{version}"\n',
+ },
+ setup_requires=["setuptools_scm"],
)
diff --git a/upload_package.py b/upload_package.py
deleted file mode 100644
--- a/upload_package.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import subprocess
-
-subprocess.run("python -m twine upload dist/*", shell=True)
|
{"golden_diff": "diff --git a/playwright/__init__.py b/playwright/__init__.py\n--- a/playwright/__init__.py\n+++ b/playwright/__init__.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from playwright._repo_version import version as __version__ # noqa:F401\n from playwright.main import playwright_object\n import playwright.helper as helper\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,16 +19,15 @@\n \n setuptools.setup(\n name=\"playwright\",\n- version=\"0.0.3\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n- packages=setuptools.find_packages(),\n+ packages=[\"playwright\"],\n include_package_data=True,\n- install_requires=[\"pyee\", \"typing-extensions\",],\n+ install_requires=[\"pyee\", \"typing-extensions\"],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n@@ -40,4 +39,10 @@\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n+ use_scm_version={\n+ \"version_scheme\": \"post-release\",\n+ \"write_to\": \"playwright/_repo_version.py\",\n+ \"write_to_template\": 'version = \"{version}\"\\n',\n+ },\n+ setup_requires=[\"setuptools_scm\"],\n )\ndiff --git a/upload_package.py b/upload_package.py\ndeleted file mode 100644\n--- a/upload_package.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-# Copyright (c) Microsoft Corporation.\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-import subprocess\n-\n-subprocess.run(\"python -m twine upload dist/*\", shell=True)\n", "issue": "Auto release on PyPi on tags\nGeneral interest in that? 
Should be pretty easy with GitHub Actions, only have to set the a Pypi API key on your end.\r\n\r\nExample: https://github.com/microsoft/playwright-python/new/master?filename=.github%2Fworkflows%2Fpython-publish.yml&workflow_template=python-publish\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport subprocess\n\nsubprocess.run(\"python -m twine upload dist/*\", shell=True)\n", "path": "upload_package.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"playwright\",\n version=\"0.0.3\",\n author=\"Microsoft Corporation\",\n author_email=\"\",\n description=\"A high-level API to automate web browsers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Microsoft/playwright-python\",\n packages=setuptools.find_packages(),\n include_package_data=True,\n install_requires=[\"pyee\", \"typing-extensions\",],\n classifiers=[\n \"Topic :: Software Development :: Testing\",\n \"Topic :: Internet :: WWW/HTTP :: Browsers\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.7\",\n)\n", "path": "setup.py"}, {"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom playwright.main import playwright_object\nimport playwright.helper as helper\n\nchromium = playwright_object.chromium\nfirefox = playwright_object.firefox\nwebkit = playwright_object.webkit\ndevices = playwright_object.devices\nbrowser_types = playwright_object.browser_types\nError = helper.Error\nTimeoutError = 
helper.TimeoutError\n\n__all__ = [\n \"browser_types\",\n \"chromium\",\n \"firefox\",\n \"webkit\",\n \"devices\",\n \"Error\",\n \"TimeoutError\",\n]\n", "path": "playwright/__init__.py"}]}
| 1,523 | 579 |
gh_patches_debug_16968
|
rasdani/github-patches
|
git_diff
|
digitalfabrik__integreat-cms-204
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add short description title to POIs
In addition to the name of a POI, it might be beneficial to have a short title that describes the purpose of the POI. For example, if the names of associations or locations are not self-explanatory, it could be helpful to show this title in a list view or similar whenever it is not suitable to show the full-text description of a POI.
</issue>
<code>
[start of backend/cms/views/pois/poi_form.py]
1 """
2 Form for creating a poi object and poi translation object
3 """
4
5 import logging
6
7 from django import forms
8 from django.utils.translation import ugettext_lazy as _
9
10 from ...models import POI, POITranslation
11 from ..utils.slug_utils import generate_unique_slug
12
13 logger = logging.getLogger(__name__)
14
15
16 class POIForm(forms.ModelForm):
17 """
18 DjangoForm Class, that can be rendered to create deliverable HTML
19
20 Args:
21 forms : Defines the form as an Model form related to a database object
22 """
23
24 class Meta:
25 model = POI
26 fields = ['address', 'postcode', 'city', 'country', 'latitude', 'longitude']
27
28 def __init__(self, *args, **kwargs):
29
30 logger.info(
31 'New POIForm instantiated with args %s and kwargs %s',
32 args,
33 kwargs
34 )
35
36 # pop kwarg to make sure the super class does not get this param
37 self.region = kwargs.pop('region', None)
38
39 # instantiate ModelForm
40 super(POIForm, self).__init__(*args, **kwargs)
41
42
43 # pylint: disable=W0221
44 def save(self, *args, **kwargs):
45
46 logger.info(
47 'POIForm saved with args %s and kwargs %s',
48 args,
49 kwargs
50 )
51
52 # don't commit saving of ModelForm, because required fields are still missing
53 kwargs['commit'] = False
54 poi = super(POIForm, self).save(*args, **kwargs)
55
56 if not self.instance.id:
57 # only update these values when poi is created
58 poi.region = self.region
59 poi.save()
60 return poi
61
62
63 class POITranslationForm(forms.ModelForm):
64 """
65 DjangoForm Class, that can be rendered to create deliverable HTML
66
67 Args:
68 forms : Defines the form as an Model form related to a database object
69 """
70
71 PUBLIC_CHOICES = (
72 (True, _('Public')),
73 (False, _('Private')),
74 )
75
76 class Meta:
77 model = POITranslation
78 fields = ['title', 'status', 'description', 'slug', 'public']
79
80 def __init__(self, *args, **kwargs):
81
82 logger.info(
83 'New POITranslationForm with args %s and kwargs %s',
84 args,
85 kwargs
86 )
87
88 # pop kwarg to make sure the super class does not get this param
89 self.region = kwargs.pop('region', None)
90 self.language = kwargs.pop('language', None)
91
92 super(POITranslationForm, self).__init__(*args, **kwargs)
93
94 self.fields['public'].widget = forms.Select(choices=self.PUBLIC_CHOICES)
95
96 # pylint: disable=W0221
97 def save(self, *args, **kwargs):
98
99 logger.info(
100 'POITranslationForm saved with args %s and kwargs %s',
101 args,
102 kwargs
103 )
104
105 # pop kwarg to make sure the super class does not get this param
106 poi = kwargs.pop('poi', None)
107 user = kwargs.pop('user', None)
108
109 if not self.instance.id:
110 # don't commit saving of ModelForm, because required fields are still missing
111 kwargs['commit'] = False
112
113 poi_translation = super(POITranslationForm, self).save(*args, **kwargs)
114
115 if not self.instance.id:
116 # only update these values when poi translation is created
117 poi_translation.poi = poi
118 poi_translation.creator = user
119 poi_translation.language = self.language
120
121 poi_translation.save()
122
123 return poi_translation
124
125 def clean_slug(self):
126 return generate_unique_slug(self, 'poi')
127
[end of backend/cms/views/pois/poi_form.py]
[start of backend/cms/models/poi.py]
1 """Model for Point of Interests
2
3 """
4 from django.db import models
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.conf import settings
7 from django.utils import timezone
8
9 from .region import Region
10 from .language import Language
11
12
13 class POI(models.Model):
14 """Object for Point of Interests
15
16 Args:
17 models : Databas model inherit from the standard django models
18 """
19
20 region = models.ForeignKey(Region, related_name='pois', on_delete=models.CASCADE)
21 address = models.CharField(max_length=250)
22 postcode = models.CharField(max_length=10)
23 city = models.CharField(max_length=250)
24 country = models.CharField(max_length=250)
25 latitude = models.FloatField()
26 longitude = models.FloatField()
27
28 @classmethod
29 def get_list_view(cls):
30 """Provides List of all POIs in german
31
32 Returns:
33 [POI]: List of all german POIs
34 """
35
36 poi_translations = POITranslation.objects.filter(
37 language='de'
38 ).select_related('creator')
39 pois = cls.objects.all().prefetch_related(
40 models.Prefetch('poi_translations', queryset=poi_translations)
41 ).filter(poi_translations__language='de')
42
43 return pois
44
45 class Meta:
46 default_permissions = ()
47 permissions = (
48 ('manage_pois', 'Can manage points of interest'),
49 )
50
51 @property
52 def languages(self):
53 poi_translations = self.translations.prefetch_related('language').all()
54 languages = []
55 for poi_translation in poi_translations:
56 languages.append(poi_translation.language)
57 return languages
58
59 def get_translation(self, language_code):
60 try:
61 poi_translation = self.translations.get(language__code=language_code)
62 except ObjectDoesNotExist:
63 poi_translation = None
64 return poi_translation
65
66
67 class POITranslation(models.Model):
68 """Translation of an Point of Interest
69
70 Args:
71 models : Databas model inherit from the standard django models
72 """
73 title = models.CharField(max_length=250)
74 slug = models.SlugField(max_length=200, blank=True)
75 poi = models.ForeignKey(POI, related_name='translations', null=True,
76 on_delete=models.SET_NULL)
77 permalink = models.CharField(max_length=60)
78 STATUS = (
79 ('draft', 'Entwurf'),
80 ('in-review', 'Ausstehender Review'),
81 ('reviewed', 'Review abgeschlossen'),
82 )
83 status = models.CharField(max_length=9, choices=STATUS, default='draft')
84 description = models.TextField()
85 language = models.ForeignKey(Language, on_delete=models.CASCADE)
86 version = models.PositiveIntegerField(default=0)
87 minor_edit = models.BooleanField(default=False)
88 public = models.BooleanField(default=False)
89 created_date = models.DateTimeField(default=timezone.now)
90 last_updated = models.DateTimeField(auto_now=True)
91 creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
92
93 class Meta:
94 default_permissions = ()
95
[end of backend/cms/models/poi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/backend/cms/models/poi.py b/backend/cms/models/poi.py
--- a/backend/cms/models/poi.py
+++ b/backend/cms/models/poi.py
@@ -81,6 +81,7 @@
('reviewed', 'Review abgeschlossen'),
)
status = models.CharField(max_length=9, choices=STATUS, default='draft')
+ short_description = models.CharField(max_length=250)
description = models.TextField()
language = models.ForeignKey(Language, on_delete=models.CASCADE)
version = models.PositiveIntegerField(default=0)
diff --git a/backend/cms/views/pois/poi_form.py b/backend/cms/views/pois/poi_form.py
--- a/backend/cms/views/pois/poi_form.py
+++ b/backend/cms/views/pois/poi_form.py
@@ -75,7 +75,7 @@
class Meta:
model = POITranslation
- fields = ['title', 'status', 'description', 'slug', 'public']
+ fields = ['title', 'short_description', 'status', 'description', 'slug', 'public']
def __init__(self, *args, **kwargs):
|
{"golden_diff": "diff --git a/backend/cms/models/poi.py b/backend/cms/models/poi.py\n--- a/backend/cms/models/poi.py\n+++ b/backend/cms/models/poi.py\n@@ -81,6 +81,7 @@\n ('reviewed', 'Review abgeschlossen'),\n )\n status = models.CharField(max_length=9, choices=STATUS, default='draft')\n+ short_description = models.CharField(max_length=250)\n description = models.TextField()\n language = models.ForeignKey(Language, on_delete=models.CASCADE)\n version = models.PositiveIntegerField(default=0)\ndiff --git a/backend/cms/views/pois/poi_form.py b/backend/cms/views/pois/poi_form.py\n--- a/backend/cms/views/pois/poi_form.py\n+++ b/backend/cms/views/pois/poi_form.py\n@@ -75,7 +75,7 @@\n \n class Meta:\n model = POITranslation\n- fields = ['title', 'status', 'description', 'slug', 'public']\n+ fields = ['title', 'short_description', 'status', 'description', 'slug', 'public']\n \n def __init__(self, *args, **kwargs):\n", "issue": "Add short description title to POIs\nAdditionally to the name of a POI, it might be beneficial to have a short title which describes the purpose of the POI. For example, if names of associations or locations are not self-explanatory, it could be helpful to show this title in a list view or similar whenever it is not suitable to show the full-text description of a POI.\n", "before_files": [{"content": "\"\"\"\nForm for creating a poi object and poi translation object\n\"\"\"\n\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ...models import POI, POITranslation\nfrom ..utils.slug_utils import generate_unique_slug\n\nlogger = logging.getLogger(__name__)\n\n\nclass POIForm(forms.ModelForm):\n \"\"\"\n DjangoForm Class, that can be rendered to create deliverable HTML\n\n Args:\n forms : Defines the form as an Model form related to a database object\n \"\"\"\n\n class Meta:\n model = POI\n fields = ['address', 'postcode', 'city', 'country', 'latitude', 'longitude']\n\n def __init__(self, *args, **kwargs):\n\n logger.info(\n 'New POIForm instantiated with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n self.region = kwargs.pop('region', None)\n\n # instantiate ModelForm\n super(POIForm, self).__init__(*args, **kwargs)\n\n\n # pylint: disable=W0221\n def save(self, *args, **kwargs):\n\n logger.info(\n 'POIForm saved with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # don't commit saving of ModelForm, because required fields are still missing\n kwargs['commit'] = False\n poi = super(POIForm, self).save(*args, **kwargs)\n\n if not self.instance.id:\n # only update these values when poi is created\n poi.region = self.region\n poi.save()\n return poi\n\n\nclass POITranslationForm(forms.ModelForm):\n \"\"\"\n DjangoForm Class, that can be rendered to create deliverable HTML\n\n Args:\n forms : Defines the form as an Model form related to a database object\n \"\"\"\n\n PUBLIC_CHOICES = (\n (True, _('Public')),\n (False, _('Private')),\n )\n\n class Meta:\n model = POITranslation\n fields = ['title', 'status', 'description', 'slug', 'public']\n\n def __init__(self, *args, **kwargs):\n\n logger.info(\n 'New POITranslationForm with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n self.region = kwargs.pop('region', None)\n self.language = kwargs.pop('language', None)\n\n super(POITranslationForm, self).__init__(*args, **kwargs)\n\n self.fields['public'].widget = 
forms.Select(choices=self.PUBLIC_CHOICES)\n\n # pylint: disable=W0221\n def save(self, *args, **kwargs):\n\n logger.info(\n 'POITranslationForm saved with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n poi = kwargs.pop('poi', None)\n user = kwargs.pop('user', None)\n\n if not self.instance.id:\n # don't commit saving of ModelForm, because required fields are still missing\n kwargs['commit'] = False\n\n poi_translation = super(POITranslationForm, self).save(*args, **kwargs)\n\n if not self.instance.id:\n # only update these values when poi translation is created\n poi_translation.poi = poi\n poi_translation.creator = user\n poi_translation.language = self.language\n\n poi_translation.save()\n\n return poi_translation\n\n def clean_slug(self):\n return generate_unique_slug(self, 'poi')\n", "path": "backend/cms/views/pois/poi_form.py"}, {"content": "\"\"\"Model for Point of Interests\n\n\"\"\"\nfrom django.db import models\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom .region import Region\nfrom .language import Language\n\n\nclass POI(models.Model):\n \"\"\"Object for Point of Interests\n\n Args:\n models : Databas model inherit from the standard django models\n \"\"\"\n\n region = models.ForeignKey(Region, related_name='pois', on_delete=models.CASCADE)\n address = models.CharField(max_length=250)\n postcode = models.CharField(max_length=10)\n city = models.CharField(max_length=250)\n country = models.CharField(max_length=250)\n latitude = models.FloatField()\n longitude = models.FloatField()\n\n @classmethod\n def get_list_view(cls):\n \"\"\"Provides List of all POIs in german\n\n Returns:\n [POI]: List of all german POIs\n \"\"\"\n\n poi_translations = POITranslation.objects.filter(\n language='de'\n ).select_related('creator')\n pois = cls.objects.all().prefetch_related(\n models.Prefetch('poi_translations', queryset=poi_translations)\n ).filter(poi_translations__language='de')\n\n return pois\n\n class Meta:\n default_permissions = ()\n permissions = (\n ('manage_pois', 'Can manage points of interest'),\n )\n\n @property\n def languages(self):\n poi_translations = self.translations.prefetch_related('language').all()\n languages = []\n for poi_translation in poi_translations:\n languages.append(poi_translation.language)\n return languages\n\n def get_translation(self, language_code):\n try:\n poi_translation = self.translations.get(language__code=language_code)\n except ObjectDoesNotExist:\n poi_translation = None\n return poi_translation\n\n\nclass POITranslation(models.Model):\n \"\"\"Translation of an Point of Interest\n\n Args:\n models : Databas model inherit from the standard django models\n \"\"\"\n title = models.CharField(max_length=250)\n slug = models.SlugField(max_length=200, blank=True)\n poi = models.ForeignKey(POI, related_name='translations', null=True,\n on_delete=models.SET_NULL)\n permalink = models.CharField(max_length=60)\n STATUS = (\n ('draft', 'Entwurf'),\n ('in-review', 'Ausstehender Review'),\n ('reviewed', 'Review abgeschlossen'),\n )\n status = models.CharField(max_length=9, choices=STATUS, default='draft')\n description = models.TextField()\n language = models.ForeignKey(Language, on_delete=models.CASCADE)\n version = models.PositiveIntegerField(default=0)\n minor_edit = models.BooleanField(default=False)\n public = models.BooleanField(default=False)\n created_date = 
models.DateTimeField(default=timezone.now)\n last_updated = models.DateTimeField(auto_now=True)\n creator = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)\n\n class Meta:\n default_permissions = ()\n", "path": "backend/cms/models/poi.py"}]}
| 2,561 | 250 |
gh_patches_debug_36108
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1463
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
User creation in the admin is broken
Sentry Issue: [CONCREXIT-3F](https://sentry.io/organizations/thalia/issues/1844597243/?referrer=github_integration)
```
FieldError: Unknown field(s) (password2, password1) specified for User
File "django/contrib/admin/options.py", line 702, in get_form
return modelform_factory(self.model, **defaults)
File "django/forms/models.py", line 554, in modelform_factory
return type(form)(class_name, (form,), form_class_attrs)
File "django/forms/models.py", line 267, in __new__
raise FieldError(message)
FieldError: Unknown field(s) (password2, password1) specified for User. Check fields/fieldsets/exclude attributes of class UserAdmin.
(15 additional frame(s) were not displayed)
...
File "django/utils/decorators.py", line 130, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "django/contrib/admin/options.py", line 1522, in changeform_view
return self._changeform_view(request, object_id, form_url, extra_context)
File "django/contrib/admin/options.py", line 1555, in _changeform_view
ModelForm = self.get_form(request, obj, change=not add)
File "django/contrib/auth/admin.py", line 80, in get_form
return super().get_form(request, obj, **defaults)
File "django/contrib/admin/options.py", line 704, in get_form
raise FieldError(
```
</issue>
<code>
[start of website/members/forms.py]
1 """Forms defined by the members package."""
2 from django import forms
3 from django.conf import settings
4 from django.contrib.auth import get_user_model
5 from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
6 from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
7 from django.core.validators import RegexValidator
8 from django.utils.translation import gettext_lazy as _
9
10 from members import emails
11 from .models import Profile
12
13
14 class ProfileForm(forms.ModelForm):
15 """Form with all the user editable fields of a Profile model."""
16
17 class Meta:
18 fields = [
19 "show_birthday",
20 "address_street",
21 "address_street2",
22 "address_postal_code",
23 "address_city",
24 "address_country",
25 "phone_number",
26 "emergency_contact",
27 "emergency_contact_phone_number",
28 "website",
29 "profile_description",
30 "nickname",
31 "initials",
32 "display_name_preference",
33 "photo",
34 "receive_optin",
35 "receive_newsletter",
36 "receive_magazine",
37 "email_gsuite_only",
38 ]
39 model = Profile
40
41 def __init__(self, *args, **kwargs):
42 super().__init__(*args, **kwargs)
43 if not kwargs["instance"].user.is_staff:
44 self.fields["email_gsuite_only"].widget = self.fields[
45 "email_gsuite_only"
46 ].hidden_widget()
47
48
49 class UserCreationForm(BaseUserCreationForm):
50 """Custom Form that removes the password fields from user creation and sends a welcome message when a user is created."""
51
52 # Don't forget to edit the formset in admin.py!
53 # This is a stupid quirk of the user admin.
54
55 # shadow the password fields to prevent validation errors,
56 # since we generate the passwords dynamically.
57 password1 = None
58 password2 = None
59
60 def __init__(self, *args, **kwargs):
61 super().__init__(*args, **kwargs)
62 for field in ("email", "first_name", "last_name"):
63 self.fields[field].required = True
64
65 send_welcome_email = forms.BooleanField(
66 label=_("Send welcome email"),
67 help_text=_("This email will include the generated password"),
68 required=False,
69 initial=True,
70 )
71
72 def clean(self):
73 if "username" in self.cleaned_data:
74 self.cleaned_data["username"] = self.cleaned_data["username"].lower()
75 super().clean()
76
77 def save(self, commit=True):
78 password = get_user_model().objects.make_random_password(length=15)
79 # pass the password on as if it was filled in, so that save() works
80 self.cleaned_data["password1"] = password
81 user = super().save(commit=False)
82 user.set_password(password)
83 if commit:
84 user.save()
85 if self.cleaned_data["send_welcome_email"]:
86 language = settings.LANGUAGE_CODE
87 emails.send_welcome_message(user, password, language)
88 return user
89
90 class Meta:
91 fields = ("username", "first_name", "last_name", "send_welcome_email")
92
93
94 class UserChangeForm(BaseUserChangeForm):
95 """Custom user edit form that adds fields for first/last name and email.
96
97 It also force-lowercases the username on save
98 """
99
100 username = forms.CharField(
101 label=_("Username"),
102 required=True,
103 help_text=_("Required. 64 characters or fewer. Letters and digits only."),
104 widget=forms.TextInput(attrs={"class": "vTextField", "maxlength": 64}),
105 validators=[
106 RegexValidator(
107 regex="^[a-zA-Z0-9]{1,64}$",
108 message=_(
109 "Please use 64 characters or fewer. Letters and digits only."
110 ),
111 )
112 ],
113 )
114
115 first_name = forms.CharField(
116 label=_("First name"),
117 required=True,
118 widget=forms.TextInput(attrs={"class": "vTextField", "maxlength": 30}),
119 )
120 last_name = forms.CharField(
121 label=_("Last name"),
122 required=True,
123 widget=forms.TextInput(attrs={"class": "vTextField", "maxlength": 150}),
124 )
125 email = forms.CharField(
126 label=_("Email address"),
127 required=True,
128 widget=forms.EmailInput(attrs={"class": "vTextField", "maxlength": 254}),
129 )
130
131 def clean(self):
132 if "username" in self.cleaned_data:
133 self.cleaned_data["username"] = self.cleaned_data["username"].lower()
134 super().clean()
135
[end of website/members/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/members/forms.py b/website/members/forms.py
--- a/website/members/forms.py
+++ b/website/members/forms.py
@@ -1,13 +1,10 @@
"""Forms defined by the members package."""
from django import forms
-from django.conf import settings
-from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
from django.core.validators import RegexValidator
from django.utils.translation import gettext_lazy as _
-from members import emails
from .models import Profile
@@ -47,48 +44,15 @@
class UserCreationForm(BaseUserCreationForm):
- """Custom Form that removes the password fields from user creation and sends a welcome message when a user is created."""
-
- # Don't forget to edit the formset in admin.py!
- # This is a stupid quirk of the user admin.
-
- # shadow the password fields to prevent validation errors,
- # since we generate the passwords dynamically.
- password1 = None
- password2 = None
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- for field in ("email", "first_name", "last_name"):
- self.fields[field].required = True
-
- send_welcome_email = forms.BooleanField(
- label=_("Send welcome email"),
- help_text=_("This email will include the generated password"),
- required=False,
- initial=True,
- )
+ """Custom Form that lowercases the username on creation."""
def clean(self):
if "username" in self.cleaned_data:
self.cleaned_data["username"] = self.cleaned_data["username"].lower()
super().clean()
- def save(self, commit=True):
- password = get_user_model().objects.make_random_password(length=15)
- # pass the password on as if it was filled in, so that save() works
- self.cleaned_data["password1"] = password
- user = super().save(commit=False)
- user.set_password(password)
- if commit:
- user.save()
- if self.cleaned_data["send_welcome_email"]:
- language = settings.LANGUAGE_CODE
- emails.send_welcome_message(user, password, language)
- return user
-
class Meta:
- fields = ("username", "first_name", "last_name", "send_welcome_email")
+ fields = ("username", "first_name", "last_name")
class UserChangeForm(BaseUserChangeForm):
|
{"golden_diff": "diff --git a/website/members/forms.py b/website/members/forms.py\n--- a/website/members/forms.py\n+++ b/website/members/forms.py\n@@ -1,13 +1,10 @@\n \"\"\"Forms defined by the members package.\"\"\"\n from django import forms\n-from django.conf import settings\n-from django.contrib.auth import get_user_model\n from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm\n from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\n from django.core.validators import RegexValidator\n from django.utils.translation import gettext_lazy as _\n \n-from members import emails\n from .models import Profile\n \n \n@@ -47,48 +44,15 @@\n \n \n class UserCreationForm(BaseUserCreationForm):\n- \"\"\"Custom Form that removes the password fields from user creation and sends a welcome message when a user is created.\"\"\"\n-\n- # Don't forget to edit the formset in admin.py!\n- # This is a stupid quirk of the user admin.\n-\n- # shadow the password fields to prevent validation errors,\n- # since we generate the passwords dynamically.\n- password1 = None\n- password2 = None\n-\n- def __init__(self, *args, **kwargs):\n- super().__init__(*args, **kwargs)\n- for field in (\"email\", \"first_name\", \"last_name\"):\n- self.fields[field].required = True\n-\n- send_welcome_email = forms.BooleanField(\n- label=_(\"Send welcome email\"),\n- help_text=_(\"This email will include the generated password\"),\n- required=False,\n- initial=True,\n- )\n+ \"\"\"Custom Form that lowercases the username on creation.\"\"\"\n \n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n \n- def save(self, commit=True):\n- password = get_user_model().objects.make_random_password(length=15)\n- # pass the password on as if it was filled in, so that save() works\n- self.cleaned_data[\"password1\"] = password\n- user = super().save(commit=False)\n- user.set_password(password)\n- if commit:\n- user.save()\n- if self.cleaned_data[\"send_welcome_email\"]:\n- language = settings.LANGUAGE_CODE\n- emails.send_welcome_message(user, password, language)\n- return user\n-\n class Meta:\n- fields = (\"username\", \"first_name\", \"last_name\", \"send_welcome_email\")\n+ fields = (\"username\", \"first_name\", \"last_name\")\n \n \n class UserChangeForm(BaseUserChangeForm):\n", "issue": "User creation in the admin is broken\nSentry Issue: [CONCREXIT-3F](https://sentry.io/organizations/thalia/issues/1844597243/?referrer=github_integration)\n\n```\nFieldError: Unknown field(s) (password2, password1) specified for User\n File \"django/contrib/admin/options.py\", line 702, in get_form\n return modelform_factory(self.model, **defaults)\n File \"django/forms/models.py\", line 554, in modelform_factory\n return type(form)(class_name, (form,), form_class_attrs)\n File \"django/forms/models.py\", line 267, in __new__\n raise FieldError(message)\n\nFieldError: Unknown field(s) (password2, password1) specified for User. 
Check fields/fieldsets/exclude attributes of class UserAdmin.\n(15 additional frame(s) were not displayed)\n...\n File \"django/utils/decorators.py\", line 130, in _wrapped_view\n response = view_func(request, *args, **kwargs)\n File \"django/contrib/admin/options.py\", line 1522, in changeform_view\n return self._changeform_view(request, object_id, form_url, extra_context)\n File \"django/contrib/admin/options.py\", line 1555, in _changeform_view\n ModelForm = self.get_form(request, obj, change=not add)\n File \"django/contrib/auth/admin.py\", line 80, in get_form\n return super().get_form(request, obj, **defaults)\n File \"django/contrib/admin/options.py\", line 704, in get_form\n raise FieldError(\n```\n", "before_files": [{"content": "\"\"\"Forms defined by the members package.\"\"\"\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm\nfrom django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm\nfrom django.core.validators import RegexValidator\nfrom django.utils.translation import gettext_lazy as _\n\nfrom members import emails\nfrom .models import Profile\n\n\nclass ProfileForm(forms.ModelForm):\n \"\"\"Form with all the user editable fields of a Profile model.\"\"\"\n\n class Meta:\n fields = [\n \"show_birthday\",\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n \"phone_number\",\n \"emergency_contact\",\n \"emergency_contact_phone_number\",\n \"website\",\n \"profile_description\",\n \"nickname\",\n \"initials\",\n \"display_name_preference\",\n \"photo\",\n \"receive_optin\",\n \"receive_newsletter\",\n \"receive_magazine\",\n \"email_gsuite_only\",\n ]\n model = Profile\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not kwargs[\"instance\"].user.is_staff:\n self.fields[\"email_gsuite_only\"].widget = self.fields[\n \"email_gsuite_only\"\n ].hidden_widget()\n\n\nclass UserCreationForm(BaseUserCreationForm):\n \"\"\"Custom Form that removes the password fields from user creation and sends a welcome message when a user is created.\"\"\"\n\n # Don't forget to edit the formset in admin.py!\n # This is a stupid quirk of the user admin.\n\n # shadow the password fields to prevent validation errors,\n # since we generate the passwords dynamically.\n password1 = None\n password2 = None\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n for field in (\"email\", \"first_name\", \"last_name\"):\n self.fields[field].required = True\n\n send_welcome_email = forms.BooleanField(\n label=_(\"Send welcome email\"),\n help_text=_(\"This email will include the generated password\"),\n required=False,\n initial=True,\n )\n\n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n\n def save(self, commit=True):\n password = get_user_model().objects.make_random_password(length=15)\n # pass the password on as if it was filled in, so that save() works\n self.cleaned_data[\"password1\"] = password\n user = super().save(commit=False)\n user.set_password(password)\n if commit:\n user.save()\n if self.cleaned_data[\"send_welcome_email\"]:\n language = settings.LANGUAGE_CODE\n emails.send_welcome_message(user, password, language)\n return user\n\n class Meta:\n fields = (\"username\", \"first_name\", \"last_name\", 
\"send_welcome_email\")\n\n\nclass UserChangeForm(BaseUserChangeForm):\n \"\"\"Custom user edit form that adds fields for first/last name and email.\n\n It also force-lowercases the username on save\n \"\"\"\n\n username = forms.CharField(\n label=_(\"Username\"),\n required=True,\n help_text=_(\"Required. 64 characters or fewer. Letters and digits only.\"),\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 64}),\n validators=[\n RegexValidator(\n regex=\"^[a-zA-Z0-9]{1,64}$\",\n message=_(\n \"Please use 64 characters or fewer. Letters and digits only.\"\n ),\n )\n ],\n )\n\n first_name = forms.CharField(\n label=_(\"First name\"),\n required=True,\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 30}),\n )\n last_name = forms.CharField(\n label=_(\"Last name\"),\n required=True,\n widget=forms.TextInput(attrs={\"class\": \"vTextField\", \"maxlength\": 150}),\n )\n email = forms.CharField(\n label=_(\"Email address\"),\n required=True,\n widget=forms.EmailInput(attrs={\"class\": \"vTextField\", \"maxlength\": 254}),\n )\n\n def clean(self):\n if \"username\" in self.cleaned_data:\n self.cleaned_data[\"username\"] = self.cleaned_data[\"username\"].lower()\n super().clean()\n", "path": "website/members/forms.py"}]}
| 2,156 | 575 |
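The patch above resolves the admin's `FieldError: Unknown field(s) (password2, password1) specified for User` by dropping the shadowed password fields and the welcome-email handling from `UserCreationForm`, leaving only the username-lowercasing behaviour. A minimal sketch of such a reduced form is shown below; it assumes a standard Django setup and is illustrative rather than the project's exact code.

```py
# Sketch of a creation form that only normalises the username on save.
# Assumes a standard Django auth setup; not the project's exact code.
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm


class LowercasingUserCreationForm(BaseUserCreationForm):
    """Custom form that lowercases the username on creation."""

    def clean(self):
        # Force-lowercase the username before the base class validates it.
        if "username" in self.cleaned_data:
            self.cleaned_data["username"] = self.cleaned_data["username"].lower()
        return super().clean()

    class Meta(BaseUserCreationForm.Meta):
        fields = ("username", "first_name", "last_name")
```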
gh_patches_debug_166
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-9516
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2024.4.0 LongRunningTransaction
**Describe the bug**
Prometheus alert for a long running transaction.
I think the transaction is
```
SELECT pg_advisory_unlock($1)
```
**To Reproduce**
No activity, sitting idle
**Expected behavior**
Shouldn't have the alert
**Screenshots**
**Logs**
**Version and Deployment (please complete the following information):**
2024.4.0 kubernetes
**Additional context**
Add any other context about the problem here.
</issue>
<code>
[start of lifecycle/migrate.py]
1 #!/usr/bin/env python
2 """System Migration handler"""
3 from importlib.util import module_from_spec, spec_from_file_location
4 from inspect import getmembers, isclass
5 from os import environ, system
6 from pathlib import Path
7 from typing import Any
8
9 from psycopg import Connection, Cursor, connect
10 from structlog.stdlib import get_logger
11
12 from authentik.lib.config import CONFIG
13
14 LOGGER = get_logger()
15 ADV_LOCK_UID = 1000
16 LOCKED = False
17
18
19 class CommandError(Exception):
20 """Error raised when a system_crit command fails"""
21
22
23 class BaseMigration:
24 """Base System Migration"""
25
26 cur: Cursor
27 con: Connection
28
29 def __init__(self, cur: Any, con: Any):
30 self.cur = cur
31 self.con = con
32
33 def system_crit(self, command: str):
34 """Run system command"""
35 LOGGER.debug("Running system_crit command", command=command)
36 retval = system(command) # nosec
37 if retval != 0:
38 raise CommandError("Migration error")
39
40 def fake_migration(self, *app_migration: tuple[str, str]):
41 """Fake apply a list of migrations, arguments are
42 expected to be tuples of (app_label, migration_name)"""
43 for app, _migration in app_migration:
44 self.system_crit(f"./manage.py migrate {app} {_migration} --fake")
45
46 def needs_migration(self) -> bool:
47 """Return true if Migration needs to be run"""
48 return False
49
50 def run(self):
51 """Run the actual migration"""
52
53
54 def wait_for_lock(cursor: Cursor):
55 """lock an advisory lock to prevent multiple instances from migrating at once"""
56 LOGGER.info("waiting to acquire database lock")
57 cursor.execute("SELECT pg_advisory_lock(%s)", (ADV_LOCK_UID,))
58
59 global LOCKED # noqa: PLW0603
60 LOCKED = True
61
62
63 def release_lock(cursor: Cursor):
64 """Release database lock"""
65 if not LOCKED:
66 return
67 LOGGER.info("releasing database lock")
68 cursor.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
69
70
71 def run_migrations():
72 conn = connect(
73 dbname=CONFIG.get("postgresql.name"),
74 user=CONFIG.get("postgresql.user"),
75 password=CONFIG.get("postgresql.password"),
76 host=CONFIG.get("postgresql.host"),
77 port=CONFIG.get_int("postgresql.port"),
78 sslmode=CONFIG.get("postgresql.sslmode"),
79 sslrootcert=CONFIG.get("postgresql.sslrootcert"),
80 sslcert=CONFIG.get("postgresql.sslcert"),
81 sslkey=CONFIG.get("postgresql.sslkey"),
82 )
83 curr = conn.cursor()
84 try:
85 for migration_path in Path(__file__).parent.absolute().glob("system_migrations/*.py"):
86 spec = spec_from_file_location("lifecycle.system_migrations", migration_path)
87 if not spec:
88 continue
89 mod = module_from_spec(spec)
90 spec.loader.exec_module(mod)
91
92 for name, sub in getmembers(mod, isclass):
93 if name != "Migration":
94 continue
95 migration = sub(curr, conn)
96 if migration.needs_migration():
97 wait_for_lock(curr)
98 LOGGER.info("Migration needs to be applied", migration=migration_path.name)
99 migration.run()
100 LOGGER.info("Migration finished applying", migration=migration_path.name)
101 release_lock(curr)
102 LOGGER.info("applying django migrations")
103 environ.setdefault("DJANGO_SETTINGS_MODULE", "authentik.root.settings")
104 wait_for_lock(curr)
105 try:
106 from django.core.management import execute_from_command_line
107 except ImportError as exc:
108 raise ImportError(
109 "Couldn't import Django. Are you sure it's installed and "
110 "available on your PYTHONPATH environment variable? Did you "
111 "forget to activate a virtual environment?"
112 ) from exc
113 execute_from_command_line(["", "migrate_schemas"])
114 execute_from_command_line(["", "migrate_schemas", "--schema", "template", "--tenant"])
115 execute_from_command_line(
116 ["", "check"] + ([] if CONFIG.get_bool("debug") else ["--deploy"])
117 )
118 finally:
119 release_lock(curr)
120
121
122 if __name__ == "__main__":
123 run_migrations()
124
[end of lifecycle/migrate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lifecycle/migrate.py b/lifecycle/migrate.py
--- a/lifecycle/migrate.py
+++ b/lifecycle/migrate.py
@@ -117,6 +117,8 @@
)
finally:
release_lock(curr)
+ curr.close()
+ conn.close()
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/lifecycle/migrate.py b/lifecycle/migrate.py\n--- a/lifecycle/migrate.py\n+++ b/lifecycle/migrate.py\n@@ -117,6 +117,8 @@\n )\n finally:\n release_lock(curr)\n+ curr.close()\n+ conn.close()\n \n \n if __name__ == \"__main__\":\n", "issue": "2024.4.0 LongRunningTransaction\n**Describe the bug**\r\nPrometheus alert for a long running transaction.\r\n\r\nI think the transaction is\r\n\r\n```\r\nSELECT pg_advisory_unlock($1)\r\n```\r\n\r\n**To Reproduce**\r\nNo activity, sitting idle\r\n\r\n**Expected behavior**\r\nShouldn't have the alert\r\n\r\n**Screenshots**\r\n\r\n**Logs**\r\n\r\n**Version and Deployment (please complete the following information):**\r\n2024.4.0 kubernetes\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"System Migration handler\"\"\"\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom inspect import getmembers, isclass\nfrom os import environ, system\nfrom pathlib import Path\nfrom typing import Any\n\nfrom psycopg import Connection, Cursor, connect\nfrom structlog.stdlib import get_logger\n\nfrom authentik.lib.config import CONFIG\n\nLOGGER = get_logger()\nADV_LOCK_UID = 1000\nLOCKED = False\n\n\nclass CommandError(Exception):\n \"\"\"Error raised when a system_crit command fails\"\"\"\n\n\nclass BaseMigration:\n \"\"\"Base System Migration\"\"\"\n\n cur: Cursor\n con: Connection\n\n def __init__(self, cur: Any, con: Any):\n self.cur = cur\n self.con = con\n\n def system_crit(self, command: str):\n \"\"\"Run system command\"\"\"\n LOGGER.debug(\"Running system_crit command\", command=command)\n retval = system(command) # nosec\n if retval != 0:\n raise CommandError(\"Migration error\")\n\n def fake_migration(self, *app_migration: tuple[str, str]):\n \"\"\"Fake apply a list of migrations, arguments are\n expected to be tuples of (app_label, migration_name)\"\"\"\n for app, _migration in app_migration:\n self.system_crit(f\"./manage.py migrate {app} {_migration} --fake\")\n\n def needs_migration(self) -> bool:\n \"\"\"Return true if Migration needs to be run\"\"\"\n return False\n\n def run(self):\n \"\"\"Run the actual migration\"\"\"\n\n\ndef wait_for_lock(cursor: Cursor):\n \"\"\"lock an advisory lock to prevent multiple instances from migrating at once\"\"\"\n LOGGER.info(\"waiting to acquire database lock\")\n cursor.execute(\"SELECT pg_advisory_lock(%s)\", (ADV_LOCK_UID,))\n\n global LOCKED # noqa: PLW0603\n LOCKED = True\n\n\ndef release_lock(cursor: Cursor):\n \"\"\"Release database lock\"\"\"\n if not LOCKED:\n return\n LOGGER.info(\"releasing database lock\")\n cursor.execute(\"SELECT pg_advisory_unlock(%s)\", (ADV_LOCK_UID,))\n\n\ndef run_migrations():\n conn = connect(\n dbname=CONFIG.get(\"postgresql.name\"),\n user=CONFIG.get(\"postgresql.user\"),\n password=CONFIG.get(\"postgresql.password\"),\n host=CONFIG.get(\"postgresql.host\"),\n port=CONFIG.get_int(\"postgresql.port\"),\n sslmode=CONFIG.get(\"postgresql.sslmode\"),\n sslrootcert=CONFIG.get(\"postgresql.sslrootcert\"),\n sslcert=CONFIG.get(\"postgresql.sslcert\"),\n sslkey=CONFIG.get(\"postgresql.sslkey\"),\n )\n curr = conn.cursor()\n try:\n for migration_path in Path(__file__).parent.absolute().glob(\"system_migrations/*.py\"):\n spec = spec_from_file_location(\"lifecycle.system_migrations\", migration_path)\n if not spec:\n continue\n mod = module_from_spec(spec)\n spec.loader.exec_module(mod)\n\n for name, sub in getmembers(mod, isclass):\n if name != 
\"Migration\":\n continue\n migration = sub(curr, conn)\n if migration.needs_migration():\n wait_for_lock(curr)\n LOGGER.info(\"Migration needs to be applied\", migration=migration_path.name)\n migration.run()\n LOGGER.info(\"Migration finished applying\", migration=migration_path.name)\n release_lock(curr)\n LOGGER.info(\"applying django migrations\")\n environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"authentik.root.settings\")\n wait_for_lock(curr)\n try:\n from django.core.management import execute_from_command_line\n except ImportError as exc:\n raise ImportError(\n \"Couldn't import Django. Are you sure it's installed and \"\n \"available on your PYTHONPATH environment variable? Did you \"\n \"forget to activate a virtual environment?\"\n ) from exc\n execute_from_command_line([\"\", \"migrate_schemas\"])\n execute_from_command_line([\"\", \"migrate_schemas\", \"--schema\", \"template\", \"--tenant\"])\n execute_from_command_line(\n [\"\", \"check\"] + ([] if CONFIG.get_bool(\"debug\") else [\"--deploy\"])\n )\n finally:\n release_lock(curr)\n\n\nif __name__ == \"__main__\":\n run_migrations()\n", "path": "lifecycle/migrate.py"}]}
| 1,814 | 75 |
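The two added lines close the cursor and the connection once the advisory lock has been released, so the process no longer holds an idle session that Prometheus reports as a long-running `SELECT pg_advisory_unlock($1)` transaction. A generic sketch of the same acquire/release/close pattern with `psycopg` follows; the connection string and lock id are placeholders, not a real deployment's values.

```py
# Sketch: advisory-lock guarded work that always releases the lock and
# closes the cursor/connection, so no idle transaction is left behind.
from psycopg import connect

ADV_LOCK_UID = 1000  # placeholder lock id


def run_with_advisory_lock(dsn="dbname=app user=app password=app host=localhost"):
    """Acquire the lock, do the guarded work, then release and close everything."""
    conn = connect(dsn)
    curr = conn.cursor()
    try:
        curr.execute("SELECT pg_advisory_lock(%s)", (ADV_LOCK_UID,))
        # ... run migrations or other guarded work here ...
    finally:
        curr.execute("SELECT pg_advisory_unlock(%s)", (ADV_LOCK_UID,))
        curr.close()  # matches the fix: close the cursor ...
        conn.close()  # ... and the connection, so no idle session remains
```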
gh_patches_debug_31693
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-10923
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Security Vulnerability
Please check it here https://huntr.com/bounties/e3d7a994-bfd6-4772-ac9b-9aee1aa16a5f/
</issue>
<code>
[start of mlflow/store/artifact/local_artifact_repo.py]
1 import os
2 import shutil
3
4 from mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path
5 from mlflow.utils.file_utils import (
6 get_file_info,
7 list_all,
8 local_file_uri_to_path,
9 mkdir,
10 relative_path_to_artifact_path,
11 )
12
13
14 class LocalArtifactRepository(ArtifactRepository):
15 """Stores artifacts as files in a local directory."""
16
17 def __init__(self, *args, **kwargs):
18 super().__init__(*args, **kwargs)
19 self._artifact_dir = local_file_uri_to_path(self.artifact_uri)
20
21 @property
22 def artifact_dir(self):
23 return self._artifact_dir
24
25 def log_artifact(self, local_file, artifact_path=None):
26 verify_artifact_path(artifact_path)
27 # NOTE: The artifact_path is expected to be in posix format.
28 # Posix paths work fine on windows but just in case we normalize it here.
29 if artifact_path:
30 artifact_path = os.path.normpath(artifact_path)
31
32 artifact_dir = (
33 os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir
34 )
35 if not os.path.exists(artifact_dir):
36 mkdir(artifact_dir)
37 try:
38 shutil.copy2(local_file, os.path.join(artifact_dir, os.path.basename(local_file)))
39 except shutil.SameFileError:
40 pass
41
42 def _is_directory(self, artifact_path):
43 # NOTE: The path is expected to be in posix format.
44 # Posix paths work fine on windows but just in case we normalize it here.
45 path = os.path.normpath(artifact_path) if artifact_path else ""
46 list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir
47 return os.path.isdir(list_dir)
48
49 def log_artifacts(self, local_dir, artifact_path=None):
50 verify_artifact_path(artifact_path)
51 # NOTE: The artifact_path is expected to be in posix format.
52 # Posix paths work fine on windows but just in case we normalize it here.
53 if artifact_path:
54 artifact_path = os.path.normpath(artifact_path)
55 artifact_dir = (
56 os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir
57 )
58 if not os.path.exists(artifact_dir):
59 mkdir(artifact_dir)
60 shutil.copytree(src=local_dir, dst=artifact_dir, dirs_exist_ok=True)
61
62 def download_artifacts(self, artifact_path, dst_path=None):
63 """
64 Artifacts tracked by ``LocalArtifactRepository`` already exist on the local filesystem.
65 If ``dst_path`` is ``None``, the absolute filesystem path of the specified artifact is
66 returned. If ``dst_path`` is not ``None``, the local artifact is copied to ``dst_path``.
67
68 :param artifact_path: Relative source path to the desired artifacts.
69 :param dst_path: Absolute path of the local filesystem destination directory to which to
70 download the specified artifacts. This directory must already exist. If
71 unspecified, the absolute path of the local artifact will be returned.
72
73 :return: Absolute path of the local filesystem location containing the desired artifacts.
74 """
75 if dst_path:
76 return super().download_artifacts(artifact_path, dst_path)
77 # NOTE: The artifact_path is expected to be in posix format.
78 # Posix paths work fine on windows but just in case we normalize it here.
79 local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))
80 if not os.path.exists(local_artifact_path):
81 raise OSError(f"No such file or directory: '{local_artifact_path}'")
82 return os.path.abspath(local_artifact_path)
83
84 def list_artifacts(self, path=None):
85 # NOTE: The path is expected to be in posix format.
86 # Posix paths work fine on windows but just in case we normalize it here.
87 if path:
88 path = os.path.normpath(path)
89 list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir
90 if os.path.isdir(list_dir):
91 artifact_files = list_all(list_dir, full_path=True)
92 infos = [
93 get_file_info(
94 f, relative_path_to_artifact_path(os.path.relpath(f, self.artifact_dir))
95 )
96 for f in artifact_files
97 ]
98 return sorted(infos, key=lambda f: f.path)
99 else:
100 return []
101
102 def _download_file(self, remote_file_path, local_path):
103 # NOTE: The remote_file_path is expected to be in posix format.
104 # Posix paths work fine on windows but just in case we normalize it here.
105 remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))
106 shutil.copy2(remote_file_path, local_path)
107
108 def delete_artifacts(self, artifact_path=None):
109 artifact_path = local_file_uri_to_path(
110 os.path.join(self._artifact_dir, artifact_path) if artifact_path else self._artifact_dir
111 )
112
113 if os.path.exists(artifact_path):
114 shutil.rmtree(artifact_path)
115
[end of mlflow/store/artifact/local_artifact_repo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mlflow/store/artifact/local_artifact_repo.py b/mlflow/store/artifact/local_artifact_repo.py
--- a/mlflow/store/artifact/local_artifact_repo.py
+++ b/mlflow/store/artifact/local_artifact_repo.py
@@ -9,6 +9,7 @@
mkdir,
relative_path_to_artifact_path,
)
+from mlflow.utils.uri import validate_path_is_safe
class LocalArtifactRepository(ArtifactRepository):
@@ -74,8 +75,9 @@
"""
if dst_path:
return super().download_artifacts(artifact_path, dst_path)
- # NOTE: The artifact_path is expected to be in posix format.
+ # NOTE: The artifact_path is expected to be a relative path in posix format.
# Posix paths work fine on windows but just in case we normalize it here.
+ artifact_path = validate_path_is_safe(artifact_path)
local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))
if not os.path.exists(local_artifact_path):
raise OSError(f"No such file or directory: '{local_artifact_path}'")
@@ -100,8 +102,9 @@
return []
def _download_file(self, remote_file_path, local_path):
- # NOTE: The remote_file_path is expected to be in posix format.
+ # NOTE: The remote_file_path is expected to be a relative path in posix format.
# Posix paths work fine on windows but just in case we normalize it here.
+ remote_file_path = validate_path_is_safe(remote_file_path)
remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))
shutil.copy2(remote_file_path, local_path)
|
{"golden_diff": "diff --git a/mlflow/store/artifact/local_artifact_repo.py b/mlflow/store/artifact/local_artifact_repo.py\n--- a/mlflow/store/artifact/local_artifact_repo.py\n+++ b/mlflow/store/artifact/local_artifact_repo.py\n@@ -9,6 +9,7 @@\n mkdir,\n relative_path_to_artifact_path,\n )\n+from mlflow.utils.uri import validate_path_is_safe\n \n \n class LocalArtifactRepository(ArtifactRepository):\n@@ -74,8 +75,9 @@\n \"\"\"\n if dst_path:\n return super().download_artifacts(artifact_path, dst_path)\n- # NOTE: The artifact_path is expected to be in posix format.\n+ # NOTE: The artifact_path is expected to be a relative path in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n+ artifact_path = validate_path_is_safe(artifact_path)\n local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))\n if not os.path.exists(local_artifact_path):\n raise OSError(f\"No such file or directory: '{local_artifact_path}'\")\n@@ -100,8 +102,9 @@\n return []\n \n def _download_file(self, remote_file_path, local_path):\n- # NOTE: The remote_file_path is expected to be in posix format.\n+ # NOTE: The remote_file_path is expected to be a relative path in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n+ remote_file_path = validate_path_is_safe(remote_file_path)\n remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))\n shutil.copy2(remote_file_path, local_path)\n", "issue": "[BUG] Security Vulnerability\nPlease check it here https://huntr.com/bounties/e3d7a994-bfd6-4772-ac9b-9aee1aa16a5f/\n", "before_files": [{"content": "import os\nimport shutil\n\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository, verify_artifact_path\nfrom mlflow.utils.file_utils import (\n get_file_info,\n list_all,\n local_file_uri_to_path,\n mkdir,\n relative_path_to_artifact_path,\n)\n\n\nclass LocalArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts as files in a local directory.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._artifact_dir = local_file_uri_to_path(self.artifact_uri)\n\n @property\n def artifact_dir(self):\n return self._artifact_dir\n\n def log_artifact(self, local_file, artifact_path=None):\n verify_artifact_path(artifact_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if artifact_path:\n artifact_path = os.path.normpath(artifact_path)\n\n artifact_dir = (\n os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir\n )\n if not os.path.exists(artifact_dir):\n mkdir(artifact_dir)\n try:\n shutil.copy2(local_file, os.path.join(artifact_dir, os.path.basename(local_file)))\n except shutil.SameFileError:\n pass\n\n def _is_directory(self, artifact_path):\n # NOTE: The path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n path = os.path.normpath(artifact_path) if artifact_path else \"\"\n list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir\n return os.path.isdir(list_dir)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n verify_artifact_path(artifact_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if artifact_path:\n artifact_path = os.path.normpath(artifact_path)\n artifact_dir = (\n 
os.path.join(self.artifact_dir, artifact_path) if artifact_path else self.artifact_dir\n )\n if not os.path.exists(artifact_dir):\n mkdir(artifact_dir)\n shutil.copytree(src=local_dir, dst=artifact_dir, dirs_exist_ok=True)\n\n def download_artifacts(self, artifact_path, dst_path=None):\n \"\"\"\n Artifacts tracked by ``LocalArtifactRepository`` already exist on the local filesystem.\n If ``dst_path`` is ``None``, the absolute filesystem path of the specified artifact is\n returned. If ``dst_path`` is not ``None``, the local artifact is copied to ``dst_path``.\n\n :param artifact_path: Relative source path to the desired artifacts.\n :param dst_path: Absolute path of the local filesystem destination directory to which to\n download the specified artifacts. This directory must already exist. If\n unspecified, the absolute path of the local artifact will be returned.\n\n :return: Absolute path of the local filesystem location containing the desired artifacts.\n \"\"\"\n if dst_path:\n return super().download_artifacts(artifact_path, dst_path)\n # NOTE: The artifact_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n local_artifact_path = os.path.join(self.artifact_dir, os.path.normpath(artifact_path))\n if not os.path.exists(local_artifact_path):\n raise OSError(f\"No such file or directory: '{local_artifact_path}'\")\n return os.path.abspath(local_artifact_path)\n\n def list_artifacts(self, path=None):\n # NOTE: The path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n if path:\n path = os.path.normpath(path)\n list_dir = os.path.join(self.artifact_dir, path) if path else self.artifact_dir\n if os.path.isdir(list_dir):\n artifact_files = list_all(list_dir, full_path=True)\n infos = [\n get_file_info(\n f, relative_path_to_artifact_path(os.path.relpath(f, self.artifact_dir))\n )\n for f in artifact_files\n ]\n return sorted(infos, key=lambda f: f.path)\n else:\n return []\n\n def _download_file(self, remote_file_path, local_path):\n # NOTE: The remote_file_path is expected to be in posix format.\n # Posix paths work fine on windows but just in case we normalize it here.\n remote_file_path = os.path.join(self.artifact_dir, os.path.normpath(remote_file_path))\n shutil.copy2(remote_file_path, local_path)\n\n def delete_artifacts(self, artifact_path=None):\n artifact_path = local_file_uri_to_path(\n os.path.join(self._artifact_dir, artifact_path) if artifact_path else self._artifact_dir\n )\n\n if os.path.exists(artifact_path):\n shutil.rmtree(artifact_path)\n", "path": "mlflow/store/artifact/local_artifact_repo.py"}]}
| 1,923 | 377 |
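The fix routes user-supplied relative paths through `mlflow.utils.uri.validate_path_is_safe` before they are joined onto the artifact root, which blocks `../`-style traversal out of the artifact directory reported in the advisory. A standalone sketch of that kind of guard is below; the helper name and error type here are illustrative and are not mlflow's implementation.

```py
# Sketch of a path-traversal guard: reject absolute paths and any path
# that resolves outside the artifact root. Illustrative only.
import os


def resolve_safe_artifact_path(artifact_root, user_path):
    """Join a user-supplied relative path onto the root, refusing traversal."""
    if os.path.isabs(user_path) or ".." in user_path.replace("\\", "/").split("/"):
        raise ValueError(f"Invalid artifact path: {user_path!r}")
    root = os.path.realpath(artifact_root)
    resolved = os.path.realpath(os.path.join(root, os.path.normpath(user_path)))
    if resolved != root and not resolved.startswith(root + os.sep):
        raise ValueError(f"Artifact path escapes the root: {user_path!r}")
    return resolved
```

The same check is applied on both the download and single-file copy paths, since either one can receive attacker-controlled input.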
gh_patches_debug_16611
|
rasdani/github-patches
|
git_diff
|
ManimCommunity__manim-2011
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
-a flag not working with the OpenGL renderer
## Description of bug / unexpected behavior
<!-- Add a clear and concise description of the problem you encountered. -->
The behaviour of the -a flag is to output all scenes; however, only a single scene is output when using the OpenGL renderer.
## Expected behavior
<!-- Add a clear and concise description of what you expected to happen. -->
Expect all scenes to be previewed with the -p flag and output. I guess it is not applicable with interactive mode?
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
Run multiple scenes with the `-a` flag and the OpenGL renderer, for example `python -m manim example_scenes.py --renderer opengl -a -pql`
<details><summary>Code for reproducing the problem</summary>
```py
class SquareToCircle(Scene):
def construct(self):
circle = Circle()
circle.set_fill(PINK, opacity=0.5)
square = Square()
square.rotate(PI / 4)
self.play(Create(square))
self.play(Transform(square, circle))
self.play(FadeOut(square))
class CircleToSquare(Scene):
def construct(self):
circle = Circle()
circle.set_fill(PINK, opacity=0.5)
square = Square()
square.rotate(PI / 4)
self.play(Create(circle))
self.play(Transform(circle, square))
self.play(FadeOut(circle))
```
</details>
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
<details><summary>Images/GIFs</summary>
<!-- PASTE MEDIA HERE -->
</details>
## Logs
<details><summary>Terminal output</summary>
<!-- Add "-v DEBUG" when calling manim to generate more detailed logs -->
```
PASTE HERE OR PROVIDE LINK TO https://pastebin.com/ OR SIMILAR
```
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
</details>
## System specifications
<details><summary>System Details</summary>
- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):
- RAM:
- Python version (`python/py/python3 --version`):
- Installed modules (provide output from `pip list`):
```
PASTE HERE
```
</details>
<details><summary>LaTeX details</summary>
+ LaTeX distribution (e.g. TeX Live 2020):
+ Installed LaTeX packages:
<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->
</details>
<details><summary>FFMPEG</summary>
Output of `ffmpeg -version`:
```
PASTE HERE
```
</details>
## Additional comments
<!-- Add further context that you think might be relevant for this issue here. -->
</issue>
<code>
[start of manim/cli/render/commands.py]
1 """Manim's default subcommand, render.
2
3 Manim's render subcommand is accessed in the command-line interface via
4 ``manim``, but can be more explicitly accessed with ``manim render``. Here you
5 can specify options, and arguments for the render command.
6
7 """
8 import json
9 import sys
10 from pathlib import Path
11
12 import click
13 import cloup
14 import requests
15
16 from ... import __version__, config, console, error_console, logger
17 from ...constants import EPILOG
18 from ...utils.module_ops import scene_classes_from_file
19 from .ease_of_access_options import ease_of_access_options
20 from .global_options import global_options
21 from .output_options import output_options
22 from .render_options import render_options
23
24
25 @cloup.command(
26 context_settings=None,
27 epilog=EPILOG,
28 )
29 @click.argument("file", type=Path, required=True)
30 @click.argument("scene_names", required=False, nargs=-1)
31 @global_options
32 @output_options
33 @render_options
34 @ease_of_access_options
35 def render(
36 **args,
37 ):
38 """Render SCENE(S) from the input FILE.
39
40 FILE is the file path of the script.
41
42 SCENES is an optional list of scenes in the file.
43 """
44
45 if args["use_opengl_renderer"]:
46 logger.warning(
47 "--use_opengl_renderer is deprecated, please use --renderer=opengl instead!",
48 )
49 args["renderer"] = "opengl"
50
51 if args["use_webgl_renderer"]:
52 logger.warning(
53 "--use_webgl_renderer is deprecated, please use --renderer=webgl instead!",
54 )
55 args["renderer"] = "webgl"
56
57 if args["use_webgl_renderer"] and args["use_opengl_renderer"]:
58 logger.warning("You may select only one renderer!")
59 sys.exit()
60
61 if args["save_as_gif"]:
62 logger.warning("--save_as_gif is deprecated, please use --format=gif instead!")
63 args["format"] = "gif"
64
65 if args["save_pngs"]:
66 logger.warning("--save_pngs is deprecated, please use --format=png instead!")
67 args["format"] = "png"
68
69 if args["show_in_file_browser"]:
70 logger.warning(
71 "The short form of show_in_file_browser is deprecated and will be moved to support --format.",
72 )
73
74 class ClickArgs:
75 def __init__(self, args):
76 for name in args:
77 setattr(self, name, args[name])
78
79 def _get_kwargs(self):
80 return list(self.__dict__.items())
81
82 def __eq__(self, other):
83 if not isinstance(other, ClickArgs):
84 return NotImplemented
85 return vars(self) == vars(other)
86
87 def __contains__(self, key):
88 return key in self.__dict__
89
90 def __repr__(self):
91 return str(self.__dict__)
92
93 click_args = ClickArgs(args)
94 if args["jupyter"]:
95 return click_args
96
97 config.digest_args(click_args)
98 file = args["file"]
99 if config.renderer == "opengl":
100 from manim.renderer.opengl_renderer import OpenGLRenderer
101
102 try:
103 renderer = OpenGLRenderer()
104 keep_running = True
105 while keep_running:
106 for SceneClass in scene_classes_from_file(file):
107 scene = SceneClass(renderer)
108 status = scene.render()
109 if status:
110 continue
111 else:
112 keep_running = False
113 break
114 except Exception:
115 error_console.print_exception()
116 sys.exit(1)
117 elif config.renderer == "webgl":
118 try:
119 from manim.grpc.impl import frame_server_impl
120
121 server = frame_server_impl.get(file)
122 server.start()
123 server.wait_for_termination()
124 except ModuleNotFoundError:
125 console.print(
126 "Dependencies for the WebGL render are missing. Run "
127 "pip install manim[webgl_renderer] to install them.",
128 )
129 error_console.print_exception()
130 sys.exit(1)
131 else:
132 for SceneClass in scene_classes_from_file(file):
133 try:
134 scene = SceneClass()
135 scene.render()
136 except Exception:
137 error_console.print_exception()
138 sys.exit(1)
139
140 if config.notify_outdated_version:
141 manim_info_url = "https://pypi.org/pypi/manim/json"
142 warn_prompt = "Cannot check if latest release of manim is installed"
143 req_info = {}
144
145 try:
146 req_info = requests.get(manim_info_url)
147 req_info.raise_for_status()
148
149 stable = req_info.json()["info"]["version"]
150 if stable != __version__:
151 console.print(
152 f"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.",
153 )
154 console.print(
155 "You should consider upgrading via [yellow]pip install -U manim[/yellow]",
156 )
157 except requests.exceptions.HTTPError:
158 logger.debug(f"HTTP Error: {warn_prompt}")
159 except requests.exceptions.ConnectionError:
160 logger.debug(f"Connection Error: {warn_prompt}")
161 except requests.exceptions.Timeout:
162 logger.debug(f"Timed Out: {warn_prompt}")
163 except json.JSONDecodeError:
164 logger.debug(warn_prompt)
165 logger.debug(f"Error decoding JSON from {manim_info_url}")
166 except Exception:
167 logger.debug(f"Something went wrong: {warn_prompt}")
168
169 return args
170
[end of manim/cli/render/commands.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py
--- a/manim/cli/render/commands.py
+++ b/manim/cli/render/commands.py
@@ -105,12 +105,16 @@
while keep_running:
for SceneClass in scene_classes_from_file(file):
scene = SceneClass(renderer)
- status = scene.render()
- if status:
+ rerun = scene.render()
+ if rerun or config["write_all"]:
+ renderer.num_plays = 0
continue
else:
keep_running = False
break
+ if config["write_all"]:
+ keep_running = False
+
except Exception:
error_console.print_exception()
sys.exit(1)
|
{"golden_diff": "diff --git a/manim/cli/render/commands.py b/manim/cli/render/commands.py\n--- a/manim/cli/render/commands.py\n+++ b/manim/cli/render/commands.py\n@@ -105,12 +105,16 @@\n while keep_running:\n for SceneClass in scene_classes_from_file(file):\n scene = SceneClass(renderer)\n- status = scene.render()\n- if status:\n+ rerun = scene.render()\n+ if rerun or config[\"write_all\"]:\n+ renderer.num_plays = 0\n continue\n else:\n keep_running = False\n break\n+ if config[\"write_all\"]:\n+ keep_running = False\n+\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n", "issue": "Opengl -a flag not working with opengl\n## Description of bug / unexpected behavior\r\n<!-- Add a clear and concise description of the problem you encountered. -->\r\n\r\nThe behaviour of the -a flag is to output all scenes, however only a single scene is output when using the opengl renderer\r\n\r\n## Expected behavior\r\n<!-- Add a clear and concise description of what you expected to happen. -->\r\n\r\nExpect all scenes to be previewed with the -p flag and output. I guess it is not applicable with interactive mode?\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\nRun multple scenes with the `-a` flag and opengl renderer, for example `python -m manim example_scenes.py --renderer opengl -a -pql`\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nclass SquareToCircle(Scene):\r\n def construct(self):\r\n circle = Circle()\r\n circle.set_fill(PINK, opacity=0.5)\r\n\r\n square = Square() \r\n square.rotate(PI / 4)\r\n\r\n self.play(Create(square))\r\n self.play(Transform(square, circle))\r\n self.play(FadeOut(square))\r\n\r\nclass CircleToSquare(Scene):\r\n def construct(self):\r\n circle = Circle() \r\n circle.set_fill(PINK, opacity=0.5)\r\n\r\n square = Square()\r\n square.rotate(PI / 4) \r\n\r\n self.play(Create(circle)) \r\n self.play(Transform(circle, square)) \r\n self.play(FadeOut(circle)) \r\n```\r\n\r\n</details>\r\n\r\n\r\n## Additional media files\r\n<!-- Paste in the files manim produced on rendering the code above. -->\r\n\r\n<details><summary>Images/GIFs</summary>\r\n\r\n<!-- PASTE MEDIA HERE -->\r\n\r\n</details>\r\n\r\n\r\n## Logs\r\n<details><summary>Terminal output</summary>\r\n<!-- Add \"-v DEBUG\" when calling manim to generate more detailed logs -->\r\n\r\n```\r\nPASTE HERE OR PROVIDE LINK TO https://pastebin.com/ OR SIMILAR\r\n```\r\n\r\n<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->\r\n\r\n</details>\r\n\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS (with version, e.g Windows 10 v2004 or macOS 10.15 (Catalina)):\r\n- RAM:\r\n- Python version (`python/py/python3 --version`):\r\n- Installed modules (provide output from `pip list`):\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n<details><summary>LaTeX details</summary>\r\n\r\n+ LaTeX distribution (e.g. TeX Live 2020):\r\n+ Installed LaTeX packages:\r\n<!-- output of `tlmgr list --only-installed` for TeX Live or a screenshot of the Packages page for MikTeX -->\r\n</details>\r\n\r\n<details><summary>FFMPEG</summary>\r\n\r\nOutput of `ffmpeg -version`:\r\n\r\n```\r\nPASTE HERE\r\n```\r\n</details>\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant for this issue here. 
-->\r\n\n", "before_files": [{"content": "\"\"\"Manim's default subcommand, render.\n\nManim's render subcommand is accessed in the command-line interface via\n``manim``, but can be more explicitly accessed with ``manim render``. Here you\ncan specify options, and arguments for the render command.\n\n\"\"\"\nimport json\nimport sys\nfrom pathlib import Path\n\nimport click\nimport cloup\nimport requests\n\nfrom ... import __version__, config, console, error_console, logger\nfrom ...constants import EPILOG\nfrom ...utils.module_ops import scene_classes_from_file\nfrom .ease_of_access_options import ease_of_access_options\nfrom .global_options import global_options\nfrom .output_options import output_options\nfrom .render_options import render_options\n\n\[email protected](\n context_settings=None,\n epilog=EPILOG,\n)\[email protected](\"file\", type=Path, required=True)\[email protected](\"scene_names\", required=False, nargs=-1)\n@global_options\n@output_options\n@render_options\n@ease_of_access_options\ndef render(\n **args,\n):\n \"\"\"Render SCENE(S) from the input FILE.\n\n FILE is the file path of the script.\n\n SCENES is an optional list of scenes in the file.\n \"\"\"\n\n if args[\"use_opengl_renderer\"]:\n logger.warning(\n \"--use_opengl_renderer is deprecated, please use --renderer=opengl instead!\",\n )\n args[\"renderer\"] = \"opengl\"\n\n if args[\"use_webgl_renderer\"]:\n logger.warning(\n \"--use_webgl_renderer is deprecated, please use --renderer=webgl instead!\",\n )\n args[\"renderer\"] = \"webgl\"\n\n if args[\"use_webgl_renderer\"] and args[\"use_opengl_renderer\"]:\n logger.warning(\"You may select only one renderer!\")\n sys.exit()\n\n if args[\"save_as_gif\"]:\n logger.warning(\"--save_as_gif is deprecated, please use --format=gif instead!\")\n args[\"format\"] = \"gif\"\n\n if args[\"save_pngs\"]:\n logger.warning(\"--save_pngs is deprecated, please use --format=png instead!\")\n args[\"format\"] = \"png\"\n\n if args[\"show_in_file_browser\"]:\n logger.warning(\n \"The short form of show_in_file_browser is deprecated and will be moved to support --format.\",\n )\n\n class ClickArgs:\n def __init__(self, args):\n for name in args:\n setattr(self, name, args[name])\n\n def _get_kwargs(self):\n return list(self.__dict__.items())\n\n def __eq__(self, other):\n if not isinstance(other, ClickArgs):\n return NotImplemented\n return vars(self) == vars(other)\n\n def __contains__(self, key):\n return key in self.__dict__\n\n def __repr__(self):\n return str(self.__dict__)\n\n click_args = ClickArgs(args)\n if args[\"jupyter\"]:\n return click_args\n\n config.digest_args(click_args)\n file = args[\"file\"]\n if config.renderer == \"opengl\":\n from manim.renderer.opengl_renderer import OpenGLRenderer\n\n try:\n renderer = OpenGLRenderer()\n keep_running = True\n while keep_running:\n for SceneClass in scene_classes_from_file(file):\n scene = SceneClass(renderer)\n status = scene.render()\n if status:\n continue\n else:\n keep_running = False\n break\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n elif config.renderer == \"webgl\":\n try:\n from manim.grpc.impl import frame_server_impl\n\n server = frame_server_impl.get(file)\n server.start()\n server.wait_for_termination()\n except ModuleNotFoundError:\n console.print(\n \"Dependencies for the WebGL render are missing. 
Run \"\n \"pip install manim[webgl_renderer] to install them.\",\n )\n error_console.print_exception()\n sys.exit(1)\n else:\n for SceneClass in scene_classes_from_file(file):\n try:\n scene = SceneClass()\n scene.render()\n except Exception:\n error_console.print_exception()\n sys.exit(1)\n\n if config.notify_outdated_version:\n manim_info_url = \"https://pypi.org/pypi/manim/json\"\n warn_prompt = \"Cannot check if latest release of manim is installed\"\n req_info = {}\n\n try:\n req_info = requests.get(manim_info_url)\n req_info.raise_for_status()\n\n stable = req_info.json()[\"info\"][\"version\"]\n if stable != __version__:\n console.print(\n f\"You are using manim version [red]v{__version__}[/red], but version [green]v{stable}[/green] is available.\",\n )\n console.print(\n \"You should consider upgrading via [yellow]pip install -U manim[/yellow]\",\n )\n except requests.exceptions.HTTPError:\n logger.debug(f\"HTTP Error: {warn_prompt}\")\n except requests.exceptions.ConnectionError:\n logger.debug(f\"Connection Error: {warn_prompt}\")\n except requests.exceptions.Timeout:\n logger.debug(f\"Timed Out: {warn_prompt}\")\n except json.JSONDecodeError:\n logger.debug(warn_prompt)\n logger.debug(f\"Error decoding JSON from {manim_info_url}\")\n except Exception:\n logger.debug(f\"Something went wrong: {warn_prompt}\")\n\n return args\n", "path": "manim/cli/render/commands.py"}]}
| 2,718 | 167 |
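With the OpenGL renderer the fix reuses a single renderer across scenes: when `--write_all` (`-a`) is set it resets `renderer.num_plays` after each scene so every output file starts from the first animation, keeps iterating over the remaining scene classes, and only then stops the outer `while` loop. A reduced sketch of that control flow is below; the scene classes and renderer are stand-ins rather than Manim's real objects.

```py
# Reduced sketch of the render loop with --write_all handling.
# scene_classes/renderer here are stand-ins, not Manim's actual API.
def render_all(scene_classes, renderer, write_all):
    keep_running = True
    while keep_running:
        for scene_cls in scene_classes:
            scene = scene_cls(renderer)
            rerun = scene.render()          # True means "render this scene again"
            if rerun or write_all:
                renderer.num_plays = 0      # next render starts from play 0
                continue                    # move on to the next scene (or rerun)
            keep_running = False            # normal single-scene run: stop after it
            break
        if write_all:
            keep_running = False            # all scenes written once; exit the loop
```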
gh_patches_debug_25018
|
rasdani/github-patches
|
git_diff
|
magenta__magenta-1851
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'tfds_data_dir' (GANSynth)
Hi, I got this error on the GANSynth demo Colab. How can I resolve it?

</issue>
<code>
[start of magenta/models/gansynth/gansynth_generate.py]
1 # Copyright 2020 The Magenta Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Lint as: python3
16 r"""Generate samples with a pretrained GANSynth model.
17
18 To use a config of hyperparameters and manual hparams:
19 >>> python magenta/models/gansynth/generate.py \
20 >>> --ckpt_dir=/path/to/ckpt/dir --output_dir=/path/to/output/dir \
21 >>> --midi_file=/path/to/file.mid
22
23 If a MIDI file is specified, notes are synthesized with interpolation between
24 latent vectors in time. If no MIDI file is given, a random batch of notes is
25 synthesized.
26 """
27
28 import os
29
30 import absl.flags
31 from magenta.models.gansynth.lib import flags as lib_flags
32 from magenta.models.gansynth.lib import generate_util as gu
33 from magenta.models.gansynth.lib import model as lib_model
34 from magenta.models.gansynth.lib import util
35 import tensorflow.compat.v1 as tf
36
37
38 absl.flags.DEFINE_string('ckpt_dir',
39 '/tmp/gansynth/acoustic_only',
40 'Path to the base directory of pretrained checkpoints.'
41 'The base directory should contain many '
42 '"stage_000*" subdirectories.')
43 absl.flags.DEFINE_string('output_dir',
44 '/tmp/gansynth/samples',
45 'Path to directory to save wave files.')
46 absl.flags.DEFINE_string('midi_file',
47 '',
48 'Path to a MIDI file (.mid) to synthesize.')
49 absl.flags.DEFINE_integer('batch_size', 8, 'Batch size for generation.')
50 absl.flags.DEFINE_float('secs_per_instrument', 6.0,
51 'In random interpolations, the seconds it takes to '
52 'interpolate from one instrument to another.')
53
54 FLAGS = absl.flags.FLAGS
55 tf.logging.set_verbosity(tf.logging.INFO)
56
57
58 def main(unused_argv):
59 absl.flags.FLAGS.alsologtostderr = True
60
61 # Load the model
62 flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})
63 model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)
64
65 # Make an output directory if it doesn't exist
66 output_dir = util.expand_path(FLAGS.output_dir)
67 if not tf.gfile.Exists(output_dir):
68 tf.gfile.MakeDirs(output_dir)
69
70 if FLAGS.midi_file:
71 # If a MIDI file is provided, synthesize interpolations across the clip
72 unused_ns, notes = gu.load_midi(FLAGS.midi_file)
73
74 # Distribute latent vectors linearly in time
75 z_instruments, t_instruments = gu.get_random_instruments(
76 model,
77 notes['end_times'][-1],
78 secs_per_instrument=FLAGS.secs_per_instrument)
79
80 # Get latent vectors for each note
81 z_notes = gu.get_z_notes(notes['start_times'], z_instruments, t_instruments)
82
83 # Generate audio for each note
84 print('Generating {} samples...'.format(len(z_notes)))
85 audio_notes = model.generate_samples_from_z(z_notes, notes['pitches'])
86
87 # Make a single audio clip
88 audio_clip = gu.combine_notes(audio_notes,
89 notes['start_times'],
90 notes['end_times'],
91 notes['velocities'])
92
93 # Write the wave files
94 fname = os.path.join(output_dir, 'generated_clip.wav')
95 gu.save_wav(audio_clip, fname)
96 else:
97 # Otherwise, just generate a batch of random sounds
98 waves = model.generate_samples(FLAGS.batch_size)
99 # Write the wave files
100 for i in range(len(waves)):
101 fname = os.path.join(output_dir, 'generated_{}.wav'.format(i))
102 gu.save_wav(waves[i], fname)
103
104
105 def console_entry_point():
106 tf.disable_v2_behavior()
107 tf.app.run(main)
108
109
110 if __name__ == '__main__':
111 console_entry_point()
112
[end of magenta/models/gansynth/gansynth_generate.py]
[start of magenta/version.py]
1 # Copyright 2020 The Magenta Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 r"""Separate file for storing the current version of Magenta.
16
17 Stored in a separate file so that setup.py can reference the version without
18 pulling in all the dependencies in __init__.py.
19 """
20
21 __version__ = '2.1.2'
22
[end of magenta/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/magenta/models/gansynth/gansynth_generate.py b/magenta/models/gansynth/gansynth_generate.py
--- a/magenta/models/gansynth/gansynth_generate.py
+++ b/magenta/models/gansynth/gansynth_generate.py
@@ -50,6 +50,9 @@
absl.flags.DEFINE_float('secs_per_instrument', 6.0,
'In random interpolations, the seconds it takes to '
'interpolate from one instrument to another.')
+absl.flags.DEFINE_string('tfds_data_dir',
+ 'gs://tfds-data/datasets',
+ 'Data directory for the TFDS dataset used to train.')
FLAGS = absl.flags.FLAGS
tf.logging.set_verbosity(tf.logging.INFO)
@@ -59,7 +62,11 @@
absl.flags.FLAGS.alsologtostderr = True
# Load the model
- flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})
+ flags = lib_flags.Flags(
+ {
+ 'batch_size_schedule': [FLAGS.batch_size],
+ 'tfds_data_dir': FLAGS.tfds_data_dir
+ })
model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)
# Make an output directory if it doesn't exist
diff --git a/magenta/version.py b/magenta/version.py
--- a/magenta/version.py
+++ b/magenta/version.py
@@ -18,4 +18,4 @@
pulling in all the dependencies in __init__.py.
"""
-__version__ = '2.1.2'
+__version__ = '2.1.3'
|
{"golden_diff": "diff --git a/magenta/models/gansynth/gansynth_generate.py b/magenta/models/gansynth/gansynth_generate.py\n--- a/magenta/models/gansynth/gansynth_generate.py\n+++ b/magenta/models/gansynth/gansynth_generate.py\n@@ -50,6 +50,9 @@\n absl.flags.DEFINE_float('secs_per_instrument', 6.0,\n 'In random interpolations, the seconds it takes to '\n 'interpolate from one instrument to another.')\n+absl.flags.DEFINE_string('tfds_data_dir',\n+ 'gs://tfds-data/datasets',\n+ 'Data directory for the TFDS dataset used to train.')\n \n FLAGS = absl.flags.FLAGS\n tf.logging.set_verbosity(tf.logging.INFO)\n@@ -59,7 +62,11 @@\n absl.flags.FLAGS.alsologtostderr = True\n \n # Load the model\n- flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})\n+ flags = lib_flags.Flags(\n+ {\n+ 'batch_size_schedule': [FLAGS.batch_size],\n+ 'tfds_data_dir': FLAGS.tfds_data_dir\n+ })\n model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)\n \n # Make an output directory if it doesn't exist\ndiff --git a/magenta/version.py b/magenta/version.py\n--- a/magenta/version.py\n+++ b/magenta/version.py\n@@ -18,4 +18,4 @@\n pulling in all the dependencies in __init__.py.\n \"\"\"\n \n-__version__ = '2.1.2'\n+__version__ = '2.1.3'\n", "issue": "KeyError: 'tfds_data_dir'(GANSynth)\nHi, I got this error on GANSynth demo colab . How can I resolve it?\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Generate samples with a pretrained GANSynth model.\n\nTo use a config of hyperparameters and manual hparams:\n>>> python magenta/models/gansynth/generate.py \\\n>>> --ckpt_dir=/path/to/ckpt/dir --output_dir=/path/to/output/dir \\\n>>> --midi_file=/path/to/file.mid\n\nIf a MIDI file is specified, notes are synthesized with interpolation between\nlatent vectors in time. 
If no MIDI file is given, a random batch of notes is\nsynthesized.\n\"\"\"\n\nimport os\n\nimport absl.flags\nfrom magenta.models.gansynth.lib import flags as lib_flags\nfrom magenta.models.gansynth.lib import generate_util as gu\nfrom magenta.models.gansynth.lib import model as lib_model\nfrom magenta.models.gansynth.lib import util\nimport tensorflow.compat.v1 as tf\n\n\nabsl.flags.DEFINE_string('ckpt_dir',\n '/tmp/gansynth/acoustic_only',\n 'Path to the base directory of pretrained checkpoints.'\n 'The base directory should contain many '\n '\"stage_000*\" subdirectories.')\nabsl.flags.DEFINE_string('output_dir',\n '/tmp/gansynth/samples',\n 'Path to directory to save wave files.')\nabsl.flags.DEFINE_string('midi_file',\n '',\n 'Path to a MIDI file (.mid) to synthesize.')\nabsl.flags.DEFINE_integer('batch_size', 8, 'Batch size for generation.')\nabsl.flags.DEFINE_float('secs_per_instrument', 6.0,\n 'In random interpolations, the seconds it takes to '\n 'interpolate from one instrument to another.')\n\nFLAGS = absl.flags.FLAGS\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef main(unused_argv):\n absl.flags.FLAGS.alsologtostderr = True\n\n # Load the model\n flags = lib_flags.Flags({'batch_size_schedule': [FLAGS.batch_size]})\n model = lib_model.Model.load_from_path(FLAGS.ckpt_dir, flags)\n\n # Make an output directory if it doesn't exist\n output_dir = util.expand_path(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n if FLAGS.midi_file:\n # If a MIDI file is provided, synthesize interpolations across the clip\n unused_ns, notes = gu.load_midi(FLAGS.midi_file)\n\n # Distribute latent vectors linearly in time\n z_instruments, t_instruments = gu.get_random_instruments(\n model,\n notes['end_times'][-1],\n secs_per_instrument=FLAGS.secs_per_instrument)\n\n # Get latent vectors for each note\n z_notes = gu.get_z_notes(notes['start_times'], z_instruments, t_instruments)\n\n # Generate audio for each note\n print('Generating {} samples...'.format(len(z_notes)))\n audio_notes = model.generate_samples_from_z(z_notes, notes['pitches'])\n\n # Make a single audio clip\n audio_clip = gu.combine_notes(audio_notes,\n notes['start_times'],\n notes['end_times'],\n notes['velocities'])\n\n # Write the wave files\n fname = os.path.join(output_dir, 'generated_clip.wav')\n gu.save_wav(audio_clip, fname)\n else:\n # Otherwise, just generate a batch of random sounds\n waves = model.generate_samples(FLAGS.batch_size)\n # Write the wave files\n for i in range(len(waves)):\n fname = os.path.join(output_dir, 'generated_{}.wav'.format(i))\n gu.save_wav(waves[i], fname)\n\n\ndef console_entry_point():\n tf.disable_v2_behavior()\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n", "path": "magenta/models/gansynth/gansynth_generate.py"}, {"content": "# Copyright 2020 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Separate file for storing the current version of Magenta.\n\nStored in a separate file so that setup.py 
can reference the version without\npulling in all the dependencies in __init__.py.\n\"\"\"\n\n__version__ = '2.1.2'\n", "path": "magenta/version.py"}]}
| 2,042 | 357 |
gh_patches_debug_38325
|
rasdani/github-patches
|
git_diff
|
encode__starlette-8
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Request should present a scope-like interface
The `Request` class should present a dict-like interface so that it can be used in the same way as `scope`. Should also allow it to be instantiated without a `receive` channel being set initially.
</issue>
<code>
[start of starlette/request.py]
1 from starlette.datastructures import URL, Headers, QueryParams
2 import json
3
4
5 class Request:
6 def __init__(self, scope, receive):
7 self._scope = scope
8 self._receive = receive
9 self._stream_consumed = False
10
11 @property
12 def method(self):
13 return self._scope["method"]
14
15 @property
16 def url(self):
17 if not hasattr(self, "_url"):
18 scheme = self._scope["scheme"]
19 host, port = self._scope["server"]
20 path = self._scope["path"]
21 query_string = self._scope["query_string"]
22
23 if (scheme == "http" and port != 80) or (scheme == "https" and port != 443):
24 url = "%s://%s:%s%s" % (scheme, host, port, path)
25 else:
26 url = "%s://%s%s" % (scheme, host, path)
27
28 if query_string:
29 url += "?" + query_string.decode()
30
31 self._url = URL(url)
32 return self._url
33
34 @property
35 def headers(self):
36 if not hasattr(self, "_headers"):
37 self._headers = Headers(
38 [
39 (key.decode(), value.decode())
40 for key, value in self._scope["headers"]
41 ]
42 )
43 return self._headers
44
45 @property
46 def query_params(self):
47 if not hasattr(self, "_query_params"):
48 query_string = self._scope["query_string"].decode()
49 self._query_params = QueryParams(query_string)
50 return self._query_params
51
52 async def stream(self):
53 if hasattr(self, "_body"):
54 yield self._body
55 return
56
57 if self._stream_consumed:
58 raise RuntimeError("Stream consumed")
59
60 self._stream_consumed = True
61 while True:
62 message = await self._receive()
63 if message["type"] == "http.request":
64 yield message.get("body", b"")
65 if not message.get("more_body", False):
66 break
67
68 async def body(self):
69 if not hasattr(self, "_body"):
70 body = b""
71 async for chunk in self.stream():
72 body += chunk
73 self._body = body
74 return self._body
75
76 async def json(self):
77 if not hasattr(self, "_json"):
78 body = await self.body()
79 self._json = json.loads(body)
80 return self._json
81
[end of starlette/request.py]
[start of starlette/decorators.py]
1 from starlette.request import Request
2 from starlette.response import Response
3 from starlette.types import ASGIInstance, Receive, Send, Scope
4
5
6 def asgi_application(func):
7 def app(scope: Scope) -> ASGIInstance:
8 async def awaitable(receive: Receive, send: Send) -> None:
9 request = Request(scope, receive)
10 response = func(request)
11 await response(receive, send)
12
13 return awaitable
14
15 return app
16
[end of starlette/decorators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/starlette/decorators.py b/starlette/decorators.py
--- a/starlette/decorators.py
+++ b/starlette/decorators.py
@@ -5,8 +5,10 @@
def asgi_application(func):
def app(scope: Scope) -> ASGIInstance:
+ request = Request(scope)
+
async def awaitable(receive: Receive, send: Send) -> None:
- request = Request(scope, receive)
+ request.set_receive_channel(receive)
response = func(request)
await response(receive, send)
diff --git a/starlette/request.py b/starlette/request.py
--- a/starlette/request.py
+++ b/starlette/request.py
@@ -1,19 +1,33 @@
from starlette.datastructures import URL, Headers, QueryParams
+from collections.abc import Mapping
import json
+import typing
-class Request:
- def __init__(self, scope, receive):
+class Request(Mapping):
+ def __init__(self, scope, receive=None):
self._scope = scope
self._receive = receive
self._stream_consumed = False
+ def __getitem__(self, key):
+ return self._scope[key]
+
+ def __iter__(self):
+ return iter(self._scope)
+
+ def __len__(self):
+ return len(self._scope)
+
+ def set_receive_channel(self, receive):
+ self._receive = receive
+
@property
- def method(self):
+ def method(self) -> str:
return self._scope["method"]
@property
- def url(self):
+ def url(self) -> URL:
if not hasattr(self, "_url"):
scheme = self._scope["scheme"]
host, port = self._scope["server"]
@@ -32,7 +46,7 @@
return self._url
@property
- def headers(self):
+ def headers(self) -> Headers:
if not hasattr(self, "_headers"):
self._headers = Headers(
[
@@ -43,7 +57,7 @@
return self._headers
@property
- def query_params(self):
+ def query_params(self) -> QueryParams:
if not hasattr(self, "_query_params"):
query_string = self._scope["query_string"].decode()
self._query_params = QueryParams(query_string)
@@ -57,6 +71,9 @@
if self._stream_consumed:
raise RuntimeError("Stream consumed")
+ if self._receive is None:
+ raise RuntimeError("Receive channel has not been made available")
+
self._stream_consumed = True
while True:
message = await self._receive()
|
{"golden_diff": "diff --git a/starlette/decorators.py b/starlette/decorators.py\n--- a/starlette/decorators.py\n+++ b/starlette/decorators.py\n@@ -5,8 +5,10 @@\n \n def asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n+ request = Request(scope)\n+\n async def awaitable(receive: Receive, send: Send) -> None:\n- request = Request(scope, receive)\n+ request.set_receive_channel(receive)\n response = func(request)\n await response(receive, send)\n \ndiff --git a/starlette/request.py b/starlette/request.py\n--- a/starlette/request.py\n+++ b/starlette/request.py\n@@ -1,19 +1,33 @@\n from starlette.datastructures import URL, Headers, QueryParams\n+from collections.abc import Mapping\n import json\n+import typing\n \n \n-class Request:\n- def __init__(self, scope, receive):\n+class Request(Mapping):\n+ def __init__(self, scope, receive=None):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n \n+ def __getitem__(self, key):\n+ return self._scope[key]\n+\n+ def __iter__(self):\n+ return iter(self._scope)\n+\n+ def __len__(self):\n+ return len(self._scope)\n+\n+ def set_receive_channel(self, receive):\n+ self._receive = receive\n+\n @property\n- def method(self):\n+ def method(self) -> str:\n return self._scope[\"method\"]\n \n @property\n- def url(self):\n+ def url(self) -> URL:\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n@@ -32,7 +46,7 @@\n return self._url\n \n @property\n- def headers(self):\n+ def headers(self) -> Headers:\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n@@ -43,7 +57,7 @@\n return self._headers\n \n @property\n- def query_params(self):\n+ def query_params(self) -> QueryParams:\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n@@ -57,6 +71,9 @@\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n \n+ if self._receive is None:\n+ raise RuntimeError(\"Receive channel has not been made available\")\n+\n self._stream_consumed = True\n while True:\n message = await self._receive()\n", "issue": "Request should present a scope-like interface\nThe `Request` class should present a dict-like interface so that it can be used in the same way as `scope`. 
Should also allow it to be instantiated without a `receive` channel being set initially.\n", "before_files": [{"content": "from starlette.datastructures import URL, Headers, QueryParams\nimport json\n\n\nclass Request:\n def __init__(self, scope, receive):\n self._scope = scope\n self._receive = receive\n self._stream_consumed = False\n\n @property\n def method(self):\n return self._scope[\"method\"]\n\n @property\n def url(self):\n if not hasattr(self, \"_url\"):\n scheme = self._scope[\"scheme\"]\n host, port = self._scope[\"server\"]\n path = self._scope[\"path\"]\n query_string = self._scope[\"query_string\"]\n\n if (scheme == \"http\" and port != 80) or (scheme == \"https\" and port != 443):\n url = \"%s://%s:%s%s\" % (scheme, host, port, path)\n else:\n url = \"%s://%s%s\" % (scheme, host, path)\n\n if query_string:\n url += \"?\" + query_string.decode()\n\n self._url = URL(url)\n return self._url\n\n @property\n def headers(self):\n if not hasattr(self, \"_headers\"):\n self._headers = Headers(\n [\n (key.decode(), value.decode())\n for key, value in self._scope[\"headers\"]\n ]\n )\n return self._headers\n\n @property\n def query_params(self):\n if not hasattr(self, \"_query_params\"):\n query_string = self._scope[\"query_string\"].decode()\n self._query_params = QueryParams(query_string)\n return self._query_params\n\n async def stream(self):\n if hasattr(self, \"_body\"):\n yield self._body\n return\n\n if self._stream_consumed:\n raise RuntimeError(\"Stream consumed\")\n\n self._stream_consumed = True\n while True:\n message = await self._receive()\n if message[\"type\"] == \"http.request\":\n yield message.get(\"body\", b\"\")\n if not message.get(\"more_body\", False):\n break\n\n async def body(self):\n if not hasattr(self, \"_body\"):\n body = b\"\"\n async for chunk in self.stream():\n body += chunk\n self._body = body\n return self._body\n\n async def json(self):\n if not hasattr(self, \"_json\"):\n body = await self.body()\n self._json = json.loads(body)\n return self._json\n", "path": "starlette/request.py"}, {"content": "from starlette.request import Request\nfrom starlette.response import Response\nfrom starlette.types import ASGIInstance, Receive, Send, Scope\n\n\ndef asgi_application(func):\n def app(scope: Scope) -> ASGIInstance:\n async def awaitable(receive: Receive, send: Send) -> None:\n request = Request(scope, receive)\n response = func(request)\n await response(receive, send)\n\n return awaitable\n\n return app\n", "path": "starlette/decorators.py"}]}
| 1,403 | 599 |
gh_patches_debug_16899
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmdetection-1099
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling'
I have successfully installed mmdetection with the command "pip install -v -e .", but I run into the following problem when testing it. Would anyone help me?
(lab) gpuserver@ubuntu:~/ht/labs/mmdetection-master$ python
Python 3.7.3 (default, Mar 27 2019, 22:11:17)
[GCC 7.3.0] :: Anaconda, Inc. on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> from mmdet.apis import init_detector
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/__init__.py", line 2, in <module>
from .inference import inference_detector, init_detector, show_result
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/inference.py", line 9, in <module>
from mmdet.core import get_classes
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/__init__.py", line 1, in <module>
from .anchor import * # noqa: F401, F403
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/__init__.py", line 2, in <module>
from .anchor_target import anchor_inside_flags, anchor_target
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/anchor_target.py", line 3, in <module>
from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/__init__.py", line 1, in <module>
from .assign_sampling import assign_and_sample, build_assigner, build_sampler
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py", line 3, in <module>
from . import assigners, samplers
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/__init__.py", line 2, in <module>
from .combined_sampler import CombinedSampler
File "/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py", line 1, in <module>
from ..assign_sampling import build_sampler
ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' (/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py)
</issue>
<code>
[start of mmdet/core/bbox/__init__.py]
1 from .assign_sampling import assign_and_sample, build_assigner, build_sampler
2 from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
3 from .bbox_target import bbox_target
4 from .geometry import bbox_overlaps
5 from .samplers import (BaseSampler, CombinedSampler,
6 InstanceBalancedPosSampler, IoUBalancedNegSampler,
7 PseudoSampler, RandomSampler, SamplingResult)
8 from .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,
9 bbox_mapping, bbox_mapping_back, delta2bbox,
10 distance2bbox, roi2bbox)
11
12 __all__ = [
13 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
14 'BaseSampler', 'PseudoSampler', 'RandomSampler',
15 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
16 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',
17 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',
18 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
19 'distance2bbox', 'bbox_target'
20 ]
21
[end of mmdet/core/bbox/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py
--- a/mmdet/core/bbox/__init__.py
+++ b/mmdet/core/bbox/__init__.py
@@ -1,4 +1,3 @@
-from .assign_sampling import assign_and_sample, build_assigner, build_sampler
from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner
from .bbox_target import bbox_target
from .geometry import bbox_overlaps
@@ -9,6 +8,9 @@
bbox_mapping, bbox_mapping_back, delta2bbox,
distance2bbox, roi2bbox)
+from .assign_sampling import ( # isort:skip, avoid recursive imports
+ assign_and_sample, build_assigner, build_sampler)
+
__all__ = [
'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',
'BaseSampler', 'PseudoSampler', 'RandomSampler',
|
{"golden_diff": "diff --git a/mmdet/core/bbox/__init__.py b/mmdet/core/bbox/__init__.py\n--- a/mmdet/core/bbox/__init__.py\n+++ b/mmdet/core/bbox/__init__.py\n@@ -1,4 +1,3 @@\n-from .assign_sampling import assign_and_sample, build_assigner, build_sampler\n from .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\n from .bbox_target import bbox_target\n from .geometry import bbox_overlaps\n@@ -9,6 +8,9 @@\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n \n+from .assign_sampling import ( # isort:skip, avoid recursive imports\n+ assign_and_sample, build_assigner, build_sampler)\n+\n __all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n", "issue": "ImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' \nI have successful install the mmdetection by the command \"pip install -v -e .\". But I have the problem in the test. Would anyone help me ?\r\n\r\n(lab) gpuserver@ubuntu:~/ht/labs/mmdetection-master$ python\r\nPython 3.7.3 (default, Mar 27 2019, 22:11:17) \r\n[GCC 7.3.0] :: Anaconda, Inc. on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> from mmdet.apis import init_detector\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/__init__.py\", line 2, in <module>\r\n from .inference import inference_detector, init_detector, show_result\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/apis/inference.py\", line 9, in <module>\r\n from mmdet.core import get_classes\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/__init__.py\", line 1, in <module>\r\n from .anchor import * # noqa: F401, F403\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/__init__.py\", line 2, in <module>\r\n from .anchor_target import anchor_inside_flags, anchor_target\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/anchor/anchor_target.py\", line 3, in <module>\r\n from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/__init__.py\", line 1, in <module>\r\n from .assign_sampling import assign_and_sample, build_assigner, build_sampler\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py\", line 3, in <module>\r\n from . 
import assigners, samplers\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/__init__.py\", line 2, in <module>\r\n from .combined_sampler import CombinedSampler\r\n File \"/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py\", line 1, in <module>\r\n from ..assign_sampling import build_sampler\r\nImportError: cannot import name 'build_sampler' from 'mmdet.core.bbox.assign_sampling' (/home/gpuserver/ht/labs/mmdetection-master/mmdet/core/bbox/assign_sampling.py)\n", "before_files": [{"content": "from .assign_sampling import assign_and_sample, build_assigner, build_sampler\nfrom .assigners import AssignResult, BaseAssigner, MaxIoUAssigner\nfrom .bbox_target import bbox_target\nfrom .geometry import bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n InstanceBalancedPosSampler, IoUBalancedNegSampler,\n PseudoSampler, RandomSampler, SamplingResult)\nfrom .transforms import (bbox2delta, bbox2result, bbox2roi, bbox_flip,\n bbox_mapping, bbox_mapping_back, delta2bbox,\n distance2bbox, roi2bbox)\n\n__all__ = [\n 'bbox_overlaps', 'BaseAssigner', 'MaxIoUAssigner', 'AssignResult',\n 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n 'SamplingResult', 'build_assigner', 'build_sampler', 'assign_and_sample',\n 'bbox2delta', 'delta2bbox', 'bbox_flip', 'bbox_mapping',\n 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',\n 'distance2bbox', 'bbox_target'\n]\n", "path": "mmdet/core/bbox/__init__.py"}]}
| 1,498 | 217 |
gh_patches_debug_2125
|
rasdani/github-patches
|
git_diff
|
PyGithub__PyGithub-946
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PaginatedList reversed property loses http headers
In reversed(), the 'headers' parameter is not passed to PaginatedList(), which makes some APIs not reversible. For example, get_stargazers_with_dates() requires the "Accept: application/vnd.github.v3.star+json" header in the API call.
</issue>
<code>
[start of github/PaginatedList.py]
1 # -*- coding: utf-8 -*-
2
3 ############################ Copyrights and license ############################
4 # #
5 # Copyright 2012 Vincent Jacques <[email protected]> #
6 # Copyright 2012 Zearin <[email protected]> #
7 # Copyright 2013 AKFish <[email protected]> #
8 # Copyright 2013 Bill Mill <[email protected]> #
9 # Copyright 2013 Vincent Jacques <[email protected]> #
10 # Copyright 2013 davidbrai <[email protected]> #
11 # Copyright 2014 Thialfihar <[email protected]> #
12 # Copyright 2014 Vincent Jacques <[email protected]> #
13 # Copyright 2015 Dan Vanderkam <[email protected]> #
14 # Copyright 2015 Eliot Walker <[email protected]> #
15 # Copyright 2016 Peter Buckley <[email protected]> #
16 # Copyright 2017 Jannis Gebauer <[email protected]> #
17 # Copyright 2018 Gilad Shefer <[email protected]> #
18 # Copyright 2018 Joel Koglin <[email protected]> #
19 # Copyright 2018 Wan Liuyang <[email protected]> #
20 # Copyright 2018 sfdye <[email protected]> #
21 # #
22 # This file is part of PyGithub. #
23 # http://pygithub.readthedocs.io/ #
24 # #
25 # PyGithub is free software: you can redistribute it and/or modify it under #
26 # the terms of the GNU Lesser General Public License as published by the Free #
27 # Software Foundation, either version 3 of the License, or (at your option) #
28 # any later version. #
29 # #
30 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
31 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
32 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
33 # details. #
34 # #
35 # You should have received a copy of the GNU Lesser General Public License #
36 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
37 # #
38 ################################################################################
39
40 try:
41 from urllib.parse import parse_qs
42 except ImportError:
43 from urlparse import parse_qs
44
45 import github.GithubObject
46
47
48 class PaginatedListBase:
49 def __init__(self):
50 self.__elements = list()
51
52 def __getitem__(self, index):
53 assert isinstance(index, (int, slice))
54 if isinstance(index, (int, long)):
55 self.__fetchToIndex(index)
56 return self.__elements[index]
57 else:
58 return self._Slice(self, index)
59
60 def __iter__(self):
61 for element in self.__elements:
62 yield element
63 while self._couldGrow():
64 newElements = self._grow()
65 for element in newElements:
66 yield element
67
68 def _isBiggerThan(self, index):
69 return len(self.__elements) > index or self._couldGrow()
70
71 def __fetchToIndex(self, index):
72 while len(self.__elements) <= index and self._couldGrow():
73 self._grow()
74
75 def _grow(self):
76 newElements = self._fetchNextPage()
77 self.__elements += newElements
78 return newElements
79
80 class _Slice:
81 def __init__(self, theList, theSlice):
82 self.__list = theList
83 self.__start = theSlice.start or 0
84 self.__stop = theSlice.stop
85 self.__step = theSlice.step or 1
86
87 def __iter__(self):
88 index = self.__start
89 while not self.__finished(index):
90 if self.__list._isBiggerThan(index):
91 yield self.__list[index]
92 index += self.__step
93 else:
94 return
95
96 def __finished(self, index):
97 return self.__stop is not None and index >= self.__stop
98
99
100 class PaginatedList(PaginatedListBase):
101 """
102 This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.
103
104 You can simply enumerate through instances of this class::
105
106 for repo in user.get_repos():
107 print(repo.name)
108
109 If you want to know the total number of items in the list::
110
111 print(user.get_repos().totalCount)
112 print(len(user.get_repos()))
113
114 You can also index them or take slices::
115
116 second_repo = user.get_repos()[1]
117 first_repos = user.get_repos()[:10]
118
119 If you want to iterate in reversed order, just do::
120
121 for repo in user.get_repos().reversed:
122 print(repo.name)
123
124 And if you really need it, you can explicitly access a specific page::
125
126 some_repos = user.get_repos().get_page(0)
127 some_other_repos = user.get_repos().get_page(3)
128 """
129
130 def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item="items"):
131 PaginatedListBase.__init__(self)
132 self.__requester = requester
133 self.__contentClass = contentClass
134 self.__firstUrl = firstUrl
135 self.__firstParams = firstParams or ()
136 self.__nextUrl = firstUrl
137 self.__nextParams = firstParams or {}
138 self.__headers = headers
139 self.__list_item = list_item
140 if self.__requester.per_page != 30:
141 self.__nextParams["per_page"] = self.__requester.per_page
142 self._reversed = False
143 self.__totalCount = None
144
145 @property
146 def totalCount(self):
147 if not self.__totalCount:
148 params = {} if self.__nextParams is None else self.__nextParams.copy()
149 # set per_page = 1 so the totalCount is just the number of pages
150 params.update({"per_page": 1})
151 headers, data = self.__requester.requestJsonAndCheck(
152 "GET",
153 self.__firstUrl,
154 parameters=params,
155 headers=self.__headers
156 )
157 if 'link' not in headers:
158 self.__totalCount = len(data) if data else 0
159 else:
160 links = self.__parseLinkHeader(headers)
161 lastUrl = links.get("last")
162 self.__totalCount = int(parse_qs(lastUrl)['page'][0])
163 return self.__totalCount
164
165 def _getLastPageUrl(self):
166 headers, data = self.__requester.requestJsonAndCheck(
167 "GET",
168 self.__firstUrl,
169 parameters=self.__nextParams,
170 headers=self.__headers
171 )
172 links = self.__parseLinkHeader(headers)
173 lastUrl = links.get("last")
174 return lastUrl
175
176 @property
177 def reversed(self):
178 r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
179 r.__reverse()
180 return r
181
182 def __reverse(self):
183 self._reversed = True
184 lastUrl = self._getLastPageUrl()
185 if lastUrl:
186 self.__nextUrl = lastUrl
187
188 def _couldGrow(self):
189 return self.__nextUrl is not None
190
191 def _fetchNextPage(self):
192 headers, data = self.__requester.requestJsonAndCheck(
193 "GET",
194 self.__nextUrl,
195 parameters=self.__nextParams,
196 headers=self.__headers
197 )
198 data = data if data else []
199
200 self.__nextUrl = None
201 if len(data) > 0:
202 links = self.__parseLinkHeader(headers)
203 if self._reversed:
204 if "prev" in links:
205 self.__nextUrl = links["prev"]
206 elif "next" in links:
207 self.__nextUrl = links["next"]
208 self.__nextParams = None
209
210 if self.__list_item in data:
211 self.__totalCount = data.get('total_count')
212 data = data[self.__list_item]
213
214 content = [
215 self.__contentClass(self.__requester, headers, element, completed=False)
216 for element in data if element is not None
217 ]
218 if self._reversed:
219 return content[::-1]
220 return content
221
222 def __parseLinkHeader(self, headers):
223 links = {}
224 if "link" in headers:
225 linkHeaders = headers["link"].split(", ")
226 for linkHeader in linkHeaders:
227 (url, rel) = linkHeader.split("; ")
228 url = url[1:-1]
229 rel = rel[5:-1]
230 links[rel] = url
231 return links
232
233 def get_page(self, page):
234 params = dict(self.__firstParams)
235 if page != 0:
236 params["page"] = page + 1
237 if self.__requester.per_page != 30:
238 params["per_page"] = self.__requester.per_page
239 headers, data = self.__requester.requestJsonAndCheck(
240 "GET",
241 self.__firstUrl,
242 parameters=params,
243 headers=self.__headers
244 )
245
246 if self.__list_item in data:
247 self.__totalCount = data.get('total_count')
248 data = data[self.__list_item]
249
250 return [
251 self.__contentClass(self.__requester, headers, element, completed=False)
252 for element in data
253 ]
254
[end of github/PaginatedList.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/github/PaginatedList.py b/github/PaginatedList.py
--- a/github/PaginatedList.py
+++ b/github/PaginatedList.py
@@ -175,7 +175,7 @@
@property
def reversed(self):
- r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)
+ r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item)
r.__reverse()
return r
|
{"golden_diff": "diff --git a/github/PaginatedList.py b/github/PaginatedList.py\n--- a/github/PaginatedList.py\n+++ b/github/PaginatedList.py\n@@ -175,7 +175,7 @@\n \n @property\n def reversed(self):\n- r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)\n+ r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams, self.__headers, self.__list_item)\n r.__reverse()\n return r\n", "issue": "PaginatedList reversed property loses http headers\nIn reversed(), 'headers' parameter is not passed to PaginatedList(). It makes some APIs not reversible. For example, get_stargazers_with_dates() which requires \"Accept: application/vnd.github.v3.star+json\" header in the API call.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\ntry:\n from urllib.parse import parse_qs\nexcept ImportError:\n from urlparse import parse_qs\n\nimport github.GithubObject\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, (int, long)):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n for element in self.__elements:\n yield element\n while self._couldGrow():\n newElements = self._grow()\n for element in newElements:\n yield element\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <http://developer.github.com/v3/#pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n print(user.get_repos().totalCount)\n print(len(user.get_repos()))\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(self, contentClass, requester, firstUrl, firstParams, headers=None, list_item=\"items\"):\n PaginatedListBase.__init__(self)\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n if 'link' not in headers:\n self.__totalCount = len(data) if data else 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n self.__totalCount = int(parse_qs(lastUrl)['page'][0])\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = 
self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(self.__contentClass, self.__requester, self.__firstUrl, self.__firstParams)\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__nextUrl,\n parameters=self.__nextParams,\n headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n (url, rel) = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = self.__requester.per_page\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\",\n self.__firstUrl,\n parameters=params,\n headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get('total_count')\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py"}]}
| 3,349 | 132 |
gh_patches_debug_2947
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-878
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't create Invites
**Describe the bug**
When creating a new invite, the following appears:

The rest of the page is blank.
This started appearing after the last update I did a few days ago (I don't know at which commit exactly, sorry) and didn't change with the latest one.
**Additional context**
It doesn't matter what I set for Expiry and Use limit.
Also, there's an invite in the list with "Max uses: None", and I'm not sure where it comes from.
</issue>
<code>
[start of bookwyrm/forms.py]
1 """ using django model forms """
2 import datetime
3 from collections import defaultdict
4
5 from django import forms
6 from django.forms import ModelForm, PasswordInput, widgets
7 from django.forms.widgets import Textarea
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10
11 from bookwyrm import models
12
13
14 class CustomForm(ModelForm):
15 """ add css classes to the forms """
16
17 def __init__(self, *args, **kwargs):
18 css_classes = defaultdict(lambda: "")
19 css_classes["text"] = "input"
20 css_classes["password"] = "input"
21 css_classes["email"] = "input"
22 css_classes["number"] = "input"
23 css_classes["checkbox"] = "checkbox"
24 css_classes["textarea"] = "textarea"
25 super(CustomForm, self).__init__(*args, **kwargs)
26 for visible in self.visible_fields():
27 if hasattr(visible.field.widget, "input_type"):
28 input_type = visible.field.widget.input_type
29 if isinstance(visible.field.widget, Textarea):
30 input_type = "textarea"
31 visible.field.widget.attrs["cols"] = None
32 visible.field.widget.attrs["rows"] = None
33 visible.field.widget.attrs["class"] = css_classes[input_type]
34
35
36 # pylint: disable=missing-class-docstring
37 class LoginForm(CustomForm):
38 class Meta:
39 model = models.User
40 fields = ["localname", "password"]
41 help_texts = {f: None for f in fields}
42 widgets = {
43 "password": PasswordInput(),
44 }
45
46
47 class RegisterForm(CustomForm):
48 class Meta:
49 model = models.User
50 fields = ["localname", "email", "password"]
51 help_texts = {f: None for f in fields}
52 widgets = {"password": PasswordInput()}
53
54
55 class RatingForm(CustomForm):
56 class Meta:
57 model = models.ReviewRating
58 fields = ["user", "book", "rating", "privacy"]
59
60
61 class ReviewForm(CustomForm):
62 class Meta:
63 model = models.Review
64 fields = [
65 "user",
66 "book",
67 "name",
68 "content",
69 "rating",
70 "content_warning",
71 "sensitive",
72 "privacy",
73 ]
74
75
76 class CommentForm(CustomForm):
77 class Meta:
78 model = models.Comment
79 fields = [
80 "user",
81 "book",
82 "content",
83 "content_warning",
84 "sensitive",
85 "privacy",
86 "progress",
87 "progress_mode",
88 ]
89
90
91 class QuotationForm(CustomForm):
92 class Meta:
93 model = models.Quotation
94 fields = [
95 "user",
96 "book",
97 "quote",
98 "content",
99 "content_warning",
100 "sensitive",
101 "privacy",
102 ]
103
104
105 class ReplyForm(CustomForm):
106 class Meta:
107 model = models.Status
108 fields = [
109 "user",
110 "content",
111 "content_warning",
112 "sensitive",
113 "reply_parent",
114 "privacy",
115 ]
116
117
118 class StatusForm(CustomForm):
119 class Meta:
120 model = models.Status
121 fields = ["user", "content", "content_warning", "sensitive", "privacy"]
122
123
124 class EditUserForm(CustomForm):
125 class Meta:
126 model = models.User
127 fields = [
128 "avatar",
129 "name",
130 "email",
131 "summary",
132 "show_goal",
133 "manually_approves_followers",
134 "discoverable",
135 "preferred_timezone",
136 ]
137 help_texts = {f: None for f in fields}
138
139
140 class LimitedEditUserForm(CustomForm):
141 class Meta:
142 model = models.User
143 fields = [
144 "avatar",
145 "name",
146 "summary",
147 "manually_approves_followers",
148 "discoverable",
149 ]
150 help_texts = {f: None for f in fields}
151
152
153 class TagForm(CustomForm):
154 class Meta:
155 model = models.Tag
156 fields = ["name"]
157 help_texts = {f: None for f in fields}
158 labels = {"name": "Add a tag"}
159
160
161 class CoverForm(CustomForm):
162 class Meta:
163 model = models.Book
164 fields = ["cover"]
165 help_texts = {f: None for f in fields}
166
167
168 class EditionForm(CustomForm):
169 class Meta:
170 model = models.Edition
171 exclude = [
172 "remote_id",
173 "origin_id",
174 "created_date",
175 "updated_date",
176 "edition_rank",
177 "authors",
178 "parent_work",
179 "shelves",
180 "subjects", # TODO
181 "subject_places", # TODO
182 "connector",
183 ]
184
185
186 class AuthorForm(CustomForm):
187 class Meta:
188 model = models.Author
189 exclude = [
190 "remote_id",
191 "origin_id",
192 "created_date",
193 "updated_date",
194 ]
195
196
197 class ImportForm(forms.Form):
198 csv_file = forms.FileField()
199
200
201 class ExpiryWidget(widgets.Select):
202 def value_from_datadict(self, data, files, name):
203 """ human-readable exiration time buckets """
204 selected_string = super().value_from_datadict(data, files, name)
205
206 if selected_string == "day":
207 interval = datetime.timedelta(days=1)
208 elif selected_string == "week":
209 interval = datetime.timedelta(days=7)
210 elif selected_string == "month":
211 interval = datetime.timedelta(days=31) # Close enough?
212 elif selected_string == "forever":
213 return None
214 else:
215 return selected_string # "This will raise
216
217 return timezone.now() + interval
218
219
220 class InviteRequestForm(CustomForm):
221 def clean(self):
222 """ make sure the email isn't in use by a registered user """
223 cleaned_data = super().clean()
224 email = cleaned_data.get("email")
225 if email and models.User.objects.filter(email=email).exists():
226 self.add_error("email", _("A user with this email already exists."))
227
228 class Meta:
229 model = models.InviteRequest
230 fields = ["email"]
231
232
233 class CreateInviteForm(CustomForm):
234 class Meta:
235 model = models.SiteInvite
236 exclude = ["code", "user", "times_used"]
237 widgets = {
238 "expiry": ExpiryWidget(
239 choices=[
240 ("day", _("One Day")),
241 ("week", _("One Week")),
242 ("month", _("One Month")),
243 ("forever", _("Does Not Expire")),
244 ]
245 ),
246 "use_limit": widgets.Select(
247 choices=[
248 (i, _("%(count)d uses" % {"count": i}))
249 for i in [1, 5, 10, 25, 50, 100]
250 ]
251 + [(None, _("Unlimited"))]
252 ),
253 }
254
255
256 class ShelfForm(CustomForm):
257 class Meta:
258 model = models.Shelf
259 fields = ["user", "name", "privacy"]
260
261
262 class GoalForm(CustomForm):
263 class Meta:
264 model = models.AnnualGoal
265 fields = ["user", "year", "goal", "privacy"]
266
267
268 class SiteForm(CustomForm):
269 class Meta:
270 model = models.SiteSettings
271 exclude = []
272
273
274 class ListForm(CustomForm):
275 class Meta:
276 model = models.List
277 fields = ["user", "name", "description", "curation", "privacy"]
278
279
280 class ReportForm(CustomForm):
281 class Meta:
282 model = models.Report
283 fields = ["user", "reporter", "statuses", "note"]
284
[end of bookwyrm/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py
--- a/bookwyrm/forms.py
+++ b/bookwyrm/forms.py
@@ -233,7 +233,7 @@
class CreateInviteForm(CustomForm):
class Meta:
model = models.SiteInvite
- exclude = ["code", "user", "times_used"]
+ exclude = ["code", "user", "times_used", "invitees"]
widgets = {
"expiry": ExpiryWidget(
choices=[
|
{"golden_diff": "diff --git a/bookwyrm/forms.py b/bookwyrm/forms.py\n--- a/bookwyrm/forms.py\n+++ b/bookwyrm/forms.py\n@@ -233,7 +233,7 @@\n class CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n- exclude = [\"code\", \"user\", \"times_used\"]\n+ exclude = [\"code\", \"user\", \"times_used\", \"invitees\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n", "issue": "Can't create Invites\n**Describe the bug**\r\nWhen creating a new invite, the following appears:\r\n\r\nRest of the page is blank.\r\n\r\nIt appeared since the last update I did a few days ago (don't know at which commit exactly, sorry) and didn't change with the last one.\r\n\r\n**Additional context**\r\nIt doesn't matter what I set for Expiry and Use limit.\r\nAlso, there's an invite in the list that has \"Max uses: None\" that I'm not sure where it comes from.\r\n\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nimport datetime\nfrom collections import defaultdict\n\nfrom django import forms\nfrom django.forms import ModelForm, PasswordInput, widgets\nfrom django.forms.widgets import Textarea\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom bookwyrm import models\n\n\nclass CustomForm(ModelForm):\n \"\"\" add css classes to the forms \"\"\"\n\n def __init__(self, *args, **kwargs):\n css_classes = defaultdict(lambda: \"\")\n css_classes[\"text\"] = \"input\"\n css_classes[\"password\"] = \"input\"\n css_classes[\"email\"] = \"input\"\n css_classes[\"number\"] = \"input\"\n css_classes[\"checkbox\"] = \"checkbox\"\n css_classes[\"textarea\"] = \"textarea\"\n super(CustomForm, self).__init__(*args, **kwargs)\n for visible in self.visible_fields():\n if hasattr(visible.field.widget, \"input_type\"):\n input_type = visible.field.widget.input_type\n if isinstance(visible.field.widget, Textarea):\n input_type = \"textarea\"\n visible.field.widget.attrs[\"cols\"] = None\n visible.field.widget.attrs[\"rows\"] = None\n visible.field.widget.attrs[\"class\"] = css_classes[input_type]\n\n\n# pylint: disable=missing-class-docstring\nclass LoginForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\n \"password\": PasswordInput(),\n }\n\n\nclass RegisterForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\"localname\", \"email\", \"password\"]\n help_texts = {f: None for f in fields}\n widgets = {\"password\": PasswordInput()}\n\n\nclass RatingForm(CustomForm):\n class Meta:\n model = models.ReviewRating\n fields = [\"user\", \"book\", \"rating\", \"privacy\"]\n\n\nclass ReviewForm(CustomForm):\n class Meta:\n model = models.Review\n fields = [\n \"user\",\n \"book\",\n \"name\",\n \"content\",\n \"rating\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass CommentForm(CustomForm):\n class Meta:\n model = models.Comment\n fields = [\n \"user\",\n \"book\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n \"progress\",\n \"progress_mode\",\n ]\n\n\nclass QuotationForm(CustomForm):\n class Meta:\n model = models.Quotation\n fields = [\n \"user\",\n \"book\",\n \"quote\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"privacy\",\n ]\n\n\nclass ReplyForm(CustomForm):\n class Meta:\n model = models.Status\n fields = [\n \"user\",\n \"content\",\n \"content_warning\",\n \"sensitive\",\n \"reply_parent\",\n \"privacy\",\n ]\n\n\nclass StatusForm(CustomForm):\n class Meta:\n model 
= models.Status\n fields = [\"user\", \"content\", \"content_warning\", \"sensitive\", \"privacy\"]\n\n\nclass EditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"email\",\n \"summary\",\n \"show_goal\",\n \"manually_approves_followers\",\n \"discoverable\",\n \"preferred_timezone\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass LimitedEditUserForm(CustomForm):\n class Meta:\n model = models.User\n fields = [\n \"avatar\",\n \"name\",\n \"summary\",\n \"manually_approves_followers\",\n \"discoverable\",\n ]\n help_texts = {f: None for f in fields}\n\n\nclass TagForm(CustomForm):\n class Meta:\n model = models.Tag\n fields = [\"name\"]\n help_texts = {f: None for f in fields}\n labels = {\"name\": \"Add a tag\"}\n\n\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n \"edition_rank\",\n \"authors\",\n \"parent_work\",\n \"shelves\",\n \"subjects\", # TODO\n \"subject_places\", # TODO\n \"connector\",\n ]\n\n\nclass AuthorForm(CustomForm):\n class Meta:\n model = models.Author\n exclude = [\n \"remote_id\",\n \"origin_id\",\n \"created_date\",\n \"updated_date\",\n ]\n\n\nclass ImportForm(forms.Form):\n csv_file = forms.FileField()\n\n\nclass ExpiryWidget(widgets.Select):\n def value_from_datadict(self, data, files, name):\n \"\"\" human-readable exiration time buckets \"\"\"\n selected_string = super().value_from_datadict(data, files, name)\n\n if selected_string == \"day\":\n interval = datetime.timedelta(days=1)\n elif selected_string == \"week\":\n interval = datetime.timedelta(days=7)\n elif selected_string == \"month\":\n interval = datetime.timedelta(days=31) # Close enough?\n elif selected_string == \"forever\":\n return None\n else:\n return selected_string # \"This will raise\n\n return timezone.now() + interval\n\n\nclass InviteRequestForm(CustomForm):\n def clean(self):\n \"\"\" make sure the email isn't in use by a registered user \"\"\"\n cleaned_data = super().clean()\n email = cleaned_data.get(\"email\")\n if email and models.User.objects.filter(email=email).exists():\n self.add_error(\"email\", _(\"A user with this email already exists.\"))\n\n class Meta:\n model = models.InviteRequest\n fields = [\"email\"]\n\n\nclass CreateInviteForm(CustomForm):\n class Meta:\n model = models.SiteInvite\n exclude = [\"code\", \"user\", \"times_used\"]\n widgets = {\n \"expiry\": ExpiryWidget(\n choices=[\n (\"day\", _(\"One Day\")),\n (\"week\", _(\"One Week\")),\n (\"month\", _(\"One Month\")),\n (\"forever\", _(\"Does Not Expire\")),\n ]\n ),\n \"use_limit\": widgets.Select(\n choices=[\n (i, _(\"%(count)d uses\" % {\"count\": i}))\n for i in [1, 5, 10, 25, 50, 100]\n ]\n + [(None, _(\"Unlimited\"))]\n ),\n }\n\n\nclass ShelfForm(CustomForm):\n class Meta:\n model = models.Shelf\n fields = [\"user\", \"name\", \"privacy\"]\n\n\nclass GoalForm(CustomForm):\n class Meta:\n model = models.AnnualGoal\n fields = [\"user\", \"year\", \"goal\", \"privacy\"]\n\n\nclass SiteForm(CustomForm):\n class Meta:\n model = models.SiteSettings\n exclude = []\n\n\nclass ListForm(CustomForm):\n class Meta:\n model = models.List\n fields = [\"user\", \"name\", \"description\", \"curation\", \"privacy\"]\n\n\nclass ReportForm(CustomForm):\n class Meta:\n model = models.Report\n fields = [\"user\", 
\"reporter\", \"statuses\", \"note\"]\n", "path": "bookwyrm/forms.py"}]}
| 3,089 | 112 |
gh_patches_debug_35992
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-8
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Safe App Provider information
The service should (optionally) include information about the provider of the application.
The provider information should have at least the following fields:
```json
{
"name" : <string>,
"url": <string>
}
```
</issue>
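One way to picture the requested provider block end to end is at the serialization layer. The sketch below is illustrative only: it assumes the service exposes these models through Django REST Framework and that a `Provider` model with `name` and `url` fields (plus a nullable link from `SafeApp`) exists, much like the accepted patch further down introduces; the module and field names here are assumptions, not code from this repository.

```python
# Hypothetical serializers.py; names and structure are assumptions for
# illustration, not the repository's actual serializer code.
from rest_framework import serializers

from .models import Provider, SafeApp


class ProviderSerializer(serializers.ModelSerializer):
    class Meta:
        model = Provider
        fields = ["name", "url"]  # matches the JSON shape requested in the issue


class SafeAppSerializer(serializers.ModelSerializer):
    # Nested, optional provider block; rendered as null when no provider is set.
    provider = ProviderSerializer(read_only=True)

    class Meta:
        model = SafeApp
        fields = ["url", "name", "icon_url", "description", "networks", "provider"]
```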
<code>
[start of src/safe_apps/admin.py]
1 from django.contrib import admin
2
3 from .models import SafeApp
4
5 admin.site.register(SafeApp)
6
[end of src/safe_apps/admin.py]
[start of src/safe_apps/models.py]
1 from django.contrib.postgres.fields import ArrayField
2 from django.db import models
3
4
5 class SafeApp(models.Model):
6 url = models.URLField(primary_key=True)
7 name = models.CharField(max_length=200)
8 icon_url = models.URLField()
9 description = models.CharField(max_length=200)
10 networks = ArrayField(models.IntegerField())
11
[end of src/safe_apps/models.py]
[start of src/safe_apps/migrations/0001_initial.py]
1 # Generated by Django 3.2 on 2021-04-28 13:39
2
3 import django.contrib.postgres.fields
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 initial = True
10
11 dependencies = [
12 ]
13
14 operations = [
15 migrations.CreateModel(
16 name='SafeApp',
17 fields=[
18 ('url', models.URLField(primary_key=True, serialize=False)),
19 ('name', models.CharField(max_length=200)),
20 ('icon_url', models.URLField()),
21 ('description', models.CharField(max_length=200)),
22 ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
23 ],
24 ),
25 ]
26
[end of src/safe_apps/migrations/0001_initial.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py
--- a/src/safe_apps/admin.py
+++ b/src/safe_apps/admin.py
@@ -1,5 +1,6 @@
from django.contrib import admin
-from .models import SafeApp
+from .models import SafeApp, Provider
-admin.site.register(SafeApp)
+models = [SafeApp, Provider]
+admin.site.register(models)
diff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py
--- a/src/safe_apps/migrations/0001_initial.py
+++ b/src/safe_apps/migrations/0001_initial.py
@@ -1,7 +1,8 @@
-# Generated by Django 3.2 on 2021-04-28 13:39
+# Generated by Django 3.2 on 2021-04-29 08:22
import django.contrib.postgres.fields
from django.db import migrations, models
+import django.db.models.deletion
class Migration(migrations.Migration):
@@ -12,6 +13,13 @@
]
operations = [
+ migrations.CreateModel(
+ name='Provider',
+ fields=[
+ ('url', models.URLField(primary_key=True, serialize=False)),
+ ('name', models.CharField(max_length=200)),
+ ],
+ ),
migrations.CreateModel(
name='SafeApp',
fields=[
@@ -20,6 +28,7 @@
('icon_url', models.URLField()),
('description', models.CharField(max_length=200)),
('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
+ ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),
],
),
]
diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -2,9 +2,21 @@
from django.db import models
+class Provider(models.Model):
+ url = models.URLField(primary_key=True)
+ name = models.CharField(max_length=200)
+
+ def __str__(self):
+ return f'{self.name} | {self.url}'
+
+
class SafeApp(models.Model):
url = models.URLField(primary_key=True)
name = models.CharField(max_length=200)
icon_url = models.URLField()
description = models.CharField(max_length=200)
networks = ArrayField(models.IntegerField())
+ provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)
+
+ def __str__(self):
+ return f'{self.name} | {self.url} | networks={self.networks}'
|
{"golden_diff": "diff --git a/src/safe_apps/admin.py b/src/safe_apps/admin.py\n--- a/src/safe_apps/admin.py\n+++ b/src/safe_apps/admin.py\n@@ -1,5 +1,6 @@\n from django.contrib import admin\n \n-from .models import SafeApp\n+from .models import SafeApp, Provider\n \n-admin.site.register(SafeApp)\n+models = [SafeApp, Provider]\n+admin.site.register(models)\ndiff --git a/src/safe_apps/migrations/0001_initial.py b/src/safe_apps/migrations/0001_initial.py\n--- a/src/safe_apps/migrations/0001_initial.py\n+++ b/src/safe_apps/migrations/0001_initial.py\n@@ -1,7 +1,8 @@\n-# Generated by Django 3.2 on 2021-04-28 13:39\n+# Generated by Django 3.2 on 2021-04-29 08:22\n \n import django.contrib.postgres.fields\n from django.db import migrations, models\n+import django.db.models.deletion\n \n \n class Migration(migrations.Migration):\n@@ -12,6 +13,13 @@\n ]\n \n operations = [\n+ migrations.CreateModel(\n+ name='Provider',\n+ fields=[\n+ ('url', models.URLField(primary_key=True, serialize=False)),\n+ ('name', models.CharField(max_length=200)),\n+ ],\n+ ),\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n@@ -20,6 +28,7 @@\n ('icon_url', models.URLField()),\n ('description', models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n+ ('provider', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='safe_apps.provider')),\n ],\n ),\n ]\ndiff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -2,9 +2,21 @@\n from django.db import models\n \n \n+class Provider(models.Model):\n+ url = models.URLField(primary_key=True)\n+ name = models.CharField(max_length=200)\n+\n+ def __str__(self):\n+ return f'{self.name} | {self.url}'\n+\n+\n class SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n+ provider = models.ForeignKey(Provider, null=True, on_delete=models.SET_NULL)\n+\n+ def __str__(self):\n+ return f'{self.name} | {self.url} | networks={self.networks}'\n", "issue": "Add Safe App Provider information\nThe service should (optionally) include information about the provider of the application.\r\n\r\nThe provider information should have at least the following fields:\r\n\r\n```json\r\n{\r\n \"name\" : <string>,\r\n \"url\": <string>\r\n}\r\n```\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import SafeApp\n\nadmin.site.register(SafeApp)\n", "path": "src/safe_apps/admin.py"}, {"content": "from django.contrib.postgres.fields import ArrayField\nfrom django.db import models\n\n\nclass SafeApp(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n icon_url = models.URLField()\n description = models.CharField(max_length=200)\n networks = ArrayField(models.IntegerField())\n", "path": "src/safe_apps/models.py"}, {"content": "# Generated by Django 3.2 on 2021-04-28 13:39\n\nimport django.contrib.postgres.fields\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='SafeApp',\n fields=[\n ('url', models.URLField(primary_key=True, serialize=False)),\n ('name', models.CharField(max_length=200)),\n ('icon_url', models.URLField()),\n ('description', 
models.CharField(max_length=200)),\n ('networks', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),\n ],\n ),\n ]\n", "path": "src/safe_apps/migrations/0001_initial.py"}]}
| 962 | 634 |
gh_patches_debug_17476
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-840
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not possible to install Ray from git
I want to install Ray for Python 3.
```
$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip
$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python
```
But this tries to install it for Python 2.7 rather than Python 3, and fails with:
```
CMake Error at cmake_modules/FindNumPy.cmake:62 (message):
NumPy import failure:
Traceback (most recent call last):
File "<string>", line 1, in <module>
ImportError: No module named numpy
```
Because numpy is installed only for Python 3.
</issue>
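The error message is the Python 2 form of an import failure ("No module named numpy", without quotes around the module name), which suggests the CMake `FindNumPy` check runs a different interpreter than the Python 3 that pip3 used to launch setup.py. A small diagnostic script, separate from Ray itself, can make that mismatch visible; the interpreter names below are just the usual candidates on such a machine.

```python
# Diagnostic sketch only, not Ray code: shows which interpreter runs this
# script and whether numpy is importable from each python found on PATH.
import subprocess
import sys

print("interpreter running this script:", sys.executable)

for exe in ("python", "python2", "python3"):
    cmd = [exe, "-c", "import sys, numpy; print(sys.version.split()[0], numpy.__version__)"]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
        output = (result.stdout or result.stderr).strip()
    except FileNotFoundError:
        output = "not found on PATH"
    print(f"{exe}: {output}")
```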
<code>
[start of python/setup.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 import os
6 import shutil
7 import subprocess
8 import sys
9
10 from setuptools import setup, find_packages, Distribution
11 import setuptools.command.build_ext as _build_ext
12
13 # Ideally, we could include these files by putting them in a
14 # MANIFEST.in or using the package_data argument to setup, but the
15 # MANIFEST.in gets applied at the very beginning when setup.py runs
16 # before these files have been created, so we have to move the files
17 # manually.
18 ray_files = [
19 "ray/core/src/common/thirdparty/redis/src/redis-server",
20 "ray/core/src/common/redis_module/libray_redis_module.so",
21 "ray/core/src/plasma/plasma_store",
22 "ray/core/src/plasma/plasma_manager",
23 "ray/core/src/local_scheduler/local_scheduler",
24 "ray/core/src/local_scheduler/liblocal_scheduler_library.so",
25 "ray/core/src/numbuf/libnumbuf.so",
26 "ray/core/src/global_scheduler/global_scheduler",
27 "ray/WebUI.ipynb"
28 ]
29
30
31 class build_ext(_build_ext.build_ext):
32 def run(self):
33 # Note: We are passing in sys.executable so that we use the same
34 # version of Python to build pyarrow inside the build.sh script. Note
35 # that certain flags will not be passed along such as --user or sudo.
36 # TODO(rkn): Fix this.
37 subprocess.check_call(["../build.sh", sys.executable])
38
39 # We also need to install pyarrow along with Ray, so make sure that the
40 # relevant non-Python pyarrow files get copied.
41 pyarrow_files = [
42 os.path.join("ray/pyarrow_files/pyarrow", filename)
43 for filename in os.listdir("./ray/pyarrow_files/pyarrow")
44 if not os.path.isdir(os.path.join("ray/pyarrow_files/pyarrow",
45 filename))]
46
47 files_to_include = ray_files + pyarrow_files
48
49 for filename in files_to_include:
50 self.move_file(filename)
51 # Copy over the autogenerated flatbuffer Python bindings.
52 generated_python_directory = "ray/core/generated"
53 for filename in os.listdir(generated_python_directory):
54 if filename[-3:] == ".py":
55 self.move_file(os.path.join(generated_python_directory,
56 filename))
57
58 def move_file(self, filename):
59 # TODO(rkn): This feels very brittle. It may not handle all cases. See
60 # https://github.com/apache/arrow/blob/master/python/setup.py for an
61 # example.
62 source = filename
63 destination = os.path.join(self.build_lib, filename)
64 # Create the target directory if it doesn't already exist.
65 parent_directory = os.path.dirname(destination)
66 if not os.path.exists(parent_directory):
67 os.makedirs(parent_directory)
68 print("Copying {} to {}.".format(source, destination))
69 shutil.copy(source, destination)
70
71
72 class BinaryDistribution(Distribution):
73 def has_ext_modules(self):
74 return True
75
76
77 setup(name="ray",
78 version="0.1.2",
79 packages=find_packages(),
80 cmdclass={"build_ext": build_ext},
81 # The BinaryDistribution argument triggers build_ext.
82 distclass=BinaryDistribution,
83 install_requires=["numpy",
84 "funcsigs",
85 "click",
86 "colorama",
87 "psutil",
88 "redis",
89 "cloudpickle >= 0.2.2",
90 "flatbuffers"],
91 entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
92 include_package_data=True,
93 zip_safe=False,
94 license="Apache 2.0")
95
[end of python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/setup.py b/python/setup.py
--- a/python/setup.py
+++ b/python/setup.py
@@ -81,12 +81,15 @@
# The BinaryDistribution argument triggers build_ext.
distclass=BinaryDistribution,
install_requires=["numpy",
+ "cython",
"funcsigs",
"click",
"colorama",
"psutil",
"redis",
"cloudpickle >= 0.2.2",
+ # The six module is required by pyarrow.
+ "six >= 1.0.0",
"flatbuffers"],
entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]},
include_package_data=True,
|
{"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -81,12 +81,15 @@\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n+ \"cython\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n+ # The six module is required by pyarrow.\n+ \"six >= 1.0.0\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n", "issue": "Not possible to install Ray from git\nI want to install Ray for Python 3.\r\n\r\n```\r\n$ apt-get install -y cmake pkg-config python3-dev build-essential autoconf curl libtool libboost-all-dev unzip\r\n$ pip3 install git+https://github.com/ray-project/ray.git@37282330c0ea687fd1b983176dce85731fcf189d#subdirectory=python\r\n```\r\n\r\nBut this tries to install it for python2.7, and not python3, failing with:\r\n\r\n```\r\n CMake Error at cmake_modules/FindNumPy.cmake:62 (message):\r\n NumPy import failure:\r\n \r\n Traceback (most recent call last):\r\n \r\n File \"<string>\", line 1, in <module>\r\n \r\n ImportError: No module named numpy\r\n```\r\n\r\nBecause numpy is installed only for Python 3.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\nray_files = [\n \"ray/core/src/common/thirdparty/redis/src/redis-server\",\n \"ray/core/src/common/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store\",\n \"ray/core/src/plasma/plasma_manager\",\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/numbuf/libnumbuf.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n \"ray/WebUI.ipynb\"\n]\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = [\n os.path.join(\"ray/pyarrow_files/pyarrow\", filename)\n for filename in os.listdir(\"./ray/pyarrow_files/pyarrow\")\n if not os.path.isdir(os.path.join(\"ray/pyarrow_files/pyarrow\",\n filename))]\n\n files_to_include = ray_files + pyarrow_files\n\n for filename in files_to_include:\n self.move_file(filename)\n # Copy over the autogenerated flatbuffer Python bindings.\n generated_python_directory = \"ray/core/generated\"\n for filename in os.listdir(generated_python_directory):\n if filename[-3:] == \".py\":\n self.move_file(os.path.join(generated_python_directory,\n filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. 
See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nsetup(name=\"ray\",\n version=\"0.1.2\",\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n \"flatbuffers\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]}
| 1,683 | 156 |
gh_patches_debug_9371
|
rasdani/github-patches
|
git_diff
|
cal-itp__benefits-688
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Engineering: Front-end - Redesign the `Get started with Login.gov` button
Redesign the `Get started with Login.gov` button to new specs:
- The text `Get started with` is moved into the button
- New font required: Public Sans https://github.com/uswds/public-sans
- Button should elegantly and responsively resize itself in 1-line mode and 2-line mode
- The fallback `Login.gov` text should also be there for screenreaders
- Desktop composition: 289px x 41px
<img width="334" alt="image" src="https://user-images.githubusercontent.com/3673236/172735124-93f631bc-d655-4aff-a6d9-faf07dcef0e2.png">
- Mobile composition: 2-line mobile size, 289px x 72px
<img width="363" alt="image" src="https://user-images.githubusercontent.com/3673236/172735067-3e785d8f-235b-4e32-af0a-bcf17eed6ca5.png">
https://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(IAL2-Login.gov)?node-id=3830%3A9882
</issue>
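Most of this redesign is front-end work (markup, styling, and the Public Sans font files), so the server-side change that eventually landed, shown in the diff further down, is tiny: it only has to let a self-hosted font load at all. The sketch below restates that relationship and assumes Public Sans is served from the app's own static files, which the issue does not state explicitly.

```python
# Sketch only: if the Public Sans .woff2 files are self-hosted under the app's
# static directory, browsers will block them unless the CSP font-src list also
# allows the app's own origin.
CSP_FONT_SRC = [
    "'self'",  # required for self-hosted fonts such as Public Sans
    "https://california.azureedge.net/cdt/statetemplate/",
    "https://fonts.gstatic.com/",
]
```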
<code>
[start of benefits/settings.py]
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6
7 def _filter_empty(ls):
8 return [s for s in ls if s]
9
10
11 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
13
14 # SECURITY WARNING: keep the secret key used in production secret!
15 SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "secret")
16
17 # SECURITY WARNING: don't run with debug turned on in production!
18 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
19
20 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
21
22 ALLOWED_HOSTS = _filter_empty(os.environ.get("DJANGO_ALLOWED_HOSTS", "localhost,127.0.0.1").split(","))
23
24 # Application definition
25
26 INSTALLED_APPS = [
27 "django.contrib.messages",
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 "benefits.oauth",
34 ]
35
36 if ADMIN:
37 INSTALLED_APPS.extend(
38 [
39 "django.contrib.admin",
40 "django.contrib.auth",
41 "django.contrib.contenttypes",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.contrib.messages.middleware.MessageMiddleware",
49 "django.middleware.locale.LocaleMiddleware",
50 "benefits.core.middleware.Healthcheck",
51 "django.middleware.common.CommonMiddleware",
52 "django.middleware.csrf.CsrfViewMiddleware",
53 "django.middleware.clickjacking.XFrameOptionsMiddleware",
54 "csp.middleware.CSPMiddleware",
55 "benefits.core.middleware.ChangedLanguageEvent",
56 ]
57
58 if ADMIN:
59 MIDDLEWARE.extend(
60 [
61 "django.contrib.auth.middleware.AuthenticationMiddleware",
62 "django.contrib.messages.middleware.MessageMiddleware",
63 ]
64 )
65
66 if DEBUG:
67 MIDDLEWARE.extend(["benefits.core.middleware.DebugSession"])
68
69 CSRF_COOKIE_AGE = None
70 CSRF_COOKIE_SAMESITE = "Strict"
71 CSRF_COOKIE_HTTPONLY = True
72 CSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get("DJANGO_TRUSTED_ORIGINS", "http://localhost,http://127.0.0.1").split(","))
73
74 # With `Strict`, the user loses their Django session between leaving our app to
75 # sign in with OAuth, and coming back into our app from the OAuth redirect.
76 # This is because `Strict` disallows our cookie being sent from an external
77 # domain and so the session cookie is lost.
78 #
79 # `Lax` allows the cookie to travel with the user and be sent back to us by the
80 # OAuth server, as long as the request is "safe" i.e. GET
81 SESSION_COOKIE_SAMESITE = "Lax"
82 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
83 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
84 SESSION_COOKIE_NAME = "_benefitssessionid"
85
86 if not DEBUG:
87 CSRF_COOKIE_SECURE = True
88 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
89 SESSION_COOKIE_SECURE = True
90
91 SECURE_BROWSER_XSS_FILTER = True
92
93 # the NGINX reverse proxy sits in front of the application in deployed environments
94 # SSL terminates before getting to Django, and NGINX adds this header to indicate
95 # if the original request was secure or not
96 #
97 # See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header
98 if not DEBUG:
99 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
100
101 ROOT_URLCONF = "benefits.urls"
102
103 template_ctx_processors = [
104 "django.template.context_processors.request",
105 "django.contrib.messages.context_processors.messages",
106 "benefits.core.context_processors.analytics",
107 "benefits.core.context_processors.authentication",
108 "benefits.core.context_processors.recaptcha",
109 ]
110
111 if DEBUG:
112 template_ctx_processors.extend(
113 [
114 "django.template.context_processors.debug",
115 "benefits.core.context_processors.debug",
116 ]
117 )
118
119 if ADMIN:
120 template_ctx_processors.extend(
121 [
122 "django.contrib.auth.context_processors.auth",
123 "django.contrib.messages.context_processors.messages",
124 ]
125 )
126
127 TEMPLATES = [
128 {
129 "BACKEND": "django.template.backends.django.DjangoTemplates",
130 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
131 "APP_DIRS": True,
132 "OPTIONS": {
133 "context_processors": template_ctx_processors,
134 },
135 },
136 ]
137
138 WSGI_APPLICATION = "benefits.wsgi.application"
139
140 DATABASES = {
141 "default": {
142 "ENGINE": "django.db.backends.sqlite3",
143 "NAME": "django.db",
144 }
145 }
146
147 # Password validation
148
149 AUTH_PASSWORD_VALIDATORS = []
150
151 if ADMIN:
152 AUTH_PASSWORD_VALIDATORS.extend(
153 [
154 {
155 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
156 },
157 {
158 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
159 },
160 {
161 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
162 },
163 {
164 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
165 },
166 ]
167 )
168
169 # OAuth configuration
170
171 OAUTH_AUTHORITY = os.environ.get("DJANGO_OAUTH_AUTHORITY", "http://example.com")
172 OAUTH_CLIENT_NAME = os.environ.get("DJANGO_OAUTH_CLIENT_NAME", "benefits-oauth-client-name")
173 OAUTH_CLIENT_ID = os.environ.get("DJANGO_OAUTH_CLIENT_ID", "benefits-oauth-client-id")
174
175 if OAUTH_CLIENT_NAME:
176 AUTHLIB_OAUTH_CLIENTS = {
177 OAUTH_CLIENT_NAME: {
178 "client_id": OAUTH_CLIENT_ID,
179 "server_metadata_url": f"{OAUTH_AUTHORITY}/.well-known/openid-configuration",
180 "client_kwargs": {"code_challenge_method": "S256", "scope": "openid"},
181 }
182 }
183
184 # Internationalization
185
186 LANGUAGE_CODE = "en"
187
188 LANGUAGE_COOKIE_HTTPONLY = True
189 LANGUAGE_COOKIE_SAMESITE = "Strict"
190 LANGUAGE_COOKIE_SECURE = True
191
192 LANGUAGES = [("en", "English"), ("es", "Español")]
193
194 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
195
196 USE_I18N = True
197 USE_L10N = True
198
199 TIME_ZONE = "UTC"
200 USE_TZ = True
201
202 # Static files (CSS, JavaScript, Images)
203
204 STATIC_URL = "/static/"
205 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
206 STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
207 STATIC_ROOT = os.path.join(BASE_DIR, "static")
208
209 # Logging configuration
210
211 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
212 LOGGING = {
213 "version": 1,
214 "disable_existing_loggers": False,
215 "formatters": {
216 "default": {
217 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
218 "datefmt": "%d/%b/%Y %H:%M:%S",
219 "style": "{",
220 },
221 },
222 "handlers": {
223 "default": {"class": "logging.StreamHandler", "formatter": "default"},
224 },
225 "root": {
226 "handlers": ["default"],
227 "level": LOG_LEVEL,
228 },
229 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
230 }
231
232 # Analytics configuration
233
234 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
235
236 # rate limit configuration
237
238 # number of requests allowed in the given period
239 RATE_LIMIT = int(os.environ.get("DJANGO_RATE_LIMIT", 5))
240
241 # HTTP request methods to rate limit
242 RATE_LIMIT_METHODS = os.environ.get("DJANGO_RATE_LIMIT_METHODS", "POST").upper().split(",")
243
244 # number of seconds before additional requests are denied
245 RATE_LIMIT_PERIOD = int(os.environ.get("DJANGO_RATE_LIMIT_PERIOD", 60))
246
247 # Rate Limit feature flag
248 RATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))
249
250 # reCAPTCHA configuration
251
252 RECAPTCHA_API_URL = os.environ.get("DJANGO_RECAPTCHA_API_URL", "https://www.google.com/recaptcha/api.js")
253 RECAPTCHA_SITE_KEY = os.environ.get("DJANGO_RECAPTCHA_SITE_KEY")
254 RECAPTCHA_SECRET_KEY = os.environ.get("DJANGO_RECAPTCHA_SECRET_KEY")
255 RECAPTCHA_VERIFY_URL = os.environ.get("DJANGO_RECAPTCHA_VERIFY_URL", "https://www.google.com/recaptcha/api/siteverify")
256 RECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))
257
258 # Content Security Policy
259 # Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
260
261 # In particular, note that the inner single-quotes are required!
262 # https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
263
264 CSP_DEFAULT_SRC = ["'self'"]
265
266 CSP_CONNECT_SRC = ["'self'", "https://api.amplitude.com/"]
267 env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
268 CSP_CONNECT_SRC.extend(env_connect_src)
269
270 CSP_FONT_SRC = ["https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
271 env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
272 CSP_FONT_SRC.extend(env_font_src)
273
274 CSP_FRAME_ANCESTORS = ["'none'"]
275
276 CSP_FRAME_SRC = ["'none'"]
277 env_frame_src = _filter_empty(os.environ.get("DJANGO_CSP_FRAME_SRC", "").split(","))
278 CSP_FRAME_SRC.extend(env_frame_src)
279 if RECAPTCHA_ENABLED:
280 CSP_FRAME_SRC.append("https://www.google.com")
281
282
283 CSP_SCRIPT_SRC = [
284 "'unsafe-inline'",
285 "https://california.azureedge.net/cdt/statetemplate/",
286 "https://cdn.amplitude.com/libs/",
287 "https://code.jquery.com/",
288 "*.littlepay.com",
289 ]
290 env_script_src = _filter_empty(os.environ.get("DJANGO_CSP_SCRIPT_SRC", "").split(","))
291 CSP_SCRIPT_SRC.extend(env_script_src)
292 if RECAPTCHA_ENABLED:
293 CSP_SCRIPT_SRC.extend(["https://www.google.com/recaptcha/", "https://www.gstatic.com/recaptcha/releases/"])
294
295 CSP_STYLE_SRC = [
296 "'self'",
297 "'unsafe-inline'",
298 "https://california.azureedge.net/cdt/statetemplate/",
299 "https://fonts.googleapis.com/css",
300 ]
301 env_style_src = _filter_empty(os.environ.get("DJANGO_CSP_STYLE_SRC", "").split(","))
302 CSP_STYLE_SRC.extend(env_style_src)
303
[end of benefits/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -267,7 +267,7 @@
env_connect_src = _filter_empty(os.environ.get("DJANGO_CSP_CONNECT_SRC", "").split(","))
CSP_CONNECT_SRC.extend(env_connect_src)
-CSP_FONT_SRC = ["https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
+CSP_FONT_SRC = ["'self'", "https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
env_font_src = _filter_empty(os.environ.get("DJANGO_CSP_FONT_SRC", "").split(","))
CSP_FONT_SRC.extend(env_font_src)
|
{"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -267,7 +267,7 @@\n env_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\n CSP_CONNECT_SRC.extend(env_connect_src)\n \n-CSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n+CSP_FONT_SRC = [\"'self'\", \"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n env_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\n CSP_FONT_SRC.extend(env_font_src)\n", "issue": "Engineering: Front-end - Redesign the `Get started with Login.gov` button\nRedesign the `Get started with Login.gov` button to new specs:\r\n\r\n\r\n- The text `Get started with` is moved into the button\r\n- New font required: Public Sans https://github.com/uswds/public-sans\r\n- Button should elegantly and responsively resize itself in 1-line mode and 2-line mode\r\n- The fallback `Login.gov` text should also be there for screenreaders\r\n- Desktop composition: 289px x 41px\r\n<img width=\"334\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/172735124-93f631bc-d655-4aff-a6d9-faf07dcef0e2.png\">\r\n\r\n- Mobile composition: 2-line mobile size, 289px x 72px\r\n<img width=\"363\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/172735067-3e785d8f-235b-4e32-af0a-bcf17eed6ca5.png\">\r\n\r\n\r\nhttps://www.figma.com/file/SeSd3LaLd6WkbEYhmtKpO3/Benefits-(IAL2-Login.gov)?node-id=3830%3A9882\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n\ndef _filter_empty(ls):\n return [s for s in ls if s]\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\"DJANGO_SECRET_KEY\", \"secret\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = _filter_empty(os.environ.get(\"DJANGO_ALLOWED_HOSTS\", \"localhost,127.0.0.1\").split(\",\"))\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.messages\",\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n \"benefits.oauth\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nif DEBUG:\n 
MIDDLEWARE.extend([\"benefits.core.middleware.DebugSession\"])\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\nCSRF_TRUSTED_ORIGINS = _filter_empty(os.environ.get(\"DJANGO_TRUSTED_ORIGINS\", \"http://localhost,http://127.0.0.1\").split(\",\"))\n\n# With `Strict`, the user loses their Django session between leaving our app to\n# sign in with OAuth, and coming back into our app from the OAuth redirect.\n# This is because `Strict` disallows our cookie being sent from an external\n# domain and so the session cookie is lost.\n#\n# `Lax` allows the cookie to travel with the user and be sent back to us by the\n# OAuth server, as long as the request is \"safe\" i.e. GET\nSESSION_COOKIE_SAMESITE = \"Lax\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\nSESSION_COOKIE_NAME = \"_benefitssessionid\"\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nSECURE_BROWSER_XSS_FILTER = True\n\n# the NGINX reverse proxy sits in front of the application in deployed environments\n# SSL terminates before getting to Django, and NGINX adds this header to indicate\n# if the original request was secure or not\n#\n# See https://docs.djangoproject.com/en/3.2/ref/settings/#secure-proxy-ssl-header\nif not DEBUG:\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"django.contrib.messages.context_processors.messages\",\n \"benefits.core.context_processors.analytics\",\n \"benefits.core.context_processors.authentication\",\n \"benefits.core.context_processors.recaptcha\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": \"django.db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# OAuth configuration\n\nOAUTH_AUTHORITY = os.environ.get(\"DJANGO_OAUTH_AUTHORITY\", \"http://example.com\")\nOAUTH_CLIENT_NAME = os.environ.get(\"DJANGO_OAUTH_CLIENT_NAME\", \"benefits-oauth-client-name\")\nOAUTH_CLIENT_ID = os.environ.get(\"DJANGO_OAUTH_CLIENT_ID\", \"benefits-oauth-client-id\")\n\nif OAUTH_CLIENT_NAME:\n AUTHLIB_OAUTH_CLIENTS = {\n OAUTH_CLIENT_NAME: {\n \"client_id\": OAUTH_CLIENT_ID,\n \"server_metadata_url\": f\"{OAUTH_AUTHORITY}/.well-known/openid-configuration\",\n \"client_kwargs\": 
{\"code_challenge_method\": \"S256\", \"scope\": \"openid\"},\n }\n }\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# rate limit configuration\n\n# number of requests allowed in the given period\nRATE_LIMIT = int(os.environ.get(\"DJANGO_RATE_LIMIT\", 5))\n\n# HTTP request methods to rate limit\nRATE_LIMIT_METHODS = os.environ.get(\"DJANGO_RATE_LIMIT_METHODS\", \"POST\").upper().split(\",\")\n\n# number of seconds before additional requests are denied\nRATE_LIMIT_PERIOD = int(os.environ.get(\"DJANGO_RATE_LIMIT_PERIOD\", 60))\n\n# Rate Limit feature flag\nRATE_LIMIT_ENABLED = all((RATE_LIMIT > 0, len(RATE_LIMIT_METHODS) > 0, RATE_LIMIT_PERIOD > 0))\n\n# reCAPTCHA configuration\n\nRECAPTCHA_API_URL = os.environ.get(\"DJANGO_RECAPTCHA_API_URL\", \"https://www.google.com/recaptcha/api.js\")\nRECAPTCHA_SITE_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SITE_KEY\")\nRECAPTCHA_SECRET_KEY = os.environ.get(\"DJANGO_RECAPTCHA_SECRET_KEY\")\nRECAPTCHA_VERIFY_URL = os.environ.get(\"DJANGO_RECAPTCHA_VERIFY_URL\", \"https://www.google.com/recaptcha/api/siteverify\")\nRECAPTCHA_ENABLED = all((RECAPTCHA_API_URL, RECAPTCHA_SITE_KEY, RECAPTCHA_SECRET_KEY, RECAPTCHA_VERIFY_URL))\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nCSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\nenv_connect_src = _filter_empty(os.environ.get(\"DJANGO_CSP_CONNECT_SRC\", \"\").split(\",\"))\nCSP_CONNECT_SRC.extend(env_connect_src)\n\nCSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\nenv_font_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FONT_SRC\", \"\").split(\",\"))\nCSP_FONT_SRC.extend(env_font_src)\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\n\nCSP_FRAME_SRC = [\"'none'\"]\nenv_frame_src = _filter_empty(os.environ.get(\"DJANGO_CSP_FRAME_SRC\", \"\").split(\",\"))\nCSP_FRAME_SRC.extend(env_frame_src)\nif RECAPTCHA_ENABLED:\n CSP_FRAME_SRC.append(\"https://www.google.com\")\n\n\nCSP_SCRIPT_SRC = [\n \"'unsafe-inline'\",\n 
\"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://cdn.amplitude.com/libs/\",\n \"https://code.jquery.com/\",\n \"*.littlepay.com\",\n]\nenv_script_src = _filter_empty(os.environ.get(\"DJANGO_CSP_SCRIPT_SRC\", \"\").split(\",\"))\nCSP_SCRIPT_SRC.extend(env_script_src)\nif RECAPTCHA_ENABLED:\n CSP_SCRIPT_SRC.extend([\"https://www.google.com/recaptcha/\", \"https://www.gstatic.com/recaptcha/releases/\"])\n\nCSP_STYLE_SRC = [\n \"'self'\",\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://fonts.googleapis.com/css\",\n]\nenv_style_src = _filter_empty(os.environ.get(\"DJANGO_CSP_STYLE_SRC\", \"\").split(\",\"))\nCSP_STYLE_SRC.extend(env_style_src)\n", "path": "benefits/settings.py"}]}
| 3,986 | 165 |
gh_patches_debug_35523
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-557
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TypeError using SentryAsgiMiddleware with FastAPI/Starlette app
I cannot get `SentryAsgiMiddleware` to work with our FastAPI app. We tried to follow the example in the [Sentry docs](https://docs.sentry.io/platforms/python/asgi/), so the app module basically looks like this:
```python
from fastapi import FastAPI
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
...
app = FastAPI()
@app.post()
...
app = SentryAsgiMiddleware(app)
```
This gives an error on all requests, see the following stack trace:
```python
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/usr/local/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/usr/local/lib/python3.7/site-packages/uvicorn/middleware/asgi2.py", line 7, in __call__
await instance(receive, send)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 54, in run_asgi2
scope, lambda: self.app(scope)(receive, send)
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 93, in _run_app
raise exc from None
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 90, in _run_app
return await callback()
File "/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py", line 54, in <lambda>
scope, lambda: self.app(scope)(receive, send)
TypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'
```
Library versions:
- python==3.7.5
- sentry-sdk==0.13.2
- uvicorn==0.10.8
- fastapi==0.42.0
- starlette==0.12.9
</issue>
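The root cause is the difference between the ASGI 2 and ASGI 3 calling conventions. Uvicorn sees that the middleware's `__call__` is not a coroutine function, so it invokes it in ASGI 2 style with only `scope`; the middleware then also calls the wrapped app as ASGI 2 (`self.app(scope)`), but FastAPI/Starlette applications implement ASGI 3, a single coroutine called as `app(scope, receive, send)`. Calling an ASGI 3 app with only `scope` is exactly what raises the `TypeError` in the traceback. The classes below are a minimal illustration of the two shapes, not sentry-sdk or Starlette code.

```python
# Illustration only: the two ASGI application shapes involved in this bug.

class ASGI2App:
    """ASGI 2: constructed per connection with the scope, then awaited with receive/send."""

    def __init__(self, scope):
        self.scope = scope

    async def __call__(self, receive, send):
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"ok"})


class ASGI3App:
    """ASGI 3 (what FastAPI/Starlette implement): a single awaitable callable."""

    async def __call__(self, scope, receive, send):
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"ok"})


# ASGI 2 style: ASGI2App(scope), then `await instance(receive, send)`.
# ASGI 3 style: `await ASGI3App()(scope, receive, send)`.
# Doing `ASGI3App()(scope)` is what fails with "missing 2 required positional
# arguments: 'receive' and 'send'".
```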
<code>
[start of sentry_sdk/integrations/asgi.py]
1 """
2 An ASGI middleware.
3
4 Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
5 """
6
7 import functools
8 import urllib
9
10 from sentry_sdk._types import MYPY
11 from sentry_sdk.hub import Hub, _should_send_default_pii
12 from sentry_sdk.integrations._wsgi_common import _filter_headers
13 from sentry_sdk.utils import ContextVar, event_from_exception, transaction_from_function
14 from sentry_sdk.tracing import Span
15
16 if MYPY:
17 from typing import Dict
18 from typing import Any
19 from typing import Optional
20
21 from sentry_sdk._types import Event, Hint
22
23
24 _asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
25
26
27 def _capture_exception(hub, exc):
28 # type: (Hub, Any) -> None
29
30 # Check client here as it might have been unset while streaming response
31 if hub.client is not None:
32 event, hint = event_from_exception(
33 exc,
34 client_options=hub.client.options,
35 mechanism={"type": "asgi", "handled": False},
36 )
37 hub.capture_event(event, hint=hint)
38
39
40 class SentryAsgiMiddleware:
41 __slots__ = ("app",)
42
43 def __init__(self, app):
44 # type: (Any) -> None
45 self.app = app
46
47 def __call__(self, scope, receive=None, send=None):
48 # type: (Any, Any, Any) -> Any
49 if receive is None or send is None:
50
51 async def run_asgi2(receive, send):
52 # type: (Any, Any) -> Any
53 return await self._run_app(
54 scope, lambda: self.app(scope)(receive, send)
55 )
56
57 return run_asgi2
58 else:
59 return self._run_app(scope, lambda: self.app(scope, receive, send))
60
61 async def _run_app(self, scope, callback):
62 # type: (Any, Any) -> Any
63 if _asgi_middleware_applied.get(False):
64 return await callback()
65
66 _asgi_middleware_applied.set(True)
67 try:
68 hub = Hub(Hub.current)
69 with hub:
70 with hub.configure_scope() as sentry_scope:
71 sentry_scope.clear_breadcrumbs()
72 sentry_scope._name = "asgi"
73 processor = functools.partial(
74 self.event_processor, asgi_scope=scope
75 )
76 sentry_scope.add_event_processor(processor)
77
78 if scope["type"] in ("http", "websocket"):
79 span = Span.continue_from_headers(dict(scope["headers"]))
80 span.op = "{}.server".format(scope["type"])
81 else:
82 span = Span()
83 span.op = "asgi.server"
84
85 span.set_tag("asgi.type", scope["type"])
86 span.transaction = "generic ASGI request"
87
88 with hub.start_span(span) as span:
89 try:
90 return await callback()
91 except Exception as exc:
92 _capture_exception(hub, exc)
93 raise exc from None
94 finally:
95 _asgi_middleware_applied.set(False)
96
97 def event_processor(self, event, hint, asgi_scope):
98 # type: (Event, Hint, Any) -> Optional[Event]
99 request_info = event.get("request", {})
100
101 if asgi_scope["type"] in ("http", "websocket"):
102 request_info["url"] = self.get_url(asgi_scope)
103 request_info["method"] = asgi_scope["method"]
104 request_info["headers"] = _filter_headers(self.get_headers(asgi_scope))
105 request_info["query_string"] = self.get_query(asgi_scope)
106
107 if asgi_scope.get("client") and _should_send_default_pii():
108 request_info["env"] = {"REMOTE_ADDR": asgi_scope["client"][0]}
109
110 if asgi_scope.get("endpoint"):
111 # Webframeworks like Starlette mutate the ASGI env once routing is
112 # done, which is sometime after the request has started. If we have
113 # an endpoint, overwrite our path-based transaction name.
114 event["transaction"] = self.get_transaction(asgi_scope)
115
116 event["request"] = request_info
117
118 return event
119
120 def get_url(self, scope):
121 # type: (Any) -> str
122 """
123 Extract URL from the ASGI scope, without also including the querystring.
124 """
125 scheme = scope.get("scheme", "http")
126 server = scope.get("server", None)
127 path = scope.get("root_path", "") + scope["path"]
128
129 for key, value in scope["headers"]:
130 if key == b"host":
131 host_header = value.decode("latin-1")
132 return "%s://%s%s" % (scheme, host_header, path)
133
134 if server is not None:
135 host, port = server
136 default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
137 if port != default_port:
138 return "%s://%s:%s%s" % (scheme, host, port, path)
139 return "%s://%s%s" % (scheme, host, path)
140 return path
141
142 def get_query(self, scope):
143 # type: (Any) -> Any
144 """
145 Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
146 """
147 return urllib.parse.unquote(scope["query_string"].decode("latin-1"))
148
149 def get_headers(self, scope):
150 # type: (Any) -> Dict[str, Any]
151 """
152 Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
153 """
154 headers = {} # type: Dict[str, str]
155 for raw_key, raw_value in scope["headers"]:
156 key = raw_key.decode("latin-1")
157 value = raw_value.decode("latin-1")
158 if key in headers:
159 headers[key] = headers[key] + ", " + value
160 else:
161 headers[key] = value
162 return headers
163
164 def get_transaction(self, scope):
165 # type: (Any) -> Optional[str]
166 """
167 Return a transaction string to identify the routed endpoint.
168 """
169 return transaction_from_function(scope["endpoint"])
170
[end of sentry_sdk/integrations/asgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py
--- a/sentry_sdk/integrations/asgi.py
+++ b/sentry_sdk/integrations/asgi.py
@@ -4,7 +4,9 @@
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
"""
+import asyncio
import functools
+import inspect
import urllib
from sentry_sdk._types import MYPY
@@ -17,6 +19,7 @@
from typing import Dict
from typing import Any
from typing import Optional
+ from typing import Callable
from sentry_sdk._types import Event, Hint
@@ -37,26 +40,45 @@
hub.capture_event(event, hint=hint)
+def _looks_like_asgi3(app):
+ # type: (Any) -> bool
+ """
+ Try to figure out if an application object supports ASGI3.
+
+ This is how uvicorn figures out the application version as well.
+ """
+ if inspect.isclass(app):
+ return hasattr(app, "__await__")
+ elif inspect.isfunction(app):
+ return asyncio.iscoroutinefunction(app)
+ else:
+ call = getattr(app, "__call__", None) # noqa
+ return asyncio.iscoroutinefunction(call)
+
+
class SentryAsgiMiddleware:
- __slots__ = ("app",)
+ __slots__ = ("app", "__call__")
def __init__(self, app):
# type: (Any) -> None
self.app = app
- def __call__(self, scope, receive=None, send=None):
- # type: (Any, Any, Any) -> Any
- if receive is None or send is None:
+ if _looks_like_asgi3(app):
+ self.__call__ = self._run_asgi3 # type: Callable[..., Any]
+ else:
+ self.__call__ = self._run_asgi2
- async def run_asgi2(receive, send):
- # type: (Any, Any) -> Any
- return await self._run_app(
- scope, lambda: self.app(scope)(receive, send)
- )
+ def _run_asgi2(self, scope):
+ # type: (Any) -> Any
+ async def inner(receive, send):
+ # type: (Any, Any) -> Any
+ return await self._run_app(scope, lambda: self.app(scope)(receive, send))
- return run_asgi2
- else:
- return self._run_app(scope, lambda: self.app(scope, receive, send))
+ return inner
+
+ async def _run_asgi3(self, scope, receive, send):
+ # type: (Any, Any, Any) -> Any
+ return await self._run_app(scope, lambda: self.app(scope, receive, send))
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py\n--- a/sentry_sdk/integrations/asgi.py\n+++ b/sentry_sdk/integrations/asgi.py\n@@ -4,7 +4,9 @@\n Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n \"\"\"\n \n+import asyncio\n import functools\n+import inspect\n import urllib\n \n from sentry_sdk._types import MYPY\n@@ -17,6 +19,7 @@\n from typing import Dict\n from typing import Any\n from typing import Optional\n+ from typing import Callable\n \n from sentry_sdk._types import Event, Hint\n \n@@ -37,26 +40,45 @@\n hub.capture_event(event, hint=hint)\n \n \n+def _looks_like_asgi3(app):\n+ # type: (Any) -> bool\n+ \"\"\"\n+ Try to figure out if an application object supports ASGI3.\n+\n+ This is how uvicorn figures out the application version as well.\n+ \"\"\"\n+ if inspect.isclass(app):\n+ return hasattr(app, \"__await__\")\n+ elif inspect.isfunction(app):\n+ return asyncio.iscoroutinefunction(app)\n+ else:\n+ call = getattr(app, \"__call__\", None) # noqa\n+ return asyncio.iscoroutinefunction(call)\n+\n+\n class SentryAsgiMiddleware:\n- __slots__ = (\"app\",)\n+ __slots__ = (\"app\", \"__call__\")\n \n def __init__(self, app):\n # type: (Any) -> None\n self.app = app\n \n- def __call__(self, scope, receive=None, send=None):\n- # type: (Any, Any, Any) -> Any\n- if receive is None or send is None:\n+ if _looks_like_asgi3(app):\n+ self.__call__ = self._run_asgi3 # type: Callable[..., Any]\n+ else:\n+ self.__call__ = self._run_asgi2\n \n- async def run_asgi2(receive, send):\n- # type: (Any, Any) -> Any\n- return await self._run_app(\n- scope, lambda: self.app(scope)(receive, send)\n- )\n+ def _run_asgi2(self, scope):\n+ # type: (Any) -> Any\n+ async def inner(receive, send):\n+ # type: (Any, Any) -> Any\n+ return await self._run_app(scope, lambda: self.app(scope)(receive, send))\n \n- return run_asgi2\n- else:\n- return self._run_app(scope, lambda: self.app(scope, receive, send))\n+ return inner\n+\n+ async def _run_asgi3(self, scope, receive, send):\n+ # type: (Any, Any, Any) -> Any\n+ return await self._run_app(scope, lambda: self.app(scope, receive, send))\n \n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n", "issue": "TypeError using SentryAsgiMiddleware with FastAPI/Starlette app\nI cannot get `SentryAsgiMiddleware` to work with our FastAPI app. 
We tried to follow the example in the [Sentry docs](https://docs.sentry.io/platforms/python/asgi/), so the app module basically looks like this:\r\n\r\n```python\r\nfrom fastapi import FastAPI\r\nfrom sentry_sdk.integrations.asgi import SentryAsgiMiddleware\r\n...\r\napp = FastAPI()\r\n\r\[email protected]()\r\n...\r\n\r\napp = SentryAsgiMiddleware(app)\r\n```\r\n\r\nThis gives an error on all requests, see the following stack trace:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 385, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/middleware/proxy_headers.py\", line 45, in __call__\r\n return await self.app(scope, receive, send)\r\n File \"/usr/local/lib/python3.7/site-packages/uvicorn/middleware/asgi2.py\", line 7, in __call__\r\n await instance(receive, send)\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 54, in run_asgi2\r\n scope, lambda: self.app(scope)(receive, send)\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 93, in _run_app\r\n raise exc from None\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 90, in _run_app\r\n return await callback()\r\n File \"/usr/local/lib/python3.7/site-packages/sentry_sdk/integrations/asgi.py\", line 54, in <lambda>\r\n scope, lambda: self.app(scope)(receive, send)\r\nTypeError: __call__() missing 2 required positional arguments: 'receive' and 'send'\r\n```\r\n\r\nLibrary versions:\r\n\r\n- python==3.7.5\r\n- sentry-sdk==0.13.2\r\n- uvicorn==0.10.8\r\n- fastapi==0.42.0\r\n- starlette==0.12.9\n", "before_files": [{"content": "\"\"\"\nAn ASGI middleware.\n\nBased on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.\n\"\"\"\n\nimport functools\nimport urllib\n\nfrom sentry_sdk._types import MYPY\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations._wsgi_common import _filter_headers\nfrom sentry_sdk.utils import ContextVar, event_from_exception, transaction_from_function\nfrom sentry_sdk.tracing import Span\n\nif MYPY:\n from typing import Dict\n from typing import Any\n from typing import Optional\n\n from sentry_sdk._types import Event, Hint\n\n\n_asgi_middleware_applied = ContextVar(\"sentry_asgi_middleware_applied\")\n\n\ndef _capture_exception(hub, exc):\n # type: (Hub, Any) -> None\n\n # Check client here as it might have been unset while streaming response\n if hub.client is not None:\n event, hint = event_from_exception(\n exc,\n client_options=hub.client.options,\n mechanism={\"type\": \"asgi\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\nclass SentryAsgiMiddleware:\n __slots__ = (\"app\",)\n\n def __init__(self, app):\n # type: (Any) -> None\n self.app = app\n\n def __call__(self, scope, receive=None, send=None):\n # type: (Any, Any, Any) -> Any\n if receive is None or send is None:\n\n async def run_asgi2(receive, send):\n # type: (Any, Any) -> Any\n return await self._run_app(\n scope, lambda: self.app(scope)(receive, send)\n )\n\n return run_asgi2\n else:\n return self._run_app(scope, lambda: self.app(scope, receive, send))\n\n async def _run_app(self, scope, callback):\n # type: (Any, Any) -> Any\n if _asgi_middleware_applied.get(False):\n return await callback()\n\n _asgi_middleware_applied.set(True)\n try:\n hub = Hub(Hub.current)\n with hub:\n with 
hub.configure_scope() as sentry_scope:\n sentry_scope.clear_breadcrumbs()\n sentry_scope._name = \"asgi\"\n processor = functools.partial(\n self.event_processor, asgi_scope=scope\n )\n sentry_scope.add_event_processor(processor)\n\n if scope[\"type\"] in (\"http\", \"websocket\"):\n span = Span.continue_from_headers(dict(scope[\"headers\"]))\n span.op = \"{}.server\".format(scope[\"type\"])\n else:\n span = Span()\n span.op = \"asgi.server\"\n\n span.set_tag(\"asgi.type\", scope[\"type\"])\n span.transaction = \"generic ASGI request\"\n\n with hub.start_span(span) as span:\n try:\n return await callback()\n except Exception as exc:\n _capture_exception(hub, exc)\n raise exc from None\n finally:\n _asgi_middleware_applied.set(False)\n\n def event_processor(self, event, hint, asgi_scope):\n # type: (Event, Hint, Any) -> Optional[Event]\n request_info = event.get(\"request\", {})\n\n if asgi_scope[\"type\"] in (\"http\", \"websocket\"):\n request_info[\"url\"] = self.get_url(asgi_scope)\n request_info[\"method\"] = asgi_scope[\"method\"]\n request_info[\"headers\"] = _filter_headers(self.get_headers(asgi_scope))\n request_info[\"query_string\"] = self.get_query(asgi_scope)\n\n if asgi_scope.get(\"client\") and _should_send_default_pii():\n request_info[\"env\"] = {\"REMOTE_ADDR\": asgi_scope[\"client\"][0]}\n\n if asgi_scope.get(\"endpoint\"):\n # Webframeworks like Starlette mutate the ASGI env once routing is\n # done, which is sometime after the request has started. If we have\n # an endpoint, overwrite our path-based transaction name.\n event[\"transaction\"] = self.get_transaction(asgi_scope)\n\n event[\"request\"] = request_info\n\n return event\n\n def get_url(self, scope):\n # type: (Any) -> str\n \"\"\"\n Extract URL from the ASGI scope, without also including the querystring.\n \"\"\"\n scheme = scope.get(\"scheme\", \"http\")\n server = scope.get(\"server\", None)\n path = scope.get(\"root_path\", \"\") + scope[\"path\"]\n\n for key, value in scope[\"headers\"]:\n if key == b\"host\":\n host_header = value.decode(\"latin-1\")\n return \"%s://%s%s\" % (scheme, host_header, path)\n\n if server is not None:\n host, port = server\n default_port = {\"http\": 80, \"https\": 443, \"ws\": 80, \"wss\": 443}[scheme]\n if port != default_port:\n return \"%s://%s:%s%s\" % (scheme, host, port, path)\n return \"%s://%s%s\" % (scheme, host, path)\n return path\n\n def get_query(self, scope):\n # type: (Any) -> Any\n \"\"\"\n Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n return urllib.parse.unquote(scope[\"query_string\"].decode(\"latin-1\"))\n\n def get_headers(self, scope):\n # type: (Any) -> Dict[str, Any]\n \"\"\"\n Extract headers from the ASGI scope, in the format that the Sentry protocol expects.\n \"\"\"\n headers = {} # type: Dict[str, str]\n for raw_key, raw_value in scope[\"headers\"]:\n key = raw_key.decode(\"latin-1\")\n value = raw_value.decode(\"latin-1\")\n if key in headers:\n headers[key] = headers[key] + \", \" + value\n else:\n headers[key] = value\n return headers\n\n def get_transaction(self, scope):\n # type: (Any) -> Optional[str]\n \"\"\"\n Return a transaction string to identify the routed endpoint.\n \"\"\"\n return transaction_from_function(scope[\"endpoint\"])\n", "path": "sentry_sdk/integrations/asgi.py"}]}
| 2,826 | 690 |
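A standalone sketch of the ASGI2/ASGI3 detection heuristic that the golden diff above introduces (`_looks_like_asgi3`). The helper name and the toy apps below are illustrative only — they are not part of the Sentry SDK — but the branching logic mirrors the patch, which is why the middleware can bind `__call__` to either the two-step ASGI2 path or the direct ASGI3 path at construction time:

```python
import asyncio
import inspect


def looks_like_asgi3(app):
    # Same heuristic as the patch: ASGI3 callables are coroutine-based,
    # ASGI2 apps are plain callables that return a coroutine "instance".
    if inspect.isclass(app):
        return hasattr(app, "__await__")
    if inspect.isfunction(app):
        return asyncio.iscoroutinefunction(app)
    call = getattr(app, "__call__", None)
    return asyncio.iscoroutinefunction(call)


def asgi2_app(scope):  # ASGI2: app(scope) returns the request handler
    async def instance(receive, send):
        await send({"type": "http.response.body", "body": b"ok"})
    return instance


async def asgi3_app(scope, receive, send):  # ASGI3: single coroutine callable
    await send({"type": "http.response.body", "body": b"ok"})


print(looks_like_asgi3(asgi2_app))  # False -> wrapped via the ASGI2 code path
print(looks_like_asgi3(asgi3_app))  # True  -> awaited directly with (scope, receive, send)
```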
gh_patches_debug_15745
|
rasdani/github-patches
|
git_diff
|
pwndbg__pwndbg-874
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pwndbg gets stuck in GDB 10.1
<!--
Before reporting a new issue, make sure that we do not have any duplicates already open.
If there is one it might be good to take part in the discussion there.
Please make sure you have checked that the issue persists on LATEST pwndbg version.
Below is a template for BUG REPORTS.
Don't include it if this is a FEATURE REQUEST.
-->
### Description
pwndbg gets stuck in GDB 10.1
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
### Steps to reproduce

<!--
What do we have to do to reproduce the problem?
If this is connected to particular C/asm code,
please provide the smallest C code that reproduces the issue.
-->
### My setup
<!--
Show us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).
NOTE: We are currently supporting only Ubuntu installations.
It is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).
If you would like to change this situation - help us improve pwndbg and support other distros!
This can be displayed in pwndbg through the `version` command.
If it is somehow unavailable, use:
* `show version` - for gdb
* `py import sys; print(sys.version)` - for python
* pwndbg version/git commit id
-->
```
Gdb: 10.1
Python: 3.8.6 (default, Sep 30 2020, 04:00:38) [GCC 10.2.0]
Pwndbg: 1.1.0
Capstone: 4.0.1024
Unicorn: 1.0.2
```
</issue>
<code>
[start of pwndbg/heap/__init__.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import pwndbg.heap.heap
5 import pwndbg.symbol
6
7 current = None
8
9 heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')
10
11 @pwndbg.events.new_objfile
12 def update():
13 import pwndbg.heap.dlmalloc
14 import pwndbg.heap.ptmalloc
15
16 global current
17
18
19 if pwndbg.symbol.address('ptmalloc_init'):
20 current = pwndbg.heap.ptmalloc.Heap()
21
22 else:
23 # Default to ptmalloc heap for now until
24 # there are more implementations
25 current = pwndbg.heap.ptmalloc.Heap()
26
[end of pwndbg/heap/__init__.py]
[start of pwndbg/heap/dlmalloc.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 import gdb
5
6 import pwndbg.events
7 import pwndbg.typeinfo
8
9
10 class Heap(pwndbg.heap.heap.BaseHeap):
11 pass
12
[end of pwndbg/heap/dlmalloc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pwndbg/heap/__init__.py b/pwndbg/heap/__init__.py
--- a/pwndbg/heap/__init__.py
+++ b/pwndbg/heap/__init__.py
@@ -8,18 +8,8 @@
heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')
[email protected]_objfile
[email protected]
def update():
- import pwndbg.heap.dlmalloc
import pwndbg.heap.ptmalloc
-
global current
-
-
- if pwndbg.symbol.address('ptmalloc_init'):
- current = pwndbg.heap.ptmalloc.Heap()
-
- else:
- # Default to ptmalloc heap for now until
- # there are more implementations
- current = pwndbg.heap.ptmalloc.Heap()
+ current = pwndbg.heap.ptmalloc.Heap()
diff --git a/pwndbg/heap/dlmalloc.py b/pwndbg/heap/dlmalloc.py
deleted file mode 100644
--- a/pwndbg/heap/dlmalloc.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import gdb
-
-import pwndbg.events
-import pwndbg.typeinfo
-
-
-class Heap(pwndbg.heap.heap.BaseHeap):
- pass
|
{"golden_diff": "diff --git a/pwndbg/heap/__init__.py b/pwndbg/heap/__init__.py\n--- a/pwndbg/heap/__init__.py\n+++ b/pwndbg/heap/__init__.py\n@@ -8,18 +8,8 @@\n \n heap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n \[email protected]_objfile\[email protected]\n def update():\n- import pwndbg.heap.dlmalloc\n import pwndbg.heap.ptmalloc\n-\n global current\n-\n-\n- if pwndbg.symbol.address('ptmalloc_init'):\n- current = pwndbg.heap.ptmalloc.Heap()\n-\n- else:\n- # Default to ptmalloc heap for now until\n- # there are more implementations\n- current = pwndbg.heap.ptmalloc.Heap()\n+ current = pwndbg.heap.ptmalloc.Heap()\ndiff --git a/pwndbg/heap/dlmalloc.py b/pwndbg/heap/dlmalloc.py\ndeleted file mode 100644\n--- a/pwndbg/heap/dlmalloc.py\n+++ /dev/null\n@@ -1,11 +0,0 @@\n-#!/usr/bin/env python\n-# -*- coding: utf-8 -*-\n-\n-import gdb\n-\n-import pwndbg.events\n-import pwndbg.typeinfo\n-\n-\n-class Heap(pwndbg.heap.heap.BaseHeap):\n- pass\n", "issue": "pwndbg gets stuck in GDB 10.1\n<!--\r\nBefore reporting a new issue, make sure that we do not have any duplicates already open.\r\nIf there is one it might be good to take part in the discussion there.\r\n\r\nPlease make sure you have checked that the issue persists on LATEST pwndbg version.\r\n\r\nBelow is a template for BUG REPORTS.\r\nDon't include it if this is a FEATURE REQUEST.\r\n-->\r\n\r\n\r\n### Description\r\npwndbg gets stuck in GDB 10.1\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\n### Steps to reproduce\r\n\r\n\r\n<!--\r\nWhat do we have to do to reproduce the problem?\r\nIf this is connected to particular C/asm code, \r\nplease provide the smallest C code that reproduces the issue.\r\n-->\r\n\r\n### My setup\r\n\r\n<!--\r\nShow us your gdb/python/pwndbg/OS/IDA Pro version (depending on your case).\r\n\r\nNOTE: We are currently supporting only Ubuntu installations.\r\nIt is known that pwndbg is not fully working e.g. on Arch Linux (the heap stuff is not working there).\r\nIf you would like to change this situation - help us improving pwndbg and supporting other distros!\r\n\r\nThis can be displayed in pwndbg through `version` command.\r\n\r\nIf it is somehow unavailable, use:\r\n* `show version` - for gdb\r\n* `py import sys; print(sys.version)` - for python\r\n* pwndbg version/git commit id\r\n-->\r\n```\r\nGdb: 10.1\r\nPython: 3.8.6 (default, Sep 30 2020, 04:00:38) [GCC 10.2.0]\r\nPwndbg: 1.1.0\r\nCapstone: 4.0.1024\r\nUnicorn: 1.0.2\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport pwndbg.heap.heap\nimport pwndbg.symbol\n\ncurrent = None\n\nheap_chain_limit = pwndbg.config.Parameter('heap-dereference-limit', 8, 'number of bins to dereference')\n\[email protected]_objfile\ndef update():\n import pwndbg.heap.dlmalloc\n import pwndbg.heap.ptmalloc\n\n global current\n\n\n if pwndbg.symbol.address('ptmalloc_init'):\n current = pwndbg.heap.ptmalloc.Heap()\n\n else:\n # Default to ptmalloc heap for now until\n # there are more implementations\n current = pwndbg.heap.ptmalloc.Heap()\n", "path": "pwndbg/heap/__init__.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.typeinfo\n\n\nclass Heap(pwndbg.heap.heap.BaseHeap):\n pass\n", "path": "pwndbg/heap/dlmalloc.py"}]}
| 1,312 | 326 |
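The fix for this row swaps the heap-resolution hook from `new_objfile` (which ran a symbol lookup on every loaded objfile and appears to be what stalled under GDB 10.1) to a single `start` hook that unconditionally selects the ptmalloc implementation. The snippet below is a hypothetical stand-in for pwndbg's event registry — it does not use the real GDB API — and only illustrates the difference between the two hook points:

```python
class Events(object):
    # Hypothetical stand-in for pwndbg.events, just enough to show the decorator pattern.
    def __init__(self):
        self.start_hooks = []
        self.new_objfile_hooks = []

    def start(self, fn):        # would fire once, when the inferior starts
        self.start_hooks.append(fn)
        return fn

    def new_objfile(self, fn):  # would fire for every objfile / shared library load
        self.new_objfile_hooks.append(fn)
        return fn


events = Events()
current = None


@events.start
def update():
    # Post-patch behaviour: pick the ptmalloc implementation once,
    # with no per-objfile symbol lookups on the hot path.
    global current
    current = "ptmalloc"


for hook in events.start_hooks:  # simulate the single start event
    hook()
print(current)  # -> ptmalloc
```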
gh_patches_debug_20387
|
rasdani/github-patches
|
git_diff
|
unionai-oss__pandera-416
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
improve contribution instructions and expose in the docs
#### Location of the documentation
NA
#### Documentation problem
Currently, the contribution documentation is in [github](https://github.com/pandera-dev/pandera/blob/master/.github/CONTRIBUTING.md) but it would be nice to also expose it in the online documentation.
Additionally, we also want to document the process of contributing:
Enhancements (New features)
- create a `feature/<my-feature>` or `feature/<issue-number>` branch
- make a pull request to `dev`
Bugs
- create a `bugfix/<my-fix>` or `bugfix/<issue-number>` branch
- make a pull request to `master`
Docs
- create a `docs/<my-docs>` or `docs/<issue-number>` branch
- make a pull request to `master`
</issue>
<code>
[start of docs/source/conf.py]
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # This file only contains a selection of the most common options. For a full
4 # list see the documentation:
5 # http://www.sphinx-doc.org/en/master/config
6
7 # -- Path setup --------------------------------------------------------------
8
9 import doctest
10 import logging as pylogging
11
12 # If extensions (or modules to document with autodoc) are in another directory,
13 # add these directories to sys.path here. If the directory is relative to the
14 # documentation root, use os.path.abspath to make it absolute, like shown here.
15 #
16 import os
17 import sys
18
19 from sphinx.util import logging
20
21 sys.path.insert(0, os.path.abspath("../../pandera"))
22
23
24 # -- Project information -----------------------------------------------------
25
26 project = "pandera"
27 copyright = "2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque"
28 author = "Niels Bantilan, Nigel Markey, Jean-Francois Zinque"
29
30
31 # -- General configuration ---------------------------------------------------
32
33 # Add any Sphinx extension module names here, as strings. They can be
34 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 # ones.
36 extensions = [
37 "sphinx.ext.autodoc",
38 "sphinx.ext.autosummary",
39 "sphinx.ext.intersphinx",
40 "sphinx.ext.doctest",
41 "sphinx_autodoc_typehints",
42 "sphinx.ext.viewcode",
43 ]
44
45 doctest_global_setup = """
46 import sys
47 import pandas as pd
48 import numpy as np
49 from packaging import version
50 pd.options.display.max_columns = None # For Travis on macOS
51 pd.options.display.max_rows = None # For Travis on macOS
52
53 try:
54 import hypothesis
55 except ImportError:
56 SKIP_STRATEGY = True
57 else:
58 SKIP_STRATEGY = False
59
60 SKIP = sys.version_info < (3, 6)
61 PY36 = sys.version_info < (3, 7)
62 SKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36
63 """
64
65 doctest_default_flags = (
66 0
67 | doctest.DONT_ACCEPT_TRUE_FOR_1
68 | doctest.ELLIPSIS
69 | doctest.IGNORE_EXCEPTION_DETAIL
70 | doctest.NORMALIZE_WHITESPACE
71 )
72
73 # Add any paths that contain templates here, relative to this directory.
74 templates_path = ["_templates"]
75
76 # The master toctree document.
77 master_doc = "index"
78
79 # List of patterns, relative to source directory, that match files and
80 # directories to ignore when looking for source files.
81 # This pattern also affects html_static_path and html_extra_path.
82 exclude_patterns = []
83
84 autoclass_content = "both"
85 pygments_style = None
86
87 autodoc_default_options = {
88 # 'special-members': '__call__',
89 "undoc-members": False,
90 # 'exclude-members': '__weakref__'
91 }
92
93 # -- Options for HTML output -------------------------------------------------
94
95 # The theme to use for HTML and HTML Help pages. See the documentation for
96 # a list of builtin themes.
97 #
98 html_theme = "sphinx_rtd_theme"
99
100 # Theme options are theme-specific and customize the look and feel of a theme
101 # further. For a list of options available for each theme, see the
102 # documentation.
103
104 html_logo = "_static/pandera-banner-white.png"
105 html_theme_options = {
106 "collapse_navigation": False,
107 "logo_only": True,
108 "analytics_id": "UA-71018060-2",
109 }
110
111 # Add any paths that contain custom static files (such as style sheets) here,
112 # relative to this directory. They are copied after the builtin static files,
113 # so a file named "default.css" will overwrite the builtin "default.css".
114 html_static_path = ["_static"]
115
116 rst_prolog = """
117 .. role:: red
118 .. role:: green
119 """
120
121 autosummary_generate = ["API_reference.rst"]
122 autosummary_filename_map = {
123 "pandera.Check": "pandera.Check",
124 "pandera.check": "pandera.check_decorator",
125 }
126
127 intersphinx_mapping = {
128 "python": ("https://docs.python.org/3/", None),
129 "numpy": ("https://docs.scipy.org/doc/numpy/", None),
130 "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
131 }
132
133
134 # this is a workaround to filter out forward reference issue in
135 # sphinx_autodoc_typehints
136 class FilterPandasTypeAnnotationWarning(pylogging.Filter):
137 def filter(self, record: pylogging.LogRecord) -> bool:
138 # You probably should make this check more specific by checking
139 # that dataclass name is in the message, so that you don't filter out
140 # other meaningful warnings
141 return not record.getMessage().startswith(
142 "Cannot resolve forward reference in type annotations of "
143 '"pandera.typing.DataFrame"'
144 )
145
146
147 logging.getLogger("sphinx_autodoc_typehints").logger.addFilter(
148 FilterPandasTypeAnnotationWarning()
149 )
150
151
152 def setup(app):
153 app.add_css_file("default.css")
154
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,6 +14,7 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
+import shutil
import sys
from sphinx.util import logging
@@ -40,6 +41,7 @@
"sphinx.ext.doctest",
"sphinx_autodoc_typehints",
"sphinx.ext.viewcode",
+ "recommonmark",
]
doctest_global_setup = """
@@ -70,6 +72,19 @@
| doctest.NORMALIZE_WHITESPACE
)
+source_suffix = {
+ ".rst": "restructuredtext",
+ ".md": "markdown",
+}
+
+# copy CONTRIBUTING.md docs into source directory
+shutil.copyfile(
+ os.path.join(
+ os.path.dirname(__file__), "..", "..", ".github", "CONTRIBUTING.md"
+ ),
+ "CONTRIBUTING.md",
+)
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -14,6 +14,7 @@\n # documentation root, use os.path.abspath to make it absolute, like shown here.\n #\n import os\n+import shutil\n import sys\n \n from sphinx.util import logging\n@@ -40,6 +41,7 @@\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n+ \"recommonmark\",\n ]\n \n doctest_global_setup = \"\"\"\n@@ -70,6 +72,19 @@\n | doctest.NORMALIZE_WHITESPACE\n )\n \n+source_suffix = {\n+ \".rst\": \"restructuredtext\",\n+ \".md\": \"markdown\",\n+}\n+\n+# copy CONTRIBUTING.md docs into source directory\n+shutil.copyfile(\n+ os.path.join(\n+ os.path.dirname(__file__), \"..\", \"..\", \".github\", \"CONTRIBUTING.md\"\n+ ),\n+ \"CONTRIBUTING.md\",\n+)\n+\n # Add any paths that contain templates here, relative to this directory.\n templates_path = [\"_templates\"]\n", "issue": "improve contribution instructions and expose in the docs\n#### Location of the documentation\r\n\r\nNA\r\n\r\n#### Documentation problem\r\n\r\nCurrently, the contribution documentation is in [github](https://github.com/pandera-dev/pandera/blob/master/.github/CONTRIBUTING.md) but it would be nice to also expose it in the online documentation.\r\n\r\nAdditionally, we also want to document the process of contributing:\r\n\r\nEnhancements (New features)\r\n- create a `feature/<my-feature>` or `feature/<issue-number>` branch\r\n- make a pull request to `dev`\r\n\r\nBugs\r\n- create a `bugfix/<my-fix>` or `bugfix/<issue-number>` branch\r\n- make a pull request to `master`\r\n\r\nDocs\r\n- create a `docs/<my-docs>` or `docs/<issue-number>` branch\r\n- make a pull request to `master`\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\nimport doctest\nimport logging as pylogging\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nfrom sphinx.util import logging\n\nsys.path.insert(0, os.path.abspath(\"../../pandera\"))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pandera\"\ncopyright = \"2019, Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\nauthor = \"Niels Bantilan, Nigel Markey, Jean-Francois Zinque\"\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.doctest\",\n \"sphinx_autodoc_typehints\",\n \"sphinx.ext.viewcode\",\n]\n\ndoctest_global_setup = \"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom packaging import version\npd.options.display.max_columns = None # For Travis on macOS\npd.options.display.max_rows = None # For Travis on macOS\n\ntry:\n import hypothesis\nexcept ImportError:\n SKIP_STRATEGY = True\nelse:\n SKIP_STRATEGY = False\n\nSKIP = sys.version_info < (3, 6)\nPY36 = sys.version_info < (3, 7)\nSKIP_PANDAS_LT_V1 = version.parse(pd.__version__).release < (1, 0) or PY36\n\"\"\"\n\ndoctest_default_flags = (\n 0\n | doctest.DONT_ACCEPT_TRUE_FOR_1\n | doctest.ELLIPSIS\n | doctest.IGNORE_EXCEPTION_DETAIL\n | doctest.NORMALIZE_WHITESPACE\n)\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\nautoclass_content = \"both\"\npygments_style = None\n\nautodoc_default_options = {\n # 'special-members': '__call__',\n \"undoc-members\": False,\n # 'exclude-members': '__weakref__'\n}\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_logo = \"_static/pandera-banner-white.png\"\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"logo_only\": True,\n \"analytics_id\": \"UA-71018060-2\",\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\nrst_prolog = \"\"\"\n.. role:: red\n.. role:: green\n\"\"\"\n\nautosummary_generate = [\"API_reference.rst\"]\nautosummary_filename_map = {\n \"pandera.Check\": \"pandera.Check\",\n \"pandera.check\": \"pandera.check_decorator\",\n}\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n}\n\n\n# this is a workaround to filter out forward reference issue in\n# sphinx_autodoc_typehints\nclass FilterPandasTypeAnnotationWarning(pylogging.Filter):\n def filter(self, record: pylogging.LogRecord) -> bool:\n # You probably should make this check more specific by checking\n # that dataclass name is in the message, so that you don't filter out\n # other meaningful warnings\n return not record.getMessage().startswith(\n \"Cannot resolve forward reference in type annotations of \"\n '\"pandera.typing.DataFrame\"'\n )\n\n\nlogging.getLogger(\"sphinx_autodoc_typehints\").logger.addFilter(\n FilterPandasTypeAnnotationWarning()\n)\n\n\ndef setup(app):\n app.add_css_file(\"default.css\")\n", "path": "docs/source/conf.py"}]}
| 2,170 | 259 |
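Condensed sketch of what the golden diff above adds to `docs/source/conf.py`: register Markdown parsing via `recommonmark` and copy the canonical `.github/CONTRIBUTING.md` into the Sphinx source tree at build time, so a single file backs both the GitHub UI and the rendered docs. The existence guard is added here only to keep the fragment safe to run outside the repository (the patch copies unconditionally), and a toctree entry for the copied page is assumed but not shown in the diff:

```python
# docs/source/conf.py (fragment)
import os
import shutil

extensions = [
    # ...existing Sphinx extensions...
    "recommonmark",  # lets Sphinx parse Markdown sources such as CONTRIBUTING.md
]

source_suffix = {
    ".rst": "restructuredtext",
    ".md": "markdown",
}

# Copy the GitHub contributing guide next to the docs sources at build time.
contributing_src = os.path.join(
    os.path.dirname(__file__), "..", "..", ".github", "CONTRIBUTING.md"
)
if os.path.exists(contributing_src):  # guard for this sketch only
    shutil.copyfile(contributing_src, "CONTRIBUTING.md")
```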
gh_patches_debug_14934
|
rasdani/github-patches
|
git_diff
|
hylang__hy-932
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multiple implementations overwrite each others' entrypoint hooks
It's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.
Sorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the "Software"),
6 # to deal in the Software without restriction, including without limitation
7 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 # and/or sell copies of the Software, and to permit persons to whom the
9 # Software is furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice shall be included in
12 # all copies or substantial portions of the Software.
13 #
14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 # DEALINGS IN THE SOFTWARE.
21
22 import os
23 import re
24 import sys
25
26 from setuptools import find_packages, setup
27
28 PKG = "hy"
29 VERSIONFILE = os.path.join(PKG, "version.py")
30 verstr = "unknown"
31 try:
32 verstrline = open(VERSIONFILE, "rt").read()
33 except EnvironmentError:
34 pass # Okay, there is no version file.
35 else:
36 VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
37 mo = re.search(VSRE, verstrline, re.M)
38 if mo:
39 __version__ = mo.group(1)
40 else:
41 msg = "if %s.py exists, it is required to be well-formed" % VERSIONFILE
42 raise RuntimeError(msg)
43
44 long_description = """Hy is a Python <--> Lisp layer. It helps
45 make things work nicer, and lets Python and the Hy lisp variant play
46 nice together. """
47
48 install_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']
49 if sys.version_info[:2] < (2, 7):
50 install_requires.append('argparse>=1.2.1')
51 install_requires.append('importlib>=1.0.2')
52 if os.name == 'nt':
53 install_requires.append('pyreadline==2.0')
54
55 setup(
56 name=PKG,
57 version=__version__,
58 install_requires=install_requires,
59 entry_points={
60 'console_scripts': [
61 'hy = hy.cmdline:hy_main',
62 'hyc = hy.cmdline:hyc_main',
63 'hy2py = hy.cmdline:hy2py_main',
64 ]
65 },
66 packages=find_packages(exclude=['tests*']),
67 package_data={
68 'hy.contrib': ['*.hy'],
69 'hy.core': ['*.hy'],
70 },
71 author="Paul Tagliamonte",
72 author_email="[email protected]",
73 long_description=long_description,
74 description='Lisp and Python love each other.',
75 license="Expat",
76 url="http://hylang.org/",
77 platforms=['any'],
78 classifiers=[
79 "Development Status :: 4 - Beta",
80 "Intended Audience :: Developers",
81 "License :: DFSG approved",
82 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh.
83 "Operating System :: OS Independent",
84 "Programming Language :: Lisp",
85 "Programming Language :: Python",
86 "Programming Language :: Python :: 2",
87 "Programming Language :: Python :: 2.6",
88 "Programming Language :: Python :: 2.7",
89 "Programming Language :: Python :: 3",
90 "Programming Language :: Python :: 3.3",
91 "Programming Language :: Python :: 3.4",
92 "Topic :: Software Development :: Code Generators",
93 "Topic :: Software Development :: Compilers",
94 "Topic :: Software Development :: Libraries",
95 ]
96 )
97
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -52,6 +52,8 @@
if os.name == 'nt':
install_requires.append('pyreadline==2.0')
+ver = sys.version_info[0]
+
setup(
name=PKG,
version=__version__,
@@ -59,8 +61,11 @@
entry_points={
'console_scripts': [
'hy = hy.cmdline:hy_main',
+ 'hy%d = hy.cmdline:hy_main' % ver,
'hyc = hy.cmdline:hyc_main',
+ 'hyc%d = hy.cmdline:hyc_main' % ver,
'hy2py = hy.cmdline:hy2py_main',
+ 'hy2py%d = hy.cmdline:hy2py_main' % ver,
]
},
packages=find_packages(exclude=['tests*']),
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,6 +52,8 @@\n if os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n \n+ver = sys.version_info[0]\n+\n setup(\n name=PKG,\n version=__version__,\n@@ -59,8 +61,11 @@\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n+ 'hy%d = hy.cmdline:hy_main' % ver,\n 'hyc = hy.cmdline:hyc_main',\n+ 'hyc%d = hy.cmdline:hyc_main' % ver,\n 'hy2py = hy.cmdline:hy2py_main',\n+ 'hy2py%d = hy.cmdline:hy2py_main' % ver,\n ]\n },\n packages=find_packages(exclude=['tests*']),\n", "issue": "Multiple implementations overwrite each others' entrypoint hooks\nIt's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.\n\nSorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.\n\nMultiple implementations overwrite each others' entrypoint hooks\nIt's not uncommon to find Python 2 and Python 3 coexisting on the same system, and it's not unreasonable for the user to also want Hy for both Pythons. However, where Python handles this gracefully by providing separate executables (`python2` and `python3` alongside `python`, `pip2` and `pip3` alongside `pip` etc) Hy simply bulldozes an existing `hy`, `hyc` etc.\n\nSorry if this has already been reported; I tried to look for previous issues but nothing much came to mind when I was trying to play guess-the-keyword.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) 2012, 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nPKG = \"hy\"\nVERSIONFILE = os.path.join(PKG, \"version.py\")\nverstr = \"unknown\"\ntry:\n verstrline = open(VERSIONFILE, \"rt\").read()\nexcept EnvironmentError:\n pass # Okay, there is no version file.\nelse:\n VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n mo = re.search(VSRE, verstrline, re.M)\n if mo:\n __version__ = mo.group(1)\n else:\n msg = \"if %s.py exists, it is required to be well-formed\" % VERSIONFILE\n raise RuntimeError(msg)\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\ninstall_requires = ['rply>=0.7.0', 'astor>=0.5', 'clint>=0.4']\nif sys.version_info[:2] < (2, 7):\n install_requires.append('argparse>=1.2.1')\n install_requires.append('importlib>=1.0.2')\nif os.name == 'nt':\n install_requires.append('pyreadline==2.0')\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy.contrib': ['*.hy'],\n 'hy.core': ['*.hy'],\n },\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}]}
| 1,866 | 203 |
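The patch for this row suffixes each console script with the major version of the interpreter that runs `setup.py`, so Python 2 and Python 3 installs stop clobbering each other's launchers. A minimal standalone rendering of that idea follows; note that the unversioned names (`hy`, `hyc`, `hy2py`) still collide between installs — only the suffixed variants coexist:

```python
import sys

ver = sys.version_info[0]  # major version of the interpreter running setup.py

entry_points = {
    "console_scripts": [
        "hy = hy.cmdline:hy_main",
        "hy%d = hy.cmdline:hy_main" % ver,        # installs hy2 or hy3
        "hyc = hy.cmdline:hyc_main",
        "hyc%d = hy.cmdline:hyc_main" % ver,      # hyc2 or hyc3
        "hy2py = hy.cmdline:hy2py_main",
        "hy2py%d = hy.cmdline:hy2py_main" % ver,  # hy2py2 or hy2py3
    ]
}

for script in entry_points["console_scripts"]:
    print(script)
```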
gh_patches_debug_3001
|
rasdani/github-patches
|
git_diff
|
Bitmessage__PyBitmessage-726
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Trouble sending on multicore machines on 0.4.4
I've seen this on both an OSX box (8 cores) and a linux box (4 cores). I was only able to do the full reproduction on linux, as my `keys.dat` file prevented me from going back to 0.4.3 on the OSX box.
1. Check out v0.4.3.
2. Open top
3. Open bitmessage.
4. Send a message.
5. Processes will start up for each core in top to calculate the PoW more quickly. Message will send.
6. Close bitmessage.
7. Check out `ProtoV3`
8. Send a message.
9. Processes will fire up in top. They'll consume 100% cpu for a few minutes. One by one, the CPU usage on each process will drop to zero.
10. The bitmessage app will still say that we're doing work to calculate the PoW. The message never sends.
</issue>
<code>
[start of src/bitmessagemain.py]
1 #!/usr/bin/env python2.7
2 # Copyright (c) 2012 Jonathan Warren
3 # Copyright (c) 2012 The Bitmessage developers
4 # Distributed under the MIT/X11 software license. See the accompanying
5 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
6
7 # Right now, PyBitmessage only support connecting to stream 1. It doesn't
8 # yet contain logic to expand into further streams.
9
10 # The software version variable is now held in shared.py
11
12
13 import sys
14 #Version check
15 #Older versions of Python don't support the print function while Python 3 doesn't
16 #like the print statement, so we use sys.stdout for the version check. After this
17 #check we can then use the print function in the remainder of this file. Currently
18 #in order to use logging, a lot of unnecessary code needs to be executed which could
19 #potentially render this version check useless. So logging won't be used here until
20 #there is a more efficient way to configure logging
21 if sys.hexversion >= 0x3000000:
22 msg = "PyBitmessage does not support Python 3. Python 2.7.3 or later is required. Your version: %s" % sys.version
23 #logger.critical(msg)
24 sys.stdout.write(msg)
25 sys.exit(0)
26 if sys.hexversion < 0x20703F0:
27 msg = "You should use Python 2.7.3 or greater (but not Python 3). Your version: %s" % sys.version
28 #logger.critical(msg)
29 sys.stdout.write(msg)
30 sys.exit(0)
31
32 import signal # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.
33 # The next 3 are used for the API
34 import singleton
35 import os
36 import socket
37 import ctypes
38 from struct import pack
39
40 from SimpleXMLRPCServer import SimpleXMLRPCServer
41 from api import MySimpleXMLRPCRequestHandler
42 from helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections
43
44 import shared
45 from helper_sql import sqlQuery
46 import threading
47
48 # Classes
49 #from helper_sql import *
50 #from class_sqlThread import *
51 from class_sqlThread import sqlThread
52 from class_singleCleaner import singleCleaner
53 #from class_singleWorker import *
54 from class_objectProcessor import objectProcessor
55 from class_outgoingSynSender import outgoingSynSender
56 from class_singleListener import singleListener
57 from class_singleWorker import singleWorker
58 #from class_addressGenerator import *
59 from class_addressGenerator import addressGenerator
60 from debug import logger
61
62 # Helper Functions
63 import helper_bootstrap
64 import helper_generic
65
66 from subprocess import call
67 import time
68
69
70 def connectToStream(streamNumber):
71 shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'
72 selfInitiatedConnections[streamNumber] = {}
73 shared.inventorySets[streamNumber] = set()
74 queryData = sqlQuery('''SELECT hash FROM inventory WHERE streamnumber=?''', streamNumber)
75 for row in queryData:
76 shared.inventorySets[streamNumber].add(row[0])
77
78
79 if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():
80 # Some XP and Vista systems can only have 10 outgoing connections at a time.
81 maximumNumberOfHalfOpenConnections = 9
82 else:
83 maximumNumberOfHalfOpenConnections = 64
84 for i in range(maximumNumberOfHalfOpenConnections):
85 a = outgoingSynSender()
86 a.setup(streamNumber, selfInitiatedConnections)
87 a.start()
88
89 def _fixWinsock():
90 if not ('win32' in sys.platform) and not ('win64' in sys.platform):
91 return
92
93 # Python 2 on Windows doesn't define a wrapper for
94 # socket.inet_ntop but we can make one ourselves using ctypes
95 if not hasattr(socket, 'inet_ntop'):
96 addressToString = ctypes.windll.ws2_32.WSAAddressToStringA
97 def inet_ntop(family, host):
98 if family == socket.AF_INET:
99 if len(host) != 4:
100 raise ValueError("invalid IPv4 host")
101 host = pack("hH4s8s", socket.AF_INET, 0, host, "\0" * 8)
102 elif family == socket.AF_INET6:
103 if len(host) != 16:
104 raise ValueError("invalid IPv6 host")
105 host = pack("hHL16sL", socket.AF_INET6, 0, 0, host, 0)
106 else:
107 raise ValueError("invalid address family")
108 buf = "\0" * 64
109 lengthBuf = pack("I", len(buf))
110 addressToString(host, len(host), None, buf, lengthBuf)
111 return buf[0:buf.index("\0")]
112 socket.inet_ntop = inet_ntop
113
114 # Same for inet_pton
115 if not hasattr(socket, 'inet_pton'):
116 stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
117 def inet_pton(family, host):
118 buf = "\0" * 28
119 lengthBuf = pack("I", len(buf))
120 if stringToAddress(str(host),
121 int(family),
122 None,
123 buf,
124 lengthBuf) != 0:
125 raise socket.error("illegal IP address passed to inet_pton")
126 if family == socket.AF_INET:
127 return buf[4:8]
128 elif family == socket.AF_INET6:
129 return buf[8:24]
130 else:
131 raise ValueError("invalid address family")
132 socket.inet_pton = inet_pton
133
134 # These sockopts are needed on for IPv6 support
135 if not hasattr(socket, 'IPPROTO_IPV6'):
136 socket.IPPROTO_IPV6 = 41
137 if not hasattr(socket, 'IPV6_V6ONLY'):
138 socket.IPV6_V6ONLY = 27
139
140 # This thread, of which there is only one, runs the API.
141 class singleAPI(threading.Thread):
142
143 def __init__(self):
144 threading.Thread.__init__(self)
145
146 def run(self):
147 se = SimpleXMLRPCServer((shared.config.get('bitmessagesettings', 'apiinterface'), shared.config.getint(
148 'bitmessagesettings', 'apiport')), MySimpleXMLRPCRequestHandler, True, True)
149 se.register_introspection_functions()
150 se.serve_forever()
151
152 # This is a list of current connections (the thread pointers at least)
153 selfInitiatedConnections = {}
154
155 if shared.useVeryEasyProofOfWorkForTesting:
156 shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(
157 shared.networkDefaultProofOfWorkNonceTrialsPerByte / 16)
158 shared.networkDefaultPayloadLengthExtraBytes = int(
159 shared.networkDefaultPayloadLengthExtraBytes / 7000)
160
161 class Main:
162 def start(self, daemon=False):
163 _fixWinsock()
164
165 shared.daemon = daemon
166 # is the application already running? If yes then exit.
167 thisapp = singleton.singleinstance()
168
169 # get curses flag
170 curses = False
171 if '-c' in sys.argv:
172 curses = True
173
174 signal.signal(signal.SIGINT, helper_generic.signal_handler)
175 signal.signal(signal.SIGTERM, helper_generic.signal_handler)
176 # signal.signal(signal.SIGINT, signal.SIG_DFL)
177
178 helper_bootstrap.knownNodes()
179 # Start the address generation thread
180 addressGeneratorThread = addressGenerator()
181 addressGeneratorThread.daemon = True # close the main program even if there are threads left
182 addressGeneratorThread.start()
183
184 # Start the thread that calculates POWs
185 singleWorkerThread = singleWorker()
186 singleWorkerThread.daemon = True # close the main program even if there are threads left
187 singleWorkerThread.start()
188
189 # Start the SQL thread
190 sqlLookup = sqlThread()
191 sqlLookup.daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully.
192 sqlLookup.start()
193
194 # Start the thread that calculates POWs
195 objectProcessorThread = objectProcessor()
196 objectProcessorThread.daemon = False # DON'T close the main program even the thread remains. This thread checks the shutdown variable after processing each object.
197 objectProcessorThread.start()
198
199 # Start the cleanerThread
200 singleCleanerThread = singleCleaner()
201 singleCleanerThread.daemon = True # close the main program even if there are threads left
202 singleCleanerThread.start()
203
204 shared.reloadMyAddressHashes()
205 shared.reloadBroadcastSendersForWhichImWatching()
206
207 if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
208 try:
209 apiNotifyPath = shared.config.get(
210 'bitmessagesettings', 'apinotifypath')
211 except:
212 apiNotifyPath = ''
213 if apiNotifyPath != '':
214 with shared.printLock:
215 print('Trying to call', apiNotifyPath)
216
217 call([apiNotifyPath, "startingUp"])
218 singleAPIThread = singleAPI()
219 singleAPIThread.daemon = True # close the main program even if there are threads left
220 singleAPIThread.start()
221
222 connectToStream(1)
223
224 singleListenerThread = singleListener()
225 singleListenerThread.setup(selfInitiatedConnections)
226 singleListenerThread.daemon = True # close the main program even if there are threads left
227 singleListenerThread.start()
228
229 if daemon == False and shared.safeConfigGetBoolean('bitmessagesettings', 'daemon') == False:
230 if curses == False:
231 try:
232 from PyQt4 import QtCore, QtGui
233 except Exception as err:
234 print('PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \'PyQt Download\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon')
235 print('Error message:', err)
236 print('You can also run PyBitmessage with the new curses interface by providing \'-c\' as a commandline argument.')
237 os._exit(0)
238
239 import bitmessageqt
240 bitmessageqt.run()
241 else:
242 print('Running with curses')
243 import bitmessagecurses
244 bitmessagecurses.runwrapper()
245 else:
246 shared.config.remove_option('bitmessagesettings', 'dontconnect')
247
248 if daemon:
249 with shared.printLock:
250 print('Running as a daemon. The main program should exit this thread.')
251 else:
252 with shared.printLock:
253 print('Running as a daemon. You can use Ctrl+C to exit.')
254 while True:
255 time.sleep(20)
256
257 def stop(self):
258 with shared.printLock:
259 print('Stopping Bitmessage Deamon.')
260 shared.doCleanShutdown()
261
262
263 #TODO: nice function but no one is using this
264 def getApiAddress(self):
265 if not shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):
266 return None
267 address = shared.config.get('bitmessagesettings', 'apiinterface')
268 port = shared.config.getint('bitmessagesettings', 'apiport')
269 return {'address':address,'port':port}
270
271 if __name__ == "__main__":
272 mainprogram = Main()
273 mainprogram.start()
274
275
276 # So far, the creation of and management of the Bitmessage protocol and this
277 # client is a one-man operation. Bitcoin tips are quite appreciated.
278 # 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u
279
[end of src/bitmessagemain.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py
--- a/src/bitmessagemain.py
+++ b/src/bitmessagemain.py
@@ -172,7 +172,6 @@
curses = True
signal.signal(signal.SIGINT, helper_generic.signal_handler)
- signal.signal(signal.SIGTERM, helper_generic.signal_handler)
# signal.signal(signal.SIGINT, signal.SIG_DFL)
helper_bootstrap.knownNodes()
|
{"golden_diff": "diff --git a/src/bitmessagemain.py b/src/bitmessagemain.py\n--- a/src/bitmessagemain.py\n+++ b/src/bitmessagemain.py\n@@ -172,7 +172,6 @@\n curses = True\n \n signal.signal(signal.SIGINT, helper_generic.signal_handler)\n- signal.signal(signal.SIGTERM, helper_generic.signal_handler)\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n \n helper_bootstrap.knownNodes()\n", "issue": "Trouble sending on multicor machines on 0.4.4\nI've seen this on both an OSX box (8 cores) and a linux box (4 cores). I was only able to do the full reproducible on linux, as my `keys.dat` file prevented me from going back to 0.4.3 on the OSX box.\n1. Check out v0.4.3.\n2. Open top\n3. Open bitmessage.\n4. Send a message.\n5. Processes will start up for each core in top to calculate the PoW more quickly. Message will send.\n6. Close bitmessage.\n7. Check out `ProtoV3`\n8. Send a message.\n9. Processes will fire up in top. They'll consume 100% cpu for a few minutes. One by one, the CPU usage on each process will drop to zero.\n10. The bitmessage app will still say that we're doing work to calculate the PoW. The message never sends.\n\n", "before_files": [{"content": "#!/usr/bin/env python2.7\n# Copyright (c) 2012 Jonathan Warren\n# Copyright (c) 2012 The Bitmessage developers\n# Distributed under the MIT/X11 software license. See the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\n# Right now, PyBitmessage only support connecting to stream 1. It doesn't\n# yet contain logic to expand into further streams.\n\n# The software version variable is now held in shared.py\n\n\nimport sys\n#Version check\n#Older versions of Python don't support the print function while Python 3 doesn't\n#like the print statement, so we use sys.stdout for the version check. After this\n#check we can then use the print function in the remainder of this file. Currently\n#in order to use logging, a lot of unnecessary code needs to be executed which could\n#potentially render this version check useless. So logging won't be used here until\n#there is a more efficient way to configure logging\nif sys.hexversion >= 0x3000000:\n msg = \"PyBitmessage does not support Python 3. Python 2.7.3 or later is required. Your version: %s\" % sys.version\n #logger.critical(msg)\n sys.stdout.write(msg)\n sys.exit(0)\nif sys.hexversion < 0x20703F0:\n msg = \"You should use Python 2.7.3 or greater (but not Python 3). 
Your version: %s\" % sys.version\n #logger.critical(msg)\n sys.stdout.write(msg)\n sys.exit(0)\n\nimport signal # Used to capture a Ctrl-C keypress so that Bitmessage can shutdown gracefully.\n# The next 3 are used for the API\nimport singleton\nimport os\nimport socket\nimport ctypes\nfrom struct import pack\n\nfrom SimpleXMLRPCServer import SimpleXMLRPCServer\nfrom api import MySimpleXMLRPCRequestHandler\nfrom helper_startup import isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections\n\nimport shared\nfrom helper_sql import sqlQuery\nimport threading\n\n# Classes\n#from helper_sql import *\n#from class_sqlThread import *\nfrom class_sqlThread import sqlThread\nfrom class_singleCleaner import singleCleaner\n#from class_singleWorker import *\nfrom class_objectProcessor import objectProcessor\nfrom class_outgoingSynSender import outgoingSynSender\nfrom class_singleListener import singleListener\nfrom class_singleWorker import singleWorker\n#from class_addressGenerator import *\nfrom class_addressGenerator import addressGenerator\nfrom debug import logger\n\n# Helper Functions\nimport helper_bootstrap\nimport helper_generic\n\nfrom subprocess import call\nimport time\n \n\ndef connectToStream(streamNumber):\n shared.streamsInWhichIAmParticipating[streamNumber] = 'no data'\n selfInitiatedConnections[streamNumber] = {}\n shared.inventorySets[streamNumber] = set()\n queryData = sqlQuery('''SELECT hash FROM inventory WHERE streamnumber=?''', streamNumber)\n for row in queryData:\n shared.inventorySets[streamNumber].add(row[0])\n\n \n if isOurOperatingSystemLimitedToHavingVeryFewHalfOpenConnections():\n # Some XP and Vista systems can only have 10 outgoing connections at a time.\n maximumNumberOfHalfOpenConnections = 9\n else:\n maximumNumberOfHalfOpenConnections = 64\n for i in range(maximumNumberOfHalfOpenConnections):\n a = outgoingSynSender()\n a.setup(streamNumber, selfInitiatedConnections)\n a.start()\n\ndef _fixWinsock():\n if not ('win32' in sys.platform) and not ('win64' in sys.platform):\n return\n\n # Python 2 on Windows doesn't define a wrapper for\n # socket.inet_ntop but we can make one ourselves using ctypes\n if not hasattr(socket, 'inet_ntop'):\n addressToString = ctypes.windll.ws2_32.WSAAddressToStringA\n def inet_ntop(family, host):\n if family == socket.AF_INET:\n if len(host) != 4:\n raise ValueError(\"invalid IPv4 host\")\n host = pack(\"hH4s8s\", socket.AF_INET, 0, host, \"\\0\" * 8)\n elif family == socket.AF_INET6:\n if len(host) != 16:\n raise ValueError(\"invalid IPv6 host\")\n host = pack(\"hHL16sL\", socket.AF_INET6, 0, 0, host, 0)\n else:\n raise ValueError(\"invalid address family\")\n buf = \"\\0\" * 64\n lengthBuf = pack(\"I\", len(buf))\n addressToString(host, len(host), None, buf, lengthBuf)\n return buf[0:buf.index(\"\\0\")]\n socket.inet_ntop = inet_ntop\n\n # Same for inet_pton\n if not hasattr(socket, 'inet_pton'):\n stringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA\n def inet_pton(family, host):\n buf = \"\\0\" * 28\n lengthBuf = pack(\"I\", len(buf))\n if stringToAddress(str(host),\n int(family),\n None,\n buf,\n lengthBuf) != 0:\n raise socket.error(\"illegal IP address passed to inet_pton\")\n if family == socket.AF_INET:\n return buf[4:8]\n elif family == socket.AF_INET6:\n return buf[8:24]\n else:\n raise ValueError(\"invalid address family\")\n socket.inet_pton = inet_pton\n\n # These sockopts are needed on for IPv6 support\n if not hasattr(socket, 'IPPROTO_IPV6'):\n socket.IPPROTO_IPV6 = 41\n if not hasattr(socket, 'IPV6_V6ONLY'):\n 
socket.IPV6_V6ONLY = 27\n\n# This thread, of which there is only one, runs the API.\nclass singleAPI(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n\n def run(self):\n se = SimpleXMLRPCServer((shared.config.get('bitmessagesettings', 'apiinterface'), shared.config.getint(\n 'bitmessagesettings', 'apiport')), MySimpleXMLRPCRequestHandler, True, True)\n se.register_introspection_functions()\n se.serve_forever()\n\n# This is a list of current connections (the thread pointers at least)\nselfInitiatedConnections = {}\n\nif shared.useVeryEasyProofOfWorkForTesting:\n shared.networkDefaultProofOfWorkNonceTrialsPerByte = int(\n shared.networkDefaultProofOfWorkNonceTrialsPerByte / 16)\n shared.networkDefaultPayloadLengthExtraBytes = int(\n shared.networkDefaultPayloadLengthExtraBytes / 7000)\n\nclass Main:\n def start(self, daemon=False):\n _fixWinsock()\n\n shared.daemon = daemon\n # is the application already running? If yes then exit.\n thisapp = singleton.singleinstance()\n\n # get curses flag\n curses = False\n if '-c' in sys.argv:\n curses = True\n\n signal.signal(signal.SIGINT, helper_generic.signal_handler)\n signal.signal(signal.SIGTERM, helper_generic.signal_handler)\n # signal.signal(signal.SIGINT, signal.SIG_DFL)\n\n helper_bootstrap.knownNodes()\n # Start the address generation thread\n addressGeneratorThread = addressGenerator()\n addressGeneratorThread.daemon = True # close the main program even if there are threads left\n addressGeneratorThread.start()\n\n # Start the thread that calculates POWs\n singleWorkerThread = singleWorker()\n singleWorkerThread.daemon = True # close the main program even if there are threads left\n singleWorkerThread.start()\n\n # Start the SQL thread\n sqlLookup = sqlThread()\n sqlLookup.daemon = False # DON'T close the main program even if there are threads left. The closeEvent should command this thread to exit gracefully.\n sqlLookup.start()\n\n # Start the thread that calculates POWs\n objectProcessorThread = objectProcessor()\n objectProcessorThread.daemon = False # DON'T close the main program even the thread remains. This thread checks the shutdown variable after processing each object.\n objectProcessorThread.start()\n\n # Start the cleanerThread\n singleCleanerThread = singleCleaner()\n singleCleanerThread.daemon = True # close the main program even if there are threads left\n singleCleanerThread.start()\n\n shared.reloadMyAddressHashes()\n shared.reloadBroadcastSendersForWhichImWatching()\n\n if shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):\n try:\n apiNotifyPath = shared.config.get(\n 'bitmessagesettings', 'apinotifypath')\n except:\n apiNotifyPath = ''\n if apiNotifyPath != '':\n with shared.printLock:\n print('Trying to call', apiNotifyPath)\n\n call([apiNotifyPath, \"startingUp\"])\n singleAPIThread = singleAPI()\n singleAPIThread.daemon = True # close the main program even if there are threads left\n singleAPIThread.start()\n\n connectToStream(1)\n\n singleListenerThread = singleListener()\n singleListenerThread.setup(selfInitiatedConnections)\n singleListenerThread.daemon = True # close the main program even if there are threads left\n singleListenerThread.start()\n\n if daemon == False and shared.safeConfigGetBoolean('bitmessagesettings', 'daemon') == False:\n if curses == False:\n try:\n from PyQt4 import QtCore, QtGui\n except Exception as err:\n print('PyBitmessage requires PyQt unless you want to run it as a daemon and interact with it using the API. 
You can download PyQt from http://www.riverbankcomputing.com/software/pyqt/download or by searching Google for \\'PyQt Download\\'. If you want to run in daemon mode, see https://bitmessage.org/wiki/Daemon')\n print('Error message:', err)\n print('You can also run PyBitmessage with the new curses interface by providing \\'-c\\' as a commandline argument.')\n os._exit(0)\n\n import bitmessageqt\n bitmessageqt.run()\n else:\n print('Running with curses')\n import bitmessagecurses\n bitmessagecurses.runwrapper()\n else:\n shared.config.remove_option('bitmessagesettings', 'dontconnect')\n\n if daemon:\n with shared.printLock:\n print('Running as a daemon. The main program should exit this thread.')\n else:\n with shared.printLock:\n print('Running as a daemon. You can use Ctrl+C to exit.')\n while True:\n time.sleep(20)\n\n def stop(self):\n with shared.printLock:\n print('Stopping Bitmessage Deamon.')\n shared.doCleanShutdown()\n\n\n #TODO: nice function but no one is using this \n def getApiAddress(self):\n if not shared.safeConfigGetBoolean('bitmessagesettings', 'apienabled'):\n return None\n address = shared.config.get('bitmessagesettings', 'apiinterface')\n port = shared.config.getint('bitmessagesettings', 'apiport')\n return {'address':address,'port':port}\n\nif __name__ == \"__main__\":\n mainprogram = Main()\n mainprogram.start()\n\n\n# So far, the creation of and management of the Bitmessage protocol and this\n# client is a one-man operation. Bitcoin tips are quite appreciated.\n# 1H5XaDA6fYENLbknwZyjiYXYPQaFjjLX2u\n", "path": "src/bitmessagemain.py"}]}
| 4,000 | 101 |
gh_patches_debug_36430
|
rasdani/github-patches
|
git_diff
|
oppia__oppia-2670
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Split up the admin controller
The admin page is going to be the first de-facto example of how to split up a large controller into multiple directives and services. This will be used as a guide for other parts of the frontend refactoring project. The overall process used as part of this refactor is:
1. Minimize the HTML in the controller by reusing existing Jinja templates and directives as makes sense (this should only affect admin.html and not require any significant refactoring)
2. Extract major parts of the controller into isolated directives (and move dependent JS from the controller JS file to directive JS files)
3. Additional steps may need to be done to further split directives that are too large once they are pulled out of the controller
4. Functionality already implemented in the controller/directive JS files should be replaced with calls to existing services (possibly moving those services to the domain folder if they are in other pages/ directories)
5. Common functionality in the directive and controller JS files should be split into non-UI services, and tests should be added for these services
6. If these services need to be used by other pages, they should be moved to domain/{directory}
7. Finally, if domain objects are needed, they should be added to domain/{directory} and the services should be updated to use them

The admin page doesn't require all of the above steps, which means the collection editor and simple exploration editor will need to be used as examples of how some of the other steps look (like creating domain object factories).
</issue>
<code>
[start of core/controllers/admin.py]
1 # Copyright 2014 The Oppia Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS-IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Controllers for the admin view."""
16
17 import logging
18
19 import jinja2
20
21 from core import jobs
22 from core import jobs_registry
23 from core.controllers import base
24 from core.controllers import editor
25 from core.domain import collection_services
26 from core.domain import config_domain
27 from core.domain import config_services
28 from core.domain import exp_services
29 from core.domain import recommendations_services
30 from core.domain import rights_manager
31 from core.domain import rte_component_registry
32 from core.platform import models
33 import feconf
34 import utils
35
36 current_user_services = models.Registry.import_current_user_services()
37
38
39 def require_super_admin(handler):
40 """Decorator that checks if the current user is a super admin."""
41 def test_super_admin(self, **kwargs):
42 """Checks if the user is logged in and is a super admin."""
43 if not self.user_id:
44 self.redirect(
45 current_user_services.create_login_url(self.request.uri))
46 return
47 if not current_user_services.is_current_user_super_admin():
48 raise self.UnauthorizedUserException(
49 '%s is not a super admin of this application', self.user_id)
50 return handler(self, **kwargs)
51
52 return test_super_admin
53
54
55 class AdminPage(base.BaseHandler):
56 """Admin page shown in the App Engine admin console."""
57 @require_super_admin
58 def get(self):
59 """Handles GET requests."""
60 demo_exploration_ids = feconf.DEMO_EXPLORATIONS.keys()
61
62 recent_job_data = jobs.get_data_for_recent_jobs()
63 unfinished_job_data = jobs.get_data_for_unfinished_jobs()
64 for job in unfinished_job_data:
65 job['can_be_canceled'] = job['is_cancelable'] and any([
66 klass.__name__ == job['job_type']
67 for klass in jobs_registry.ONE_OFF_JOB_MANAGERS])
68
69 queued_or_running_job_types = set([
70 job['job_type'] for job in unfinished_job_data])
71 one_off_job_specs = [{
72 'job_type': klass.__name__,
73 'is_queued_or_running': (
74 klass.__name__ in queued_or_running_job_types)
75 } for klass in jobs_registry.ONE_OFF_JOB_MANAGERS]
76
77 continuous_computations_data = jobs.get_continuous_computations_info(
78 jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS)
79 for computation in continuous_computations_data:
80 if computation['last_started_msec']:
81 computation['human_readable_last_started'] = (
82 utils.get_human_readable_time_string(
83 computation['last_started_msec']))
84 if computation['last_stopped_msec']:
85 computation['human_readable_last_stopped'] = (
86 utils.get_human_readable_time_string(
87 computation['last_stopped_msec']))
88 if computation['last_finished_msec']:
89 computation['human_readable_last_finished'] = (
90 utils.get_human_readable_time_string(
91 computation['last_finished_msec']))
92
93 self.values.update({
94 'continuous_computations_data': continuous_computations_data,
95 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.iteritems()),
96 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.iteritems()),
97 'demo_exploration_ids': demo_exploration_ids,
98 'human_readable_current_time': (
99 utils.get_human_readable_time_string(
100 utils.get_current_time_in_millisecs())),
101 'one_off_job_specs': one_off_job_specs,
102 'recent_job_data': recent_job_data,
103 'rte_components_html': jinja2.utils.Markup(
104 rte_component_registry.Registry.get_html_for_all_components()),
105 'unfinished_job_data': unfinished_job_data,
106 'value_generators_js': jinja2.utils.Markup(
107 editor.get_value_generators_js()),
108 })
109
110 self.render_template('pages/admin/admin.html')
111
112
113 class AdminHandler(base.BaseHandler):
114 """Handler for the admin page."""
115
116 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
117
118 @require_super_admin
119 def get(self):
120 """Handles GET requests."""
121
122 self.render_json({
123 'config_properties': (
124 config_domain.Registry.get_config_property_schemas()),
125 })
126
127 @require_super_admin
128 def post(self):
129 """Handles POST requests."""
130 try:
131 if self.payload.get('action') == 'reload_exploration':
132 exploration_id = self.payload.get('exploration_id')
133 logging.info(
134 '[ADMIN] %s reloaded exploration %s' %
135 (self.user_id, exploration_id))
136 exp_services.load_demo(unicode(exploration_id))
137 rights_manager.release_ownership_of_exploration(
138 feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))
139 elif self.payload.get('action') == 'reload_collection':
140 collection_id = self.payload.get('collection_id')
141 logging.info(
142 '[ADMIN] %s reloaded collection %s' %
143 (self.user_id, collection_id))
144 collection_services.load_demo(unicode(collection_id))
145 rights_manager.release_ownership_of_collection(
146 feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))
147 elif self.payload.get('action') == 'clear_search_index':
148 exp_services.clear_search_index()
149 elif self.payload.get('action') == 'save_config_properties':
150 new_config_property_values = self.payload.get(
151 'new_config_property_values')
152 logging.info('[ADMIN] %s saved config property values: %s' %
153 (self.user_id, new_config_property_values))
154 for (name, value) in new_config_property_values.iteritems():
155 config_services.set_property(self.user_id, name, value)
156 elif self.payload.get('action') == 'revert_config_property':
157 config_property_id = self.payload.get('config_property_id')
158 logging.info('[ADMIN] %s reverted config property: %s' %
159 (self.user_id, config_property_id))
160 config_services.revert_property(
161 self.user_id, config_property_id)
162 elif self.payload.get('action') == 'start_new_job':
163 for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
164 if klass.__name__ == self.payload.get('job_type'):
165 klass.enqueue(klass.create_new())
166 break
167 elif self.payload.get('action') == 'cancel_job':
168 job_id = self.payload.get('job_id')
169 job_type = self.payload.get('job_type')
170 for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
171 if klass.__name__ == job_type:
172 klass.cancel(job_id, self.user_id)
173 break
174 elif self.payload.get('action') == 'start_computation':
175 computation_type = self.payload.get('computation_type')
176 for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
177 if klass.__name__ == computation_type:
178 klass.start_computation()
179 break
180 elif self.payload.get('action') == 'stop_computation':
181 computation_type = self.payload.get('computation_type')
182 for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
183 if klass.__name__ == computation_type:
184 klass.stop_computation(self.user_id)
185 break
186 elif self.payload.get('action') == 'upload_topic_similarities':
187 data = self.payload.get('data')
188 recommendations_services.update_topic_similarities(data)
189
190 self.render_json({})
191 except Exception as e:
192 self.render_json({'error': unicode(e)})
193 raise
194
195
196 class AdminJobOutput(base.BaseHandler):
197 """Retrieves job output to show on the admin page."""
198
199 GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
200
201 @require_super_admin
202 def get(self):
203 """Handles GET requests."""
204 job_id = self.request.get('job_id')
205 self.render_json({
206 'output': jobs.get_job_output(job_id)
207 })
208
209
210 class AdminTopicsCsvDownloadHandler(base.BaseHandler):
211 """Retrieves topic similarity data for download."""
212
213 @require_super_admin
214 def get(self):
215 self.response.headers['Content-Type'] = 'text/csv'
216 self.response.headers['Content-Disposition'] = (
217 'attachment; filename=topic_similarities.csv')
218 self.response.write(
219 recommendations_services.get_topic_similarities_as_csv())
220
[end of core/controllers/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/controllers/admin.py b/core/controllers/admin.py
--- a/core/controllers/admin.py
+++ b/core/controllers/admin.py
@@ -130,20 +130,10 @@
try:
if self.payload.get('action') == 'reload_exploration':
exploration_id = self.payload.get('exploration_id')
- logging.info(
- '[ADMIN] %s reloaded exploration %s' %
- (self.user_id, exploration_id))
- exp_services.load_demo(unicode(exploration_id))
- rights_manager.release_ownership_of_exploration(
- feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))
+ self._reload_exploration(exploration_id)
elif self.payload.get('action') == 'reload_collection':
collection_id = self.payload.get('collection_id')
- logging.info(
- '[ADMIN] %s reloaded collection %s' %
- (self.user_id, collection_id))
- collection_services.load_demo(unicode(collection_id))
- rights_manager.release_ownership_of_collection(
- feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))
+ self._reload_collection(collection_id)
elif self.payload.get('action') == 'clear_search_index':
exp_services.clear_search_index()
elif self.payload.get('action') == 'save_config_properties':
@@ -192,6 +182,28 @@
self.render_json({'error': unicode(e)})
raise
+ def _reload_exploration(self, exploration_id):
+ if feconf.DEV_MODE:
+ logging.info(
+ '[ADMIN] %s reloaded exploration %s' %
+ (self.user_id, exploration_id))
+ exp_services.load_demo(unicode(exploration_id))
+ rights_manager.release_ownership_of_exploration(
+ feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))
+ else:
+ raise Exception('Cannot reload an exploration in production.')
+
+ def _reload_collection(self, collection_id):
+ if feconf.DEV_MODE:
+ logging.info(
+ '[ADMIN] %s reloaded collection %s' %
+ (self.user_id, collection_id))
+ collection_services.load_demo(unicode(collection_id))
+ rights_manager.release_ownership_of_collection(
+ feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))
+ else:
+ raise Exception('Cannot reload a collection in production.')
+
class AdminJobOutput(base.BaseHandler):
"""Retrieves job output to show on the admin page."""
|
{"golden_diff": "diff --git a/core/controllers/admin.py b/core/controllers/admin.py\n--- a/core/controllers/admin.py\n+++ b/core/controllers/admin.py\n@@ -130,20 +130,10 @@\n try:\n if self.payload.get('action') == 'reload_exploration':\n exploration_id = self.payload.get('exploration_id')\n- logging.info(\n- '[ADMIN] %s reloaded exploration %s' %\n- (self.user_id, exploration_id))\n- exp_services.load_demo(unicode(exploration_id))\n- rights_manager.release_ownership_of_exploration(\n- feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n+ self._reload_exploration(exploration_id)\n elif self.payload.get('action') == 'reload_collection':\n collection_id = self.payload.get('collection_id')\n- logging.info(\n- '[ADMIN] %s reloaded collection %s' %\n- (self.user_id, collection_id))\n- collection_services.load_demo(unicode(collection_id))\n- rights_manager.release_ownership_of_collection(\n- feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n+ self._reload_collection(collection_id)\n elif self.payload.get('action') == 'clear_search_index':\n exp_services.clear_search_index()\n elif self.payload.get('action') == 'save_config_properties':\n@@ -192,6 +182,28 @@\n self.render_json({'error': unicode(e)})\n raise\n \n+ def _reload_exploration(self, exploration_id):\n+ if feconf.DEV_MODE:\n+ logging.info(\n+ '[ADMIN] %s reloaded exploration %s' %\n+ (self.user_id, exploration_id))\n+ exp_services.load_demo(unicode(exploration_id))\n+ rights_manager.release_ownership_of_exploration(\n+ feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n+ else:\n+ raise Exception('Cannot reload an exploration in production.')\n+\n+ def _reload_collection(self, collection_id):\n+ if feconf.DEV_MODE:\n+ logging.info(\n+ '[ADMIN] %s reloaded collection %s' %\n+ (self.user_id, collection_id))\n+ collection_services.load_demo(unicode(collection_id))\n+ rights_manager.release_ownership_of_collection(\n+ feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n+ else:\n+ raise Exception('Cannot reload a collection in production.')\n+\n \n class AdminJobOutput(base.BaseHandler):\n \"\"\"Retrieves job output to show on the admin page.\"\"\"\n", "issue": "Split up the admin controller\nThe admin page is going to be the first de-facto example of how to split up a large controller into multiple directives and services. This will be used as a guide for other parts of the frontend refactoring project. The overall process used as part of this refactor is:\n1. Minimize the HTML in the controller by reusing existing Jinja templates and directives as makes sense (this should only affect admin.html and not require any significant refactoring)\n2. Extract major parts of the controller into isolated directives (and move dependent JS from the controller JS file to directive JS files)\n3. Additional steps may need to be done to further split directives that are too large once they are pulled out of the controller\n4. Functionality already implemented in the controller/directive JS files should be replaced with calls to existing services (possibly moving those services to the domain folder if they are in other pages/ directories)\n5. Common functionality in the directive and controller JS files should be split into non-UI services\n Tests should be added for these services\n6. If these services need to be used by other pages, they should be moved to domain/{directory}\n7. 
Finally, if domain objects are needed they should be added to domain/{directory} and the services should be updated to use them\n\nThe admin page doesn't require all of the above steps, which means the collection editor and simple exploration editor will need to be used as examples for how some of other steps look (like creating domain object factories).\n\n", "before_files": [{"content": "# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Controllers for the admin view.\"\"\"\n\nimport logging\n\nimport jinja2\n\nfrom core import jobs\nfrom core import jobs_registry\nfrom core.controllers import base\nfrom core.controllers import editor\nfrom core.domain import collection_services\nfrom core.domain import config_domain\nfrom core.domain import config_services\nfrom core.domain import exp_services\nfrom core.domain import recommendations_services\nfrom core.domain import rights_manager\nfrom core.domain import rte_component_registry\nfrom core.platform import models\nimport feconf\nimport utils\n\ncurrent_user_services = models.Registry.import_current_user_services()\n\n\ndef require_super_admin(handler):\n \"\"\"Decorator that checks if the current user is a super admin.\"\"\"\n def test_super_admin(self, **kwargs):\n \"\"\"Checks if the user is logged in and is a super admin.\"\"\"\n if not self.user_id:\n self.redirect(\n current_user_services.create_login_url(self.request.uri))\n return\n if not current_user_services.is_current_user_super_admin():\n raise self.UnauthorizedUserException(\n '%s is not a super admin of this application', self.user_id)\n return handler(self, **kwargs)\n\n return test_super_admin\n\n\nclass AdminPage(base.BaseHandler):\n \"\"\"Admin page shown in the App Engine admin console.\"\"\"\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n demo_exploration_ids = feconf.DEMO_EXPLORATIONS.keys()\n\n recent_job_data = jobs.get_data_for_recent_jobs()\n unfinished_job_data = jobs.get_data_for_unfinished_jobs()\n for job in unfinished_job_data:\n job['can_be_canceled'] = job['is_cancelable'] and any([\n klass.__name__ == job['job_type']\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS])\n\n queued_or_running_job_types = set([\n job['job_type'] for job in unfinished_job_data])\n one_off_job_specs = [{\n 'job_type': klass.__name__,\n 'is_queued_or_running': (\n klass.__name__ in queued_or_running_job_types)\n } for klass in jobs_registry.ONE_OFF_JOB_MANAGERS]\n\n continuous_computations_data = jobs.get_continuous_computations_info(\n jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS)\n for computation in continuous_computations_data:\n if computation['last_started_msec']:\n computation['human_readable_last_started'] = (\n utils.get_human_readable_time_string(\n computation['last_started_msec']))\n if computation['last_stopped_msec']:\n computation['human_readable_last_stopped'] = (\n utils.get_human_readable_time_string(\n computation['last_stopped_msec']))\n if 
computation['last_finished_msec']:\n computation['human_readable_last_finished'] = (\n utils.get_human_readable_time_string(\n computation['last_finished_msec']))\n\n self.values.update({\n 'continuous_computations_data': continuous_computations_data,\n 'demo_collections': sorted(feconf.DEMO_COLLECTIONS.iteritems()),\n 'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.iteritems()),\n 'demo_exploration_ids': demo_exploration_ids,\n 'human_readable_current_time': (\n utils.get_human_readable_time_string(\n utils.get_current_time_in_millisecs())),\n 'one_off_job_specs': one_off_job_specs,\n 'recent_job_data': recent_job_data,\n 'rte_components_html': jinja2.utils.Markup(\n rte_component_registry.Registry.get_html_for_all_components()),\n 'unfinished_job_data': unfinished_job_data,\n 'value_generators_js': jinja2.utils.Markup(\n editor.get_value_generators_js()),\n })\n\n self.render_template('pages/admin/admin.html')\n\n\nclass AdminHandler(base.BaseHandler):\n \"\"\"Handler for the admin page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n\n self.render_json({\n 'config_properties': (\n config_domain.Registry.get_config_property_schemas()),\n })\n\n @require_super_admin\n def post(self):\n \"\"\"Handles POST requests.\"\"\"\n try:\n if self.payload.get('action') == 'reload_exploration':\n exploration_id = self.payload.get('exploration_id')\n logging.info(\n '[ADMIN] %s reloaded exploration %s' %\n (self.user_id, exploration_id))\n exp_services.load_demo(unicode(exploration_id))\n rights_manager.release_ownership_of_exploration(\n feconf.SYSTEM_COMMITTER_ID, unicode(exploration_id))\n elif self.payload.get('action') == 'reload_collection':\n collection_id = self.payload.get('collection_id')\n logging.info(\n '[ADMIN] %s reloaded collection %s' %\n (self.user_id, collection_id))\n collection_services.load_demo(unicode(collection_id))\n rights_manager.release_ownership_of_collection(\n feconf.SYSTEM_COMMITTER_ID, unicode(collection_id))\n elif self.payload.get('action') == 'clear_search_index':\n exp_services.clear_search_index()\n elif self.payload.get('action') == 'save_config_properties':\n new_config_property_values = self.payload.get(\n 'new_config_property_values')\n logging.info('[ADMIN] %s saved config property values: %s' %\n (self.user_id, new_config_property_values))\n for (name, value) in new_config_property_values.iteritems():\n config_services.set_property(self.user_id, name, value)\n elif self.payload.get('action') == 'revert_config_property':\n config_property_id = self.payload.get('config_property_id')\n logging.info('[ADMIN] %s reverted config property: %s' %\n (self.user_id, config_property_id))\n config_services.revert_property(\n self.user_id, config_property_id)\n elif self.payload.get('action') == 'start_new_job':\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:\n if klass.__name__ == self.payload.get('job_type'):\n klass.enqueue(klass.create_new())\n break\n elif self.payload.get('action') == 'cancel_job':\n job_id = self.payload.get('job_id')\n job_type = self.payload.get('job_type')\n for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:\n if klass.__name__ == job_type:\n klass.cancel(job_id, self.user_id)\n break\n elif self.payload.get('action') == 'start_computation':\n computation_type = self.payload.get('computation_type')\n for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:\n if klass.__name__ == computation_type:\n klass.start_computation()\n break\n 
elif self.payload.get('action') == 'stop_computation':\n computation_type = self.payload.get('computation_type')\n for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:\n if klass.__name__ == computation_type:\n klass.stop_computation(self.user_id)\n break\n elif self.payload.get('action') == 'upload_topic_similarities':\n data = self.payload.get('data')\n recommendations_services.update_topic_similarities(data)\n\n self.render_json({})\n except Exception as e:\n self.render_json({'error': unicode(e)})\n raise\n\n\nclass AdminJobOutput(base.BaseHandler):\n \"\"\"Retrieves job output to show on the admin page.\"\"\"\n\n GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON\n\n @require_super_admin\n def get(self):\n \"\"\"Handles GET requests.\"\"\"\n job_id = self.request.get('job_id')\n self.render_json({\n 'output': jobs.get_job_output(job_id)\n })\n\n\nclass AdminTopicsCsvDownloadHandler(base.BaseHandler):\n \"\"\"Retrieves topic similarity data for download.\"\"\"\n\n @require_super_admin\n def get(self):\n self.response.headers['Content-Type'] = 'text/csv'\n self.response.headers['Content-Disposition'] = (\n 'attachment; filename=topic_similarities.csv')\n self.response.write(\n recommendations_services.get_topic_similarities_as_csv())\n", "path": "core/controllers/admin.py"}]}
| 3,239 | 546 |
gh_patches_debug_13073
|
rasdani/github-patches
|
git_diff
|
kymatio__kymatio-650
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sphinx inverts colors...
No clue why, but it can wait until v3.
See here:
https://www.kymat.io/gallery_2d/plot_scattering_disk.html#sphx-glr-gallery-2d-plot-scattering-disk-py
https://www.kymat.io/gallery_2d/plot_invert_scattering_torch.html#sphx-glr-gallery-2d-plot-invert-scattering-torch-py
</issue>
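As an aside on the symptom: one common way gallery images end up looking "inverted" is when negated data is passed to `imshow`. The following is a minimal, self-contained matplotlib sketch (illustrative only, not taken from the kymatio gallery) showing the effect:

```python
# Illustrative sketch only (not from the kymatio gallery): passing negated
# data to imshow under a gray colormap renders the same image with inverted
# intensities, which is what an "inverted colors" thumbnail looks like.
import matplotlib.pyplot as plt
import numpy as np

img = np.linspace(0.0, 1.0, 1024).reshape(32, 32)  # synthetic gradient "image"

fig, (ax_orig, ax_neg) = plt.subplots(1, 2, figsize=(6, 3))
ax_orig.imshow(img, cmap="gray")
ax_orig.set_title("img")
ax_neg.imshow(1 - img, cmap="gray")  # same data, negated before display
ax_neg.set_title("1 - img")
for ax in (ax_orig, ax_neg):
    ax.axis("off")
plt.show()
```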
<code>
[start of examples/2d/plot_scattering_disk.py]
1 """
2 Scattering disk display
3 =======================
4 This script reproduces concentric circles that encode Scattering coefficient's
5 energy as described in "Invariant Scattering Convolution Networks" by Bruna and Mallat.
6 Here, for the sake of simplicity, we only consider first order scattering.
7
8 Author: https://github.com/Jonas1312
9 Edited by: Edouard Oyallon
10 """
11
12
13
14 import matplotlib as mpl
15 import matplotlib.cm as cm
16 import matplotlib.pyplot as plt
17 import numpy as np
18 from kymatio import Scattering2D
19 from PIL import Image
20 import os
21
22
23 img_name = os.path.join(os.getcwd(),"images/digit.png")
24
25 ####################################################################
26 # Scattering computations
27 #-------------------------------------------------------------------
28 # First, we read the input digit:
29 src_img = Image.open(img_name).convert('L').resize((32,32))
30 src_img = np.array(src_img)
31 print("img shape: ", src_img.shape)
32
33 ####################################################################
34 # We compute a Scattering Transform with L=6 angles and J=3 scales.
35 # Rotating a wavelet $\psi$ by $\pi$ is equivalent to consider its
36 # conjugate in fourier: $\hat\psi_{\pi}(\omega)=\hat\psi(r_{-\pi}\omega)^*$.
37 #
38 # Combining this and the fact that a real signal has a Hermitian symmetry
39 # implies that it is usually sufficient to use the angles $\{\frac{\pi l}{L}\}_{l\leq L}$ at computation time.
40 # For consistency, we will however display $\{\frac{2\pi l}{L}\}_{l\leq 2L}$,
41 # which implies that our visualization will be redundant and have a symmetry by rotation of $\pi$.
42
43 L = 6
44 J = 3
45 scattering = Scattering2D(J=J, shape=src_img.shape, L=L, max_order=1, frontend='numpy')
46
47 ####################################################################
48 # We now compute the scattering coefficients:
49 src_img_tensor = src_img.astype(np.float32) / 255.
50
51 scattering_coefficients = scattering(src_img_tensor)
52 print("coeffs shape: ", scattering_coefficients.shape)
53 # Invert colors
54 scattering_coefficients = -scattering_coefficients
55
56 ####################################################################
57 # We skip the low pass filter...
58 scattering_coefficients = scattering_coefficients[1:, :, :]
59 norm = mpl.colors.Normalize(scattering_coefficients.min(), scattering_coefficients.max(), clip=True)
60 mapper = cm.ScalarMappable(norm=norm, cmap="gray")
61 nb_coeffs, window_rows, window_columns = scattering_coefficients.shape
62
63 ####################################################################
64 # Figure reproduction
65 #-------------------------------------------------------------------
66
67 ####################################################################
68 # Now we can reproduce a figure that displays the energy of the first
69 # order Scattering coefficient, which are given by $\{\mid x\star\psi_{j,\theta}\mid\star\phi_J|\}_{j,\theta}$ .
70 # Here, each scattering coefficient is represented on the polar plane. The polar radius and angle correspond
71 # respectively to the scale $j$ and the rotation $\theta$ applied to the mother wavelet.
72 #
73 # Observe that as predicted, the visualization exhibit a redundancy and a symmetry.
74
75 fig,ax = plt.subplots()
76
77 plt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')
78 ax.axis('off')
79 offset = 0.1
80 for row in range(window_rows):
81 for column in range(window_columns):
82 ax=fig.add_subplot(window_rows, window_columns, 1 + column + row * window_rows, projection='polar')
83 ax.set_ylim(0, 1)
84 ax.axis('off')
85 ax.set_yticklabels([]) # turn off radial tick labels (yticks)
86 ax.set_xticklabels([]) # turn off degrees
87 # ax.set_theta_zero_location('N') # 0° to North
88 coefficients = scattering_coefficients[:, row, column]
89 for j in range(J):
90 for l in range(L):
91 coeff = coefficients[l + (J - 1 - j) * L]
92 color = mpl.colors.to_hex(mapper.to_rgba(coeff))
93 ax.bar(x=(4.5+l) * np.pi / L,
94 height=2*(2**(j-1) / 2**J),
95 width=2 * np.pi / L,
96 bottom=offset + (2**j / 2**J) ,
97 color=color)
98 ax.bar(x=(4.5+l+L) * np.pi / L,
99 height=2*(2**(j-1) / 2**J),
100 width=2 * np.pi / L,
101 bottom=offset + (2**j / 2**J) ,
102 color=color)
103
[end of examples/2d/plot_scattering_disk.py]
[start of examples/2d/plot_invert_scattering_torch.py]
1 """
2 Inverting scattering via mse
3 ============================
4 This script aims to quantify the information loss for natural images by
5 performing a reconstruction of an image from its scattering coefficients via a
6 L2-norm minimization.
7 """
8
9 ###############################################################################
10 # Imports
11 # -------
12 import matplotlib.pyplot as plt
13 import numpy as np
14 import torch
15 import torch.nn.functional as F
16 from PIL import Image
17 from torch import optim
18 from scipy.misc import face
19
20 from kymatio.torch import Scattering2D
21
22 device = "cuda" if torch.cuda.is_available() else "cpu"
23
24 ###############################################################################
25 # Load test image
26 # ---------------
27 src_img = Image.fromarray(face())
28 src_img = src_img.resize((512, 384), Image.ANTIALIAS)
29 src_img = np.array(src_img).astype(np.float32)
30 src_img = src_img / 255.0
31 plt.imshow(src_img)
32 plt.title("Original image")
33
34 src_img = np.moveaxis(src_img, -1, 0) # HWC to CHW
35 max_iter = 5 # number of steps for the GD
36 print("Image shape: ", src_img.shape)
37 channels, height, width = src_img.shape
38
39 ###############################################################################
40 # Main loop
41 # ----------
42 for order in [1]:
43 for J in [2, 4]:
44
45 # Compute scattering coefficients
46 scattering = Scattering2D(J=J, shape=(height, width), max_order=order)
47 if device == "cuda":
48 scattering = scattering.cuda()
49 max_iter = 500
50 src_img_tensor = torch.from_numpy(src_img).to(device).contiguous()
51 scattering_coefficients = scattering(src_img_tensor)
52
53 # Create trainable input image
54 input_tensor = torch.rand(src_img.shape, requires_grad=True, device=device)
55
56 # Optimizer hyperparams
57 optimizer = optim.Adam([input_tensor], lr=1)
58
59 # Training
60 best_img = None
61 best_loss = float("inf")
62 for epoch in range(1, max_iter):
63 new_coefficients = scattering(input_tensor)
64 loss = F.mse_loss(input=new_coefficients, target=scattering_coefficients)
65 print("Epoch {}, loss: {}".format(epoch, loss.item()), end="\r")
66 optimizer.zero_grad()
67 loss.backward()
68 optimizer.step()
69 if loss < best_loss:
70 best_loss = loss.detach().cpu().item()
71 best_img = input_tensor.detach().cpu().numpy()
72
73 best_img = np.clip(best_img, 0.0, 1.0)
74
75 # PSNR
76 mse = np.mean((src_img - best_img) ** 2)
77 psnr = 20 * np.log10(1.0 / np.sqrt(mse))
78 print("\nPSNR: {:.2f}dB for order {} and J={}".format(psnr, order, J))
79
80 # Plot
81 plt.figure()
82 plt.imshow(np.moveaxis(best_img, 0, -1))
83 plt.title("PSNR: {:.2f}dB (order {}, J={})".format(psnr, order, J))
84
85 plt.show()
86
[end of examples/2d/plot_invert_scattering_torch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/2d/plot_invert_scattering_torch.py b/examples/2d/plot_invert_scattering_torch.py
--- a/examples/2d/plot_invert_scattering_torch.py
+++ b/examples/2d/plot_invert_scattering_torch.py
@@ -32,7 +32,7 @@
plt.title("Original image")
src_img = np.moveaxis(src_img, -1, 0) # HWC to CHW
-max_iter = 5 # number of steps for the GD
+max_iter = 15 # number of steps for the GD
print("Image shape: ", src_img.shape)
channels, height, width = src_img.shape
diff --git a/examples/2d/plot_scattering_disk.py b/examples/2d/plot_scattering_disk.py
--- a/examples/2d/plot_scattering_disk.py
+++ b/examples/2d/plot_scattering_disk.py
@@ -74,7 +74,7 @@
fig,ax = plt.subplots()
-plt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')
+plt.imshow(src_img,cmap='gray',interpolation='nearest', aspect='auto')
ax.axis('off')
offset = 0.1
for row in range(window_rows):
|
{"golden_diff": "diff --git a/examples/2d/plot_invert_scattering_torch.py b/examples/2d/plot_invert_scattering_torch.py\n--- a/examples/2d/plot_invert_scattering_torch.py\n+++ b/examples/2d/plot_invert_scattering_torch.py\n@@ -32,7 +32,7 @@\n plt.title(\"Original image\")\n \n src_img = np.moveaxis(src_img, -1, 0) # HWC to CHW\n-max_iter = 5 # number of steps for the GD\n+max_iter = 15 # number of steps for the GD\n print(\"Image shape: \", src_img.shape)\n channels, height, width = src_img.shape\n \ndiff --git a/examples/2d/plot_scattering_disk.py b/examples/2d/plot_scattering_disk.py\n--- a/examples/2d/plot_scattering_disk.py\n+++ b/examples/2d/plot_scattering_disk.py\n@@ -74,7 +74,7 @@\n \n fig,ax = plt.subplots()\n \n-plt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')\n+plt.imshow(src_img,cmap='gray',interpolation='nearest', aspect='auto')\n ax.axis('off')\n offset = 0.1\n for row in range(window_rows):\n", "issue": "sphinx inverses colors...\nno clue why, but it can wait v3.\r\n\r\ncf here:\r\nhttps://www.kymat.io/gallery_2d/plot_scattering_disk.html#sphx-glr-gallery-2d-plot-scattering-disk-py\r\nhttps://www.kymat.io/gallery_2d/plot_invert_scattering_torch.html#sphx-glr-gallery-2d-plot-invert-scattering-torch-py\n", "before_files": [{"content": "\"\"\"\nScattering disk display\n=======================\nThis script reproduces concentric circles that encode Scattering coefficient's\nenergy as described in \"Invariant Scattering Convolution Networks\" by Bruna and Mallat.\nHere, for the sake of simplicity, we only consider first order scattering.\n\nAuthor: https://github.com/Jonas1312\nEdited by: Edouard Oyallon\n\"\"\"\n\n\n\nimport matplotlib as mpl\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom kymatio import Scattering2D\nfrom PIL import Image\nimport os\n\n\nimg_name = os.path.join(os.getcwd(),\"images/digit.png\")\n\n####################################################################\n# Scattering computations\n#-------------------------------------------------------------------\n# First, we read the input digit:\nsrc_img = Image.open(img_name).convert('L').resize((32,32))\nsrc_img = np.array(src_img)\nprint(\"img shape: \", src_img.shape)\n\n####################################################################\n# We compute a Scattering Transform with L=6 angles and J=3 scales.\n# Rotating a wavelet $\\psi$ by $\\pi$ is equivalent to consider its\n# conjugate in fourier: $\\hat\\psi_{\\pi}(\\omega)=\\hat\\psi(r_{-\\pi}\\omega)^*$.\n#\n# Combining this and the fact that a real signal has a Hermitian symmetry\n# implies that it is usually sufficient to use the angles $\\{\\frac{\\pi l}{L}\\}_{l\\leq L}$ at computation time.\n# For consistency, we will however display $\\{\\frac{2\\pi l}{L}\\}_{l\\leq 2L}$,\n# which implies that our visualization will be redundant and have a symmetry by rotation of $\\pi$.\n\nL = 6\nJ = 3\nscattering = Scattering2D(J=J, shape=src_img.shape, L=L, max_order=1, frontend='numpy')\n\n####################################################################\n# We now compute the scattering coefficients:\nsrc_img_tensor = src_img.astype(np.float32) / 255.\n\nscattering_coefficients = scattering(src_img_tensor)\nprint(\"coeffs shape: \", scattering_coefficients.shape)\n# Invert colors\nscattering_coefficients = -scattering_coefficients\n\n####################################################################\n# We skip the low pass filter...\nscattering_coefficients = scattering_coefficients[1:, :, 
:]\nnorm = mpl.colors.Normalize(scattering_coefficients.min(), scattering_coefficients.max(), clip=True)\nmapper = cm.ScalarMappable(norm=norm, cmap=\"gray\")\nnb_coeffs, window_rows, window_columns = scattering_coefficients.shape\n\n####################################################################\n# Figure reproduction\n#-------------------------------------------------------------------\n\n####################################################################\n# Now we can reproduce a figure that displays the energy of the first\n# order Scattering coefficient, which are given by $\\{\\mid x\\star\\psi_{j,\\theta}\\mid\\star\\phi_J|\\}_{j,\\theta}$ .\n# Here, each scattering coefficient is represented on the polar plane. The polar radius and angle correspond\n# respectively to the scale $j$ and the rotation $\\theta$ applied to the mother wavelet.\n#\n# Observe that as predicted, the visualization exhibit a redundancy and a symmetry.\n\nfig,ax = plt.subplots()\n\nplt.imshow(1-src_img,cmap='gray',interpolation='nearest', aspect='auto')\nax.axis('off')\noffset = 0.1\nfor row in range(window_rows):\n for column in range(window_columns):\n ax=fig.add_subplot(window_rows, window_columns, 1 + column + row * window_rows, projection='polar')\n ax.set_ylim(0, 1)\n ax.axis('off')\n ax.set_yticklabels([]) # turn off radial tick labels (yticks)\n ax.set_xticklabels([]) # turn off degrees\n # ax.set_theta_zero_location('N') # 0\u00b0 to North\n coefficients = scattering_coefficients[:, row, column]\n for j in range(J):\n for l in range(L):\n coeff = coefficients[l + (J - 1 - j) * L]\n color = mpl.colors.to_hex(mapper.to_rgba(coeff))\n ax.bar(x=(4.5+l) * np.pi / L,\n height=2*(2**(j-1) / 2**J),\n width=2 * np.pi / L,\n bottom=offset + (2**j / 2**J) ,\n color=color)\n ax.bar(x=(4.5+l+L) * np.pi / L,\n height=2*(2**(j-1) / 2**J),\n width=2 * np.pi / L,\n bottom=offset + (2**j / 2**J) ,\n color=color)\n", "path": "examples/2d/plot_scattering_disk.py"}, {"content": "\"\"\"\nInverting scattering via mse\n============================\nThis script aims to quantify the information loss for natural images by\nperforming a reconstruction of an image from its scattering coefficients via a\nL2-norm minimization.\n\"\"\"\n\n###############################################################################\n# Imports\n# -------\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torch import optim\nfrom scipy.misc import face\n\nfrom kymatio.torch import Scattering2D\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n###############################################################################\n# Load test image\n# ---------------\nsrc_img = Image.fromarray(face())\nsrc_img = src_img.resize((512, 384), Image.ANTIALIAS)\nsrc_img = np.array(src_img).astype(np.float32)\nsrc_img = src_img / 255.0\nplt.imshow(src_img)\nplt.title(\"Original image\")\n\nsrc_img = np.moveaxis(src_img, -1, 0) # HWC to CHW\nmax_iter = 5 # number of steps for the GD\nprint(\"Image shape: \", src_img.shape)\nchannels, height, width = src_img.shape\n\n###############################################################################\n# Main loop\n# ----------\nfor order in [1]:\n for J in [2, 4]:\n\n # Compute scattering coefficients\n scattering = Scattering2D(J=J, shape=(height, width), max_order=order)\n if device == \"cuda\":\n scattering = scattering.cuda()\n max_iter = 500\n src_img_tensor = torch.from_numpy(src_img).to(device).contiguous()\n 
scattering_coefficients = scattering(src_img_tensor)\n\n # Create trainable input image\n input_tensor = torch.rand(src_img.shape, requires_grad=True, device=device)\n\n # Optimizer hyperparams\n optimizer = optim.Adam([input_tensor], lr=1)\n\n # Training\n best_img = None\n best_loss = float(\"inf\")\n for epoch in range(1, max_iter):\n new_coefficients = scattering(input_tensor)\n loss = F.mse_loss(input=new_coefficients, target=scattering_coefficients)\n print(\"Epoch {}, loss: {}\".format(epoch, loss.item()), end=\"\\r\")\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if loss < best_loss:\n best_loss = loss.detach().cpu().item()\n best_img = input_tensor.detach().cpu().numpy()\n\n best_img = np.clip(best_img, 0.0, 1.0)\n\n # PSNR\n mse = np.mean((src_img - best_img) ** 2)\n psnr = 20 * np.log10(1.0 / np.sqrt(mse))\n print(\"\\nPSNR: {:.2f}dB for order {} and J={}\".format(psnr, order, J))\n\n # Plot\n plt.figure()\n plt.imshow(np.moveaxis(best_img, 0, -1))\n plt.title(\"PSNR: {:.2f}dB (order {}, J={})\".format(psnr, order, J))\n\nplt.show()\n", "path": "examples/2d/plot_invert_scattering_torch.py"}]}
| 2,708 | 286 |
gh_patches_debug_12440
|
rasdani/github-patches
|
git_diff
|
conda__conda-8272
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
condarc file precedence not respected when merging
In my system `.condarc` I have `defaults` and `conda-forge` and in my user `.condarc` I have my own `dhirschfeld` channel:

My understanding ([from this blog post](https://www.anaconda.com/conda-configuration-engine-power-users/)) was that the system `.condarc` should take precedence when merging and that my channel should be *appended* to the channel list. This is the behaviour I'd like, but not what I'm observing in practice:
```
(base) C:\> conda info
active environment : base
active env location : C:\Miniconda3
shell level : 1
user config file : C:\Users\dhirschfeld\.condarc
populated config files : C:\Miniconda3\.condarc
C:\Users\dhirschfeld\.condarc
conda version : 4.6.3
conda-build version : not installed
python version : 3.7.1.final.0
base environment : C:\Miniconda3 (writable)
channel URLs : https://conda.anaconda.org/dhirschfeld/win-64
https://conda.anaconda.org/dhirschfeld/noarch
https://repo.anaconda.com/pkgs/main/win-64
https://repo.anaconda.com/pkgs/main/noarch
https://repo.anaconda.com/pkgs/free/win-64
https://repo.anaconda.com/pkgs/free/noarch
https://repo.anaconda.com/pkgs/r/win-64
https://repo.anaconda.com/pkgs/r/noarch
https://repo.anaconda.com/pkgs/msys2/win-64
https://repo.anaconda.com/pkgs/msys2/noarch
https://conda.anaconda.org/conda-forge/win-64
https://conda.anaconda.org/conda-forge/noarch
package cache : C:\Miniconda3\pkgs
C:\Users\dhirschfeld\.conda\pkgs
C:\Users\dhirschfeld\AppData\Local\conda\conda\pkgs
envs directories : C:\Miniconda3\envs
C:\Users\dhirschfeld\.conda\envs
C:\Users\dhirschfeld\AppData\Local\conda\conda\envs
platform : win-64
user-agent : conda/4.6.3 requests/2.21.0 CPython/3.7.1 Windows/10 Windows/10.0.17763
administrator : False
netrc file : None
offline mode : False
```
</issue>
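To make the reported ordering concrete, below is a deliberately simplified, hypothetical sketch of a prepend-style merge of list-valued settings across an ordered search path. It is not conda's actual merge code; the function name and file contents are illustrative only and simply mirror the report above.

```python
# Hypothetical sketch, not conda's implementation: merge list-valued settings
# so that values from higher-priority configuration files come first.
def merge_channels(configs_low_to_high):
    """Merge channel lists; ``configs_low_to_high`` is ordered lowest priority first."""
    merged = []
    for channels in reversed(configs_low_to_high):  # walk highest priority first
        for channel in channels:
            if channel not in merged:
                merged.append(channel)
    return merged

system_condarc = ["defaults", "conda-forge"]   # e.g. C:\Miniconda3\.condarc
user_condarc = ["dhirschfeld"]                 # e.g. %USERPROFILE%\.condarc

print(merge_channels([system_condarc, user_condarc]))
# -> ['dhirschfeld', 'defaults', 'conda-forge']
```

Under a merge like this, the user file's channel lands on top, which matches the `channel URLs` ordering in the report; the behaviour the reporter expected would instead keep the system file's entries first and append the user channel.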
<code>
[start of conda/base/constants.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (C) 2012 Anaconda, Inc
3 # SPDX-License-Identifier: BSD-3-Clause
4 """
5 This file should hold most string literals and magic numbers used throughout the code base.
6 The exception is if a literal is specifically meant to be private to and isolated within a module.
7 Think of this as a "more static" source of configuration information.
8
9 Another important source of "static" configuration is conda/models/enums.py.
10 """
11 from __future__ import absolute_import, division, print_function, unicode_literals
12
13 from enum import Enum, EnumMeta
14 from os.path import join
15
16 from ..common.compat import itervalues, on_win, six_with_metaclass, string_types
17
18 PREFIX_PLACEHOLDER = ('/opt/anaconda1anaconda2'
19 # this is intentionally split into parts, such that running
20 # this program on itself will leave it unchanged
21 'anaconda3')
22
23 machine_bits = 8 * tuple.__itemsize__
24
25 APP_NAME = 'conda'
26
27 SEARCH_PATH = (
28 '/etc/conda/.condarc',
29 '/etc/conda/condarc',
30 '/etc/conda/condarc.d/',
31 '/var/lib/conda/.condarc',
32 '/var/lib/conda/condarc',
33 '/var/lib/conda/condarc.d/',
34 '$CONDA_ROOT/.condarc',
35 '$CONDA_ROOT/condarc',
36 '$CONDA_ROOT/condarc.d/',
37 '~/.conda/.condarc',
38 '~/.conda/condarc',
39 '~/.conda/condarc.d/',
40 '~/.condarc',
41 '$CONDA_PREFIX/.condarc',
42 '$CONDA_PREFIX/condarc',
43 '$CONDA_PREFIX/condarc.d/',
44 '$CONDARC',
45 )
46
47 DEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org'
48 CONDA_HOMEPAGE_URL = 'https://conda.io'
49 ERROR_UPLOAD_URL = 'https://conda.io/conda-post/unexpected-error'
50 DEFAULTS_CHANNEL_NAME = 'defaults'
51
52 PLATFORM_DIRECTORIES = (
53 "noarch",
54 "linux-32",
55 "linux-64",
56 "linux-aarch64",
57 "linux-armv6l",
58 "linux-armv7l",
59 "linux-ppc64le",
60 "osx-64",
61 "win-32",
62 "win-64",
63 "zos-z",
64 )
65
66 RECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')
67
68
69 DEFAULT_CHANNELS_UNIX = (
70 'https://repo.anaconda.com/pkgs/main',
71 'https://repo.anaconda.com/pkgs/free',
72 'https://repo.anaconda.com/pkgs/r',
73 )
74
75 DEFAULT_CHANNELS_WIN = (
76 'https://repo.anaconda.com/pkgs/main',
77 'https://repo.anaconda.com/pkgs/free',
78 'https://repo.anaconda.com/pkgs/r',
79 'https://repo.anaconda.com/pkgs/msys2',
80 )
81
82 DEFAULT_CUSTOM_CHANNELS = {
83 'pkgs/pro': 'https://repo.anaconda.com',
84 }
85
86 DEFAULT_CHANNELS = DEFAULT_CHANNELS_WIN if on_win else DEFAULT_CHANNELS_UNIX
87
88 ROOT_ENV_NAME = 'base'
89
90 ROOT_NO_RM = (
91 'python',
92 'pycosat',
93 'ruamel_yaml',
94 'conda',
95 'openssl',
96 'requests',
97 )
98
99 DEFAULT_AGGRESSIVE_UPDATE_PACKAGES = (
100 'ca-certificates',
101 'certifi',
102 'openssl',
103 )
104
105 if on_win:
106 COMPATIBLE_SHELLS = (
107 'bash',
108 'cmd.exe',
109 'fish',
110 'tcsh',
111 'xonsh',
112 'zsh',
113 'powershell',
114 )
115 else:
116 COMPATIBLE_SHELLS = (
117 'bash',
118 'fish',
119 'tcsh',
120 'xonsh',
121 'zsh',
122 'powershell',
123 )
124
125
126 # Maximum priority, reserved for packages we really want to remove
127 MAX_CHANNEL_PRIORITY = 10000
128
129 CONDA_TARBALL_EXTENSION = '.tar.bz2'
130
131 UNKNOWN_CHANNEL = "<unknown>"
132
133
134 class SafetyChecks(Enum):
135 disabled = 'disabled'
136 warn = 'warn'
137 enabled = 'enabled'
138
139 def __str__(self):
140 return self.value
141
142
143 class PathConflict(Enum):
144 clobber = 'clobber'
145 warn = 'warn'
146 prevent = 'prevent'
147
148 def __str__(self):
149 return self.value
150
151
152 class DepsModifier(Enum):
153 """Flags to enable alternate handling of dependencies."""
154 NOT_SET = 'not_set' # default
155 NO_DEPS = 'no_deps'
156 ONLY_DEPS = 'only_deps'
157
158 def __str__(self):
159 return self.value
160
161
162 class UpdateModifier(Enum):
163 SPECS_SATISFIED_SKIP_SOLVE = 'specs_satisfied_skip_solve'
164 FREEZE_INSTALLED = 'freeze_installed' # freeze is a better name for --no-update-deps
165 UPDATE_DEPS = 'update_deps'
166 UPDATE_SPECS = 'update_specs' # default
167 UPDATE_ALL = 'update_all'
168 # TODO: add REINSTALL_ALL, see https://github.com/conda/conda/issues/6247 and https://github.com/conda/conda/issues/3149 # NOQA
169
170 def __str__(self):
171 return self.value
172
173
174 class ChannelPriorityMeta(EnumMeta):
175
176 def __call__(cls, value, *args, **kwargs):
177 try:
178 return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)
179 except ValueError:
180 if isinstance(value, string_types):
181 from .._vendor.auxlib.type_coercion import typify
182 value = typify(value)
183 if value is True:
184 value = 'flexible'
185 elif value is False:
186 value = cls.DISABLED
187 return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)
188
189
190 class ChannelPriority(six_with_metaclass(ChannelPriorityMeta, Enum)):
191 __name__ = "ChannelPriority"
192
193 STRICT = 'strict'
194 # STRICT_OR_FLEXIBLE = 'strict_or_flexible' # TODO: consider implementing if needed
195 FLEXIBLE = 'flexible'
196 DISABLED = 'disabled'
197
198 def __str__(self):
199 return self.value
200
201
202 class SatSolverChoice(Enum):
203 PYCOSAT = 'pycosat'
204 PYCRYPTOSAT = 'pycryptosat'
205 PYSAT = 'pysat'
206
207 def __str__(self):
208 return self.value
209
210
211 # Magic files for permissions determination
212 PACKAGE_CACHE_MAGIC_FILE = 'urls.txt'
213 PREFIX_MAGIC_FILE = join('conda-meta', 'history')
214
215
216 # TODO: should be frozendict(), but I don't want to import frozendict from auxlib here.
217 NAMESPACES_MAP = { # base package name, namespace
218 "python": "python",
219 "r": "r",
220 "r-base": "r",
221 "mro-base": "r",
222 "erlang": "erlang",
223 "java": "java",
224 "openjdk": "java",
225 "julia": "julia",
226 "latex": "latex",
227 "lua": "lua",
228 "nodejs": "js",
229 "perl": "perl",
230 "php": "php",
231 "ruby": "ruby",
232 "m2-base": "m2",
233 "msys2-conda-epoch": "m2w64",
234 }
235
236 NAMESPACE_PACKAGE_NAMES = frozenset(NAMESPACES_MAP)
237 NAMESPACES = frozenset(itervalues(NAMESPACES_MAP))
238
239 # Namespace arbiters of uniqueness
240 # global: some repository established by Anaconda, Inc. and conda-forge
241 # python: https://pypi.org/simple
242 # r: https://cran.r-project.org/web/packages/available_packages_by_name.html
243 # erlang: https://hex.pm/packages
244 # java: https://repo1.maven.org/maven2/
245 # julia: https://pkg.julialang.org/
246 # latex: https://ctan.org/pkg
247 # lua: https://luarocks.org/m/root
248 # js: https://docs.npmjs.com/misc/registry
249 # pascal: ???
250 # perl: https://www.cpan.org/modules/01modules.index.html
251 # php: https://packagist.org/
252 # ruby: https://rubygems.org/gems
253 # clojure: https://clojars.org/
254
255
256 # Not all python namespace packages are registered on PyPI. If a package
257 # contains files in site-packages, it probably belongs in the python namespace.
258
[end of conda/base/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conda/base/constants.py b/conda/base/constants.py
--- a/conda/base/constants.py
+++ b/conda/base/constants.py
@@ -24,13 +24,23 @@
APP_NAME = 'conda'
-SEARCH_PATH = (
- '/etc/conda/.condarc',
- '/etc/conda/condarc',
- '/etc/conda/condarc.d/',
- '/var/lib/conda/.condarc',
- '/var/lib/conda/condarc',
- '/var/lib/conda/condarc.d/',
+if on_win:
+ SEARCH_PATH = (
+ 'C:/ProgramData/conda/.condarc',
+ 'C:/ProgramData/conda/condarc',
+ 'C:/ProgramData/conda/condarc.d',
+ )
+else:
+ SEARCH_PATH = (
+ '/etc/conda/.condarc',
+ '/etc/conda/condarc',
+ '/etc/conda/condarc.d/',
+ '/var/lib/conda/.condarc',
+ '/var/lib/conda/condarc',
+ '/var/lib/conda/condarc.d/',
+ )
+
+SEARCH_PATH += (
'$CONDA_ROOT/.condarc',
'$CONDA_ROOT/condarc',
'$CONDA_ROOT/condarc.d/',
|
{"golden_diff": "diff --git a/conda/base/constants.py b/conda/base/constants.py\n--- a/conda/base/constants.py\n+++ b/conda/base/constants.py\n@@ -24,13 +24,23 @@\n \n APP_NAME = 'conda'\n \n-SEARCH_PATH = (\n- '/etc/conda/.condarc',\n- '/etc/conda/condarc',\n- '/etc/conda/condarc.d/',\n- '/var/lib/conda/.condarc',\n- '/var/lib/conda/condarc',\n- '/var/lib/conda/condarc.d/',\n+if on_win:\n+ SEARCH_PATH = (\n+ 'C:/ProgramData/conda/.condarc',\n+ 'C:/ProgramData/conda/condarc',\n+ 'C:/ProgramData/conda/condarc.d',\n+ )\n+else:\n+ SEARCH_PATH = (\n+ '/etc/conda/.condarc',\n+ '/etc/conda/condarc',\n+ '/etc/conda/condarc.d/',\n+ '/var/lib/conda/.condarc',\n+ '/var/lib/conda/condarc',\n+ '/var/lib/conda/condarc.d/',\n+ )\n+\n+SEARCH_PATH += (\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/condarc.d/',\n", "issue": "condarc file precedence not respected when merging\nIn my system `.condarc` I have `defaults` and `conda-forge` and in my user `.condarc` I have my own `dhirschfeld` channel:\r\n\r\n\r\n\r\nMy understanding ([from this blog post](https://www.anaconda.com/conda-configuration-engine-power-users/)) was that the system `.condarc` should take precedence when merging and that my channel should be *appended* to the channel list. This is the behaviour I'd like, but not what I'm observing in practice:\r\n```\r\n(base) C:\\> conda info\r\n\r\n active environment : base\r\n active env location : C:\\Miniconda3\r\n shell level : 1\r\n user config file : C:\\Users\\dhirschfeld\\.condarc\r\n populated config files : C:\\Miniconda3\\.condarc\r\n C:\\Users\\dhirschfeld\\.condarc\r\n conda version : 4.6.3\r\n conda-build version : not installed\r\n python version : 3.7.1.final.0\r\n base environment : C:\\Miniconda3 (writable)\r\n channel URLs : https://conda.anaconda.org/dhirschfeld/win-64\r\n https://conda.anaconda.org/dhirschfeld/noarch\r\n https://repo.anaconda.com/pkgs/main/win-64\r\n https://repo.anaconda.com/pkgs/main/noarch\r\n https://repo.anaconda.com/pkgs/free/win-64\r\n https://repo.anaconda.com/pkgs/free/noarch\r\n https://repo.anaconda.com/pkgs/r/win-64\r\n https://repo.anaconda.com/pkgs/r/noarch\r\n https://repo.anaconda.com/pkgs/msys2/win-64\r\n https://repo.anaconda.com/pkgs/msys2/noarch\r\n https://conda.anaconda.org/conda-forge/win-64\r\n https://conda.anaconda.org/conda-forge/noarch\r\n package cache : C:\\Miniconda3\\pkgs\r\n C:\\Users\\dhirschfeld\\.conda\\pkgs\r\n C:\\Users\\dhirschfeld\\AppData\\Local\\conda\\conda\\pkgs\r\n envs directories : C:\\Miniconda3\\envs\r\n C:\\Users\\dhirschfeld\\.conda\\envs\r\n C:\\Users\\dhirschfeld\\AppData\\Local\\conda\\conda\\envs\r\n platform : win-64\r\n user-agent : conda/4.6.3 requests/2.21.0 CPython/3.7.1 Windows/10 Windows/10.0.17763\r\n administrator : False\r\n netrc file : None\r\n offline mode : False\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (C) 2012 Anaconda, Inc\n# SPDX-License-Identifier: BSD-3-Clause\n\"\"\"\nThis file should hold most string literals and magic numbers used throughout the code base.\nThe exception is if a literal is specifically meant to be private to and isolated within a module.\nThink of this as a \"more static\" source of configuration information.\n\nAnother important source of \"static\" configuration is conda/models/enums.py.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom enum import Enum, EnumMeta\nfrom os.path import join\n\nfrom ..common.compat import itervalues, on_win, six_with_metaclass, 
string_types\n\nPREFIX_PLACEHOLDER = ('/opt/anaconda1anaconda2'\n # this is intentionally split into parts, such that running\n # this program on itself will leave it unchanged\n 'anaconda3')\n\nmachine_bits = 8 * tuple.__itemsize__\n\nAPP_NAME = 'conda'\n\nSEARCH_PATH = (\n '/etc/conda/.condarc',\n '/etc/conda/condarc',\n '/etc/conda/condarc.d/',\n '/var/lib/conda/.condarc',\n '/var/lib/conda/condarc',\n '/var/lib/conda/condarc.d/',\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/condarc.d/',\n '~/.conda/.condarc',\n '~/.conda/condarc',\n '~/.conda/condarc.d/',\n '~/.condarc',\n '$CONDA_PREFIX/.condarc',\n '$CONDA_PREFIX/condarc',\n '$CONDA_PREFIX/condarc.d/',\n '$CONDARC',\n)\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org'\nCONDA_HOMEPAGE_URL = 'https://conda.io'\nERROR_UPLOAD_URL = 'https://conda.io/conda-post/unexpected-error'\nDEFAULTS_CHANNEL_NAME = 'defaults'\n\nPLATFORM_DIRECTORIES = (\n \"noarch\",\n \"linux-32\",\n \"linux-64\",\n \"linux-aarch64\",\n \"linux-armv6l\",\n \"linux-armv7l\",\n \"linux-ppc64le\",\n \"osx-64\",\n \"win-32\",\n \"win-64\",\n \"zos-z\",\n)\n\nRECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')\n\n\nDEFAULT_CHANNELS_UNIX = (\n 'https://repo.anaconda.com/pkgs/main',\n 'https://repo.anaconda.com/pkgs/free',\n 'https://repo.anaconda.com/pkgs/r',\n)\n\nDEFAULT_CHANNELS_WIN = (\n 'https://repo.anaconda.com/pkgs/main',\n 'https://repo.anaconda.com/pkgs/free',\n 'https://repo.anaconda.com/pkgs/r',\n 'https://repo.anaconda.com/pkgs/msys2',\n)\n\nDEFAULT_CUSTOM_CHANNELS = {\n 'pkgs/pro': 'https://repo.anaconda.com',\n}\n\nDEFAULT_CHANNELS = DEFAULT_CHANNELS_WIN if on_win else DEFAULT_CHANNELS_UNIX\n\nROOT_ENV_NAME = 'base'\n\nROOT_NO_RM = (\n 'python',\n 'pycosat',\n 'ruamel_yaml',\n 'conda',\n 'openssl',\n 'requests',\n)\n\nDEFAULT_AGGRESSIVE_UPDATE_PACKAGES = (\n 'ca-certificates',\n 'certifi',\n 'openssl',\n)\n\nif on_win:\n COMPATIBLE_SHELLS = (\n 'bash',\n 'cmd.exe',\n 'fish',\n 'tcsh',\n 'xonsh',\n 'zsh',\n 'powershell',\n )\nelse:\n COMPATIBLE_SHELLS = (\n 'bash',\n 'fish',\n 'tcsh',\n 'xonsh',\n 'zsh',\n 'powershell',\n )\n\n\n# Maximum priority, reserved for packages we really want to remove\nMAX_CHANNEL_PRIORITY = 10000\n\nCONDA_TARBALL_EXTENSION = '.tar.bz2'\n\nUNKNOWN_CHANNEL = \"<unknown>\"\n\n\nclass SafetyChecks(Enum):\n disabled = 'disabled'\n warn = 'warn'\n enabled = 'enabled'\n\n def __str__(self):\n return self.value\n\n\nclass PathConflict(Enum):\n clobber = 'clobber'\n warn = 'warn'\n prevent = 'prevent'\n\n def __str__(self):\n return self.value\n\n\nclass DepsModifier(Enum):\n \"\"\"Flags to enable alternate handling of dependencies.\"\"\"\n NOT_SET = 'not_set' # default\n NO_DEPS = 'no_deps'\n ONLY_DEPS = 'only_deps'\n\n def __str__(self):\n return self.value\n\n\nclass UpdateModifier(Enum):\n SPECS_SATISFIED_SKIP_SOLVE = 'specs_satisfied_skip_solve'\n FREEZE_INSTALLED = 'freeze_installed' # freeze is a better name for --no-update-deps\n UPDATE_DEPS = 'update_deps'\n UPDATE_SPECS = 'update_specs' # default\n UPDATE_ALL = 'update_all'\n # TODO: add REINSTALL_ALL, see https://github.com/conda/conda/issues/6247 and https://github.com/conda/conda/issues/3149 # NOQA\n\n def __str__(self):\n return self.value\n\n\nclass ChannelPriorityMeta(EnumMeta):\n\n def __call__(cls, value, *args, **kwargs):\n try:\n return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)\n except ValueError:\n if isinstance(value, string_types):\n from .._vendor.auxlib.type_coercion import typify\n value = 
typify(value)\n if value is True:\n value = 'flexible'\n elif value is False:\n value = cls.DISABLED\n return super(ChannelPriorityMeta, cls).__call__(value, *args, **kwargs)\n\n\nclass ChannelPriority(six_with_metaclass(ChannelPriorityMeta, Enum)):\n __name__ = \"ChannelPriority\"\n\n STRICT = 'strict'\n # STRICT_OR_FLEXIBLE = 'strict_or_flexible' # TODO: consider implementing if needed\n FLEXIBLE = 'flexible'\n DISABLED = 'disabled'\n\n def __str__(self):\n return self.value\n\n\nclass SatSolverChoice(Enum):\n PYCOSAT = 'pycosat'\n PYCRYPTOSAT = 'pycryptosat'\n PYSAT = 'pysat'\n\n def __str__(self):\n return self.value\n\n\n# Magic files for permissions determination\nPACKAGE_CACHE_MAGIC_FILE = 'urls.txt'\nPREFIX_MAGIC_FILE = join('conda-meta', 'history')\n\n\n# TODO: should be frozendict(), but I don't want to import frozendict from auxlib here.\nNAMESPACES_MAP = { # base package name, namespace\n \"python\": \"python\",\n \"r\": \"r\",\n \"r-base\": \"r\",\n \"mro-base\": \"r\",\n \"erlang\": \"erlang\",\n \"java\": \"java\",\n \"openjdk\": \"java\",\n \"julia\": \"julia\",\n \"latex\": \"latex\",\n \"lua\": \"lua\",\n \"nodejs\": \"js\",\n \"perl\": \"perl\",\n \"php\": \"php\",\n \"ruby\": \"ruby\",\n \"m2-base\": \"m2\",\n \"msys2-conda-epoch\": \"m2w64\",\n}\n\nNAMESPACE_PACKAGE_NAMES = frozenset(NAMESPACES_MAP)\nNAMESPACES = frozenset(itervalues(NAMESPACES_MAP))\n\n# Namespace arbiters of uniqueness\n# global: some repository established by Anaconda, Inc. and conda-forge\n# python: https://pypi.org/simple\n# r: https://cran.r-project.org/web/packages/available_packages_by_name.html\n# erlang: https://hex.pm/packages\n# java: https://repo1.maven.org/maven2/\n# julia: https://pkg.julialang.org/\n# latex: https://ctan.org/pkg\n# lua: https://luarocks.org/m/root\n# js: https://docs.npmjs.com/misc/registry\n# pascal: ???\n# perl: https://www.cpan.org/modules/01modules.index.html\n# php: https://packagist.org/\n# ruby: https://rubygems.org/gems\n# clojure: https://clojars.org/\n\n\n# Not all python namespace packages are registered on PyPI. If a package\n# contains files in site-packages, it probably belongs in the python namespace.\n", "path": "conda/base/constants.py"}]}
| 3,778 | 289 |
gh_patches_debug_14939
|
rasdani/github-patches
|
git_diff
|
sanic-org__sanic-2721
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Access logging raises TypeError after `logging.logProcesses=False`
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Describe the bug
If someone uses `logging.logProcesses = False` to disable the call to `os.getpid()` during logging, the [default formatter of Sanic](https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65) raises the exception shown below.
See [`logging.logProcesses`](https://docs.python.org/3/howto/logging.html#optimization)
```log
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 445, in _format
return self._fmt % values
~~~~~~~~~~^~~~~~~~
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 449, in format
return self._format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 659, in formatMessage
return self._style.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 690, in format
s = self.formatMessage(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 953, in format
return fmt.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1110, in emit
msg = self.format(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 978, in handle
self.emit(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1706, in callHandlers
hdlr.handle(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1644, in handle
self.callHandlers(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1634, in _log
self.handle(record)
File "C:\Program Files\Python\Python311\Lib\logging\__init__.py", line 1489, in info
self._log(INFO, msg, args, **kwargs)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\application\motd.py", line 113, in display
out(indent("\n".join(lines), " "))
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\application\motd.py", line 39, in output
motd_class(logo, serve_location, data, extra).display()
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 579, in motd
MOTD.output(logo, serve_location, display, extra)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 533, in _helper
self.motd(server_settings=server_settings)
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 327, in prepare
server_settings = self._helper(
File "C:\Program Files\Python\Python311\Lib\site-packages\sanic\mixins\startup.py", line 176, in run
self.prepare(
...
TypeError: %d format: a real number is required, not NoneType
```
Without `os.getpid()`, the LogRecord is generated with a values dict like `values = {'process': None, ...}`. An exception is then raised when the formatter tries to render `values = {'process': None, ...}` with `"[%(process)d]"`.
I suggest using `[%(process)s]` instead of `[%(process)d]`.
https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65
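A minimal sketch of the failure mode, assuming only a record values dict that carries `process=None` (this snippet is illustrative, not taken from Sanic or the stdlib):

```python
# Illustrative only: mimic the values produced when logging.logProcesses is False.
values = {"process": None}

print("[%(process)s]" % values)        # -> "[None]"
try:
    print("[%(process)d]" % values)    # %d requires a real number
except TypeError as exc:
    print(exc)                         # -> %d format: a real number is required, not NoneType
```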
If no [conversion flags](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) (like `%06d`) are set, `%s` also performs better when converting an unsigned integer to a string.
See [Why is %s faster than %d for integer substitution in python?](https://stackoverflow.com/a/27800584/18677995)
Here is a shell snippet for a quick benchmark:
```shell
python -m timeit -n 100000 -s "fstr='[%(process)s]'" "fstr % {'process':12345}"
python -m timeit -n 100000 -s "fstr='[%(process)d]'" "fstr % {'process':12345}"
```
Result on my laptop is:
```log
100000 loops, best of 5: 157 nsec per loop
100000 loops, best of 5: 160 nsec per loop
```
### Code snippet
```python
import logging

from sanic import Sanic
from sanic.response import text

logging.logProcesses = False

app = Sanic("MyHelloWorldApp")


@app.get("/")
async def hello_world(request):
    return text("Hello, world.")


if __name__ == '__main__':
    app.run(host="127.0.0.1", port=8080, debug=True)
```
### Expected Behavior
The log would look like this (the pid shows `None` instead of an exception being raised):
```log
[2023-02-09 10:39:08 +0800] [None] [INFO]
┌─────────────────────────────────────────────────────────────┐
│ Sanic v22.12.0 │
│ Goin' Fast @ http://127.0.0.1:8080 │
├───────────────────────┬─────────────────────────────────────┤
│ │ mode: debug, single worker │
│ ▄███ █████ ██ │ server: sanic, HTTP/1.1 │
│ ██ │ python: 3.11.1 │
│ ▀███████ ███▄ │ platform: Windows-10-10.0.22621-SP0 │
│ ██ │ packages: sanic-routing==22.8.0 │
│ ████ ████████▀ │ │
│ │ │
│ Build Fast. Run Fast. │ │
└───────────────────────┴─────────────────────────────────────┘
[2023-02-09 10:39:08 +0800] [None] [DEBUG] Creating multiprocessing context using 'spawn'
[2023-02-09 10:39:08 +0800] [None] [DEBUG] Starting a process: Sanic-Server-0-0
[2023-02-09 10:39:09 +0800] [None] [DEBUG] Process ack: Sanic-Server-0-0 [13504]
[2023-02-09 10:39:09 +0800] [None] [INFO] Starting worker [13504]
```
### How do you run Sanic?
As a script (`app.run` or `Sanic.serve`)
### Operating System
Windows
### Sanic Version
22.12.0
### Additional context
_No response_
</issue>
<code>
[start of sanic/log.py]
1 import logging
2 import sys
3
4 from enum import Enum
5 from typing import TYPE_CHECKING, Any, Dict
6 from warnings import warn
7
8 from sanic.compat import is_atty
9
10
11 # Python 3.11 changed the way Enum formatting works for mixed-in types.
12 if sys.version_info < (3, 11, 0):
13
14 class StrEnum(str, Enum):
15 pass
16
17 else:
18 if not TYPE_CHECKING:
19 from enum import StrEnum
20
21
22 LOGGING_CONFIG_DEFAULTS: Dict[str, Any] = dict( # no cov
23 version=1,
24 disable_existing_loggers=False,
25 loggers={
26 "sanic.root": {"level": "INFO", "handlers": ["console"]},
27 "sanic.error": {
28 "level": "INFO",
29 "handlers": ["error_console"],
30 "propagate": True,
31 "qualname": "sanic.error",
32 },
33 "sanic.access": {
34 "level": "INFO",
35 "handlers": ["access_console"],
36 "propagate": True,
37 "qualname": "sanic.access",
38 },
39 "sanic.server": {
40 "level": "INFO",
41 "handlers": ["console"],
42 "propagate": True,
43 "qualname": "sanic.server",
44 },
45 },
46 handlers={
47 "console": {
48 "class": "logging.StreamHandler",
49 "formatter": "generic",
50 "stream": sys.stdout,
51 },
52 "error_console": {
53 "class": "logging.StreamHandler",
54 "formatter": "generic",
55 "stream": sys.stderr,
56 },
57 "access_console": {
58 "class": "logging.StreamHandler",
59 "formatter": "access",
60 "stream": sys.stdout,
61 },
62 },
63 formatters={
64 "generic": {
65 "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
66 "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
67 "class": "logging.Formatter",
68 },
69 "access": {
70 "format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
71 + "%(request)s %(message)s %(status)d %(byte)d",
72 "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
73 "class": "logging.Formatter",
74 },
75 },
76 )
77 """
78 Defult logging configuration
79 """
80
81
82 class Colors(StrEnum): # no cov
83 END = "\033[0m"
84 BOLD = "\033[1m"
85 BLUE = "\033[34m"
86 GREEN = "\033[32m"
87 PURPLE = "\033[35m"
88 RED = "\033[31m"
89 SANIC = "\033[38;2;255;13;104m"
90 YELLOW = "\033[01;33m"
91
92
93 class VerbosityFilter(logging.Filter):
94 verbosity: int = 0
95
96 def filter(self, record: logging.LogRecord) -> bool:
97 verbosity = getattr(record, "verbosity", 0)
98 return verbosity <= self.verbosity
99
100
101 _verbosity_filter = VerbosityFilter()
102
103 logger = logging.getLogger("sanic.root") # no cov
104 """
105 General Sanic logger
106 """
107 logger.addFilter(_verbosity_filter)
108
109 error_logger = logging.getLogger("sanic.error") # no cov
110 """
111 Logger used by Sanic for error logging
112 """
113 error_logger.addFilter(_verbosity_filter)
114
115 access_logger = logging.getLogger("sanic.access") # no cov
116 """
117 Logger used by Sanic for access logging
118 """
119 access_logger.addFilter(_verbosity_filter)
120
121 server_logger = logging.getLogger("sanic.server") # no cov
122 """
123 Logger used by Sanic for server related messages
124 """
125 logger.addFilter(_verbosity_filter)
126
127
128 def deprecation(message: str, version: float): # no cov
129 version_info = f"[DEPRECATION v{version}] "
130 if is_atty():
131 version_info = f"{Colors.RED}{version_info}"
132 message = f"{Colors.YELLOW}{message}{Colors.END}"
133 warn(version_info + message, DeprecationWarning)
134
[end of sanic/log.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sanic/log.py b/sanic/log.py
--- a/sanic/log.py
+++ b/sanic/log.py
@@ -62,13 +62,13 @@
},
formatters={
"generic": {
- "format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
+ "format": "%(asctime)s [%(process)s] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
"access": {
"format": "%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: "
- + "%(request)s %(message)s %(status)d %(byte)d",
+ + "%(request)s %(message)s %(status)s %(byte)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
},
|
{"golden_diff": "diff --git a/sanic/log.py b/sanic/log.py\n--- a/sanic/log.py\n+++ b/sanic/log.py\n@@ -62,13 +62,13 @@\n },\n formatters={\n \"generic\": {\n- \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n+ \"format\": \"%(asctime)s [%(process)s] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n- + \"%(request)s %(message)s %(status)d %(byte)d\",\n+ + \"%(request)s %(message)s %(status)s %(byte)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n", "issue": "Access logging raise TypeError after `logging.logProcesses=False`\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues\r\n\r\n### Describe the bug\r\n\r\nIf someone use `logging.logProcesses = False` to disable the calling of `os.getpid()` while logging, the [default formatter of Sanic](https://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65) will lead to the exception as shown below.\r\n\r\nSee [`logging.logProcesses`](https://docs.python.org/3/howto/logging.html#optimization)\r\n\r\n```log\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 445, in _format\r\n return self._fmt % values\r\n ~~~~~~~~~~^~~~~~~~\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 449, in format\r\n return self._format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 659, in formatMessage\r\n return self._style.format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 690, in format\r\n s = self.formatMessage(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 953, in format\r\n return fmt.format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1110, in emit\r\n msg = self.format(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 978, in handle\r\n self.emit(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1706, in callHandlers\r\n hdlr.handle(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1644, in handle\r\n self.callHandlers(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1634, in _log\r\n self.handle(record)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\logging\\__init__.py\", line 1489, in info\r\n self._log(INFO, msg, args, **kwargs)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\application\\motd.py\", line 113, in display\r\n out(indent(\"\\n\".join(lines), \" \"))\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\application\\motd.py\", line 39, in output\r\n motd_class(logo, serve_location, data, extra).display()\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 579, in motd\r\n MOTD.output(logo, serve_location, display, extra)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 533, in _helper\r\n self.motd(server_settings=server_settings)\r\n File \"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 327, in prepare\r\n server_settings = self._helper(\r\n File 
\"C:\\Program Files\\Python\\Python311\\Lib\\site-packages\\sanic\\mixins\\startup.py\", line 176, in run\r\n self.prepare(\r\n...\r\nTypeError: %d format: a real number is required, not NoneType\r\n```\r\n\r\nWithout `os.getpid()`, the LogRecord will only be generated with a value dict like `values = {'process': None, ...}`. Then, exception raises when the formatter tries to translate `values = {'process': None, ...}` into `\"[%(process)d]\"`.\r\n\r\nI suggest to use `[%(process)s]` instead of `[%(process)d]`.\r\n\r\nhttps://github.com/sanic-org/sanic/blob/5e7f6998bdccce325a4c30d940d02d9d1e40b11e/sanic/log.py#L65\r\n\r\nIf there is no [conversion flags](https://docs.python.org/3/library/stdtypes.html#printf-style-string-formatting) (like `%06d`) setted, `%s` also has a better performance on converting unsigned integer to string.\r\n\r\nSee [Why is %s faster than %d for integer substitution in python?](https://stackoverflow.com/a/27800584/18677995)\r\n\r\nAnd here is a shell snippet for you to make a brief test:\r\n```shell\r\npython -m timeit -n 100000 -s \"fstr='[%(process)s]'\" \"fstr % {'process':12345}\"\r\npython -m timeit -n 100000 -s \"fstr='[%(process)d]'\" \"fstr % {'process':12345}\"\r\n```\r\n\r\nResult on my laptop is:\r\n```log\r\n100000 loops, best of 5: 157 nsec per loop\r\n100000 loops, best of 5: 160 nsec per loop\r\n```\r\n\r\n### Code snippet\r\n\r\n```python\r\nimport logging\r\n\r\nfrom sanic import Sanic\r\nfrom sanic.response import text\r\n\r\nlogging.logProcesses = False\r\n\r\napp = Sanic(\"MyHelloWorldApp\")\r\n\r\n\r\[email protected](\"/\")\r\nasync def hello_world(request):\r\n return text(\"Hello, world.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host=\"127.0.0.1\", port=8080, debug=True)\r\n```\r\n\r\n### Expected Behavior\r\n\r\nLog be like (pid shows `None` instead of raising exceptions):\r\n\r\n```log\r\n[2023-02-09 10:39:08 +0800] [None] [INFO] \r\n \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\r\n \u2502 Sanic v22.12.0 \u2502\r\n \u2502 Goin' Fast @ http://127.0.0.1:8080 \u2502\r\n \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\r\n \u2502 \u2502 mode: debug, single worker \u2502\r\n \u2502 \u2584\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588 \u2588\u2588 \u2502 server: sanic, HTTP/1.1 \u2502\r\n \u2502 \u2588\u2588 \u2502 python: 3.11.1 \u2502\r\n \u2502 \u2580\u2588\u2588\u2588\u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2584 \u2502 platform: Windows-10-10.0.22621-SP0 \u2502\r\n \u2502 \u2588\u2588 \u2502 packages: sanic-routing==22.8.0 \u2502\r\n \u2502 \u2588\u2588\u2588\u2588 \u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2580 \u2502 \u2502\r\n \u2502 \u2502 \u2502\r\n \u2502 Build Fast. Run Fast. 
\u2502 \u2502\r\n \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\r\n\r\n[2023-02-09 10:39:08 +0800] [None] [DEBUG] Creating multiprocessing context using 'spawn'\r\n[2023-02-09 10:39:08 +0800] [None] [DEBUG] Starting a process: Sanic-Server-0-0\r\n[2023-02-09 10:39:09 +0800] [None] [DEBUG] Process ack: Sanic-Server-0-0 [13504]\r\n[2023-02-09 10:39:09 +0800] [None] [INFO] Starting worker [13504]\r\n```\r\n\r\n### How do you run Sanic?\r\n\r\nAs a script (`app.run` or `Sanic.serve`)\r\n\r\n### Operating System\r\n\r\nWindows\r\n\r\n### Sanic Version\r\n\r\n22.12.0\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any, Dict\nfrom warnings import warn\n\nfrom sanic.compat import is_atty\n\n\n# Python 3.11 changed the way Enum formatting works for mixed-in types.\nif sys.version_info < (3, 11, 0):\n\n class StrEnum(str, Enum):\n pass\n\nelse:\n if not TYPE_CHECKING:\n from enum import StrEnum\n\n\nLOGGING_CONFIG_DEFAULTS: Dict[str, Any] = dict( # no cov\n version=1,\n disable_existing_loggers=False,\n loggers={\n \"sanic.root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n \"sanic.error\": {\n \"level\": \"INFO\",\n \"handlers\": [\"error_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.error\",\n },\n \"sanic.access\": {\n \"level\": \"INFO\",\n \"handlers\": [\"access_console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.access\",\n },\n \"sanic.server\": {\n \"level\": \"INFO\",\n \"handlers\": [\"console\"],\n \"propagate\": True,\n \"qualname\": \"sanic.server\",\n },\n },\n handlers={\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stdout,\n },\n \"error_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"generic\",\n \"stream\": sys.stderr,\n },\n \"access_console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"access\",\n \"stream\": sys.stdout,\n },\n },\n formatters={\n \"generic\": {\n \"format\": \"%(asctime)s [%(process)d] [%(levelname)s] %(message)s\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n \"access\": {\n \"format\": \"%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: \"\n + \"%(request)s %(message)s %(status)d %(byte)d\",\n \"datefmt\": \"[%Y-%m-%d %H:%M:%S %z]\",\n \"class\": \"logging.Formatter\",\n },\n },\n)\n\"\"\"\nDefult logging configuration\n\"\"\"\n\n\nclass Colors(StrEnum): # no cov\n END = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n BLUE = \"\\033[34m\"\n GREEN = \"\\033[32m\"\n PURPLE = \"\\033[35m\"\n RED = \"\\033[31m\"\n SANIC = \"\\033[38;2;255;13;104m\"\n YELLOW = \"\\033[01;33m\"\n\n\nclass VerbosityFilter(logging.Filter):\n verbosity: int = 0\n\n def filter(self, record: logging.LogRecord) -> bool:\n verbosity = getattr(record, \"verbosity\", 0)\n return verbosity <= self.verbosity\n\n\n_verbosity_filter = VerbosityFilter()\n\nlogger = logging.getLogger(\"sanic.root\") # no cov\n\"\"\"\nGeneral Sanic logger\n\"\"\"\nlogger.addFilter(_verbosity_filter)\n\nerror_logger = logging.getLogger(\"sanic.error\") # no cov\n\"\"\"\nLogger used by Sanic for error 
logging\n\"\"\"\nerror_logger.addFilter(_verbosity_filter)\n\naccess_logger = logging.getLogger(\"sanic.access\") # no cov\n\"\"\"\nLogger used by Sanic for access logging\n\"\"\"\naccess_logger.addFilter(_verbosity_filter)\n\nserver_logger = logging.getLogger(\"sanic.server\") # no cov\n\"\"\"\nLogger used by Sanic for server related messages\n\"\"\"\nlogger.addFilter(_verbosity_filter)\n\n\ndef deprecation(message: str, version: float): # no cov\n version_info = f\"[DEPRECATION v{version}] \"\n if is_atty():\n version_info = f\"{Colors.RED}{version_info}\"\n message = f\"{Colors.YELLOW}{message}{Colors.END}\"\n warn(version_info + message, DeprecationWarning)\n", "path": "sanic/log.py"}]}
| 3,650 | 221 |
gh_patches_debug_28507
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-1815
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Worker hangs when connecting to PS.
The worker waits for the gRPC channel to the PS to become ready via `channel_ready_future`, which can block indefinitely. We should set a timeout and retry.

</issue>
<code>
[start of elasticdl/python/worker/main.py]
1 import grpc
2
3 from elasticdl.python.common import log_utils
4 from elasticdl.python.common.args import parse_worker_args
5 from elasticdl.python.common.grpc_utils import build_channel
6 from elasticdl.python.worker.worker import Worker
7
8
9 def main():
10 args = parse_worker_args()
11 logger = log_utils.get_logger(__name__)
12 logger.info("Starting worker %d", args.worker_id)
13 if args.master_addr is None:
14 raise ValueError("master_addr is missing for worker")
15
16 master_channel = build_channel(args.master_addr)
17
18 ps_channels = []
19 if args.ps_addrs:
20 ps_addrs = args.ps_addrs.split(",")
21
22 for addr in ps_addrs:
23 # addr is in the form as "ps-pod-name.namespace.svc:port"
24 channel = build_channel(addr)
25
26 # Wait the channel is ready by a Future object.
27 grpc.channel_ready_future(channel).result()
28 logger.info(
29 "grpc channel %s to connect pod %s is ready"
30 % (addr, addr.split(".")[0])
31 )
32 ps_channels.append(channel)
33
34 worker = Worker(
35 args,
36 channel=master_channel,
37 ps_channels=ps_channels,
38 set_parallelism=True,
39 )
40 worker.run()
41
42
43 if __name__ == "__main__":
44 main()
45
[end of elasticdl/python/worker/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py
--- a/elasticdl/python/worker/main.py
+++ b/elasticdl/python/worker/main.py
@@ -5,6 +5,9 @@
from elasticdl.python.common.grpc_utils import build_channel
from elasticdl.python.worker.worker import Worker
+CONNECT_PS_MAX_RETRIES = 3
+CONNECT_PS_TIMEOUT = 60
+
def main():
args = parse_worker_args()
@@ -23,13 +26,29 @@
# addr is in the form as "ps-pod-name.namespace.svc:port"
channel = build_channel(addr)
- # Wait the channel is ready by a Future object.
- grpc.channel_ready_future(channel).result()
- logger.info(
- "grpc channel %s to connect pod %s is ready"
- % (addr, addr.split(".")[0])
- )
- ps_channels.append(channel)
+ succeeded = False
+ for i in range(CONNECT_PS_MAX_RETRIES):
+ try:
+ grpc.channel_ready_future(channel).result(
+ timeout=CONNECT_PS_TIMEOUT
+ )
+ logger.info(
+ "grpc channel %s to connect pod %s is ready"
+ % (addr, addr.split(".")[0])
+ )
+ ps_channels.append(channel)
+ succeeded = True
+ break
+ except grpc.FutureTimeoutError:
+ logger.warning(
+ "Failed to connect pod %s with %d retry"
+ % (addr.split(".")[0], i)
+ )
+ if not succeeded:
+ raise TimeoutError(
+ "Time out to connect pod %s with 3 retries"
+ % addr.split(".")[0]
+ )
worker = Worker(
args,
|
{"golden_diff": "diff --git a/elasticdl/python/worker/main.py b/elasticdl/python/worker/main.py\n--- a/elasticdl/python/worker/main.py\n+++ b/elasticdl/python/worker/main.py\n@@ -5,6 +5,9 @@\n from elasticdl.python.common.grpc_utils import build_channel\n from elasticdl.python.worker.worker import Worker\n \n+CONNECT_PS_MAX_RETRIES = 3\n+CONNECT_PS_TIMEOUT = 60\n+\n \n def main():\n args = parse_worker_args()\n@@ -23,13 +26,29 @@\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n \n- # Wait the channel is ready by a Future object.\n- grpc.channel_ready_future(channel).result()\n- logger.info(\n- \"grpc channel %s to connect pod %s is ready\"\n- % (addr, addr.split(\".\")[0])\n- )\n- ps_channels.append(channel)\n+ succeeded = False\n+ for i in range(CONNECT_PS_MAX_RETRIES):\n+ try:\n+ grpc.channel_ready_future(channel).result(\n+ timeout=CONNECT_PS_TIMEOUT\n+ )\n+ logger.info(\n+ \"grpc channel %s to connect pod %s is ready\"\n+ % (addr, addr.split(\".\")[0])\n+ )\n+ ps_channels.append(channel)\n+ succeeded = True\n+ break\n+ except grpc.FutureTimeoutError:\n+ logger.warning(\n+ \"Failed to connect pod %s with %d retry\"\n+ % (addr.split(\".\")[0], i)\n+ )\n+ if not succeeded:\n+ raise TimeoutError(\n+ \"Time out to connect pod %s with 3 retries\"\n+ % addr.split(\".\")[0]\n+ )\n \n worker = Worker(\n args,\n", "issue": "Worker hangs when connects to PS.\nThe worker will wait the grpc channel of PS is ready using `channel_ready_future`. We should set timeout and retry.\r\n\r\n\n", "before_files": [{"content": "import grpc\n\nfrom elasticdl.python.common import log_utils\nfrom elasticdl.python.common.args import parse_worker_args\nfrom elasticdl.python.common.grpc_utils import build_channel\nfrom elasticdl.python.worker.worker import Worker\n\n\ndef main():\n args = parse_worker_args()\n logger = log_utils.get_logger(__name__)\n logger.info(\"Starting worker %d\", args.worker_id)\n if args.master_addr is None:\n raise ValueError(\"master_addr is missing for worker\")\n\n master_channel = build_channel(args.master_addr)\n\n ps_channels = []\n if args.ps_addrs:\n ps_addrs = args.ps_addrs.split(\",\")\n\n for addr in ps_addrs:\n # addr is in the form as \"ps-pod-name.namespace.svc:port\"\n channel = build_channel(addr)\n\n # Wait the channel is ready by a Future object.\n grpc.channel_ready_future(channel).result()\n logger.info(\n \"grpc channel %s to connect pod %s is ready\"\n % (addr, addr.split(\".\")[0])\n )\n ps_channels.append(channel)\n\n worker = Worker(\n args,\n channel=master_channel,\n ps_channels=ps_channels,\n set_parallelism=True,\n )\n worker.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl/python/worker/main.py"}]}
| 990 | 397 |
gh_patches_debug_33130
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-1666
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] evaluate fails with Imputer
**Describe the bug**
Calling `evaluate` with a pipeline and imputer fails.
**To Reproduce**
```python
import pandas as pd
import numpy as np
from sktime.transformations.series.impute import Imputer
from sktime.forecasting.compose import ForecastingPipeline
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.model_evaluation import evaluate
from sktime.forecasting.model_selection import SlidingWindowSplitter

y = pd.Series(np.random.normal(size=100))
X = pd.DataFrame(np.random.normal(size=(100, 2)))
X.iloc[3, 1] = np.nan

cv = SlidingWindowSplitter(fh=[1, 2, 3])

forecaster = ForecastingPipeline([
    ("impute", Imputer()),
    ("forecast", NaiveForecaster())
])

evaluate(forecaster, cv=cv, y=y, X=X)
```
**Expected behavior**
No error.
**Versions**
<details>
System:
python: 3.7.8 | packaged by conda-forge | (default, Jul 31 2020, 02:37:09) [Clang 10.0.1 ]
executable: /Users/mloning/.conda/envs/sktime-dev/bin/python
machine: Darwin-19.6.0-x86_64-i386-64bit
Python dependencies:
pip: 21.2.4
setuptools: 49.6.0.post20210108
sklearn: 0.24.2
sktime: 0.8.1
statsmodels: 0.12.1
numpy: 1.19.3
scipy: 1.6.0
Cython: 0.29.21
pandas: 1.2.3
matplotlib: 3.3.4
joblib: 1.0.1
numba: 0.53.1
pmdarima: 1.8.2
tsfresh: 0.17.0
</details>
<!-- Thanks for contributing! -->
</issue>
<code>
[start of sktime/transformations/series/impute.py]
1 #!/usr/bin/env python3 -u
2 # -*- coding: utf-8 -*-
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Utilities to impute series with missing values."""
5
6 __author__ = ["Martin Walter"]
7 __all__ = ["Imputer"]
8
9
10 import numpy as np
11 import pandas as pd
12
13 from sklearn.base import clone
14 from sklearn.utils import check_random_state
15
16 from sktime.transformations.base import _SeriesToSeriesTransformer
17 from sktime.utils.validation.series import check_series
18 from sktime.forecasting.trend import PolynomialTrendForecaster
19 from sktime.forecasting.base import ForecastingHorizon
20
21
22 class Imputer(_SeriesToSeriesTransformer):
23 """Missing value imputation.
24
25 The Imputer transforms input series by replacing missing values according
26 to an imputation strategy specified by `method`.
27
28 Parameters
29 ----------
30 method : str, default="drift"
31 Method to fill the missing values values.
32
33 * "drift" : drift/trend values by sktime.PolynomialTrendForecaster()
34 * "linear" : linear interpolation, by pd.Series.interpolate()
35 * "nearest" : use nearest value, by pd.Series.interpolate()
36 * "constant" : same constant value (given in arg value) for all NaN
37 * "mean" : pd.Series.mean()
38 * "median" : pd.Series.median()
39 * "backfill" ot "bfill" : adapted from pd.Series.fillna()
40 * "pad" or "ffill" : adapted from pd.Series.fillna()
41 * "random" : random values between pd.Series.min() and .max()
42 * "forecaster" : use an sktime Forecaster, given in arg forecaster
43
44 missing_values : int/float/str, default=None
45 The placeholder for the missing values. All occurrences of
46 missing_values will be imputed. If None then np.nan is used.
47 value : int/float, default=None
48 Value to use to fill missing values when method="constant".
49 forecaster : Any Forecaster based on sktime.BaseForecaster, default=None
50 Use a given Forecaster to impute by insample predictions when
51 method="forecaster". Before fitting, missing data is imputed with
52 method="ffill" or "bfill" as heuristic.
53 random_state : int/float/str, optional
54 Value to set random.seed() if method="random", default None
55
56 Examples
57 --------
58 >>> from sktime.transformations.series.impute import Imputer
59 >>> from sktime.datasets import load_airline
60 >>> y = load_airline()
61 >>> transformer = Imputer(method="drift")
62 >>> y_hat = transformer.fit_transform(y)
63 """
64
65 _tags = {
66 "fit-in-transform": True,
67 "handles-missing-data": True,
68 "skip-inverse-transform": True,
69 "univariate-only": False,
70 }
71
72 def __init__(
73 self,
74 method="drift",
75 random_state=None,
76 value=None,
77 forecaster=None,
78 missing_values=None,
79 ):
80
81 self.method = method
82 self.missing_values = missing_values
83 self.value = value
84 self.forecaster = forecaster
85 self.random_state = random_state
86 super(Imputer, self).__init__()
87
88 def transform(self, Z, X=None):
89 """Transform data.
90
91 Returns a transformed version of Z.
92
93 Parameters
94 ----------
95 Z : pd.Series, pd.DataFrame
96
97 Returns
98 -------
99 Z : pd.Series, pd.DataFrame
100 Transformed time series(es).
101 """
102 self.check_is_fitted()
103 self._check_method()
104 Z = check_series(Z)
105 Z = Z.copy()
106
107 # replace missing_values with np.nan
108 if self.missing_values:
109 Z = Z.replace(to_replace=self.missing_values, value=np.nan)
110
111 if not _has_missing_values(Z):
112 return Z
113
114 elif self.method == "random":
115 if isinstance(Z, pd.DataFrame):
116 for col in Z:
117 Z[col] = Z[col].apply(
118 lambda i: self._get_random(Z[col]) if np.isnan(i) else i
119 )
120 else:
121 Z = Z.apply(lambda i: self._get_random(Z) if np.isnan(i) else i)
122 elif self.method == "constant":
123 Z = Z.fillna(value=self.value)
124 elif self.method in ["backfill", "bfill", "pad", "ffill"]:
125 Z = Z.fillna(method=self.method)
126 elif self.method == "drift":
127 forecaster = PolynomialTrendForecaster(degree=1)
128 Z = _impute_with_forecaster(forecaster, Z)
129 elif self.method == "forecaster":
130 forecaster = clone(self.forecaster)
131 Z = _impute_with_forecaster(forecaster, Z)
132 elif self.method == "mean":
133 Z = Z.fillna(value=Z.mean())
134 elif self.method == "median":
135 Z = Z.fillna(value=Z.median())
136 elif self.method in ["nearest", "linear"]:
137 Z = Z.interpolate(method=self.method)
138 else:
139 raise ValueError(f"`method`: {self.method} not available.")
140 # fill first/last elements of series,
141 # as some methods (e.g. "linear") cant impute those
142 Z = Z.fillna(method="ffill").fillna(method="backfill")
143 return Z
144
145 def _check_method(self):
146 if (
147 self.value is not None
148 and self.method != "constant"
149 or self.method == "constant"
150 and self.value is None
151 ):
152 raise ValueError(
153 """Imputing with a value can only be
154 used if method="constant" and if parameter "value" is not None"""
155 )
156 elif (
157 self.forecaster is not None
158 and self.method != "forecaster"
159 or self.method == "forecaster"
160 and self.forecaster is None
161 ):
162 raise ValueError(
163 """Imputing with a forecaster can only be used if
164 method=\"forecaster\" and if arg forecaster is not None"""
165 )
166 else:
167 pass
168
169 def _get_random(self, Z):
170 """Create a random int or float value.
171
172 :param Z: Series
173 :type Z: pd.Series
174 :return: Random int or float between min and max of Z
175 :rtype: int/float
176 """
177 rng = check_random_state(self.random_state)
178 # check if series contains only int or int-like values (e.g. 3.0)
179 if (Z.dropna() % 1 == 0).all():
180 return rng.randint(Z.min(), Z.max())
181 else:
182 return rng.uniform(Z.min(), Z.max())
183
184
185 def _impute_with_forecaster(forecaster, Z):
186 """Use a given forecaster for imputation by in-sample predictions.
187
188 Parameters
189 ----------
190 forecaster: Forecaster
191 Forecaster to use for imputation
192 Z : pd.Series or pd.DataFrame
193 Series to impute.
194
195 Returns
196 -------
197 zt : pd.Series or pd.DataFrame
198 Series with imputed values.
199 """
200 if isinstance(Z, pd.Series):
201 series = [Z]
202 elif isinstance(Z, pd.DataFrame):
203 series = [Z[column] for column in Z]
204
205 for z in series:
206 # define fh based on index of missing values
207 na_index = z.index[z.isna()]
208 fh = ForecastingHorizon(values=na_index, is_relative=False)
209
210 # fill NaN before fitting with ffill and backfill (heuristic)
211 forecaster.fit(y=z.fillna(method="ffill").fillna(method="backfill"), fh=fh)
212
213 # replace missing values with predicted values
214 z[na_index] = forecaster.predict()
215 return Z
216
217
218 def _has_missing_values(Z):
219 return Z.isnull().to_numpy().any()
220
[end of sktime/transformations/series/impute.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sktime/transformations/series/impute.py b/sktime/transformations/series/impute.py
--- a/sktime/transformations/series/impute.py
+++ b/sktime/transformations/series/impute.py
@@ -9,14 +9,13 @@
import numpy as np
import pandas as pd
-
from sklearn.base import clone
from sklearn.utils import check_random_state
+from sktime.forecasting.base import ForecastingHorizon
+from sktime.forecasting.trend import PolynomialTrendForecaster
from sktime.transformations.base import _SeriesToSeriesTransformer
from sktime.utils.validation.series import check_series
-from sktime.forecasting.trend import PolynomialTrendForecaster
-from sktime.forecasting.base import ForecastingHorizon
class Imputer(_SeriesToSeriesTransformer):
@@ -111,7 +110,7 @@
if not _has_missing_values(Z):
return Z
- elif self.method == "random":
+ if self.method == "random":
if isinstance(Z, pd.DataFrame):
for col in Z:
Z[col] = Z[col].apply(
@@ -203,15 +202,16 @@
series = [Z[column] for column in Z]
for z in series:
- # define fh based on index of missing values
- na_index = z.index[z.isna()]
- fh = ForecastingHorizon(values=na_index, is_relative=False)
+ if _has_missing_values(z):
+ # define fh based on index of missing values
+ na_index = z.index[z.isna()]
+ fh = ForecastingHorizon(values=na_index, is_relative=False)
- # fill NaN before fitting with ffill and backfill (heuristic)
- forecaster.fit(y=z.fillna(method="ffill").fillna(method="backfill"), fh=fh)
+ # fill NaN before fitting with ffill and backfill (heuristic)
+ forecaster.fit(y=z.fillna(method="ffill").fillna(method="backfill"), fh=fh)
- # replace missing values with predicted values
- z[na_index] = forecaster.predict()
+ # replace missing values with predicted values
+ z[na_index] = forecaster.predict()
return Z
|
{"golden_diff": "diff --git a/sktime/transformations/series/impute.py b/sktime/transformations/series/impute.py\n--- a/sktime/transformations/series/impute.py\n+++ b/sktime/transformations/series/impute.py\n@@ -9,14 +9,13 @@\n \n import numpy as np\n import pandas as pd\n-\n from sklearn.base import clone\n from sklearn.utils import check_random_state\n \n+from sktime.forecasting.base import ForecastingHorizon\n+from sktime.forecasting.trend import PolynomialTrendForecaster\n from sktime.transformations.base import _SeriesToSeriesTransformer\n from sktime.utils.validation.series import check_series\n-from sktime.forecasting.trend import PolynomialTrendForecaster\n-from sktime.forecasting.base import ForecastingHorizon\n \n \n class Imputer(_SeriesToSeriesTransformer):\n@@ -111,7 +110,7 @@\n if not _has_missing_values(Z):\n return Z\n \n- elif self.method == \"random\":\n+ if self.method == \"random\":\n if isinstance(Z, pd.DataFrame):\n for col in Z:\n Z[col] = Z[col].apply(\n@@ -203,15 +202,16 @@\n series = [Z[column] for column in Z]\n \n for z in series:\n- # define fh based on index of missing values\n- na_index = z.index[z.isna()]\n- fh = ForecastingHorizon(values=na_index, is_relative=False)\n+ if _has_missing_values(z):\n+ # define fh based on index of missing values\n+ na_index = z.index[z.isna()]\n+ fh = ForecastingHorizon(values=na_index, is_relative=False)\n \n- # fill NaN before fitting with ffill and backfill (heuristic)\n- forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n+ # fill NaN before fitting with ffill and backfill (heuristic)\n+ forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n \n- # replace missing values with predicted values\n- z[na_index] = forecaster.predict()\n+ # replace missing values with predicted values\n+ z[na_index] = forecaster.predict()\n return Z\n", "issue": "[BUG] evaluate fails with Imputer\n**Describe the bug**\r\nCalling `evaluate` with a pipeline and imputer fails. \r\n\r\n**To Reproduce**\r\n```python\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sktime.transformations.series.impute import Imputer\r\nfrom sktime.forecasting.compose import ForecastingPipeline\r\nfrom sktime.forecasting.naive import NaiveForecaster\r\nfrom sktime.forecasting.model_evaluation import evaluate\r\nfrom sktime.forecasting.model_selection import SlidingWindowSplitter\r\n\r\ny = pd.Series(np.random.normal(size=100))\r\nX = pd.DataFrame(np.random.normal(size=(100, 2)))\r\nX.iloc[3, 1] = np.nan\r\n\r\ncv = SlidingWindowSplitter(fh=[1, 2, 3])\r\n\r\nforecaster = ForecastingPipeline([\r\n (\"impute\", Imputer()),\r\n (\"forecast\", NaiveForecaster())\r\n])\r\n\r\nevaluate(forecaster, cv=cv, y=y, X=X)\r\n```\r\n\r\n**Expected behavior**\r\nNo error. \r\n\r\n**Versions**\r\n<details>\r\nSystem:\r\n python: 3.7.8 | packaged by conda-forge | (default, Jul 31 2020, 02:37:09) [Clang 10.0.1 ]\r\nexecutable: /Users/mloning/.conda/envs/sktime-dev/bin/python\r\n machine: Darwin-19.6.0-x86_64-i386-64bit\r\n\r\nPython dependencies:\r\n pip: 21.2.4\r\n setuptools: 49.6.0.post20210108\r\n sklearn: 0.24.2\r\n sktime: 0.8.1\r\n statsmodels: 0.12.1\r\n numpy: 1.19.3\r\n scipy: 1.6.0\r\n Cython: 0.29.21\r\n pandas: 1.2.3\r\n matplotlib: 3.3.4\r\n joblib: 1.0.1\r\n numba: 0.53.1\r\n pmdarima: 1.8.2\r\n tsfresh: 0.17.0\r\n</details>\r\n\r\n<!-- Thanks for contributing! 
-->\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3 -u\n# -*- coding: utf-8 -*-\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Utilities to impute series with missing values.\"\"\"\n\n__author__ = [\"Martin Walter\"]\n__all__ = [\"Imputer\"]\n\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.base import clone\nfrom sklearn.utils import check_random_state\n\nfrom sktime.transformations.base import _SeriesToSeriesTransformer\nfrom sktime.utils.validation.series import check_series\nfrom sktime.forecasting.trend import PolynomialTrendForecaster\nfrom sktime.forecasting.base import ForecastingHorizon\n\n\nclass Imputer(_SeriesToSeriesTransformer):\n \"\"\"Missing value imputation.\n\n The Imputer transforms input series by replacing missing values according\n to an imputation strategy specified by `method`.\n\n Parameters\n ----------\n method : str, default=\"drift\"\n Method to fill the missing values values.\n\n * \"drift\" : drift/trend values by sktime.PolynomialTrendForecaster()\n * \"linear\" : linear interpolation, by pd.Series.interpolate()\n * \"nearest\" : use nearest value, by pd.Series.interpolate()\n * \"constant\" : same constant value (given in arg value) for all NaN\n * \"mean\" : pd.Series.mean()\n * \"median\" : pd.Series.median()\n * \"backfill\" ot \"bfill\" : adapted from pd.Series.fillna()\n * \"pad\" or \"ffill\" : adapted from pd.Series.fillna()\n * \"random\" : random values between pd.Series.min() and .max()\n * \"forecaster\" : use an sktime Forecaster, given in arg forecaster\n\n missing_values : int/float/str, default=None\n The placeholder for the missing values. All occurrences of\n missing_values will be imputed. If None then np.nan is used.\n value : int/float, default=None\n Value to use to fill missing values when method=\"constant\".\n forecaster : Any Forecaster based on sktime.BaseForecaster, default=None\n Use a given Forecaster to impute by insample predictions when\n method=\"forecaster\". 
Before fitting, missing data is imputed with\n method=\"ffill\" or \"bfill\" as heuristic.\n random_state : int/float/str, optional\n Value to set random.seed() if method=\"random\", default None\n\n Examples\n --------\n >>> from sktime.transformations.series.impute import Imputer\n >>> from sktime.datasets import load_airline\n >>> y = load_airline()\n >>> transformer = Imputer(method=\"drift\")\n >>> y_hat = transformer.fit_transform(y)\n \"\"\"\n\n _tags = {\n \"fit-in-transform\": True,\n \"handles-missing-data\": True,\n \"skip-inverse-transform\": True,\n \"univariate-only\": False,\n }\n\n def __init__(\n self,\n method=\"drift\",\n random_state=None,\n value=None,\n forecaster=None,\n missing_values=None,\n ):\n\n self.method = method\n self.missing_values = missing_values\n self.value = value\n self.forecaster = forecaster\n self.random_state = random_state\n super(Imputer, self).__init__()\n\n def transform(self, Z, X=None):\n \"\"\"Transform data.\n\n Returns a transformed version of Z.\n\n Parameters\n ----------\n Z : pd.Series, pd.DataFrame\n\n Returns\n -------\n Z : pd.Series, pd.DataFrame\n Transformed time series(es).\n \"\"\"\n self.check_is_fitted()\n self._check_method()\n Z = check_series(Z)\n Z = Z.copy()\n\n # replace missing_values with np.nan\n if self.missing_values:\n Z = Z.replace(to_replace=self.missing_values, value=np.nan)\n\n if not _has_missing_values(Z):\n return Z\n\n elif self.method == \"random\":\n if isinstance(Z, pd.DataFrame):\n for col in Z:\n Z[col] = Z[col].apply(\n lambda i: self._get_random(Z[col]) if np.isnan(i) else i\n )\n else:\n Z = Z.apply(lambda i: self._get_random(Z) if np.isnan(i) else i)\n elif self.method == \"constant\":\n Z = Z.fillna(value=self.value)\n elif self.method in [\"backfill\", \"bfill\", \"pad\", \"ffill\"]:\n Z = Z.fillna(method=self.method)\n elif self.method == \"drift\":\n forecaster = PolynomialTrendForecaster(degree=1)\n Z = _impute_with_forecaster(forecaster, Z)\n elif self.method == \"forecaster\":\n forecaster = clone(self.forecaster)\n Z = _impute_with_forecaster(forecaster, Z)\n elif self.method == \"mean\":\n Z = Z.fillna(value=Z.mean())\n elif self.method == \"median\":\n Z = Z.fillna(value=Z.median())\n elif self.method in [\"nearest\", \"linear\"]:\n Z = Z.interpolate(method=self.method)\n else:\n raise ValueError(f\"`method`: {self.method} not available.\")\n # fill first/last elements of series,\n # as some methods (e.g. \"linear\") cant impute those\n Z = Z.fillna(method=\"ffill\").fillna(method=\"backfill\")\n return Z\n\n def _check_method(self):\n if (\n self.value is not None\n and self.method != \"constant\"\n or self.method == \"constant\"\n and self.value is None\n ):\n raise ValueError(\n \"\"\"Imputing with a value can only be\n used if method=\"constant\" and if parameter \"value\" is not None\"\"\"\n )\n elif (\n self.forecaster is not None\n and self.method != \"forecaster\"\n or self.method == \"forecaster\"\n and self.forecaster is None\n ):\n raise ValueError(\n \"\"\"Imputing with a forecaster can only be used if\n method=\\\"forecaster\\\" and if arg forecaster is not None\"\"\"\n )\n else:\n pass\n\n def _get_random(self, Z):\n \"\"\"Create a random int or float value.\n\n :param Z: Series\n :type Z: pd.Series\n :return: Random int or float between min and max of Z\n :rtype: int/float\n \"\"\"\n rng = check_random_state(self.random_state)\n # check if series contains only int or int-like values (e.g. 
3.0)\n if (Z.dropna() % 1 == 0).all():\n return rng.randint(Z.min(), Z.max())\n else:\n return rng.uniform(Z.min(), Z.max())\n\n\ndef _impute_with_forecaster(forecaster, Z):\n \"\"\"Use a given forecaster for imputation by in-sample predictions.\n\n Parameters\n ----------\n forecaster: Forecaster\n Forecaster to use for imputation\n Z : pd.Series or pd.DataFrame\n Series to impute.\n\n Returns\n -------\n zt : pd.Series or pd.DataFrame\n Series with imputed values.\n \"\"\"\n if isinstance(Z, pd.Series):\n series = [Z]\n elif isinstance(Z, pd.DataFrame):\n series = [Z[column] for column in Z]\n\n for z in series:\n # define fh based on index of missing values\n na_index = z.index[z.isna()]\n fh = ForecastingHorizon(values=na_index, is_relative=False)\n\n # fill NaN before fitting with ffill and backfill (heuristic)\n forecaster.fit(y=z.fillna(method=\"ffill\").fillna(method=\"backfill\"), fh=fh)\n\n # replace missing values with predicted values\n z[na_index] = forecaster.predict()\n return Z\n\n\ndef _has_missing_values(Z):\n return Z.isnull().to_numpy().any()\n", "path": "sktime/transformations/series/impute.py"}]}
| 3,311 | 506 |
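As a quick, self-contained illustration of the guard introduced by the sktime patch captured in the row above (only fit and predict an imputation model for series that actually contain missing values), here is a pandas-only sketch. The mean fill stands in for the library's forecaster-based prediction, and the helper name is purely illustrative.

```python
import numpy as np
import pandas as pd

def impute_if_needed(frame: pd.DataFrame) -> pd.DataFrame:
    """Only touch columns that really contain NaNs, mirroring the per-series guard."""
    out = frame.copy()
    for col in out:
        series = out[col]
        if series.isna().any():                     # the `_has_missing_values(z)` idea
            na_index = series.index[series.isna()]
            out.loc[na_index, col] = series.mean()  # stand-in for forecaster.predict()
    return out

X = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [1.0, 2.0, 3.0]})  # "b" has no NaNs
print(impute_if_needed(X))
```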
gh_patches_debug_31741
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-738
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support arbitrary sample_shape in Rejector
This will require the `proposal` to accept an optional `sample_shape` arg and the `log_prob_accept()` method to support broadcasting.
</issue>
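A minimal sketch of what that change implies for the two callables involved, kept separate from the Pyro class itself: the proposal accepts an optional `sample_shape`, and the acceptance log-probability is computed elementwise so it broadcasts over whatever shape the proposal returns. The `propose` and `log_prob_accept` pair below is hypothetical and only meant to show the shapes.

```python
import torch
from torch.distributions import Normal

base = Normal(torch.zeros(3), torch.ones(3))   # batch_shape = (3,)

def propose(sample_shape=torch.Size()):
    return base.rsample(sample_shape)           # shape = sample_shape + (3,)

def log_prob_accept(x):
    return -x.pow(2)                            # elementwise, hence broadcast-friendly

x = propose(torch.Size([5]))                    # shape (5, 3)
print(log_prob_accept(x).shape)                 # torch.Size([5, 3])
```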
<code>
[start of pyro/distributions/rejector.py]
1 from __future__ import absolute_import, division, print_function
2
3 import torch
4 from pyro.distributions.distribution import Distribution
5 from pyro.distributions.score_parts import ScoreParts
6 from pyro.distributions.util import copy_docs_from
7
8
9 @copy_docs_from(Distribution)
10 class Rejector(Distribution):
11 """
12 Rejection sampled distribution given an acceptance rate function.
13
14 :param Distribution propose: A proposal distribution that samples batched
15 propsals via `propose()`.
16 :param callable log_prob_accept: A callable that inputs a batch of
17 proposals and returns a batch of log acceptance probabilities.
18 :param log_scale: Total log probability of acceptance.
19 """
20 stateful = True
21 reparameterized = True
22
23 def __init__(self, propose, log_prob_accept, log_scale):
24 self.propose = propose
25 self.log_prob_accept = log_prob_accept
26 self._log_scale = log_scale
27
28 # These LRU(1) caches allow work to be shared across different method calls.
29 self._log_prob_accept_cache = None, None
30 self._propose_batch_log_pdf_cache = None, None
31
32 def _log_prob_accept(self, x):
33 if x is not self._log_prob_accept_cache[0]:
34 self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale
35 return self._log_prob_accept_cache[1]
36
37 def _propose_batch_log_pdf(self, x):
38 if x is not self._propose_batch_log_pdf_cache[0]:
39 self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)
40 return self._propose_batch_log_pdf_cache[1]
41
42 def sample(self, sample_shape=torch.Size()):
43 if sample_shape:
44 raise ValueError("Arbitrary `sample_shape` not supported by Rejector class.")
45 # Implements parallel batched accept-reject sampling.
46 x = self.propose()
47 log_prob_accept = self.log_prob_accept(x)
48 probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
49 done = torch.bernoulli(probs).byte()
50 while not done.all():
51 proposed_x = self.propose()
52 log_prob_accept = self.log_prob_accept(proposed_x)
53 prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
54 accept = torch.bernoulli(prob_accept).byte() & ~done
55 if accept.any():
56 x[accept] = proposed_x[accept]
57 done |= accept
58 return x
59
60 def log_prob(self, x):
61 return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)
62
63 def score_parts(self, x):
64 score_function = self._log_prob_accept(x)
65 log_pdf = self.log_prob(x)
66 return ScoreParts(log_pdf, score_function, log_pdf)
67
[end of pyro/distributions/rejector.py]
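To make the role of `_log_scale` in `log_prob` concrete, a small numeric check under an assumed setup: standard normal proposal, acceptance exactly when x > 0, so the total acceptance probability is 0.5 and the rejection-sampled density is a half-normal. This is a sanity-check sketch, not part of the library.

```python
import math
import torch
from torch.distributions import Normal

q = Normal(0.0, 1.0)              # proposal
x = torch.tensor(0.7)             # an accepted point (x > 0)
log_scale = math.log(0.5)         # total log probability of acceptance
log_prob_accept = 0.0             # points with x > 0 are always accepted
log_p = q.log_prob(x) + log_prob_accept - log_scale
# the half-normal density on x > 0 is twice the standard normal density
print(torch.isclose(log_p.exp(), 2.0 * q.log_prob(x).exp()))
```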
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pyro/distributions/rejector.py b/pyro/distributions/rejector.py
--- a/pyro/distributions/rejector.py
+++ b/pyro/distributions/rejector.py
@@ -12,7 +12,8 @@
Rejection sampled distribution given an acceptance rate function.
:param Distribution propose: A proposal distribution that samples batched
- propsals via `propose()`.
+ proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``
+ arg only if ``propose()`` supports a ``sample_shape`` arg.
:param callable log_prob_accept: A callable that inputs a batch of
proposals and returns a batch of log acceptance probabilities.
:param log_scale: Total log probability of acceptance.
@@ -40,15 +41,13 @@
return self._propose_batch_log_pdf_cache[1]
def sample(self, sample_shape=torch.Size()):
- if sample_shape:
- raise ValueError("Arbitrary `sample_shape` not supported by Rejector class.")
# Implements parallel batched accept-reject sampling.
- x = self.propose()
+ x = self.propose(sample_shape) if sample_shape else self.propose()
log_prob_accept = self.log_prob_accept(x)
probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
done = torch.bernoulli(probs).byte()
while not done.all():
- proposed_x = self.propose()
+ proposed_x = self.propose(sample_shape) if sample_shape else self.propose()
log_prob_accept = self.log_prob_accept(proposed_x)
prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
accept = torch.bernoulli(prob_accept).byte() & ~done
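For reference, a free-standing sketch of the same batched accept-reject loop with an optional `sample_shape`, written against plain callables rather than the distribution class; the truncation toy at the end is made up purely to exercise the shapes.

```python
import torch

def rejection_sample(propose, log_prob_accept, sample_shape=torch.Size()):
    """Illustrative batched accept-reject loop, not the Pyro class itself."""
    x = propose(sample_shape) if sample_shape else propose()
    done = torch.bernoulli(log_prob_accept(x).exp().clamp(0.0, 1.0)).bool()
    while not done.all():
        proposed = propose(sample_shape) if sample_shape else propose()
        accept = torch.bernoulli(log_prob_accept(proposed).exp().clamp(0.0, 1.0)).bool() & ~done
        x = torch.where(accept, proposed, x)
        done |= accept
    return x

# toy check: keep only |x| < 1 from a standard normal, with an extra sample dimension
propose = lambda shape=torch.Size(): torch.randn(shape + (4,))
log_prob_accept = lambda x: torch.where(x.abs() < 1, torch.zeros_like(x), torch.full_like(x, float("-inf")))
print(rejection_sample(propose, log_prob_accept, torch.Size([2])).shape)  # torch.Size([2, 4])
```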
|
{"golden_diff": "diff --git a/pyro/distributions/rejector.py b/pyro/distributions/rejector.py\n--- a/pyro/distributions/rejector.py\n+++ b/pyro/distributions/rejector.py\n@@ -12,7 +12,8 @@\n Rejection sampled distribution given an acceptance rate function.\n \n :param Distribution propose: A proposal distribution that samples batched\n- propsals via `propose()`.\n+ proposals via ``propose()``. :meth:`sample` supports a ``sample_shape``\n+ arg only if ``propose()`` supports a ``sample_shape`` arg.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n@@ -40,15 +41,13 @@\n return self._propose_batch_log_pdf_cache[1]\n \n def sample(self, sample_shape=torch.Size()):\n- if sample_shape:\n- raise ValueError(\"Arbitrary `sample_shape` not supported by Rejector class.\")\n # Implements parallel batched accept-reject sampling.\n- x = self.propose()\n+ x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = torch.bernoulli(probs).byte()\n while not done.all():\n- proposed_x = self.propose()\n+ proposed_x = self.propose(sample_shape) if sample_shape else self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n", "issue": "Support arbitrary sample_shape in Rejector\nThis will require the `proposal` to accept an optional `sample_shape` arg and the `log_prob_accept()` method to support broadcasting.\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import copy_docs_from\n\n\n@copy_docs_from(Distribution)\nclass Rejector(Distribution):\n \"\"\"\n Rejection sampled distribution given an acceptance rate function.\n\n :param Distribution propose: A proposal distribution that samples batched\n propsals via `propose()`.\n :param callable log_prob_accept: A callable that inputs a batch of\n proposals and returns a batch of log acceptance probabilities.\n :param log_scale: Total log probability of acceptance.\n \"\"\"\n stateful = True\n reparameterized = True\n\n def __init__(self, propose, log_prob_accept, log_scale):\n self.propose = propose\n self.log_prob_accept = log_prob_accept\n self._log_scale = log_scale\n\n # These LRU(1) caches allow work to be shared across different method calls.\n self._log_prob_accept_cache = None, None\n self._propose_batch_log_pdf_cache = None, None\n\n def _log_prob_accept(self, x):\n if x is not self._log_prob_accept_cache[0]:\n self._log_prob_accept_cache = x, self.log_prob_accept(x) - self._log_scale\n return self._log_prob_accept_cache[1]\n\n def _propose_batch_log_pdf(self, x):\n if x is not self._propose_batch_log_pdf_cache[0]:\n self._propose_batch_log_pdf_cache = x, self.propose.log_prob(x)\n return self._propose_batch_log_pdf_cache[1]\n\n def sample(self, sample_shape=torch.Size()):\n if sample_shape:\n raise ValueError(\"Arbitrary `sample_shape` not supported by Rejector class.\")\n # Implements parallel batched accept-reject sampling.\n x = self.propose()\n log_prob_accept = self.log_prob_accept(x)\n probs = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n done = 
torch.bernoulli(probs).byte()\n while not done.all():\n proposed_x = self.propose()\n log_prob_accept = self.log_prob_accept(proposed_x)\n prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)\n accept = torch.bernoulli(prob_accept).byte() & ~done\n if accept.any():\n x[accept] = proposed_x[accept]\n done |= accept\n return x\n\n def log_prob(self, x):\n return self._propose_batch_log_pdf(x) + self._log_prob_accept(x)\n\n def score_parts(self, x):\n score_function = self._log_prob_accept(x)\n log_pdf = self.log_prob(x)\n return ScoreParts(log_pdf, score_function, log_pdf)\n", "path": "pyro/distributions/rejector.py"}]}
| 1,320 | 398 |
gh_patches_debug_15112
|
rasdani/github-patches
|
git_diff
|
PaddlePaddle__PaddleSpeech-1496
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[speechx] PaddleInference ds2
- [x] linear feat
- [x] thread pool #1400
- [x] queue for wav, frames, hiddens and so on.
- [x] decodeable interface
- [x] paddle inference
- [x] ctcdecoder online (swig_decoder)
</issue>
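The checklist above concerns speechx components, but the buffering idea it lists (bounded queues between the wav reader, feature frames and the model) can be sketched generically. The snippet below is a plain-Python producer/consumer illustration with made-up chunk sizes; it is not PaddleSpeech code.

```python
import queue
import threading

frame_queue: "queue.Queue[bytes]" = queue.Queue(maxsize=32)  # bounded: gives back-pressure
SENTINEL = b""

def producer(chunks):
    for chunk in chunks:
        frame_queue.put(chunk)        # blocks when the queue is full
    frame_queue.put(SENTINEL)

def consumer(results):
    while True:
        chunk = frame_queue.get()
        if chunk == SENTINEL:
            break
        results.append(len(chunk))    # stand-in for feature extraction + inference

results = []
worker = threading.Thread(target=consumer, args=(results,))
worker.start()
producer([b"\x00" * 320] * 10)        # ten fake 10 ms chunks of 16 kHz PCM16 audio
worker.join()
print(results)
```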
<code>
[start of paddlespeech/s2t/io/sampler.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import math
15
16 import numpy as np
17 from paddle import distributed as dist
18 from paddle.io import BatchSampler
19 from paddle.io import DistributedBatchSampler
20
21 from paddlespeech.s2t.utils.log import Log
22
23 logger = Log(__name__).getlog()
24
25 __all__ = [
26 "SortagradDistributedBatchSampler",
27 "SortagradBatchSampler",
28 ]
29
30
31 def _batch_shuffle(indices, batch_size, epoch, clipped=False):
32 """Put similarly-sized instances into minibatches for better efficiency
33 and make a batch-wise shuffle.
34
35 1. Sort the audio clips by duration.
36 2. Generate a random number `k`, k in [0, batch_size).
37 3. Randomly shift `k` instances in order to create different batches
38 for different epochs. Create minibatches.
39 4. Shuffle the minibatches.
40
41 :param indices: indexes. List of int.
42 :type indices: list
43 :param batch_size: Batch size. This size is also used for generate
44 a random number for batch shuffle.
45 :type batch_size: int
46 :param clipped: Whether to clip the heading (small shift) and trailing
47 (incomplete batch) instances.
48 :type clipped: bool
49 :return: Batch shuffled mainifest.
50 :rtype: list
51 """
52 rng = np.random.RandomState(epoch)
53 shift_len = rng.randint(0, batch_size - 1)
54 batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))
55 rng.shuffle(batch_indices)
56 batch_indices = [item for batch in batch_indices for item in batch]
57 assert clipped is False
58 if not clipped:
59 res_len = len(indices) - shift_len - len(batch_indices)
60 # when res_len is 0, will return whole list, len(List[-0:]) = len(List[:])
61 if res_len != 0:
62 batch_indices.extend(indices[-res_len:])
63 batch_indices.extend(indices[0:shift_len])
64 assert len(indices) == len(
65 batch_indices
66 ), f"_batch_shuffle: {len(indices)} : {len(batch_indices)} : {res_len} - {shift_len}"
67 return batch_indices
68
69
70 class SortagradDistributedBatchSampler(DistributedBatchSampler):
71 def __init__(self,
72 dataset,
73 batch_size,
74 num_replicas=None,
75 rank=None,
76 shuffle=False,
77 drop_last=False,
78 sortagrad=False,
79 shuffle_method="batch_shuffle"):
80 """Sortagrad Sampler for multi gpus.
81
82 Args:
83 dataset (paddle.io.Dataset):
84 batch_size (int): batch size for one gpu
85 num_replicas (int, optional): world size or numbers of gpus. Defaults to None.
86 rank (int, optional): rank id. Defaults to None.
87 shuffle (bool, optional): True for do shuffle, or else. Defaults to False.
88 drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.
89 sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.
90 shuffle_method (str, optional): shuffle method, "instance_shuffle" or "batch_shuffle". Defaults to "batch_shuffle".
91 """
92 super().__init__(dataset, batch_size, num_replicas, rank, shuffle,
93 drop_last)
94 self._sortagrad = sortagrad
95 self._shuffle_method = shuffle_method
96
97 def __iter__(self):
98 num_samples = len(self.dataset)
99 indices = np.arange(num_samples).tolist()
100 indices += indices[:(self.total_size - len(indices))]
101 assert len(indices) == self.total_size
102
103 # sort (by duration) or batch-wise shuffle the manifest
104 if self.shuffle:
105 if self.epoch == 0 and self._sortagrad:
106 logger.info(
107 f'rank: {dist.get_rank()} dataset sortagrad! epoch {self.epoch}'
108 )
109 else:
110 logger.info(
111 f'rank: {dist.get_rank()} dataset shuffle! epoch {self.epoch}'
112 )
113 if self._shuffle_method == "batch_shuffle":
114 # using `batch_size * nrank`, or will cause instability loss and nan or inf grad,
115 # since diff batch examlpe length in batches case instability loss in diff rank,
116 # e.g. rank0 maxlength 20, rank3 maxlength 1000
117 indices = _batch_shuffle(
118 indices,
119 self.batch_size * self.nranks,
120 self.epoch,
121 clipped=False)
122 elif self._shuffle_method == "instance_shuffle":
123 np.random.RandomState(self.epoch).shuffle(indices)
124 else:
125 raise ValueError("Unknown shuffle method %s." %
126 self._shuffle_method)
127 assert len(
128 indices
129 ) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
130
131 # slice `self.batch_size` examples by rank id
132 def _get_indices_by_batch_size(indices):
133 subsampled_indices = []
134 last_batch_size = self.total_size % (self.batch_size * self.nranks)
135 assert last_batch_size % self.nranks == 0
136 last_local_batch_size = last_batch_size // self.nranks
137
138 for i in range(self.local_rank * self.batch_size,
139 len(indices) - last_batch_size,
140 self.batch_size * self.nranks):
141 subsampled_indices.extend(indices[i:i + self.batch_size])
142
143 indices = indices[len(indices) - last_batch_size:]
144 subsampled_indices.extend(
145 indices[self.local_rank * last_local_batch_size:(
146 self.local_rank + 1) * last_local_batch_size])
147 return subsampled_indices
148
149 if self.nranks > 1:
150 indices = _get_indices_by_batch_size(indices)
151
152 assert len(indices) == self.num_samples
153 _sample_iter = iter(indices)
154
155 batch_indices = []
156 for idx in _sample_iter:
157 batch_indices.append(idx)
158 if len(batch_indices) == self.batch_size:
159 logger.debug(
160 f"rank: {dist.get_rank()} batch index: {batch_indices} ")
161 yield batch_indices
162 batch_indices = []
163 if not self.drop_last and len(batch_indices) > 0:
164 yield batch_indices
165
166 def __len__(self):
167 num_samples = self.num_samples
168 num_samples += int(not self.drop_last) * (self.batch_size - 1)
169 return num_samples // self.batch_size
170
171
172 class SortagradBatchSampler(BatchSampler):
173 def __init__(self,
174 dataset,
175 batch_size,
176 shuffle=False,
177 drop_last=False,
178 sortagrad=False,
179 shuffle_method="batch_shuffle"):
180 """Sortagrad Sampler for one gpu.
181
182 Args:
183 dataset (paddle.io.Dataset):
184 batch_size (int): batch size for one gpu
185 shuffle (bool, optional): True for do shuffle, or else. Defaults to False.
186 drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.
187 sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.
188 shuffle_method (str, optional): shuffle method, "instance_shuffle" or "batch_shuffle". Defaults to "batch_shuffle".
189 """
190 self.dataset = dataset
191
192 assert isinstance(batch_size, int) and batch_size > 0, \
193 "batch_size should be a positive integer"
194 self.batch_size = batch_size
195 assert isinstance(shuffle, bool), \
196 "shuffle should be a boolean value"
197 self.shuffle = shuffle
198 assert isinstance(drop_last, bool), \
199 "drop_last should be a boolean number"
200
201 self.drop_last = drop_last
202 self.epoch = 0
203 self.num_samples = int(math.ceil(len(self.dataset) * 1.0))
204 self.total_size = self.num_samples
205 self._sortagrad = sortagrad
206 self._shuffle_method = shuffle_method
207
208 def __iter__(self):
209 num_samples = len(self.dataset)
210 indices = np.arange(num_samples).tolist()
211 indices += indices[:(self.total_size - len(indices))]
212 assert len(indices) == self.total_size
213
214 # sort (by duration) or batch-wise shuffle the manifest
215 if self.shuffle:
216 if self.epoch == 0 and self._sortagrad:
217 logger.info(f'dataset sortagrad! epoch {self.epoch}')
218 else:
219 logger.info(f'dataset shuffle! epoch {self.epoch}')
220 if self._shuffle_method == "batch_shuffle":
221 indices = _batch_shuffle(
222 indices, self.batch_size, self.epoch, clipped=False)
223 elif self._shuffle_method == "instance_shuffle":
224 np.random.RandomState(self.epoch).shuffle(indices)
225 else:
226 raise ValueError("Unknown shuffle method %s." %
227 self._shuffle_method)
228 assert len(
229 indices
230 ) == self.total_size, f"batch shuffle examples error: {len(indices)} : {self.total_size}"
231
232 assert len(indices) == self.num_samples
233 _sample_iter = iter(indices)
234
235 batch_indices = []
236 for idx in _sample_iter:
237 batch_indices.append(idx)
238 if len(batch_indices) == self.batch_size:
239 logger.debug(
240 f"rank: {dist.get_rank()} batch index: {batch_indices} ")
241 yield batch_indices
242 batch_indices = []
243 if not self.drop_last and len(batch_indices) > 0:
244 yield batch_indices
245
246 self.epoch += 1
247
248 def __len__(self):
249 num_samples = self.num_samples
250 num_samples += int(not self.drop_last) * (self.batch_size - 1)
251 return num_samples // self.batch_size
252
[end of paddlespeech/s2t/io/sampler.py]
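A tiny standalone run of the batch-shuffle recipe documented in `_batch_shuffle` above (shift by a random `k`, group into fixed-size minibatches, shuffle the batches, then re-attach the clipped head and incomplete tail). The values are made up, and the snippet only checks that every index survives exactly once.

```python
import numpy as np

indices = list(range(10))        # assumed already sorted by duration
batch_size, epoch = 3, 0

rng = np.random.RandomState(epoch)
shift = rng.randint(0, batch_size - 1)                      # random head clip
batches = list(zip(*[iter(indices[shift:])] * batch_size))  # fixed-size minibatches
rng.shuffle(batches)                                        # shuffle whole batches
shuffled = [i for batch in batches for i in batch]
res = len(indices) - shift - len(shuffled)
if res:
    shuffled.extend(indices[-res:])                         # re-attach incomplete tail
shuffled.extend(indices[:shift])                            # re-attach clipped head
assert sorted(shuffled) == indices
print(shift, shuffled)
```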
[start of paddlespeech/t2s/modules/transformer/repeat.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # Modified from espnet(https://github.com/espnet/espnet)
15 """Repeat the same layer definition."""
16 import paddle
17
18
19 class MultiSequential(paddle.nn.Sequential):
20 """Multi-input multi-output paddle.nn.Sequential."""
21
22 def forward(self, *args):
23 """Repeat."""
24 for m in self:
25 args = m(*args)
26 return args
27
28
29 def repeat(N, fn):
30 """Repeat module N times.
31
32 Args:
33 N (int): Number of repeat time.
34 fn (Callable): Function to generate module.
35
36 Returns:
37 MultiSequential: Repeated model instance.
38 """
39 return MultiSequential(*[fn(n) for n in range(N)])
40
[end of paddlespeech/t2s/modules/transformer/repeat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py
--- a/paddlespeech/s2t/io/sampler.py
+++ b/paddlespeech/s2t/io/sampler.py
@@ -51,7 +51,7 @@
"""
rng = np.random.RandomState(epoch)
shift_len = rng.randint(0, batch_size - 1)
- batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))
+ batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))
rng.shuffle(batch_indices)
batch_indices = [item for batch in batch_indices for item in batch]
assert clipped is False
diff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py
--- a/paddlespeech/t2s/modules/transformer/repeat.py
+++ b/paddlespeech/t2s/modules/transformer/repeat.py
@@ -36,4 +36,4 @@
Returns:
MultiSequential: Repeated model instance.
"""
- return MultiSequential(*[fn(n) for n in range(N)])
+ return MultiSequential(* [fn(n) for n in range(N)])
|
{"golden_diff": "diff --git a/paddlespeech/s2t/io/sampler.py b/paddlespeech/s2t/io/sampler.py\n--- a/paddlespeech/s2t/io/sampler.py\n+++ b/paddlespeech/s2t/io/sampler.py\n@@ -51,7 +51,7 @@\n \"\"\"\n rng = np.random.RandomState(epoch)\n shift_len = rng.randint(0, batch_size - 1)\n- batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))\n+ batch_indices = list(zip(* [iter(indices[shift_len:])] * batch_size))\n rng.shuffle(batch_indices)\n batch_indices = [item for batch in batch_indices for item in batch]\n assert clipped is False\ndiff --git a/paddlespeech/t2s/modules/transformer/repeat.py b/paddlespeech/t2s/modules/transformer/repeat.py\n--- a/paddlespeech/t2s/modules/transformer/repeat.py\n+++ b/paddlespeech/t2s/modules/transformer/repeat.py\n@@ -36,4 +36,4 @@\n Returns:\n MultiSequential: Repeated model instance.\n \"\"\"\n- return MultiSequential(*[fn(n) for n in range(N)])\n+ return MultiSequential(* [fn(n) for n in range(N)])\n", "issue": "[speechx] PaddleInference ds2\n- [x] linear feat \r\n- [x] thread pool #1400 \r\n- [x] queue for wav, frames, hiddens and so on.\r\n- [x] decodeable interface\r\n- [x] paddle infererence\r\n- [x] ctcdecoder online (swig_decoder)\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\n\nimport numpy as np\nfrom paddle import distributed as dist\nfrom paddle.io import BatchSampler\nfrom paddle.io import DistributedBatchSampler\n\nfrom paddlespeech.s2t.utils.log import Log\n\nlogger = Log(__name__).getlog()\n\n__all__ = [\n \"SortagradDistributedBatchSampler\",\n \"SortagradBatchSampler\",\n]\n\n\ndef _batch_shuffle(indices, batch_size, epoch, clipped=False):\n \"\"\"Put similarly-sized instances into minibatches for better efficiency\n and make a batch-wise shuffle.\n\n 1. Sort the audio clips by duration.\n 2. Generate a random number `k`, k in [0, batch_size).\n 3. Randomly shift `k` instances in order to create different batches\n for different epochs. Create minibatches.\n 4. Shuffle the minibatches.\n\n :param indices: indexes. List of int.\n :type indices: list\n :param batch_size: Batch size. 
This size is also used for generate\n a random number for batch shuffle.\n :type batch_size: int\n :param clipped: Whether to clip the heading (small shift) and trailing\n (incomplete batch) instances.\n :type clipped: bool\n :return: Batch shuffled mainifest.\n :rtype: list\n \"\"\"\n rng = np.random.RandomState(epoch)\n shift_len = rng.randint(0, batch_size - 1)\n batch_indices = list(zip(*[iter(indices[shift_len:])] * batch_size))\n rng.shuffle(batch_indices)\n batch_indices = [item for batch in batch_indices for item in batch]\n assert clipped is False\n if not clipped:\n res_len = len(indices) - shift_len - len(batch_indices)\n # when res_len is 0, will return whole list, len(List[-0:]) = len(List[:])\n if res_len != 0:\n batch_indices.extend(indices[-res_len:])\n batch_indices.extend(indices[0:shift_len])\n assert len(indices) == len(\n batch_indices\n ), f\"_batch_shuffle: {len(indices)} : {len(batch_indices)} : {res_len} - {shift_len}\"\n return batch_indices\n\n\nclass SortagradDistributedBatchSampler(DistributedBatchSampler):\n def __init__(self,\n dataset,\n batch_size,\n num_replicas=None,\n rank=None,\n shuffle=False,\n drop_last=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"Sortagrad Sampler for multi gpus.\n\n Args:\n dataset (paddle.io.Dataset): \n batch_size (int): batch size for one gpu\n num_replicas (int, optional): world size or numbers of gpus. Defaults to None.\n rank (int, optional): rank id. Defaults to None.\n shuffle (bool, optional): True for do shuffle, or else. Defaults to False.\n drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.\n sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.\n shuffle_method (str, optional): shuffle method, \"instance_shuffle\" or \"batch_shuffle\". Defaults to \"batch_shuffle\".\n \"\"\"\n super().__init__(dataset, batch_size, num_replicas, rank, shuffle,\n drop_last)\n self._sortagrad = sortagrad\n self._shuffle_method = shuffle_method\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # sort (by duration) or batch-wise shuffle the manifest\n if self.shuffle:\n if self.epoch == 0 and self._sortagrad:\n logger.info(\n f'rank: {dist.get_rank()} dataset sortagrad! epoch {self.epoch}'\n )\n else:\n logger.info(\n f'rank: {dist.get_rank()} dataset shuffle! epoch {self.epoch}'\n )\n if self._shuffle_method == \"batch_shuffle\":\n # using `batch_size * nrank`, or will cause instability loss and nan or inf grad, \n # since diff batch examlpe length in batches case instability loss in diff rank, \n # e.g. 
rank0 maxlength 20, rank3 maxlength 1000\n indices = _batch_shuffle(\n indices,\n self.batch_size * self.nranks,\n self.epoch,\n clipped=False)\n elif self._shuffle_method == \"instance_shuffle\":\n np.random.RandomState(self.epoch).shuffle(indices)\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n self._shuffle_method)\n assert len(\n indices\n ) == self.total_size, f\"batch shuffle examples error: {len(indices)} : {self.total_size}\"\n\n # slice `self.batch_size` examples by rank id\n def _get_indices_by_batch_size(indices):\n subsampled_indices = []\n last_batch_size = self.total_size % (self.batch_size * self.nranks)\n assert last_batch_size % self.nranks == 0\n last_local_batch_size = last_batch_size // self.nranks\n\n for i in range(self.local_rank * self.batch_size,\n len(indices) - last_batch_size,\n self.batch_size * self.nranks):\n subsampled_indices.extend(indices[i:i + self.batch_size])\n\n indices = indices[len(indices) - last_batch_size:]\n subsampled_indices.extend(\n indices[self.local_rank * last_local_batch_size:(\n self.local_rank + 1) * last_local_batch_size])\n return subsampled_indices\n\n if self.nranks > 1:\n indices = _get_indices_by_batch_size(indices)\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n logger.debug(\n f\"rank: {dist.get_rank()} batch index: {batch_indices} \")\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n\n\nclass SortagradBatchSampler(BatchSampler):\n def __init__(self,\n dataset,\n batch_size,\n shuffle=False,\n drop_last=False,\n sortagrad=False,\n shuffle_method=\"batch_shuffle\"):\n \"\"\"Sortagrad Sampler for one gpu.\n\n Args:\n dataset (paddle.io.Dataset): \n batch_size (int): batch size for one gpu\n shuffle (bool, optional): True for do shuffle, or else. Defaults to False.\n drop_last (bool, optional): whether drop last batch which is less than batch size. Defaults to False.\n sortagrad (bool, optional): True, do sortgrad in first epoch, then shuffle as usual; or else. Defaults to False.\n shuffle_method (str, optional): shuffle method, \"instance_shuffle\" or \"batch_shuffle\". Defaults to \"batch_shuffle\".\n \"\"\"\n self.dataset = dataset\n\n assert isinstance(batch_size, int) and batch_size > 0, \\\n \"batch_size should be a positive integer\"\n self.batch_size = batch_size\n assert isinstance(shuffle, bool), \\\n \"shuffle should be a boolean value\"\n self.shuffle = shuffle\n assert isinstance(drop_last, bool), \\\n \"drop_last should be a boolean number\"\n\n self.drop_last = drop_last\n self.epoch = 0\n self.num_samples = int(math.ceil(len(self.dataset) * 1.0))\n self.total_size = self.num_samples\n self._sortagrad = sortagrad\n self._shuffle_method = shuffle_method\n\n def __iter__(self):\n num_samples = len(self.dataset)\n indices = np.arange(num_samples).tolist()\n indices += indices[:(self.total_size - len(indices))]\n assert len(indices) == self.total_size\n\n # sort (by duration) or batch-wise shuffle the manifest\n if self.shuffle:\n if self.epoch == 0 and self._sortagrad:\n logger.info(f'dataset sortagrad! epoch {self.epoch}')\n else:\n logger.info(f'dataset shuffle! 
epoch {self.epoch}')\n if self._shuffle_method == \"batch_shuffle\":\n indices = _batch_shuffle(\n indices, self.batch_size, self.epoch, clipped=False)\n elif self._shuffle_method == \"instance_shuffle\":\n np.random.RandomState(self.epoch).shuffle(indices)\n else:\n raise ValueError(\"Unknown shuffle method %s.\" %\n self._shuffle_method)\n assert len(\n indices\n ) == self.total_size, f\"batch shuffle examples error: {len(indices)} : {self.total_size}\"\n\n assert len(indices) == self.num_samples\n _sample_iter = iter(indices)\n\n batch_indices = []\n for idx in _sample_iter:\n batch_indices.append(idx)\n if len(batch_indices) == self.batch_size:\n logger.debug(\n f\"rank: {dist.get_rank()} batch index: {batch_indices} \")\n yield batch_indices\n batch_indices = []\n if not self.drop_last and len(batch_indices) > 0:\n yield batch_indices\n\n self.epoch += 1\n\n def __len__(self):\n num_samples = self.num_samples\n num_samples += int(not self.drop_last) * (self.batch_size - 1)\n return num_samples // self.batch_size\n", "path": "paddlespeech/s2t/io/sampler.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# Modified from espnet(https://github.com/espnet/espnet)\n\"\"\"Repeat the same layer definition.\"\"\"\nimport paddle\n\n\nclass MultiSequential(paddle.nn.Sequential):\n \"\"\"Multi-input multi-output paddle.nn.Sequential.\"\"\"\n\n def forward(self, *args):\n \"\"\"Repeat.\"\"\"\n for m in self:\n args = m(*args)\n return args\n\n\ndef repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: Repeated model instance.\n \"\"\"\n return MultiSequential(*[fn(n) for n in range(N)])\n", "path": "paddlespeech/t2s/modules/transformer/repeat.py"}]}
| 3,895 | 288 |
gh_patches_debug_15538
|
rasdani/github-patches
|
git_diff
|
Lightning-Universe__lightning-flash-438
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
show_predict_batch fails due to no targets being available
## 🐛 Bug
When calling the `show_predict_batch` method for image classification data, an error is thrown because no targets are available.
```python
for i, ax in enumerate(axs.ravel()):
# unpack images and labels
if isinstance(data, list):
_img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
elif isinstance(data, dict):
_img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
else:
raise TypeError(f"Unknown data type. Got: {type(data)}.")
# convert images to numpy
_img: np.ndarray = self._to_numpy(_img)
if isinstance(_label, torch.Tensor):
_label = _label.squeeze().tolist()
# show image and set label as subplot title
ax.imshow(_img)
ax.set_title(str(_label))
ax.axis('off')
```
The fix should be simple:
```python
for i, ax in enumerate(axs.ravel()):
# unpack images and labels
if isinstance(data, list):
# use the get method to return an empty string if no targets are available
_img, _label = data[i][DefaultDataKeys.INPUT], data[i].get([DefaultDataKeys.TARGET], "")
elif isinstance(data, dict):
# use the get method to return a list that contains an empty string if no targets are available
_img, _label = data[DefaultDataKeys.INPUT][i], data.get([DefaultDataKeys.TARGET], [""])[i]
else:
raise TypeError(f"Unknown data type. Got: {type(data)}.")
# convert images to numpy
_img: np.ndarray = self._to_numpy(_img)
if isinstance(_label, torch.Tensor):
_label = _label.squeeze().tolist()
# show image and set label as subplot title
ax.imshow(_img)
ax.set_title(str(_label))
ax.axis('off')
```
I can create a PR later, when I have time.
### To Reproduce
Just have flash installed.
#### Code sample
```python
from flash.core.data.utils import download_data
from flash.image import ImageClassificationData
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
datamodule = ImageClassificationData.from_folders(
train_folder="data/hymenoptera_data/train/",
val_folder="data/hymenoptera_data/val/",
test_folder="data/hymenoptera_data/test/",
predict_folder="data/hymenoptera_data/predict/"
)
datamodule.show_predict_batch()
```
This will give the following error message:
```python
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-4-ff8f29471c71> in <module>
----> 1 datamodule.show_predict_batch()
~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in show_predict_batch(self, hooks_names, reset)
225 """This function is used to visualize a batch from the predict dataloader."""
226 stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]
--> 227 self._show_batch(stage_name, hooks_names, reset=reset)
228
229 @staticmethod
~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in _show_batch(self, stage, func_names, reset)
203 _ = next(iter_dataloader)
204 data_fetcher: BaseVisualization = self.data_fetcher
--> 205 data_fetcher._show(stage, func_names)
206 if reset:
207 self.data_fetcher.batches[stage] = {}
~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in _show(self, running_stage, func_names_list)
110
111 def _show(self, running_stage: RunningStage, func_names_list: List[str]) -> None:
--> 112 self.show(self.batches[running_stage], running_stage, func_names_list)
113
114 def show(self, batch: Dict[str, Any], running_stage: RunningStage, func_names_list: List[str]) -> None:
~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in show(self, batch, running_stage, func_names_list)
124 hook_name = f"show_{func_name}"
125 if _is_overriden(hook_name, self, BaseVisualization):
--> 126 getattr(self, hook_name)(batch[func_name], running_stage)
127
128 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in show_load_sample(self, samples, running_stage)
144 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
145 win_title: str = f"{running_stage} - show_load_sample"
--> 146 self._show_images_and_labels(samples, len(samples), win_title)
147
148 def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in _show_images_and_labels(self, data, num_samples, title)
127 # unpack images and labels
128 if isinstance(data, list):
--> 129 _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
130 elif isinstance(data, dict):
131 _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
KeyError: <DefaultDataKeys.TARGET: 'target'>
```
### Expected behavior
The batch should be shown without labels.
</issue>
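One detail worth noting when comparing the snippet in the issue with the diff that was eventually merged (shown further down in this row): the issue text passes a list, `[DefaultDataKeys.TARGET]`, as the `.get()` key, whereas the merged change uses the key directly and gives the dict branch a list default long enough to be indexed. The toy below reproduces just that lookup pattern, with plain strings standing in for `DefaultDataKeys`.

```python
INPUT, TARGET = "input", "target"   # stand-ins for DefaultDataKeys.INPUT / .TARGET

samples = [{INPUT: "img0"}, {INPUT: "img1", TARGET: 1}]      # list of per-sample dicts
for sample in samples:
    print(sample[INPUT], sample.get(TARGET, ""))             # "" when no label exists

batch = {INPUT: ["img0", "img1"]}                            # dict of lists, no targets
for i in range(len(batch[INPUT])):
    label = batch.get(TARGET, [""] * (i + 1))[i]             # list default so [i] is safe
    print(batch[INPUT][i], label)
```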
<code>
[start of flash/image/classification/data.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
15
16 import numpy as np
17 import torch
18 from pytorch_lightning.trainer.states import RunningStage
19
20 from flash.core.data.base_viz import BaseVisualization # for viz
21 from flash.core.data.callback import BaseDataFetcher
22 from flash.core.data.data_module import DataModule
23 from flash.core.data.data_source import DefaultDataKeys, DefaultDataSources
24 from flash.core.data.process import Deserializer, Preprocess
25 from flash.core.utilities.imports import _MATPLOTLIB_AVAILABLE, _PIL_AVAILABLE, _requires_extras
26 from flash.image.classification.transforms import default_transforms, train_default_transforms
27 from flash.image.data import (
28 ImageDeserializer,
29 ImageFiftyOneDataSource,
30 ImageNumpyDataSource,
31 ImagePathsDataSource,
32 ImageTensorDataSource,
33 )
34
35 if _MATPLOTLIB_AVAILABLE:
36 import matplotlib.pyplot as plt
37 else:
38 plt = None
39
40 if _PIL_AVAILABLE:
41 from PIL import Image
42 else:
43
44 class Image:
45 Image = None
46
47
48 class ImageClassificationPreprocess(Preprocess):
49
50 def __init__(
51 self,
52 train_transform: Optional[Dict[str, Callable]] = None,
53 val_transform: Optional[Dict[str, Callable]] = None,
54 test_transform: Optional[Dict[str, Callable]] = None,
55 predict_transform: Optional[Dict[str, Callable]] = None,
56 image_size: Tuple[int, int] = (196, 196),
57 deserializer: Optional[Deserializer] = None,
58 **data_source_kwargs: Any,
59 ):
60 self.image_size = image_size
61
62 super().__init__(
63 train_transform=train_transform,
64 val_transform=val_transform,
65 test_transform=test_transform,
66 predict_transform=predict_transform,
67 data_sources={
68 DefaultDataSources.FIFTYONE: ImageFiftyOneDataSource(**data_source_kwargs),
69 DefaultDataSources.FILES: ImagePathsDataSource(),
70 DefaultDataSources.FOLDERS: ImagePathsDataSource(),
71 DefaultDataSources.NUMPY: ImageNumpyDataSource(),
72 DefaultDataSources.TENSORS: ImageTensorDataSource(),
73 },
74 deserializer=deserializer or ImageDeserializer(),
75 default_data_source=DefaultDataSources.FILES,
76 )
77
78 def get_state_dict(self) -> Dict[str, Any]:
79 return {**self.transforms, "image_size": self.image_size}
80
81 @classmethod
82 def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool = False):
83 return cls(**state_dict)
84
85 def default_transforms(self) -> Optional[Dict[str, Callable]]:
86 return default_transforms(self.image_size)
87
88 def train_default_transforms(self) -> Optional[Dict[str, Callable]]:
89 return train_default_transforms(self.image_size)
90
91
92 class ImageClassificationData(DataModule):
93 """Data module for image classification tasks."""
94
95 preprocess_cls = ImageClassificationPreprocess
96
97 def set_block_viz_window(self, value: bool) -> None:
98 """Setter method to switch on/off matplotlib to pop up windows."""
99 self.data_fetcher.block_viz_window = value
100
101 @staticmethod
102 def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:
103 return MatplotlibVisualization(*args, **kwargs)
104
105
106 class MatplotlibVisualization(BaseVisualization):
107 """Process and show the image batch and its associated label using matplotlib.
108 """
109 max_cols: int = 4 # maximum number of columns we accept
110 block_viz_window: bool = True # parameter to allow user to block visualisation windows
111
112 @staticmethod
113 @_requires_extras("image")
114 def _to_numpy(img: Union[torch.Tensor, Image.Image]) -> np.ndarray:
115 out: np.ndarray
116 if isinstance(img, Image.Image):
117 out = np.array(img)
118 elif isinstance(img, torch.Tensor):
119 out = img.squeeze(0).permute(1, 2, 0).cpu().numpy()
120 else:
121 raise TypeError(f"Unknown image type. Got: {type(img)}.")
122 return out
123
124 @_requires_extras("image")
125 def _show_images_and_labels(self, data: List[Any], num_samples: int, title: str):
126 # define the image grid
127 cols: int = min(num_samples, self.max_cols)
128 rows: int = num_samples // cols
129
130 # create figure and set title
131 fig, axs = plt.subplots(rows, cols)
132 fig.suptitle(title)
133
134 for i, ax in enumerate(axs.ravel()):
135 # unpack images and labels
136 if isinstance(data, list):
137 _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
138 elif isinstance(data, dict):
139 _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
140 else:
141 raise TypeError(f"Unknown data type. Got: {type(data)}.")
142 # convert images to numpy
143 _img: np.ndarray = self._to_numpy(_img)
144 if isinstance(_label, torch.Tensor):
145 _label = _label.squeeze().tolist()
146 # show image and set label as subplot title
147 ax.imshow(_img)
148 ax.set_title(str(_label))
149 ax.axis('off')
150 plt.show(block=self.block_viz_window)
151
152 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):
153 win_title: str = f"{running_stage} - show_load_sample"
154 self._show_images_and_labels(samples, len(samples), win_title)
155
156 def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
157 win_title: str = f"{running_stage} - show_pre_tensor_transform"
158 self._show_images_and_labels(samples, len(samples), win_title)
159
160 def show_to_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
161 win_title: str = f"{running_stage} - show_to_tensor_transform"
162 self._show_images_and_labels(samples, len(samples), win_title)
163
164 def show_post_tensor_transform(self, samples: List[Any], running_stage: RunningStage):
165 win_title: str = f"{running_stage} - show_post_tensor_transform"
166 self._show_images_and_labels(samples, len(samples), win_title)
167
168 def show_per_batch_transform(self, batch: List[Any], running_stage):
169 win_title: str = f"{running_stage} - show_per_batch_transform"
170 self._show_images_and_labels(batch[0], batch[0][DefaultDataKeys.INPUT].shape[0], win_title)
171
[end of flash/image/classification/data.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/flash/image/classification/data.py b/flash/image/classification/data.py
--- a/flash/image/classification/data.py
+++ b/flash/image/classification/data.py
@@ -134,9 +134,9 @@
for i, ax in enumerate(axs.ravel()):
# unpack images and labels
if isinstance(data, list):
- _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]
+ _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get(DefaultDataKeys.TARGET, "")
elif isinstance(data, dict):
- _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]
+ _img, _label = data[DefaultDataKeys.INPUT][i], data.get(DefaultDataKeys.TARGET, [""] * (i + 1))[i]
else:
raise TypeError(f"Unknown data type. Got: {type(data)}.")
# convert images to numpy
|
{"golden_diff": "diff --git a/flash/image/classification/data.py b/flash/image/classification/data.py\n--- a/flash/image/classification/data.py\n+++ b/flash/image/classification/data.py\n@@ -134,9 +134,9 @@\n for i, ax in enumerate(axs.ravel()):\n # unpack images and labels\n if isinstance(data, list):\n- _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\n+ _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get(DefaultDataKeys.TARGET, \"\")\n elif isinstance(data, dict):\n- _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\n+ _img, _label = data[DefaultDataKeys.INPUT][i], data.get(DefaultDataKeys.TARGET, [\"\"] * (i + 1))[i]\n else:\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\n # convert images to numpy\n", "issue": "show_predict_batch fails due to no targets being available\n## \ud83d\udc1b Bug\r\n\r\nWhen calling the `show_predict_batch` method for image classification data an error is thrown due to not targets being available.\r\n\r\n```python\r\nfor i, ax in enumerate(axs.ravel()):\r\n # unpack images and labels\r\n if isinstance(data, list):\r\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\r\n elif isinstance(data, dict):\r\n _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\r\n else:\r\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\r\n # convert images to numpy\r\n _img: np.ndarray = self._to_numpy(_img)\r\n if isinstance(_label, torch.Tensor):\r\n _label = _label.squeeze().tolist()\r\n # show image and set label as subplot title\r\n ax.imshow(_img)\r\n ax.set_title(str(_label))\r\n ax.axis('off')\r\n```\r\n\r\nThe fix should be simple:\r\n\r\n```python\r\nfor i, ax in enumerate(axs.ravel()):\r\n # unpack images and labels\r\n if isinstance(data, list):\r\n # use the get method to return an empty string if no targets are available\r\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i].get([DefaultDataKeys.TARGET], \"\")\r\n elif isinstance(data, dict):\r\n # use the get method to return a list that contains an empty string if no targets are available\r\n _img, _label = data[DefaultDataKeys.INPUT][i], data.get([DefaultDataKeys.TARGET], [\"\"])[i]\r\n else:\r\n raise TypeError(f\"Unknown data type. 
Got: {type(data)}.\")\r\n # convert images to numpy\r\n _img: np.ndarray = self._to_numpy(_img)\r\n if isinstance(_label, torch.Tensor):\r\n _label = _label.squeeze().tolist()\r\n # show image and set label as subplot title\r\n ax.imshow(_img)\r\n ax.set_title(str(_label))\r\n ax.axis('off')\r\n```\r\n\r\nI can create a PR later, when I have time.\r\n\r\n### To Reproduce\r\n\r\nJust have flash installed.\r\n\r\n\r\n#### Code sample\r\n```python\r\nfrom flash.core.data.utils import download_data\r\nfrom flash.image import ImageClassificationData\r\n\r\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\", \"data/\")\r\n\r\ndatamodule = ImageClassificationData.from_folders(\r\n train_folder=\"data/hymenoptera_data/train/\",\r\n val_folder=\"data/hymenoptera_data/val/\",\r\n test_folder=\"data/hymenoptera_data/test/\",\r\n predict_folder=\"data/hymenoptera_data/predict/\"\r\n)\r\n\r\ndatamodule.show_predict_batch()\r\n```\r\n\r\nThis will give the following error message:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-4-ff8f29471c71> in <module>\r\n----> 1 datamodule.show_predict_batch()\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in show_predict_batch(self, hooks_names, reset)\r\n 225 \"\"\"This function is used to visualize a batch from the predict dataloader.\"\"\"\r\n 226 stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]\r\n--> 227 self._show_batch(stage_name, hooks_names, reset=reset)\r\n 228 \r\n 229 @staticmethod\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/data_module.py in _show_batch(self, stage, func_names, reset)\r\n 203 _ = next(iter_dataloader)\r\n 204 data_fetcher: BaseVisualization = self.data_fetcher\r\n--> 205 data_fetcher._show(stage, func_names)\r\n 206 if reset:\r\n 207 self.data_fetcher.batches[stage] = {}\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in _show(self, running_stage, func_names_list)\r\n 110 \r\n 111 def _show(self, running_stage: RunningStage, func_names_list: List[str]) -> None:\r\n--> 112 self.show(self.batches[running_stage], running_stage, func_names_list)\r\n 113 \r\n 114 def show(self, batch: Dict[str, Any], running_stage: RunningStage, func_names_list: List[str]) -> None:\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/core/data/base_viz.py in show(self, batch, running_stage, func_names_list)\r\n 124 hook_name = f\"show_{func_name}\"\r\n 125 if _is_overriden(hook_name, self, BaseVisualization):\r\n--> 126 getattr(self, hook_name)(batch[func_name], running_stage)\r\n 127 \r\n 128 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in show_load_sample(self, samples, running_stage)\r\n 144 def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\r\n 145 win_title: str = f\"{running_stage} - show_load_sample\"\r\n--> 146 self._show_images_and_labels(samples, len(samples), win_title)\r\n 147 \r\n 148 def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\r\n\r\n~/anaconda3/lib/python3.8/site-packages/flash/image/classification/data.py in _show_images_and_labels(self, data, num_samples, title)\r\n 127 # unpack images and labels\r\n 128 if isinstance(data, list):\r\n--> 129 _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\r\n 130 elif 
isinstance(data, dict):\r\n 131 _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\r\n\r\nKeyError: <DefaultDataKeys.TARGET: 'target'>\r\n```\r\n### Expected behavior\r\n\r\nThe batch should be shown without labels.\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom pytorch_lightning.trainer.states import RunningStage\n\nfrom flash.core.data.base_viz import BaseVisualization # for viz\nfrom flash.core.data.callback import BaseDataFetcher\nfrom flash.core.data.data_module import DataModule\nfrom flash.core.data.data_source import DefaultDataKeys, DefaultDataSources\nfrom flash.core.data.process import Deserializer, Preprocess\nfrom flash.core.utilities.imports import _MATPLOTLIB_AVAILABLE, _PIL_AVAILABLE, _requires_extras\nfrom flash.image.classification.transforms import default_transforms, train_default_transforms\nfrom flash.image.data import (\n ImageDeserializer,\n ImageFiftyOneDataSource,\n ImageNumpyDataSource,\n ImagePathsDataSource,\n ImageTensorDataSource,\n)\n\nif _MATPLOTLIB_AVAILABLE:\n import matplotlib.pyplot as plt\nelse:\n plt = None\n\nif _PIL_AVAILABLE:\n from PIL import Image\nelse:\n\n class Image:\n Image = None\n\n\nclass ImageClassificationPreprocess(Preprocess):\n\n def __init__(\n self,\n train_transform: Optional[Dict[str, Callable]] = None,\n val_transform: Optional[Dict[str, Callable]] = None,\n test_transform: Optional[Dict[str, Callable]] = None,\n predict_transform: Optional[Dict[str, Callable]] = None,\n image_size: Tuple[int, int] = (196, 196),\n deserializer: Optional[Deserializer] = None,\n **data_source_kwargs: Any,\n ):\n self.image_size = image_size\n\n super().__init__(\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n predict_transform=predict_transform,\n data_sources={\n DefaultDataSources.FIFTYONE: ImageFiftyOneDataSource(**data_source_kwargs),\n DefaultDataSources.FILES: ImagePathsDataSource(),\n DefaultDataSources.FOLDERS: ImagePathsDataSource(),\n DefaultDataSources.NUMPY: ImageNumpyDataSource(),\n DefaultDataSources.TENSORS: ImageTensorDataSource(),\n },\n deserializer=deserializer or ImageDeserializer(),\n default_data_source=DefaultDataSources.FILES,\n )\n\n def get_state_dict(self) -> Dict[str, Any]:\n return {**self.transforms, \"image_size\": self.image_size}\n\n @classmethod\n def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool = False):\n return cls(**state_dict)\n\n def default_transforms(self) -> Optional[Dict[str, Callable]]:\n return default_transforms(self.image_size)\n\n def train_default_transforms(self) -> Optional[Dict[str, Callable]]:\n return train_default_transforms(self.image_size)\n\n\nclass ImageClassificationData(DataModule):\n \"\"\"Data module for image classification tasks.\"\"\"\n\n preprocess_cls = ImageClassificationPreprocess\n\n def 
set_block_viz_window(self, value: bool) -> None:\n \"\"\"Setter method to switch on/off matplotlib to pop up windows.\"\"\"\n self.data_fetcher.block_viz_window = value\n\n @staticmethod\n def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:\n return MatplotlibVisualization(*args, **kwargs)\n\n\nclass MatplotlibVisualization(BaseVisualization):\n \"\"\"Process and show the image batch and its associated label using matplotlib.\n \"\"\"\n max_cols: int = 4 # maximum number of columns we accept\n block_viz_window: bool = True # parameter to allow user to block visualisation windows\n\n @staticmethod\n @_requires_extras(\"image\")\n def _to_numpy(img: Union[torch.Tensor, Image.Image]) -> np.ndarray:\n out: np.ndarray\n if isinstance(img, Image.Image):\n out = np.array(img)\n elif isinstance(img, torch.Tensor):\n out = img.squeeze(0).permute(1, 2, 0).cpu().numpy()\n else:\n raise TypeError(f\"Unknown image type. Got: {type(img)}.\")\n return out\n\n @_requires_extras(\"image\")\n def _show_images_and_labels(self, data: List[Any], num_samples: int, title: str):\n # define the image grid\n cols: int = min(num_samples, self.max_cols)\n rows: int = num_samples // cols\n\n # create figure and set title\n fig, axs = plt.subplots(rows, cols)\n fig.suptitle(title)\n\n for i, ax in enumerate(axs.ravel()):\n # unpack images and labels\n if isinstance(data, list):\n _img, _label = data[i][DefaultDataKeys.INPUT], data[i][DefaultDataKeys.TARGET]\n elif isinstance(data, dict):\n _img, _label = data[DefaultDataKeys.INPUT][i], data[DefaultDataKeys.TARGET][i]\n else:\n raise TypeError(f\"Unknown data type. Got: {type(data)}.\")\n # convert images to numpy\n _img: np.ndarray = self._to_numpy(_img)\n if isinstance(_label, torch.Tensor):\n _label = _label.squeeze().tolist()\n # show image and set label as subplot title\n ax.imshow(_img)\n ax.set_title(str(_label))\n ax.axis('off')\n plt.show(block=self.block_viz_window)\n\n def show_load_sample(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_load_sample\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_pre_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_pre_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_to_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_to_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_post_tensor_transform(self, samples: List[Any], running_stage: RunningStage):\n win_title: str = f\"{running_stage} - show_post_tensor_transform\"\n self._show_images_and_labels(samples, len(samples), win_title)\n\n def show_per_batch_transform(self, batch: List[Any], running_stage):\n win_title: str = f\"{running_stage} - show_per_batch_transform\"\n self._show_images_and_labels(batch[0], batch[0][DefaultDataKeys.INPUT].shape[0], win_title)\n", "path": "flash/image/classification/data.py"}]}
| 3,866 | 227 |
gh_patches_debug_33861
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1660
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Costa Rica page has changed and now errors
```
Traceback (most recent call last):
File "/home/feeder/lib/fetch_data.py", line 131, in launch_parsers
**parser_kwargs)
File "/home/contrib/parsers/CR.py", line 178, in fetch_production
jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
IndexError: list index out of range
```
https://github.com/tmrowco/electricitymap-contrib/blob/86f06791f2292c8a20e383a4642ced8b16083333/parsers/CR.py#L178
</issue>
<code>
[start of parsers/CR.py]
1 #!/usr/bin/env python3
2 # coding=utf-8
3
4 import logging
5
6 import arrow
7 import pandas as pd
8 import requests
9 from bs4 import BeautifulSoup
10
11 TIMEZONE = 'America/Costa_Rica'
12 DATE_FORMAT = 'DD/MM/YYYY'
13 MONTH_FORMAT = 'MM/YYYY'
14 POWER_PLANTS = {
15 u'Aeroenergía': 'wind',
16 u'Altamira': 'wind',
17 u'Angostura': 'hydro',
18 u'Arenal': 'hydro',
19 u'Balsa Inferior': 'hydro',
20 u'Barranca': 'unknown',
21 u'Barro Morado': 'geothermal',
22 u'Bijagua': 'hydro',
23 u'Birris12': 'hydro',
24 u'Birris3': 'hydro',
25 u'Boca de Pozo': 'hydro',
26 u'CNFL': 'unknown',
27 u'Cachí': 'hydro',
28 u'Campos Azules': 'wind',
29 u'Canalete': 'unknown',
30 u'Cariblanco': 'hydro',
31 u'Carrillos': 'hydro',
32 u'Caño Grande': 'hydro',
33 u'Caño Grande III': 'hydro',
34 u'Chiripa': 'wind',
35 u'Chocosuelas': 'hydro',
36 u'Chucás': 'hydro',
37 u'Cubujuquí': 'hydro',
38 u'Daniel Gutiérrez': 'hydro',
39 u'Dengo': 'hydro',
40 u'Don Pedro': 'hydro',
41 u'Doña Julia': 'hydro',
42 u'Echandi': 'hydro',
43 u'El Angel': 'hydro',
44 u'El Angel Ampliación': 'hydro',
45 u'El Embalse': 'hydro',
46 u'El General': 'hydro',
47 u'El Viejo': 'biomass',
48 u'Garabito': 'oil',
49 u'Garita': 'hydro',
50 u'Guápiles': 'oil',
51 u'Hidrozarcas': 'hydro',
52 u'La Esperanza (CoopeL)': 'hydro',
53 u'La Joya': 'hydro',
54 u'Los Negros': 'hydro',
55 u'Los Negros II': 'hydro',
56 u'Los Santos': 'wind',
57 u'MOVASA': 'wind',
58 u'Matamoros': 'unknown',
59 u'Miravalles I': 'geothermal',
60 u'Miravalles II': 'geothermal',
61 u'Miravalles III': 'geothermal',
62 u'Miravalles V': 'geothermal',
63 u'Moín I': 'oil',
64 u'Moín II': 'oil',
65 u'Moín III': 'oil',
66 u'Orosí': 'wind',
67 u'Orotina': 'unknown',
68 u'Otros': 'unknown',
69 u'PE Mogote': 'wind',
70 u'PEG': 'wind',
71 u'Pailas': 'geothermal',
72 u'Parque Solar Juanilama': 'solar',
73 u'Parque Solar Miravalles': 'solar',
74 u'Peñas Blancas': 'hydro',
75 u'Pirrís': 'hydro',
76 u'Plantas Eólicas': 'wind',
77 u'Platanar': 'hydro',
78 u'Pocosol': 'hydro',
79 u'Poás I y II': 'hydro',
80 u'Reventazón': 'hydro',
81 u'Río Lajas': 'hydro',
82 u'Río Macho': 'hydro',
83 u'San Antonio': 'oil',
84 u'San Lorenzo (C)': 'hydro',
85 u'Sandillal': 'hydro',
86 u'Suerkata': 'hydro',
87 u'Taboga': 'biomass',
88 u'Tacares': 'hydro',
89 u'Tejona': 'wind',
90 u'Tilawind': 'wind',
91 u'Torito': 'hydro',
92 u'Toro I': 'hydro',
93 u'Toro II': 'hydro',
94 u'Toro III': 'hydro',
95 u'Tuis (JASEC)': 'hydro',
96 u'Valle Central': 'wind',
97 u'Vara Blanca': 'hydro',
98 u'Ventanas-Garita': 'hydro',
99 u'Vientos de La Perla': 'wind',
100 u'Vientos de Miramar': 'wind',
101 u'Vientos del Este': 'wind',
102 u'Volcán': 'hydro',
103 }
104
105 CHARACTERISTIC_NAME = 'Angostura'
106
107
108 def empty_record(zone_key):
109 return {
110 'zoneKey': zone_key,
111 'capacity': {},
112 'production': {
113 'biomass': 0.0,
114 'coal': 0.0,
115 'gas': 0.0,
116 'hydro': 0.0,
117 'nuclear': 0.0,
118 'oil': 0.0,
119 'solar': 0.0,
120 'wind': 0.0,
121 'geothermal': 0.0,
122 'unknown': 0.0
123 },
124 'storage': {},
125 'source': 'grupoice.com'
126 }
127
128
129 def df_to_data(zone_key, day, df, logger):
130 df = df.dropna(axis=1, how='any')
131 # Check for empty dataframe
132 if df.shape == (1, 1):
133 return []
134 df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')
135 df = df.iloc[:, :-1]
136
137 results = []
138 unknown_plants = set()
139 hour = 0
140 for column in df:
141 data = empty_record(zone_key)
142 data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime
143 for index, value in df[column].items():
144 source = POWER_PLANTS.get(index)
145 if not source:
146 source = 'unknown'
147 unknown_plants.add(index)
148 data['datetime'] = data_time
149 data['production'][source] += max(0.0, value)
150 hour += 1
151 results.append(data)
152
153 for plant in unknown_plants:
154 logger.warning('{} is not mapped to generation type'.format(plant),
155 extra={'key': zone_key})
156
157 return results
158
159
160 def fetch_production(zone_key='CR', session=None,
161 target_datetime=None, logger=logging.getLogger(__name__)):
162 # ensure we have an arrow object. if no target_datetime is specified, this defaults to now.
163 target_datetime = arrow.get(target_datetime).to(TIMEZONE)
164
165 if target_datetime < arrow.get('2012-07-01'):
166 # data availability limit found by manual trial and error
167 logger.error('CR API does not provide data before 2012-07-01, '
168 '{} was requested'.format(target_datetime),
169 extra={"key": zone_key})
170 return None
171
172 # Do not use existing session as some amount of cache is taking place
173 r = requests.session()
174 url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
175 response = r.get(url)
176
177 soup = BeautifulSoup(response.text, 'html.parser')
178 jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
179
180 data = [
181 ('formPosdespacho', 'formPosdespacho'),
182 ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),
183 ('formPosdespacho:pickFecha', ''),
184 ('formPosdespacho:j_idt60_selection', ''),
185 ('formPosdespacho:j_idt60_scrollState', '0,1915'),
186 ('javax.faces.ViewState', jsf_view_state),
187 ]
188 response = r.post(url, cookies={}, data=data)
189
190 # tell pandas which table to use by providing CHARACTERISTIC_NAME
191 df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
192
193 results = df_to_data(zone_key, target_datetime, df, logger)
194
195 return results
196
197
198 def fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):
199 """Requests the last known power exchange (in MW) between two regions
200
201 Arguments:
202 zone_key1 -- the first country code
203 zone_key2 -- the second country code; order of the two codes in params doesn't matter
204 session (optional) -- request session passed in order to re-use an existing session
205
206 Return:
207 A dictionary in the form:
208 {
209 'sortedZoneKeys': 'DK->NO',
210 'datetime': '2017-01-01T00:00:00Z',
211 'netFlow': 0.0,
212 'source': 'mysource.com'
213 }
214
215 where net flow is from DK into NO
216 """
217 if target_datetime:
218 raise NotImplementedError('This parser is not yet able to parse past dates')
219
220 sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
221
222 df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)
223
224 if sorted_zone_keys == 'CR->NI':
225 flow = df['NICR'][0]
226 elif sorted_zone_keys == 'CR->PA':
227 flow = -1 * df['CRPA'][0]
228 else:
229 raise NotImplementedError('This exchange pair is not implemented')
230
231 data = {
232 'datetime': arrow.now(TIMEZONE).datetime,
233 'sortedZoneKeys': sorted_zone_keys,
234 'netFlow': flow,
235 'source': 'enteoperador.org'
236 }
237
238 return data
239
240
241 if __name__ == '__main__':
242 """Main method, never used by the Electricity Map backend, but handy for testing."""
243
244 from pprint import pprint
245
246 print('fetch_production() ->')
247 pprint(fetch_production())
248
249 print('fetch_production(target_datetime=arrow.get("2018-03-13T12:00Z") ->')
250 pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))
251
252 # this should work
253 print('fetch_production(target_datetime=arrow.get("2013-03-13T12:00Z") ->')
254 pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))
255
256 # this should return None
257 print('fetch_production(target_datetime=arrow.get("2007-03-13T12:00Z") ->')
258 pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))
259
260 print('fetch_exchange() ->')
261 print(fetch_exchange())
262
[end of parsers/CR.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/CR.py b/parsers/CR.py
--- a/parsers/CR.py
+++ b/parsers/CR.py
@@ -67,6 +67,7 @@
u'Orotina': 'unknown',
u'Otros': 'unknown',
u'PE Mogote': 'wind',
+ u'PE Río Naranjo': 'hydro',
u'PEG': 'wind',
u'Pailas': 'geothermal',
u'Parque Solar Juanilama': 'solar',
@@ -151,7 +152,7 @@
results.append(data)
for plant in unknown_plants:
- logger.warning('{} is not mapped to generation type'.format(plant),
+ logger.warning(u'{} is not mapped to generation type'.format(plant),
extra={'key': zone_key})
return results
@@ -171,21 +172,19 @@
# Do not use existing session as some amount of cache is taking place
r = requests.session()
- url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
+ url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'
response = r.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
- jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']
+ jsf_view_state = soup.find("input", {"name": 'javax.faces.ViewState'})['value']
data = [
- ('formPosdespacho', 'formPosdespacho'),
('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),
('formPosdespacho:pickFecha', ''),
- ('formPosdespacho:j_idt60_selection', ''),
- ('formPosdespacho:j_idt60_scrollState', '0,1915'),
+ ('formPosdespacho_SUBMIT', 1),
('javax.faces.ViewState', jsf_view_state),
]
- response = r.post(url, cookies={}, data=data)
+ response = r.post(url, data=data)
# tell pandas which table to use by providing CHARACTERISTIC_NAME
df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]
|
{"golden_diff": "diff --git a/parsers/CR.py b/parsers/CR.py\n--- a/parsers/CR.py\n+++ b/parsers/CR.py\n@@ -67,6 +67,7 @@\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n+ u'PE R\u00edo Naranjo': 'hydro',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n@@ -151,7 +152,7 @@\n results.append(data)\n \n for plant in unknown_plants:\n- logger.warning('{} is not mapped to generation type'.format(plant),\n+ logger.warning(u'{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n \n return results\n@@ -171,21 +172,19 @@\n \n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n- url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n+ url = 'https://apps.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n \n soup = BeautifulSoup(response.text, 'html.parser')\n- jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']\n+ jsf_view_state = soup.find(\"input\", {\"name\": 'javax.faces.ViewState'})['value']\n \n data = [\n- ('formPosdespacho', 'formPosdespacho'),\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n- ('formPosdespacho:j_idt60_selection', ''),\n- ('formPosdespacho:j_idt60_scrollState', '0,1915'),\n+ ('formPosdespacho_SUBMIT', 1),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n- response = r.post(url, cookies={}, data=data)\n+ response = r.post(url, data=data)\n \n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n", "issue": "Costa Rica page has changed and now errors\n```\r\nTraceback (most recent call last):\r\n File \"/home/feeder/lib/fetch_data.py\", line 131, in launch_parsers\r\n **parser_kwargs)\r\n File \"/home/contrib/parsers/CR.py\", line 178, in fetch_production\r\n jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']\r\nIndexError: list index out of range\r\n```\r\n\r\nhttps://github.com/tmrowco/electricitymap-contrib/blob/86f06791f2292c8a20e383a4642ced8b16083333/parsers/CR.py#L178\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\nimport logging\n\nimport arrow\nimport pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup\n\nTIMEZONE = 'America/Costa_Rica'\nDATE_FORMAT = 'DD/MM/YYYY'\nMONTH_FORMAT = 'MM/YYYY'\nPOWER_PLANTS = {\n u'Aeroenerg\u00eda': 'wind',\n u'Altamira': 'wind',\n u'Angostura': 'hydro',\n u'Arenal': 'hydro',\n u'Balsa Inferior': 'hydro',\n u'Barranca': 'unknown',\n u'Barro Morado': 'geothermal',\n u'Bijagua': 'hydro',\n u'Birris12': 'hydro',\n u'Birris3': 'hydro',\n u'Boca de Pozo': 'hydro',\n u'CNFL': 'unknown',\n u'Cach\u00ed': 'hydro',\n u'Campos Azules': 'wind',\n u'Canalete': 'unknown',\n u'Cariblanco': 'hydro',\n u'Carrillos': 'hydro',\n u'Ca\u00f1o Grande': 'hydro',\n u'Ca\u00f1o Grande III': 'hydro',\n u'Chiripa': 'wind',\n u'Chocosuelas': 'hydro',\n u'Chuc\u00e1s': 'hydro',\n u'Cubujuqu\u00ed': 'hydro',\n u'Daniel Guti\u00e9rrez': 'hydro',\n u'Dengo': 'hydro',\n u'Don Pedro': 'hydro',\n u'Do\u00f1a Julia': 'hydro',\n u'Echandi': 'hydro',\n u'El Angel': 'hydro',\n u'El Angel Ampliaci\u00f3n': 'hydro',\n u'El Embalse': 'hydro',\n u'El General': 'hydro',\n u'El Viejo': 'biomass',\n u'Garabito': 'oil',\n u'Garita': 'hydro',\n u'Gu\u00e1piles': 'oil',\n u'Hidrozarcas': 'hydro',\n u'La Esperanza (CoopeL)': 'hydro',\n u'La Joya': 'hydro',\n u'Los 
Negros': 'hydro',\n u'Los Negros II': 'hydro',\n u'Los Santos': 'wind',\n u'MOVASA': 'wind',\n u'Matamoros': 'unknown',\n u'Miravalles I': 'geothermal',\n u'Miravalles II': 'geothermal',\n u'Miravalles III': 'geothermal',\n u'Miravalles V': 'geothermal',\n u'Mo\u00edn I': 'oil',\n u'Mo\u00edn II': 'oil',\n u'Mo\u00edn III': 'oil',\n u'Oros\u00ed': 'wind',\n u'Orotina': 'unknown',\n u'Otros': 'unknown',\n u'PE Mogote': 'wind',\n u'PEG': 'wind',\n u'Pailas': 'geothermal',\n u'Parque Solar Juanilama': 'solar',\n u'Parque Solar Miravalles': 'solar',\n u'Pe\u00f1as Blancas': 'hydro',\n u'Pirr\u00eds': 'hydro',\n u'Plantas E\u00f3licas': 'wind',\n u'Platanar': 'hydro',\n u'Pocosol': 'hydro',\n u'Po\u00e1s I y II': 'hydro',\n u'Reventaz\u00f3n': 'hydro',\n u'R\u00edo Lajas': 'hydro',\n u'R\u00edo Macho': 'hydro',\n u'San Antonio': 'oil',\n u'San Lorenzo (C)': 'hydro',\n u'Sandillal': 'hydro',\n u'Suerkata': 'hydro',\n u'Taboga': 'biomass',\n u'Tacares': 'hydro',\n u'Tejona': 'wind',\n u'Tilawind': 'wind',\n u'Torito': 'hydro',\n u'Toro I': 'hydro',\n u'Toro II': 'hydro',\n u'Toro III': 'hydro',\n u'Tuis (JASEC)': 'hydro',\n u'Valle Central': 'wind',\n u'Vara Blanca': 'hydro',\n u'Ventanas-Garita': 'hydro',\n u'Vientos de La Perla': 'wind',\n u'Vientos de Miramar': 'wind',\n u'Vientos del Este': 'wind',\n u'Volc\u00e1n': 'hydro',\n}\n\nCHARACTERISTIC_NAME = 'Angostura'\n\n\ndef empty_record(zone_key):\n return {\n 'zoneKey': zone_key,\n 'capacity': {},\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {},\n 'source': 'grupoice.com'\n }\n\n\ndef df_to_data(zone_key, day, df, logger):\n df = df.dropna(axis=1, how='any')\n # Check for empty dataframe\n if df.shape == (1, 1):\n return []\n df = df.drop(['Intercambio Sur', 'Intercambio Norte', 'Total'], errors='ignore')\n df = df.iloc[:, :-1]\n\n results = []\n unknown_plants = set()\n hour = 0\n for column in df:\n data = empty_record(zone_key)\n data_time = day.replace(hour=hour, minute=0, second=0, microsecond=0).datetime\n for index, value in df[column].items():\n source = POWER_PLANTS.get(index)\n if not source:\n source = 'unknown'\n unknown_plants.add(index)\n data['datetime'] = data_time\n data['production'][source] += max(0.0, value)\n hour += 1\n results.append(data)\n\n for plant in unknown_plants:\n logger.warning('{} is not mapped to generation type'.format(plant),\n extra={'key': zone_key})\n\n return results\n\n\ndef fetch_production(zone_key='CR', session=None,\n target_datetime=None, logger=logging.getLogger(__name__)):\n # ensure we have an arrow object. 
if no target_datetime is specified, this defaults to now.\n target_datetime = arrow.get(target_datetime).to(TIMEZONE)\n\n if target_datetime < arrow.get('2012-07-01'):\n # data availability limit found by manual trial and error\n logger.error('CR API does not provide data before 2012-07-01, '\n '{} was requested'.format(target_datetime),\n extra={\"key\": zone_key})\n return None\n\n # Do not use existing session as some amount of cache is taking place\n r = requests.session()\n url = 'https://appcenter.grupoice.com/CenceWeb/CencePosdespachoNacional.jsf'\n response = r.get(url)\n\n soup = BeautifulSoup(response.text, 'html.parser')\n jsf_view_state = soup.select('#javax.faces.ViewState')[0]['value']\n\n data = [\n ('formPosdespacho', 'formPosdespacho'),\n ('formPosdespacho:txtFechaInicio_input', target_datetime.format(DATE_FORMAT)),\n ('formPosdespacho:pickFecha', ''),\n ('formPosdespacho:j_idt60_selection', ''),\n ('formPosdespacho:j_idt60_scrollState', '0,1915'),\n ('javax.faces.ViewState', jsf_view_state),\n ]\n response = r.post(url, cookies={}, data=data)\n\n # tell pandas which table to use by providing CHARACTERISTIC_NAME\n df = pd.read_html(response.text, match=CHARACTERISTIC_NAME, skiprows=1, index_col=0)[0]\n\n results = df_to_data(zone_key, target_datetime, df, logger)\n\n return results\n\n\ndef fetch_exchange(zone_key1='CR', zone_key2='NI', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two regions\n\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))\n\n df = pd.read_csv('http://www.enteoperador.org/newsite/flash/data.csv', index_col=False)\n\n if sorted_zone_keys == 'CR->NI':\n flow = df['NICR'][0]\n elif sorted_zone_keys == 'CR->PA':\n flow = -1 * df['CRPA'][0]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n data = {\n 'datetime': arrow.now(TIMEZONE).datetime,\n 'sortedZoneKeys': sorted_zone_keys,\n 'netFlow': flow,\n 'source': 'enteoperador.org'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n from pprint import pprint\n\n print('fetch_production() ->')\n pprint(fetch_production())\n\n print('fetch_production(target_datetime=arrow.get(\"2018-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2018-03-13T12:00Z')))\n\n # this should work\n print('fetch_production(target_datetime=arrow.get(\"2013-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2013-03-13T12:00Z')))\n\n # this should return None\n print('fetch_production(target_datetime=arrow.get(\"2007-03-13T12:00Z\") ->')\n pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))\n\n print('fetch_exchange() ->')\n print(fetch_exchange())\n", "path": "parsers/CR.py"}]}
| 3,780 | 536 |
gh_patches_debug_1349
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-3150
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
#3289 Interactive Event
**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung/
**device & browser:** *e.g. Firefox 80.0 (64-bit)*
**Comment/Question:**

01 – The questions were supposed to be 20px, but it's also fine for me like that. 
02 – The Icons are different and fine, but this one seems a bit off; I have saved the ones I did here in case you need them: Nextcloud/Projekte/meinBerlin/Material/CI/Bilder & Grafiken/icons/svg
03 – Shall we put a max number of characters here since the questions should be short? @CarolingerSeilchenspringer
04 – I did the spacing between the questions using 8px but if that was already the meinBerlin spacing guidelines, we leave like that.
05 – The category labels seem a bit far from the question; if possible, could we shorten the spacing here a bit so it gets closer to the question and "far" from the Like button? 
06 – Are the moderators supposed to like the question? If yes, it's not working for me. :/

07 – If possible can we use here the text style from the notes (14px)?

08 – The statistics seem a bit off compared to the sketch on Zeplin. Same on Mobile.

09 – Selected categories seem a bit off after selecting them; they kind of stay there instead of just being selected. (Do you know what I mean? I can also explain it to you better by sharing the screen or something.)

10 – The Menu on Mobile is different than on Zeplin.

11 – Same here, the infobox is difficult to understand. Also the buttons are off; the spacing between them is different.

12 – Small thing here but if possible, the success infobox should be aligned properly to the left.

13 – Can we increase the spacing here a bit between the link and icon?

14 – The Blueprint is not updated, I left the .svg file on Taiga
I just didn't manage to test the Initiator's view because there is no project open for testing; could you edit the phase from one of yours and send me the link? Because for all the ones I created, I'm the moderator as well.
Thanks a lot! Let me know if you need anything :)
</issue>
<code>
[start of meinberlin/apps/dashboard/blueprints.py]
1 from django.utils.translation import ugettext_lazy as _
2
3 from adhocracy4.dashboard.blueprints import ProjectBlueprint
4 from meinberlin.apps.budgeting import phases as budgeting_phases
5 from meinberlin.apps.documents import phases as documents_phases
6 from meinberlin.apps.ideas import phases as ideas_phases
7 from meinberlin.apps.kiezkasse import phases as kiezkasse_phases
8 from meinberlin.apps.livequestions import phases as livequestion_phases
9 from meinberlin.apps.mapideas import phases as mapideas_phases
10 from meinberlin.apps.maptopicprio import phases as maptopicprio_phases
11 from meinberlin.apps.polls import phases as poll_phases
12 from meinberlin.apps.topicprio import phases as topicprio_phases
13
14 blueprints = [
15 ('brainstorming',
16 ProjectBlueprint(
17 title=_('Brainstorming'),
18 description=_(
19 'Collect first ideas for a specific topic and comment on them.'
20 ),
21 content=[
22 ideas_phases.CollectPhase(),
23 ],
24 image='images/brainstorming.svg',
25 settings_model=None,
26 )),
27 ('map-brainstorming',
28 ProjectBlueprint(
29 title=_('Spatial Brainstorming'),
30 description=_(
31 'Collect location specific ideas for a topic and comment on them.'
32 ),
33 content=[
34 mapideas_phases.CollectPhase(),
35 ],
36 image='images/map-brainstorming.svg',
37 settings_model=('a4maps', 'AreaSettings'),
38 )),
39 ('map-idea-collection',
40 ProjectBlueprint(
41 title=_('Spatial Idea Collection'),
42 description=_(
43 'Collect location specific ideas that can be rated and commented.'
44 ),
45 content=[
46 mapideas_phases.CollectFeedbackPhase(),
47 ],
48 image='images/map-idea-collection.svg',
49 settings_model=('a4maps', 'AreaSettings'),
50 )),
51 ('agenda-setting',
52 ProjectBlueprint(
53 title=_('Agenda Setting'),
54 description=_(
55 'With Agenda-Setting it’s possible to identify topics and to '
56 'define mission statements. Anyone can submit topics that can be '
57 'commented and rated.'
58 ),
59 content=[
60 ideas_phases.CollectFeedbackPhase(),
61 ],
62 image='images/agenda-setting.svg',
63 settings_model=None,
64 )),
65 ('text-review',
66 ProjectBlueprint(
67 title=_('Text Review'),
68 description=_(
69 'In the text-review it’s possible to structure draft texts '
70 'that can be commented.'
71 ),
72 content=[
73 documents_phases.CommentPhase(),
74 ],
75 image='images/text-review.svg',
76 settings_model=None,
77 )),
78 ('participatory-budgeting',
79 ProjectBlueprint(
80 title=_('Participatory budgeting'),
81 description=_(
82 'With participatory-budgeting it’s possible to make proposals '
83 'with budget specifications and locate them. Anyone can comment '
84 'and rate on different proposals.'
85 ),
86 content=[
87 budgeting_phases.RequestPhase()
88 ],
89 image='images/participatory-budgeting.svg',
90 settings_model=('a4maps', 'AreaSettings'),
91 )),
92 ('poll',
93 ProjectBlueprint(
94 title=_('Poll'),
95 description=_(
96 'Create a poll with multiple questions and possible answers. '
97 'Anyone can cast votes and comment on the poll.'
98 ),
99 content=[
100 poll_phases.VotingPhase(),
101 ],
102 image='images/poll.svg',
103 settings_model=None,
104 )),
105 ('topic-prioritization',
106 ProjectBlueprint(
107 title=_('Topic Priorization'),
108 description=_(
109 'Comment and prioritize topics.'
110 ),
111 content=[
112 topicprio_phases.PrioritizePhase(),
113 ],
114 image='images/priorization.svg',
115 settings_model=None,
116 )),
117 ('map-topic-prioritization',
118 ProjectBlueprint(
119 title=_('Place Prioritization'),
120 description=_(
121 'Comment and prioritize places located on a map.'
122 ),
123 content=[
124 maptopicprio_phases.PrioritizePhase(),
125 ],
126 image='images/place-priotization.svg',
127 settings_model=('a4maps', 'AreaSettings'),
128 )),
129 ('kiezkasse',
130 ProjectBlueprint(
131 title=_('Kiezkasse'),
132 description=_(
133 'With kiezkasse it’s possible to make proposals '
134 'with budget specifications and locate them. '
135 'The proposals can be commented and rated.'
136 ),
137 content=[
138 kiezkasse_phases.RequestFeedbackPhase(),
139 ],
140 image='images/kiezkasse.svg',
141 settings_model=('a4maps', 'AreaSettings'),
142 )),
143 ('interactive-event',
144 ProjectBlueprint(
145 title=_('Interactive Event'),
146 description=_(
147 'The participants of an event can ask their questions online. '
148 'Other participants can support the question. You as the '
149 'moderator can sort the questions by support or '
150 'characteristic.'
151 ),
152 content=[
153 livequestion_phases.IssuePhase(),
154 ],
155 image='images/text-review.svg',
156 settings_model=None,
157 )),
158 ]
159
[end of meinberlin/apps/dashboard/blueprints.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/meinberlin/apps/dashboard/blueprints.py b/meinberlin/apps/dashboard/blueprints.py
--- a/meinberlin/apps/dashboard/blueprints.py
+++ b/meinberlin/apps/dashboard/blueprints.py
@@ -152,7 +152,7 @@
content=[
livequestion_phases.IssuePhase(),
],
- image='images/text-review.svg',
+ image='images/interactive-event.svg',
settings_model=None,
)),
]
|
{"golden_diff": "diff --git a/meinberlin/apps/dashboard/blueprints.py b/meinberlin/apps/dashboard/blueprints.py\n--- a/meinberlin/apps/dashboard/blueprints.py\n+++ b/meinberlin/apps/dashboard/blueprints.py\n@@ -152,7 +152,7 @@\n content=[\n livequestion_phases.IssuePhase(),\n ],\n- image='images/text-review.svg',\n+ image='images/interactive-event.svg',\n settings_model=None,\n )),\n ]\n", "issue": "#3289 Interactive Event \n**URL:** https://meinberlin-dev.liqd.net/projekte/module/interaktive-veranstaltung/\r\n**device & browser:** *e.g. Firefox 80.0 (64-bit)*\r\n\r\n**Comment/Question:**\r\n\r\n01 \u2013 The questions were supposed to be 20px, but its also fine for me like that. \r\n02 \u2013 The Icons are different and fine, but this one seems a bit off, I have save the ones I did here in case you need them: Nextcloud/Projekte/meinBerlin/Material/CI/Bilder & Grafiken/icons/svg\r\n03 \u2013 Shall we put a max number of characters here since the questions should be short? @CarolingerSeilchenspringer \r\n04 \u2013 I did the spacing between the questions using 8px but if that was already the meinBerlin spacing guidelines, we leave like that. \r\n05 \u2013 The category labels seem a bit far from the question, if possible, could we shorten the spacing here a bit so it gets closer from the question and \"far\" from the Like button? \r\n06 \u2013 Are the moderators supposed to like the question? If yes, its not working for me. :/\r\n\r\n\r\n07 \u2013 If possible can we use here the text style from the notes (14px)? \r\n\r\n\r\n08 \u2013 The statistics seem a bit off compared to the sketch on Zeplin. Same on Mobile.\r\n\r\n\r\n09 \u2013 Selected categories seem a bit off after selecting them, they kind of stay there instead of just being selected. (Do you know what I mean? I can also explain to you better sharing the screen or something).\r\n\r\n\r\n10 \u2013 The Menu on Mobile is different than on Zeplin. \r\n\r\n\r\n11 \u2013 Same here, the infobox is difficult to understand. Also the buttons are off, spacing inbetween them are different.\r\n\r\n\r\n12 \u2013 Small thing here but if possible, the success infobox should be aligned properly to the left. \r\n\r\n\r\n13 \u2013 Can we increase the spacing here a bit between the link and icon? \r\n\r\n\r\n14 \u2013 The Blueprint is not updated, I left the .svg file on Taiga\r\n \r\nI just didn't manage to test the Initiator's view cause there is no project open to testing, could you edit the phase from one of yours and send me the link? Cause all the ones I created I'm the moderator as well. \r\n\r\nThanks a lot! 
Let me know if you need anything :)\r\n\r\n\r\n\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4.dashboard.blueprints import ProjectBlueprint\nfrom meinberlin.apps.budgeting import phases as budgeting_phases\nfrom meinberlin.apps.documents import phases as documents_phases\nfrom meinberlin.apps.ideas import phases as ideas_phases\nfrom meinberlin.apps.kiezkasse import phases as kiezkasse_phases\nfrom meinberlin.apps.livequestions import phases as livequestion_phases\nfrom meinberlin.apps.mapideas import phases as mapideas_phases\nfrom meinberlin.apps.maptopicprio import phases as maptopicprio_phases\nfrom meinberlin.apps.polls import phases as poll_phases\nfrom meinberlin.apps.topicprio import phases as topicprio_phases\n\nblueprints = [\n ('brainstorming',\n ProjectBlueprint(\n title=_('Brainstorming'),\n description=_(\n 'Collect first ideas for a specific topic and comment on them.'\n ),\n content=[\n ideas_phases.CollectPhase(),\n ],\n image='images/brainstorming.svg',\n settings_model=None,\n )),\n ('map-brainstorming',\n ProjectBlueprint(\n title=_('Spatial Brainstorming'),\n description=_(\n 'Collect location specific ideas for a topic and comment on them.'\n ),\n content=[\n mapideas_phases.CollectPhase(),\n ],\n image='images/map-brainstorming.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('map-idea-collection',\n ProjectBlueprint(\n title=_('Spatial Idea Collection'),\n description=_(\n 'Collect location specific ideas that can be rated and commented.'\n ),\n content=[\n mapideas_phases.CollectFeedbackPhase(),\n ],\n image='images/map-idea-collection.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('agenda-setting',\n ProjectBlueprint(\n title=_('Agenda Setting'),\n description=_(\n 'With Agenda-Setting it\u2019s possible to identify topics and to '\n 'define mission statements. Anyone can submit topics that can be '\n 'commented and rated.'\n ),\n content=[\n ideas_phases.CollectFeedbackPhase(),\n ],\n image='images/agenda-setting.svg',\n settings_model=None,\n )),\n ('text-review',\n ProjectBlueprint(\n title=_('Text Review'),\n description=_(\n 'In the text-review it\u2019s possible to structure draft texts '\n 'that can be commented.'\n ),\n content=[\n documents_phases.CommentPhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n ('participatory-budgeting',\n ProjectBlueprint(\n title=_('Participatory budgeting'),\n description=_(\n 'With participatory-budgeting it\u2019s possible to make proposals '\n 'with budget specifications and locate them. Anyone can comment '\n 'and rate on different proposals.'\n ),\n content=[\n budgeting_phases.RequestPhase()\n ],\n image='images/participatory-budgeting.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('poll',\n ProjectBlueprint(\n title=_('Poll'),\n description=_(\n 'Create a poll with multiple questions and possible answers. 
'\n 'Anyone can cast votes and comment on the poll.'\n ),\n content=[\n poll_phases.VotingPhase(),\n ],\n image='images/poll.svg',\n settings_model=None,\n )),\n ('topic-prioritization',\n ProjectBlueprint(\n title=_('Topic Priorization'),\n description=_(\n 'Comment and prioritize topics.'\n ),\n content=[\n topicprio_phases.PrioritizePhase(),\n ],\n image='images/priorization.svg',\n settings_model=None,\n )),\n ('map-topic-prioritization',\n ProjectBlueprint(\n title=_('Place Prioritization'),\n description=_(\n 'Comment and prioritize places located on a map.'\n ),\n content=[\n maptopicprio_phases.PrioritizePhase(),\n ],\n image='images/place-priotization.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('kiezkasse',\n ProjectBlueprint(\n title=_('Kiezkasse'),\n description=_(\n 'With kiezkasse it\u2019s possible to make proposals '\n 'with budget specifications and locate them. '\n 'The proposals can be commented and rated.'\n ),\n content=[\n kiezkasse_phases.RequestFeedbackPhase(),\n ],\n image='images/kiezkasse.svg',\n settings_model=('a4maps', 'AreaSettings'),\n )),\n ('interactive-event',\n ProjectBlueprint(\n title=_('Interactive Event'),\n description=_(\n 'The participants of an event can ask their questions online. '\n 'Other participants can support the question. You as the '\n 'moderator can sort the questions by support or '\n 'characteristic.'\n ),\n content=[\n livequestion_phases.IssuePhase(),\n ],\n image='images/text-review.svg',\n settings_model=None,\n )),\n]\n", "path": "meinberlin/apps/dashboard/blueprints.py"}]}
| 3,236 | 103 |
gh_patches_debug_10405
|
rasdani/github-patches
|
git_diff
|
e-valuation__EvaP-340
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A user must never evaluate him/herself
In rare cases a user can be both a participant and a contributor in a course (e.g. a participating student and a tutor for the exercises at the same time).
The system has to make sure that this user can't answer questions related to him/herself.
</issue>
<code>
[start of evap/student/views.py]
1 from django.contrib import messages
2 from django.core.exceptions import PermissionDenied
3 from django.db import transaction
4 from django.shortcuts import get_object_or_404, redirect, render_to_response
5 from django.template import RequestContext
6 from django.utils.datastructures import SortedDict
7 from django.utils.translation import ugettext as _
8
9 from evap.evaluation.auth import login_required
10 from evap.evaluation.models import Course, Semester
11 from evap.evaluation.tools import questionnaires_and_contributions
12 from evap.student.forms import QuestionsForm
13 from evap.student.tools import make_form_identifier
14
15 from datetime import datetime
16
17
18 @login_required
19 def index(request):
20 # retrieve all courses, which the user can evaluate at some point
21 users_courses = Course.objects.filter(
22 participants=request.user
23 ).exclude(
24 voters=request.user
25 )
26 # split up into current and future courses
27 current_courses = users_courses.filter(state='inEvaluation')
28 future_courses = users_courses.filter(state='approved')
29
30 return render_to_response(
31 "student_index.html",
32 dict(current_courses=current_courses,
33 future_courses=future_courses),
34 context_instance=RequestContext(request))
35
36
37 @login_required
38 def vote(request, course_id):
39 # retrieve course and make sure that the user is allowed to vote
40 course = get_object_or_404(Course, id=course_id)
41 if not course.can_user_vote(request.user):
42 raise PermissionDenied
43
44 # build forms
45 forms = SortedDict()
46 for questionnaire, contribution in questionnaires_and_contributions(course):
47 form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)
48 forms[(contribution, questionnaire)] = form
49
50 if all(form.is_valid() for form in forms.values()):
51 # begin vote operation
52 with transaction.commit_on_success():
53 for (contribution, questionnaire), form in forms.items():
54 for question in questionnaire.question_set.all():
55 identifier = make_form_identifier(contribution, questionnaire, question)
56 value = form.cleaned_data.get(identifier)
57
58 if type(value) in [str, unicode]:
59 value = value.strip()
60
61 if value == 6: #no answer
62 value = None
63
64 # store the answer if one was given
65 if value:
66 question.answer_class.objects.create(
67 contribution=contribution,
68 question=question,
69 answer=value)
70
71 # remember that the user voted already
72 course.voters.add(request.user)
73
74 messages.add_message(request, messages.INFO, _("Your vote was recorded."))
75 return redirect('evap.student.views.index')
76 else:
77 return render_to_response(
78 "student_vote.html",
79 dict(forms=forms.values(),
80 course=course),
81 context_instance=RequestContext(request))
82
[end of evap/student/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/evap/student/views.py b/evap/student/views.py
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -45,6 +45,8 @@
forms = SortedDict()
for questionnaire, contribution in questionnaires_and_contributions(course):
form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)
+ if form.contribution.contributor == request.user:
+ continue # users shall not vote about themselves
forms[(contribution, questionnaire)] = form
if all(form.is_valid() for form in forms.values()):
|
{"golden_diff": "diff --git a/evap/student/views.py b/evap/student/views.py\n--- a/evap/student/views.py\n+++ b/evap/student/views.py\n@@ -45,6 +45,8 @@\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n+ if form.contribution.contributor == request.user:\n+ continue # users shall not vote about themselves\n forms[(contribution, questionnaire)] = form\n \n if all(form.is_valid() for form in forms.values()):\n", "issue": "A user must never evaluate him/herself\nIn rare cases a user can be participant and contributor in a course (e.g. participating student and tutor for the exercises at the same time).\nThe system has to make sure that this user can't answer questions related to him/herself.\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render_to_response\nfrom django.template import RequestContext\nfrom django.utils.datastructures import SortedDict\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import login_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import questionnaires_and_contributions\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom datetime import datetime\n\n\n@login_required\ndef index(request):\n # retrieve all courses, which the user can evaluate at some point\n users_courses = Course.objects.filter(\n participants=request.user\n ).exclude(\n voters=request.user\n )\n # split up into current and future courses\n current_courses = users_courses.filter(state='inEvaluation')\n future_courses = users_courses.filter(state='approved')\n\n return render_to_response(\n \"student_index.html\",\n dict(current_courses=current_courses,\n future_courses=future_courses),\n context_instance=RequestContext(request))\n\n\n@login_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # build forms\n forms = SortedDict()\n for questionnaire, contribution in questionnaires_and_contributions(course):\n form = QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire)\n forms[(contribution, questionnaire)] = form\n\n if all(form.is_valid() for form in forms.values()):\n # begin vote operation\n with transaction.commit_on_success():\n for (contribution, questionnaire), form in forms.items():\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = form.cleaned_data.get(identifier)\n\n if type(value) in [str, unicode]:\n value = value.strip()\n\n if value == 6: #no answer\n value = None\n\n # store the answer if one was given\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n messages.add_message(request, messages.INFO, _(\"Your vote was recorded.\"))\n return redirect('evap.student.views.index')\n else:\n return render_to_response(\n \"student_vote.html\",\n dict(forms=forms.values(),\n course=course),\n 
context_instance=RequestContext(request))\n", "path": "evap/student/views.py"}]}
| 1,327 | 134 |
gh_patches_debug_49873
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-6285
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After successful charge, order redirection to success page fails

</issue>
<code>
[start of app/api/helpers/ticketing.py]
1 from datetime import datetime
2
3 from app.api.helpers.db import save_to_db, get_count
4 from app.api.helpers.exceptions import ConflictException
5 from app.api.helpers.files import make_frontend_url
6 from app.api.helpers.mail import send_email_to_attendees
7 from app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer
8 from app.api.helpers.order import delete_related_attendees_for_order, create_pdf_tickets_for_holder
9 from app.api.helpers.payment import StripePaymentsManager, PayPalPaymentsManager
10 from app.models import db
11 from app.models.ticket_fee import TicketFees
12 from app.models.ticket_holder import TicketHolder
13
14
15 class TicketingManager(object):
16 """All ticketing and orders related helper functions"""
17
18 @staticmethod
19 def get_order_expiry():
20 return 10
21
22 @staticmethod
23 def match_discount_quantity(discount_code, ticket_holders=None):
24 qty = 0
25 old_holders = get_count(TicketHolder.query.filter(TicketHolder.ticket_id.in_(discount_code.tickets.split(","))))
26
27 for holder in ticket_holders:
28 ticket_holder = TicketHolder.query.filter_by(id=holder).one()
29 if ticket_holder.ticket.id in discount_code.tickets.split(","):
30 qty += 1
31 if (qty + old_holders) <= discount_code.tickets_number and \
32 discount_code.min_quantity <= qty <= discount_code.max_quantity:
33 return True
34
35 return False
36
37 @staticmethod
38 def calculate_update_amount(order):
39 discount = None
40 if order.discount_code_id:
41 discount = order.discount_code
42 # Access code part will be done ticket_holders API
43 amount = 0
44 total_discount = 0
45 fees = TicketFees.query.filter_by(currency=order.event.payment_currency).first()
46
47 for order_ticket in order.order_tickets:
48 with db.session.no_autoflush:
49 if order_ticket.ticket.is_fee_absorbed or not fees:
50 ticket_amount = (order_ticket.ticket.price * order_ticket.quantity)
51 amount += (order_ticket.ticket.price * order_ticket.quantity)
52 else:
53 order_fee = fees.service_fee * (order_ticket.ticket.price * order_ticket.quantity) / 100
54 if order_fee > fees.maximum_fee:
55 ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee
56 amount += (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee
57 else:
58 ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + order_fee
59 amount += (order_ticket.ticket.price * order_ticket.quantity) + order_fee
60
61 if discount and str(order_ticket.ticket.id) in discount.tickets.split(","):
62 if discount.type == "amount":
63 total_discount += discount.value * order_ticket.quantity
64 else:
65 total_discount += discount.value * ticket_amount / 100
66
67 if discount:
68 if discount.type == "amount":
69 order.amount = max(amount - total_discount, 0)
70 elif discount.type == "percent":
71 order.amount = amount - (discount.value * amount / 100.0)
72 else:
73 order.amount = amount
74 save_to_db(order)
75 return order
76
77 @staticmethod
78 def charge_stripe_order_payment(order, token_id):
79 """
80 Charge the user through Stripe
81 :param order: Order for which to charge for
82 :param token_id: Stripe token
83 :return:
84 """
85 # save the stripe token with the order
86 order.stripe_token = token_id
87 save_to_db(order)
88
89 # charge the user
90 try:
91 charge = StripePaymentsManager.capture_payment(order)
92 except ConflictException as e:
93 # payment failed hence expire the order
94 order.status = 'expired'
95 save_to_db(order)
96
97 # delete related attendees to unlock the tickets
98 delete_related_attendees_for_order(order)
99
100 raise e
101
102 # charge.paid is true if the charge succeeded, or was successfully authorized for later capture.
103 if charge.paid:
104 # update the order in the db.
105 order.paid_via = charge.source.object
106 order.brand = charge.source.brand
107 order.exp_month = charge.source.exp_month
108 order.exp_year = charge.source.exp_year
109 order.last4 = charge.source.last4
110 order.transaction_id = charge.id
111 order.status = 'completed'
112 order.completed_at = datetime.utcnow()
113 save_to_db(order)
114
115 # create tickets.
116 create_pdf_tickets_for_holder(order)
117
118 # send email and notifications.
119 send_email_to_attendees(order, current_user.id)
120 send_notif_to_attendees(order, current_user.id)
121
122 order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))
123 for organizer in order.event.organizers:
124 send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,
125 order.id)
126 if order.event.owner:
127 send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,
128 order.event.name, order.id)
129
130 return True, 'Charge successful'
131 else:
132 # payment failed hence expire the order
133 order.status = 'expired'
134 save_to_db(order)
135
136 # delete related attendees to unlock the tickets
137 delete_related_attendees_for_order(order)
138
139 # return the failure message from stripe.
140 return False, charge.failure_message
141
142 @staticmethod
143 def charge_paypal_order_payment(order, paypal_payer_id, paypal_payment_id):
144 """
145 Charge the user through paypal.
146 :param order: Order for which to charge for.
147 :param paypal_payment_id: payment_id
148 :param paypal_payer_id: payer_id
149 :return:
150 """
151
152 # save the paypal payment_id with the order
153 order.paypal_token = paypal_payment_id
154 save_to_db(order)
155
156 # create the transaction.
157 status, error = PayPalPaymentsManager.execute_payment(paypal_payer_id, paypal_payment_id)
158
159 if status:
160 # successful transaction hence update the order details.
161 order.paid_via = 'paypal'
162 order.status = 'completed'
163 order.transaction_id = paypal_payment_id
164 order.completed_at = datetime.utcnow()
165 save_to_db(order)
166
167 # create tickets
168 create_pdf_tickets_for_holder(order)
169
170 # send email and notifications
171 send_email_to_attendees(order, order.user_id)
172 send_notif_to_attendees(order, order.user_id)
173
174 order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))
175 for organizer in order.event.organizers:
176 send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,
177 order.id)
178 if order.event.owner:
179 send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,
180 order.event.name, order.id)
181
182 return True, 'Charge successful'
183 else:
184 # payment failed hence expire the order
185 order.status = 'expired'
186 save_to_db(order)
187
188 # delete related attendees to unlock the tickets
189 delete_related_attendees_for_order(order)
190
191 # return the error message from Paypal
192 return False, error
193
[end of app/api/helpers/ticketing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/helpers/ticketing.py b/app/api/helpers/ticketing.py
--- a/app/api/helpers/ticketing.py
+++ b/app/api/helpers/ticketing.py
@@ -10,6 +10,7 @@
from app.models import db
from app.models.ticket_fee import TicketFees
from app.models.ticket_holder import TicketHolder
+from flask_jwt_extended import current_user
class TicketingManager(object):
|
{"golden_diff": "diff --git a/app/api/helpers/ticketing.py b/app/api/helpers/ticketing.py\n--- a/app/api/helpers/ticketing.py\n+++ b/app/api/helpers/ticketing.py\n@@ -10,6 +10,7 @@\n from app.models import db\n from app.models.ticket_fee import TicketFees\n from app.models.ticket_holder import TicketHolder\n+from flask_jwt_extended import current_user\n \n \n class TicketingManager(object):\n", "issue": "After successful charge, order redirection to success page fails\n\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom app.api.helpers.db import save_to_db, get_count\nfrom app.api.helpers.exceptions import ConflictException\nfrom app.api.helpers.files import make_frontend_url\nfrom app.api.helpers.mail import send_email_to_attendees\nfrom app.api.helpers.notification import send_notif_to_attendees, send_notif_ticket_purchase_organizer\nfrom app.api.helpers.order import delete_related_attendees_for_order, create_pdf_tickets_for_holder\nfrom app.api.helpers.payment import StripePaymentsManager, PayPalPaymentsManager\nfrom app.models import db\nfrom app.models.ticket_fee import TicketFees\nfrom app.models.ticket_holder import TicketHolder\n\n\nclass TicketingManager(object):\n \"\"\"All ticketing and orders related helper functions\"\"\"\n\n @staticmethod\n def get_order_expiry():\n return 10\n\n @staticmethod\n def match_discount_quantity(discount_code, ticket_holders=None):\n qty = 0\n old_holders = get_count(TicketHolder.query.filter(TicketHolder.ticket_id.in_(discount_code.tickets.split(\",\"))))\n\n for holder in ticket_holders:\n ticket_holder = TicketHolder.query.filter_by(id=holder).one()\n if ticket_holder.ticket.id in discount_code.tickets.split(\",\"):\n qty += 1\n if (qty + old_holders) <= discount_code.tickets_number and \\\n discount_code.min_quantity <= qty <= discount_code.max_quantity:\n return True\n\n return False\n\n @staticmethod\n def calculate_update_amount(order):\n discount = None\n if order.discount_code_id:\n discount = order.discount_code\n # Access code part will be done ticket_holders API\n amount = 0\n total_discount = 0\n fees = TicketFees.query.filter_by(currency=order.event.payment_currency).first()\n\n for order_ticket in order.order_tickets:\n with db.session.no_autoflush:\n if order_ticket.ticket.is_fee_absorbed or not fees:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity)\n amount += (order_ticket.ticket.price * order_ticket.quantity)\n else:\n order_fee = fees.service_fee * (order_ticket.ticket.price * order_ticket.quantity) / 100\n if order_fee > fees.maximum_fee:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee\n amount += (order_ticket.ticket.price * order_ticket.quantity) + fees.maximum_fee\n else:\n ticket_amount = (order_ticket.ticket.price * order_ticket.quantity) + order_fee\n amount += (order_ticket.ticket.price * order_ticket.quantity) + order_fee\n\n if discount and str(order_ticket.ticket.id) in discount.tickets.split(\",\"):\n if discount.type == \"amount\":\n total_discount += discount.value * order_ticket.quantity\n else:\n total_discount += discount.value * ticket_amount / 100\n\n if discount:\n if discount.type == \"amount\":\n order.amount = max(amount - total_discount, 0)\n elif discount.type == \"percent\":\n order.amount = amount - (discount.value * amount / 100.0)\n else:\n order.amount = amount\n save_to_db(order)\n return order\n\n @staticmethod\n def charge_stripe_order_payment(order, token_id):\n \"\"\"\n Charge the user through Stripe\n :param 
order: Order for which to charge for\n :param token_id: Stripe token\n :return:\n \"\"\"\n # save the stripe token with the order\n order.stripe_token = token_id\n save_to_db(order)\n\n # charge the user\n try:\n charge = StripePaymentsManager.capture_payment(order)\n except ConflictException as e:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n raise e\n\n # charge.paid is true if the charge succeeded, or was successfully authorized for later capture.\n if charge.paid:\n # update the order in the db.\n order.paid_via = charge.source.object\n order.brand = charge.source.brand\n order.exp_month = charge.source.exp_month\n order.exp_year = charge.source.exp_year\n order.last4 = charge.source.last4\n order.transaction_id = charge.id\n order.status = 'completed'\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n # create tickets.\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications.\n send_email_to_attendees(order, current_user.id)\n send_notif_to_attendees(order, current_user.id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,\n order.id)\n if order.event.owner:\n send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,\n order.event.name, order.id)\n\n return True, 'Charge successful'\n else:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n # return the failure message from stripe.\n return False, charge.failure_message\n\n @staticmethod\n def charge_paypal_order_payment(order, paypal_payer_id, paypal_payment_id):\n \"\"\"\n Charge the user through paypal.\n :param order: Order for which to charge for.\n :param paypal_payment_id: payment_id\n :param paypal_payer_id: payer_id\n :return:\n \"\"\"\n\n # save the paypal payment_id with the order\n order.paypal_token = paypal_payment_id\n save_to_db(order)\n\n # create the transaction.\n status, error = PayPalPaymentsManager.execute_payment(paypal_payer_id, paypal_payment_id)\n\n if status:\n # successful transaction hence update the order details.\n order.paid_via = 'paypal'\n order.status = 'completed'\n order.transaction_id = paypal_payment_id\n order.completed_at = datetime.utcnow()\n save_to_db(order)\n\n # create tickets\n create_pdf_tickets_for_holder(order)\n\n # send email and notifications\n send_email_to_attendees(order, order.user_id)\n send_notif_to_attendees(order, order.user_id)\n\n order_url = make_frontend_url(path='/orders/{identifier}'.format(identifier=order.identifier))\n for organizer in order.event.organizers:\n send_notif_ticket_purchase_organizer(organizer, order.invoice_number, order_url, order.event.name,\n order.id)\n if order.event.owner:\n send_notif_ticket_purchase_organizer(order.event.owner, order.invoice_number, order_url,\n order.event.name, order.id)\n\n return True, 'Charge successful'\n else:\n # payment failed hence expire the order\n order.status = 'expired'\n save_to_db(order)\n\n # delete related attendees to unlock the tickets\n delete_related_attendees_for_order(order)\n\n # return the error message from Paypal\n return False, error\n", "path": 
"app/api/helpers/ticketing.py"}]}
| 2,633 | 93 |
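Note on the record above: the golden diff is a one-line import fix. `charge_stripe_order_payment` uses `current_user.id` when sending attendee emails and notifications, but the module never imports `current_user` from `flask_jwt_extended`, which lines up with the reported symptom of the redirect failing only after a successful charge. The snippet below is a minimal, self-contained illustration of why this class of bug survives import time; it is not Open Event Server code.

```python
# A name that is only resolved inside a function body is not checked at
# import time; the NameError surfaces the first time that code path runs.

def charge(order):
    # 'current_user' is neither defined nor imported anywhere in this module.
    return f"charging order {order} for user {current_user}"

if __name__ == "__main__":
    try:
        charge("ord-1")
    except NameError as exc:
        # This is the failure mode the one-line import in the patch removes.
        print(f"NameError raised only when the charge path executes: {exc}")
```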
gh_patches_debug_5905
|
rasdani/github-patches
|
git_diff
|
scoutapp__scout_apm_python-746
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation notice about urllib3[secure]
### Description
pyOpenSSL and urllib3[secure] are deprecated in the upcoming release (1.26.12)
https://github.com/urllib3/urllib3/issues/2680
Removed 'urllib3[secure] < 1.25 ; python_version < "3.5"' and 'urllib3[secure] < 2 ; python_version >= "3.5"'
Closes #746
</issue>
<code>
[start of setup.py]
1 # coding=utf-8
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 import os
5 import sys
6
7 from setuptools import Extension, find_packages, setup
8
9 with open("README.md", "r") as fp:
10 long_description = fp.read()
11
12 packages = find_packages("src")
13 if sys.version_info < (3, 6):
14 packages = [p for p in packages if not p.startswith("scout_apm.async_")]
15
16 compile_extensions = (
17 # Python 3+
18 sys.version_info >= (3,)
19 # Not Jython
20 and not sys.platform.startswith("java")
21 # Not PyPy
22 and "__pypy__" not in sys.builtin_module_names
23 # Not explicitly disabled
24 and (os.environ.get("SCOUT_DISABLE_EXTENSIONS", "") == "")
25 )
26 if compile_extensions:
27 ext_modules = [
28 Extension(
29 name=str("scout_apm.core._objtrace"),
30 sources=[str("src/scout_apm/core/_objtrace.c")],
31 optional=True,
32 )
33 ]
34 else:
35 ext_modules = []
36
37 setup(
38 name="scout_apm",
39 version="2.26.1",
40 description="Scout Application Performance Monitoring Agent",
41 long_description=long_description,
42 long_description_content_type="text/markdown",
43 url="https://github.com/scoutapp/scout_apm_python",
44 project_urls={
45 "Documentation": "https://docs.scoutapm.com/#python-agent",
46 "Changelog": (
47 "https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md"
48 ),
49 },
50 author="Scout",
51 author_email="[email protected]",
52 license="MIT",
53 zip_safe=False,
54 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
55 packages=packages,
56 package_dir={str(""): str("src")},
57 ext_modules=ext_modules,
58 entry_points={
59 "console_scripts": [
60 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
61 ]
62 },
63 install_requires=[
64 'asgiref ; python_version >= "3.5"',
65 'contextvars ; python_version >= "3.6" and python_version < "3.7"',
66 'importlib-metadata ; python_version < "3.8"',
67 "psutil>=5,<6",
68 'urllib3[secure] < 1.25 ; python_version < "3.5"',
69 'urllib3[secure] < 2 ; python_version >= "3.5"',
70 "wrapt>=1.10,<2.0",
71 ],
72 keywords=["apm", "performance monitoring", "development"],
73 classifiers=[
74 "Development Status :: 5 - Production/Stable",
75 "Framework :: Bottle",
76 "Framework :: Django",
77 "Framework :: Django :: 1.8",
78 "Framework :: Django :: 1.9",
79 "Framework :: Django :: 1.10",
80 "Framework :: Django :: 1.11",
81 "Framework :: Django :: 2.0",
82 "Framework :: Django :: 2.1",
83 "Framework :: Django :: 2.2",
84 "Framework :: Django :: 3.0",
85 "Framework :: Django :: 3.1",
86 "Framework :: Django :: 3.2",
87 "Framework :: Django :: 4.0",
88 "Framework :: Flask",
89 "Framework :: Pyramid",
90 "Intended Audience :: Developers",
91 "Topic :: System :: Monitoring",
92 "License :: OSI Approved :: MIT License",
93 "Operating System :: MacOS",
94 "Operating System :: POSIX",
95 "Operating System :: POSIX :: Linux",
96 "Programming Language :: Python :: 2",
97 "Programming Language :: Python :: 2.7",
98 "Programming Language :: Python :: 3",
99 "Programming Language :: Python :: 3.4",
100 "Programming Language :: Python :: 3.5",
101 "Programming Language :: Python :: 3.6",
102 "Programming Language :: Python :: 3.7",
103 "Programming Language :: Python :: 3.8",
104 "Programming Language :: Python :: 3.9",
105 "Programming Language :: Python :: 3.10",
106 ],
107 )
108
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
'importlib-metadata ; python_version < "3.8"',
"psutil>=5,<6",
'urllib3[secure] < 1.25 ; python_version < "3.5"',
- 'urllib3[secure] < 2 ; python_version >= "3.5"',
+ 'urllib3 < 2 ; python_version >= "3.5"',
"wrapt>=1.10,<2.0",
],
keywords=["apm", "performance monitoring", "development"],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n- 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n+ 'urllib3 < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n", "issue": "Deprecation notice about urllib3[secure]\n ### Description\r\n\r\n pyOpenSSL and urllib3[secure] are deprecated in the upcoming release (1.26.12)\r\n https://github.com/urllib3/urllib3/issues/2680\r\n Removed 'urllib3[secure] < 1.25 ; python_version < \"3.5\"' and 'urllib3[secure] < 2 ; python_version >= \"3.5\"'\r\n\r\nCloses #746\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nwith open(\"README.md\", \"r\") as fp:\n long_description = fp.read()\n\npackages = find_packages(\"src\")\nif sys.version_info < (3, 6):\n packages = [p for p in packages if not p.startswith(\"scout_apm.async_\")]\n\ncompile_extensions = (\n # Python 3+\n sys.version_info >= (3,)\n # Not Jython\n and not sys.platform.startswith(\"java\")\n # Not PyPy\n and \"__pypy__\" not in sys.builtin_module_names\n # Not explicitly disabled\n and (os.environ.get(\"SCOUT_DISABLE_EXTENSIONS\", \"\") == \"\")\n)\nif compile_extensions:\n ext_modules = [\n Extension(\n name=str(\"scout_apm.core._objtrace\"),\n sources=[str(\"src/scout_apm/core/_objtrace.c\")],\n optional=True,\n )\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"scout_apm\",\n version=\"2.26.1\",\n description=\"Scout Application Performance Monitoring Agent\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/scoutapp/scout_apm_python\",\n project_urls={\n \"Documentation\": \"https://docs.scoutapm.com/#python-agent\",\n \"Changelog\": (\n \"https://github.com/scoutapp/scout_apm_python/blob/master/CHANGELOG.md\"\n ),\n },\n author=\"Scout\",\n author_email=\"[email protected]\",\n license=\"MIT\",\n zip_safe=False,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n packages=packages,\n package_dir={str(\"\"): str(\"src\")},\n ext_modules=ext_modules,\n entry_points={\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n install_requires=[\n 'asgiref ; python_version >= \"3.5\"',\n 'contextvars ; python_version >= \"3.6\" and python_version < \"3.7\"',\n 'importlib-metadata ; python_version < \"3.8\"',\n \"psutil>=5,<6\",\n 'urllib3[secure] < 1.25 ; python_version < \"3.5\"',\n 'urllib3[secure] < 2 ; python_version >= \"3.5\"',\n \"wrapt>=1.10,<2.0\",\n ],\n keywords=[\"apm\", \"performance monitoring\", \"development\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Framework :: Bottle\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.8\",\n \"Framework :: Django :: 1.9\",\n \"Framework :: Django :: 1.10\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Flask\",\n \"Framework :: Pyramid\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: 
Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n)\n", "path": "setup.py"}]}
| 1,784 | 146 |
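Note on the record above: the accepted fix only edits the requirement string, turning `urllib3[secure] < 2 ; python_version >= "3.5"` into `urllib3 < 2 ; python_version >= "3.5"`, i.e. dropping the deprecated `secure` extra while keeping the version ceiling and the environment marker. One way to confirm nothing else changes is to parse both strings with a PEP 508 parser; the sketch below assumes the third-party `packaging` library is installed.

```python
# Compare the old and new requirement strings from the patch above.
from packaging.requirements import Requirement

old = Requirement('urllib3[secure] < 2 ; python_version >= "3.5"')
new = Requirement('urllib3 < 2 ; python_version >= "3.5"')

for label, req in (("old", old), ("new", new)):
    print(label, req.name, sorted(req.extras), str(req.specifier), req.marker)

# Only the extras differ: {'secure'} for the old spec, an empty set for the
# new one; the name, the "<2" specifier and the python_version marker stay.
```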
gh_patches_debug_36454
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1493
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] If no checks to display in table - should state that
**Describe the bug**
Current status:

(picture taken from here: https://docs.deepchecks.com/dev/user-guide/tabular/auto_tutorials/plot_phishing_urls.html#understanding-the-checks-results)
**Expected behavior**
Used to have a text instead, recover that text
</issue>
<code>
[start of deepchecks/core/serialization/suite_result/widget.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing ipywidget serializer for the SuiteResult type."""
12 import typing as t
13 import warnings
14
15 import pandas as pd
16 from ipywidgets import HTML, Tab, VBox, Widget
17
18 from deepchecks.core import check_result as check_types
19 from deepchecks.core import suite
20 from deepchecks.core.serialization.abc import WidgetSerializer
21 from deepchecks.core.serialization.check_result.html import CheckResultSection
22 from deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer
23 from deepchecks.core.serialization.common import Html as CommonHtml
24 from deepchecks.core.serialization.common import join, normalize_widget_style
25 from deepchecks.core.serialization.dataframe.widget import DataFrameSerializer
26
27 from . import html
28
29 __all__ = ['SuiteResultSerializer']
30
31
32 class SuiteResultSerializer(WidgetSerializer['suite.SuiteResult']):
33 """Serializes any SuiteResult instance into ipywidgets.Widget instance.
34
35 Parameters
36 ----------
37 value : SuiteResult
38 SuiteResult instance that needed to be serialized.
39 """
40
41 def __init__(self, value: 'suite.SuiteResult', **kwargs):
42 if not isinstance(value, suite.SuiteResult):
43 raise TypeError(
44 f'Expected "SuiteResult" but got "{type(value).__name__}"'
45 )
46 self.value = value
47 self._html_serializer = html.SuiteResultSerializer(self.value)
48
49 def serialize(
50 self,
51 output_id: t.Optional[str] = None,
52 **kwargs
53 ) -> VBox:
54 """Serialize a SuiteResult instance into ipywidgets.Widget instance.
55
56 Parameters
57 ----------
58 output_id : Optional[str], default None
59 unique output identifier that will be used to form anchor links
60
61 Returns
62 -------
63 ipywidgets.VBox
64 """
65 tab = Tab()
66 tab.set_title(0, 'Checks With Conditions')
67 tab.set_title(1, 'Checks Without Conditions')
68 tab.set_title(2, 'Checks Without Output')
69
70 tab.children = [
71 self.prepare_results_with_condition_and_display(
72 output_id=output_id, **kwargs
73 ),
74 self.prepare_results_without_condition(
75 output_id=output_id,
76 check_sections=['additional-output'],
77 **kwargs
78 ),
79 self.prepare_failures_list()
80 ]
81
82 style = '<style>.jupyter-widgets.widget-tab > .p-TabBar .p-TabBar-tab {flex: 0 1 auto}</style>'
83
84 return VBox(children=[
85 HTML(value=style),
86 self.prepare_summary(output_id=output_id, **kwargs),
87 tab
88 ])
89
90 def prepare_summary(
91 self,
92 output_id: t.Optional[str] = None,
93 **kwargs
94 ) -> HTML:
95 """Prepare summary widget."""
96 return HTML(value=self._html_serializer.prepare_summary(
97 output_id=output_id,
98 **kwargs
99 ))
100
101 def prepare_conditions_table(
102 self,
103 output_id: t.Optional[str] = None,
104 **kwargs
105 ) -> HTML:
106 """Prepare summary widget."""
107 return normalize_widget_style(HTML(value=self._html_serializer.prepare_conditions_table(
108 output_id=output_id,
109 include_check_name=True,
110 **kwargs
111 )))
112
113 def prepare_failures_list(self) -> HTML:
114 """Prepare failures list widget."""
115 return normalize_widget_style(HTML(
116 value=self._html_serializer.prepare_failures_list()
117 ))
118
119 def prepare_results_without_condition(
120 self,
121 output_id: t.Optional[str] = None,
122 check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
123 **kwargs
124 ) -> VBox:
125 """Prepare widget that shows results without conditions.
126
127 Parameters
128 ----------
129 output_id : Optional[str], default None
130 unique output identifier that will be used to form anchor links
131 check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
132 sequence of check result sections to include into the output,
133 in case of 'None' all sections will be included
134
135 Returns
136 -------
137 ipywidgets.VBox
138 """
139 results = t.cast(
140 t.List[check_types.CheckResult],
141 self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)
142 )
143 results_without_conditions = [
144 CheckResultWidgetSerializer(it).serialize(
145 output_id=output_id,
146 include=check_sections,
147 **kwargs
148 )
149 for it in results
150 ]
151 return normalize_widget_style(VBox(children=[
152 HTML(value='<h2>Check Without Conditions Output</h2>'),
153 self.prepare_navigation_for_unconditioned_results(output_id),
154 HTML(value=CommonHtml.light_hr),
155 *join(results_without_conditions, HTML(value=CommonHtml.light_hr))
156 ]))
157
158 def prepare_results_with_condition_and_display(
159 self,
160 output_id: t.Optional[str] = None,
161 check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,
162 **kwargs
163 ) -> VBox:
164 """Prepare widget that shows results with conditions and display.
165
166 Parameters
167 ----------
168 output_id : Optional[str], default None
169 unique output identifier that will be used to form anchor links
170 check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None
171 sequence of check result sections to include into the output,
172 in case of 'None' all sections will be included
173
174 Returns
175 -------
176 ipywidgets.VBox
177 """
178 results = t.cast(
179 t.List[check_types.CheckResult],
180 self.value.select_results(self.value.results_with_conditions & self.value.results_with_display)
181 )
182 results_with_condition_and_display = [
183 CheckResultWidgetSerializer(it).serialize(
184 output_id=output_id,
185 include=check_sections,
186 **kwargs
187 )
188 for it in results
189 ]
190 output = (
191 join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))
192 if len(results_with_condition_and_display) > 0
193 else (HTML(value='<p>Nothing to show</p>'),)
194 )
195 return normalize_widget_style(VBox(children=[
196 self.prepare_conditions_table(output_id=output_id),
197 HTML(value='<h2>Check With Conditions Output</h2>'),
198 *output
199 ]))
200
201 def prepare_navigation_for_unconditioned_results(
202 self,
203 output_id: t.Optional[str] = None,
204 **kwargs
205 ) -> Widget:
206 """Prepare navigation widget for the tab with unconditioned_results.
207
208 Parameters
209 ----------
210 output_id : Optional[str], default None
211 unique output identifier that will be used to form anchor links
212
213 Returns
214 -------
215 ipywidgets.Widget
216 """
217 data = []
218
219 results = t.cast(
220 t.List[check_types.CheckResult],
221 self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)
222 )
223
224 for check_result in results:
225 check_header = check_result.get_header()
226
227 if output_id:
228 href = f'href="#{check_result.get_check_id(output_id)}"'
229 header = f'<a {href}>{check_header}</a>'
230 else:
231 header = check_header
232
233 summary = check_result.get_metadata(with_doc_link=True)['summary']
234 data.append([header, summary])
235
236 df = pd.DataFrame(
237 data=data,
238 columns=['Check', 'Summary']
239 )
240
241 with warnings.catch_warnings():
242 warnings.simplefilter(action='ignore', category=FutureWarning)
243 return DataFrameSerializer(df.style.hide_index()).serialize()
244
[end of deepchecks/core/serialization/suite_result/widget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/core/serialization/suite_result/widget.py b/deepchecks/core/serialization/suite_result/widget.py
--- a/deepchecks/core/serialization/suite_result/widget.py
+++ b/deepchecks/core/serialization/suite_result/widget.py
@@ -113,7 +113,7 @@
def prepare_failures_list(self) -> HTML:
"""Prepare failures list widget."""
return normalize_widget_style(HTML(
- value=self._html_serializer.prepare_failures_list()
+ value=self._html_serializer.prepare_failures_list() or '<p>No outputs to show.</p>'
))
def prepare_results_without_condition(
@@ -148,12 +148,19 @@
)
for it in results
]
- return normalize_widget_style(VBox(children=[
- HTML(value='<h2>Check Without Conditions Output</h2>'),
- self.prepare_navigation_for_unconditioned_results(output_id),
- HTML(value=CommonHtml.light_hr),
- *join(results_without_conditions, HTML(value=CommonHtml.light_hr))
- ]))
+ if len(results_without_conditions) > 0:
+ children = (
+ HTML(value='<h2>Check Without Conditions Output</h2>'),
+ self.prepare_navigation_for_unconditioned_results(output_id),
+ HTML(value=CommonHtml.light_hr),
+ *join(results_without_conditions, HTML(value=CommonHtml.light_hr))
+ )
+ else:
+ children = (
+ HTML(value='<p>No outputs to show.</p>'),
+ )
+
+ return normalize_widget_style(VBox(children=children))
def prepare_results_with_condition_and_display(
self,
@@ -187,16 +194,18 @@
)
for it in results
]
- output = (
- join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))
- if len(results_with_condition_and_display) > 0
- else (HTML(value='<p>Nothing to show</p>'),)
- )
- return normalize_widget_style(VBox(children=[
- self.prepare_conditions_table(output_id=output_id),
- HTML(value='<h2>Check With Conditions Output</h2>'),
- *output
- ]))
+
+ if len(results_with_condition_and_display) > 0:
+ children = (
+ self.prepare_conditions_table(output_id=output_id),
+ HTML(value='<h2>Check With Conditions Output</h2>'),
+ *join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))
+ )
+ else:
+ children = (
+ HTML(value='<p>No outputs to show.</p>'),
+ )
+ return normalize_widget_style(VBox(children=children))
def prepare_navigation_for_unconditioned_results(
self,
|
{"golden_diff": "diff --git a/deepchecks/core/serialization/suite_result/widget.py b/deepchecks/core/serialization/suite_result/widget.py\n--- a/deepchecks/core/serialization/suite_result/widget.py\n+++ b/deepchecks/core/serialization/suite_result/widget.py\n@@ -113,7 +113,7 @@\n def prepare_failures_list(self) -> HTML:\n \"\"\"Prepare failures list widget.\"\"\"\n return normalize_widget_style(HTML(\n- value=self._html_serializer.prepare_failures_list()\n+ value=self._html_serializer.prepare_failures_list() or '<p>No outputs to show.</p>'\n ))\n \n def prepare_results_without_condition(\n@@ -148,12 +148,19 @@\n )\n for it in results\n ]\n- return normalize_widget_style(VBox(children=[\n- HTML(value='<h2>Check Without Conditions Output</h2>'),\n- self.prepare_navigation_for_unconditioned_results(output_id),\n- HTML(value=CommonHtml.light_hr),\n- *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n- ]))\n+ if len(results_without_conditions) > 0:\n+ children = (\n+ HTML(value='<h2>Check Without Conditions Output</h2>'),\n+ self.prepare_navigation_for_unconditioned_results(output_id),\n+ HTML(value=CommonHtml.light_hr),\n+ *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n+ )\n+ else:\n+ children = (\n+ HTML(value='<p>No outputs to show.</p>'),\n+ )\n+\n+ return normalize_widget_style(VBox(children=children))\n \n def prepare_results_with_condition_and_display(\n self,\n@@ -187,16 +194,18 @@\n )\n for it in results\n ]\n- output = (\n- join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n- if len(results_with_condition_and_display) > 0\n- else (HTML(value='<p>Nothing to show</p>'),)\n- )\n- return normalize_widget_style(VBox(children=[\n- self.prepare_conditions_table(output_id=output_id),\n- HTML(value='<h2>Check With Conditions Output</h2>'),\n- *output\n- ]))\n+\n+ if len(results_with_condition_and_display) > 0:\n+ children = (\n+ self.prepare_conditions_table(output_id=output_id),\n+ HTML(value='<h2>Check With Conditions Output</h2>'),\n+ *join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n+ )\n+ else:\n+ children = (\n+ HTML(value='<p>No outputs to show.</p>'),\n+ )\n+ return normalize_widget_style(VBox(children=children))\n \n def prepare_navigation_for_unconditioned_results(\n self,\n", "issue": "[BUG] If no checks to display in table - should state that\n**Describe the bug**\r\nCurrent status:\r\n\r\n(picture taken from here: https://docs.deepchecks.com/dev/user-guide/tabular/auto_tutorials/plot_phishing_urls.html#understanding-the-checks-results)\r\n\r\n**Expected behavior**\r\nUsed to have a text instead, recover that text\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing ipywidget serializer for the SuiteResult type.\"\"\"\nimport typing as t\nimport warnings\n\nimport pandas as pd\nfrom ipywidgets import HTML, Tab, VBox, Widget\n\nfrom deepchecks.core import check_result as check_types\nfrom deepchecks.core import suite\nfrom deepchecks.core.serialization.abc import WidgetSerializer\nfrom deepchecks.core.serialization.check_result.html import CheckResultSection\nfrom deepchecks.core.serialization.check_result.widget import CheckResultSerializer as CheckResultWidgetSerializer\nfrom deepchecks.core.serialization.common import Html as CommonHtml\nfrom deepchecks.core.serialization.common import join, normalize_widget_style\nfrom deepchecks.core.serialization.dataframe.widget import DataFrameSerializer\n\nfrom . import html\n\n__all__ = ['SuiteResultSerializer']\n\n\nclass SuiteResultSerializer(WidgetSerializer['suite.SuiteResult']):\n \"\"\"Serializes any SuiteResult instance into ipywidgets.Widget instance.\n\n Parameters\n ----------\n value : SuiteResult\n SuiteResult instance that needed to be serialized.\n \"\"\"\n\n def __init__(self, value: 'suite.SuiteResult', **kwargs):\n if not isinstance(value, suite.SuiteResult):\n raise TypeError(\n f'Expected \"SuiteResult\" but got \"{type(value).__name__}\"'\n )\n self.value = value\n self._html_serializer = html.SuiteResultSerializer(self.value)\n\n def serialize(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Serialize a SuiteResult instance into ipywidgets.Widget instance.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n tab = Tab()\n tab.set_title(0, 'Checks With Conditions')\n tab.set_title(1, 'Checks Without Conditions')\n tab.set_title(2, 'Checks Without Output')\n\n tab.children = [\n self.prepare_results_with_condition_and_display(\n output_id=output_id, **kwargs\n ),\n self.prepare_results_without_condition(\n output_id=output_id,\n check_sections=['additional-output'],\n **kwargs\n ),\n self.prepare_failures_list()\n ]\n\n style = '<style>.jupyter-widgets.widget-tab > .p-TabBar .p-TabBar-tab {flex: 0 1 auto}</style>'\n\n return VBox(children=[\n HTML(value=style),\n self.prepare_summary(output_id=output_id, **kwargs),\n tab\n ])\n\n def prepare_summary(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> HTML:\n \"\"\"Prepare summary widget.\"\"\"\n return HTML(value=self._html_serializer.prepare_summary(\n output_id=output_id,\n **kwargs\n ))\n\n def prepare_conditions_table(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> HTML:\n \"\"\"Prepare summary widget.\"\"\"\n return normalize_widget_style(HTML(value=self._html_serializer.prepare_conditions_table(\n output_id=output_id,\n include_check_name=True,\n **kwargs\n )))\n\n def prepare_failures_list(self) -> HTML:\n \"\"\"Prepare failures list widget.\"\"\"\n return normalize_widget_style(HTML(\n value=self._html_serializer.prepare_failures_list()\n ))\n\n def prepare_results_without_condition(\n self,\n output_id: t.Optional[str] = None,\n check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Prepare widget that shows results without conditions.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to 
form anchor links\n check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None\n sequence of check result sections to include into the output,\n in case of 'None' all sections will be included\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)\n )\n results_without_conditions = [\n CheckResultWidgetSerializer(it).serialize(\n output_id=output_id,\n include=check_sections,\n **kwargs\n )\n for it in results\n ]\n return normalize_widget_style(VBox(children=[\n HTML(value='<h2>Check Without Conditions Output</h2>'),\n self.prepare_navigation_for_unconditioned_results(output_id),\n HTML(value=CommonHtml.light_hr),\n *join(results_without_conditions, HTML(value=CommonHtml.light_hr))\n ]))\n\n def prepare_results_with_condition_and_display(\n self,\n output_id: t.Optional[str] = None,\n check_sections: t.Optional[t.Sequence[CheckResultSection]] = None,\n **kwargs\n ) -> VBox:\n \"\"\"Prepare widget that shows results with conditions and display.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n check_sections : Optional[Sequence[Literal['condition-table', 'additional-output']]], default None\n sequence of check result sections to include into the output,\n in case of 'None' all sections will be included\n\n Returns\n -------\n ipywidgets.VBox\n \"\"\"\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_with_conditions & self.value.results_with_display)\n )\n results_with_condition_and_display = [\n CheckResultWidgetSerializer(it).serialize(\n output_id=output_id,\n include=check_sections,\n **kwargs\n )\n for it in results\n ]\n output = (\n join(results_with_condition_and_display, HTML(value=CommonHtml.light_hr))\n if len(results_with_condition_and_display) > 0\n else (HTML(value='<p>Nothing to show</p>'),)\n )\n return normalize_widget_style(VBox(children=[\n self.prepare_conditions_table(output_id=output_id),\n HTML(value='<h2>Check With Conditions Output</h2>'),\n *output\n ]))\n\n def prepare_navigation_for_unconditioned_results(\n self,\n output_id: t.Optional[str] = None,\n **kwargs\n ) -> Widget:\n \"\"\"Prepare navigation widget for the tab with unconditioned_results.\n\n Parameters\n ----------\n output_id : Optional[str], default None\n unique output identifier that will be used to form anchor links\n\n Returns\n -------\n ipywidgets.Widget\n \"\"\"\n data = []\n\n results = t.cast(\n t.List[check_types.CheckResult],\n self.value.select_results(self.value.results_without_conditions & self.value.results_with_display)\n )\n\n for check_result in results:\n check_header = check_result.get_header()\n\n if output_id:\n href = f'href=\"#{check_result.get_check_id(output_id)}\"'\n header = f'<a {href}>{check_header}</a>'\n else:\n header = check_header\n\n summary = check_result.get_metadata(with_doc_link=True)['summary']\n data.append([header, summary])\n\n df = pd.DataFrame(\n data=data,\n columns=['Check', 'Summary']\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(action='ignore', category=FutureWarning)\n return DataFrameSerializer(df.style.hide_index()).serialize()\n", "path": "deepchecks/core/serialization/suite_result/widget.py"}]}
| 2,982 | 614 |
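Note on the record above: the deepchecks patch replaces an empty tab body with an explicit `<p>No outputs to show.</p>` placeholder whenever there is nothing to render, and applies the same guard to the failures list. The pattern itself is generic; the sketch below shows the conditional-children idea with plain `ipywidgets` and is a simplification, not deepchecks' actual serializer.

```python
# Build the children tuple conditionally so an empty section still shows a
# human-readable placeholder instead of a blank widget.
from ipywidgets import HTML, VBox


def build_section(title, rendered_results):
    if rendered_results:
        children = (HTML(value=f"<h2>{title}</h2>"), *rendered_results)
    else:
        children = (HTML(value="<p>No outputs to show.</p>"),)
    return VBox(children=children)


populated = build_section("Checks With Conditions", [HTML(value="<p>result A</p>")])
empty = build_section("Checks Without Output", [])
```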
gh_patches_debug_31075
|
rasdani/github-patches
|
git_diff
|
databricks__koalas-104
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix pypi description
See https://pypi.org/project/databricks-koalas/
It just pulls in our entire README in markdown format.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 #
4 # Copyright (C) 2019 Databricks, Inc.
5 #
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
17 #
18
19 from setuptools import setup
20
21
22 install_requires = [
23 'pandas>=0.23',
24 'decorator',
25 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
26 ]
27
28 setup(
29 name='databricks-koalas',
30 version='0.0.6',
31 packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',
32 'databricks.koalas.missing'],
33 extras_require={
34 'spark': ['pyspark>=2.4.0'],
35 },
36 install_requires=install_requires,
37 author="Timothy Hunter",
38 author_email="[email protected]",
39 license='http://www.apache.org/licenses/LICENSE-2.0',
40 long_description=open('README.md').read(),
41 )
42
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,24 +18,46 @@
from setuptools import setup
+DESCRIPTION = "Pandas DataFrame API on Apache Spark"
-install_requires = [
- 'pandas>=0.23',
- 'decorator',
- 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
-]
+LONG_DESCRIPTION = """
+Koalas makes data scientists more productive when interacting with big data,
+by augmenting Apache Spark's Python DataFrame API to be compatible with
+Pandas'.
+
+Pandas is the de facto standard (single-node) dataframe implementation in
+Python, while Spark is the de facto standard for big data processing.
+With this package, data scientists can:
+
+- Be immediately productive with Spark, with no learning curve, if one
+ is already familiar with Pandas.
+- Have a single codebase that works both with Pandas (tests, smaller datasets)
+ and with Spark (distributed datasets).
+"""
setup(
- name='databricks-koalas',
+ name='koalas',
version='0.0.6',
packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',
'databricks.koalas.missing'],
extras_require={
'spark': ['pyspark>=2.4.0'],
},
- install_requires=install_requires,
- author="Timothy Hunter",
- author_email="[email protected]",
+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
+ install_requires=[
+ 'pandas>=0.23',
+ 'decorator',
+ 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26
+ ],
+ maintainer="Databricks",
+ maintainer_email="[email protected]",
license='http://www.apache.org/licenses/LICENSE-2.0',
- long_description=open('README.md').read(),
+ url="https://github.com/databricks/spark-pandas",
+ project_urls={
+ 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',
+ # 'Documentation': '',
+ 'Source Code': 'https://github.com/databricks/spark-pandas'
+ },
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
)
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,24 +18,46 @@\n \n from setuptools import setup\n \n+DESCRIPTION = \"Pandas DataFrame API on Apache Spark\"\n \n-install_requires = [\n- 'pandas>=0.23',\n- 'decorator',\n- 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n-]\n+LONG_DESCRIPTION = \"\"\"\n+Koalas makes data scientists more productive when interacting with big data,\n+by augmenting Apache Spark's Python DataFrame API to be compatible with\n+Pandas'.\n+\n+Pandas is the de facto standard (single-node) dataframe implementation in\n+Python, while Spark is the de facto standard for big data processing.\n+With this package, data scientists can:\n+\n+- Be immediately productive with Spark, with no learning curve, if one\n+ is already familiar with Pandas.\n+- Have a single codebase that works both with Pandas (tests, smaller datasets)\n+ and with Spark (distributed datasets).\n+\"\"\"\n \n setup(\n- name='databricks-koalas',\n+ name='koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n- install_requires=install_requires,\n- author=\"Timothy Hunter\",\n- author_email=\"[email protected]\",\n+ python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',\n+ install_requires=[\n+ 'pandas>=0.23',\n+ 'decorator',\n+ 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n+ ],\n+ maintainer=\"Databricks\",\n+ maintainer_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n- long_description=open('README.md').read(),\n+ url=\"https://github.com/databricks/spark-pandas\",\n+ project_urls={\n+ 'Bug Tracker': 'https://github.com/databricks/spark-pandas/issues',\n+ # 'Documentation': '',\n+ 'Source Code': 'https://github.com/databricks/spark-pandas'\n+ },\n+ description=DESCRIPTION,\n+ long_description=LONG_DESCRIPTION,\n )\n", "issue": "Fix pypi description\nSee https://pypi.org/project/databricks-koalas/\r\n\r\nIt just pulls in our entire README in markdown format.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n#\n# Copyright (C) 2019 Databricks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom setuptools import setup\n\n\ninstall_requires = [\n 'pandas>=0.23',\n 'decorator',\n 'pyarrow>=0.10,<0.11', # See https://github.com/databricks/spark-pandas/issues/26\n]\n\nsetup(\n name='databricks-koalas',\n version='0.0.6',\n packages=['databricks', 'databricks.koalas', 'databricks.koalas.dask',\n 'databricks.koalas.missing'],\n extras_require={\n 'spark': ['pyspark>=2.4.0'],\n },\n install_requires=install_requires,\n author=\"Timothy Hunter\",\n author_email=\"[email protected]\",\n license='http://www.apache.org/licenses/LICENSE-2.0',\n long_description=open('README.md').read(),\n)\n", "path": "setup.py"}]}
| 978 | 597 |
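Note on the record above: the koalas fix stops reading `README.md` into `long_description` and instead ships a short hand-written summary plus maintainer, URL and `project_urls` metadata. If a project does want to keep reusing its README on PyPI, the usual alternative is to declare the content type so the Markdown renders; the sketch below shows that alternative as a generic example, not the koalas `setup.py`, and `twine check dist/*` can be run afterwards to verify the description renders.

```python
# Generic alternative: keep the Markdown README as the PyPI long description
# but tell PyPI how to render it.
from setuptools import setup, find_packages

with open("README.md", encoding="utf-8") as fp:
    readme = fp.read()

setup(
    name="example-package",          # hypothetical name, not a real project
    version="0.1.0",
    description="One-line summary shown in listings and search results",
    long_description=readme,
    long_description_content_type="text/markdown",
    packages=find_packages(),
)
```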
gh_patches_debug_31276
|
rasdani/github-patches
|
git_diff
|
pulp__pulpcore-3857
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Global tasks like repair or reclaim should probably not run in parallel
**Version**
Please provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.
**Describe the bug**
Global tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29
**To Reproduce**
Steps to reproduce the behavior:
**Expected behavior**
A clear and concise description of what you expected to happen.
**Additional context**
Add any other context about the problem here. Please provide links to any previous discussions via Discourse or Bugzilla.
</issue>
<code>
[start of pulpcore/app/views/repair.py]
1 from drf_spectacular.utils import extend_schema
2 from rest_framework.views import APIView
3
4 from pulpcore.app.response import OperationPostponedResponse
5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer
6 from pulpcore.app.tasks import repair_all_artifacts
7 from pulpcore.tasking.tasks import dispatch
8
9
10 class RepairView(APIView):
11 @extend_schema(
12 description=(
13 "Trigger an asynchronous task that checks for missing "
14 "or corrupted artifacts, and attempts to redownload them."
15 ),
16 summary="Repair Artifact Storage",
17 request=RepairSerializer,
18 responses={202: AsyncOperationResponseSerializer},
19 )
20 def post(self, request):
21 """
22 Repair artifacts.
23 """
24 serializer = RepairSerializer(data=request.data)
25 serializer.is_valid(raise_exception=True)
26
27 verify_checksums = serializer.validated_data["verify_checksums"]
28
29 task = dispatch(repair_all_artifacts, args=[verify_checksums])
30
31 return OperationPostponedResponse(task, request)
32
[end of pulpcore/app/views/repair.py]
[start of pulpcore/app/viewsets/reclaim.py]
1 from drf_spectacular.utils import extend_schema
2 from rest_framework.viewsets import ViewSet
3
4 from pulpcore.app.response import OperationPostponedResponse
5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer
6 from pulpcore.app.tasks import reclaim_space
7 from pulpcore.tasking.tasks import dispatch
8
9
10 class ReclaimSpaceViewSet(ViewSet):
11 """
12 Viewset for reclaim disk space endpoint.
13 """
14
15 serializer_class = ReclaimSpaceSerializer
16
17 @extend_schema(
18 description="Trigger an asynchronous space reclaim operation.",
19 responses={202: AsyncOperationResponseSerializer},
20 )
21 def reclaim(self, request):
22 """
23 Triggers an asynchronous space reclaim operation.
24 """
25 serializer = ReclaimSpaceSerializer(data=request.data)
26 serializer.is_valid(raise_exception=True)
27
28 repos = serializer.validated_data.get("repo_hrefs", [])
29 keeplist = serializer.validated_data.get("repo_versions_keeplist", [])
30 reclaim_repo_pks = []
31 keeplist_rv_pks = []
32 for repo in repos:
33 reclaim_repo_pks.append(repo.pk)
34 for rv in keeplist:
35 repos.append(rv.repository)
36 keeplist_rv_pks.append(rv.pk)
37
38 task = dispatch(
39 reclaim_space,
40 shared_resources=repos,
41 kwargs={
42 "repo_pks": reclaim_repo_pks,
43 "keeplist_rv_pks": keeplist_rv_pks,
44 },
45 )
46
47 return OperationPostponedResponse(task, request)
48
[end of pulpcore/app/viewsets/reclaim.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py
--- a/pulpcore/app/views/repair.py
+++ b/pulpcore/app/views/repair.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.views import APIView
from pulpcore.app.response import OperationPostponedResponse
@@ -26,6 +27,9 @@
verify_checksums = serializer.validated_data["verify_checksums"]
- task = dispatch(repair_all_artifacts, args=[verify_checksums])
+ uri = "/api/v3/repair/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])
return OperationPostponedResponse(task, request)
diff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py
--- a/pulpcore/app/viewsets/reclaim.py
+++ b/pulpcore/app/viewsets/reclaim.py
@@ -1,4 +1,5 @@
from drf_spectacular.utils import extend_schema
+from django.conf import settings
from rest_framework.viewsets import ViewSet
from pulpcore.app.response import OperationPostponedResponse
@@ -35,8 +36,17 @@
repos.append(rv.repository)
keeplist_rv_pks.append(rv.pk)
+ if repos:
+ exclusive_resources = None
+ else:
+ uri = "/api/v3/repositories/reclaim_space/"
+ if settings.DOMAIN_ENABLED:
+ uri = f"/{request.pulp_domain.name}{uri}"
+ exclusive_resources = [uri]
+
task = dispatch(
reclaim_space,
+ exclusive_resources=exclusive_resources,
shared_resources=repos,
kwargs={
"repo_pks": reclaim_repo_pks,
|
{"golden_diff": "diff --git a/pulpcore/app/views/repair.py b/pulpcore/app/views/repair.py\n--- a/pulpcore/app/views/repair.py\n+++ b/pulpcore/app/views/repair.py\n@@ -1,4 +1,5 @@\n from drf_spectacular.utils import extend_schema\n+from django.conf import settings\n from rest_framework.views import APIView\n \n from pulpcore.app.response import OperationPostponedResponse\n@@ -26,6 +27,9 @@\n \n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n \n- task = dispatch(repair_all_artifacts, args=[verify_checksums])\n+ uri = \"/api/v3/repair/\"\n+ if settings.DOMAIN_ENABLED:\n+ uri = f\"/{request.pulp_domain.name}{uri}\"\n+ task = dispatch(repair_all_artifacts, exclusive_resources=[uri], args=[verify_checksums])\n \n return OperationPostponedResponse(task, request)\ndiff --git a/pulpcore/app/viewsets/reclaim.py b/pulpcore/app/viewsets/reclaim.py\n--- a/pulpcore/app/viewsets/reclaim.py\n+++ b/pulpcore/app/viewsets/reclaim.py\n@@ -1,4 +1,5 @@\n from drf_spectacular.utils import extend_schema\n+from django.conf import settings\n from rest_framework.viewsets import ViewSet\n \n from pulpcore.app.response import OperationPostponedResponse\n@@ -35,8 +36,17 @@\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n \n+ if repos:\n+ exclusive_resources = None\n+ else:\n+ uri = \"/api/v3/repositories/reclaim_space/\"\n+ if settings.DOMAIN_ENABLED:\n+ uri = f\"/{request.pulp_domain.name}{uri}\"\n+ exclusive_resources = [uri]\n+\n task = dispatch(\n reclaim_space,\n+ exclusive_resources=exclusive_resources,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n", "issue": "Global tasks like repair or reclaim should probably not run in parallel\n**Version**\r\nPlease provide the versions of the pulpcore and plugin packages in use, and how they are installed. If you are using Pulp via Katello, please provide the Katello version.\r\n\r\n**Describe the bug**\r\nGlobal tasks that involve all repos like repair or reclaim should run one at a time, like orphan cleanup https://github.com/pulp/pulpcore/blob/main/pulpcore/app/viewsets/orphans.py#L29\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. 
Please provide links to any previous discussions via Discourse or Bugzilla.\r\n\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, RepairSerializer\nfrom pulpcore.app.tasks import repair_all_artifacts\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass RepairView(APIView):\n @extend_schema(\n description=(\n \"Trigger an asynchronous task that checks for missing \"\n \"or corrupted artifacts, and attempts to redownload them.\"\n ),\n summary=\"Repair Artifact Storage\",\n request=RepairSerializer,\n responses={202: AsyncOperationResponseSerializer},\n )\n def post(self, request):\n \"\"\"\n Repair artifacts.\n \"\"\"\n serializer = RepairSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n verify_checksums = serializer.validated_data[\"verify_checksums\"]\n\n task = dispatch(repair_all_artifacts, args=[verify_checksums])\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/repair.py"}, {"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.viewsets import ViewSet\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, ReclaimSpaceSerializer\nfrom pulpcore.app.tasks import reclaim_space\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass ReclaimSpaceViewSet(ViewSet):\n \"\"\"\n Viewset for reclaim disk space endpoint.\n \"\"\"\n\n serializer_class = ReclaimSpaceSerializer\n\n @extend_schema(\n description=\"Trigger an asynchronous space reclaim operation.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def reclaim(self, request):\n \"\"\"\n Triggers an asynchronous space reclaim operation.\n \"\"\"\n serializer = ReclaimSpaceSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n repos = serializer.validated_data.get(\"repo_hrefs\", [])\n keeplist = serializer.validated_data.get(\"repo_versions_keeplist\", [])\n reclaim_repo_pks = []\n keeplist_rv_pks = []\n for repo in repos:\n reclaim_repo_pks.append(repo.pk)\n for rv in keeplist:\n repos.append(rv.repository)\n keeplist_rv_pks.append(rv.pk)\n\n task = dispatch(\n reclaim_space,\n shared_resources=repos,\n kwargs={\n \"repo_pks\": reclaim_repo_pks,\n \"keeplist_rv_pks\": keeplist_rv_pks,\n },\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/reclaim.py"}]}
| 1,403 | 433 |
gh_patches_debug_15423
|
rasdani/github-patches
|
git_diff
|
deis__deis-5006
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Unable to start application with ' (quote) in env variable
Our customer's bug:
**STR**
``` bash
git clone https://github.com/deis/example-python-flask && cd example-python-flask
deis create
git push deis master
echo "MAILER_SUBJECT=ISP cron can't create booklet" > .env
deis config:push
```
**Actual result**
Creating config... .o.
Client return 504 error, after ~30 min(balancer config)
_Controller log_
``` log
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Tagging Docker image 10.21.12.123:5000/forecast:git-1c89b116 as forecast:git-1c89b116
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Building Docker image 10.21.12.123:5000/forecast:v4
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO forecast: 500 Server Error: Internal Server Error ("Syntax error - can't find = in "cron". Must be of the form: name=value")
Dec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO [forecast]: 500 Server Error: Internal Server Error ("Syntax error - can't find = in "cron". Must be of the form: name=value")
```
_deis info_
``` bash
=== forecast Application
updated: 2015-12-14T02:57:36UTC
uuid: 01eacd72-1950-4ec1-a301-4212b59572b6
created: 2015-12-14T02:50:35UTC
url: forecast.web.2gis.local
owner: v.reyder
id: forecast
=== forecast Processes
--- web:
web.1 up (v3)
web.1 down (v4)
=== forecast Domains
```
_Actually config is set_
``` bash
$ deis config
=== forecast Config
MAILER_SUBJECT ISP cron can't create booklet
```
_Registry_
``` bash
s3cmd ls s3://devregistry/registry/repositories/library/forecast/
2015-12-14 02:57 11780 s3://devregistry/registry/repositories/library/forecast/_index_images
2015-12-14 02:56 64 s3://devregistry/registry/repositories/library/forecast/tag_git-1c89b116
2015-12-14 02:57 64 s3://devregistry/registry/repositories/library/forecast/tag_v3
2015-12-14 02:56 147 s3://devregistry/registry/repositories/library/forecast/taggit-1c89b116_json
2015-12-14 02:57 147 s3://devregistry/registry/repositories/library/forecast/tagv3_json
```
**Important: Only v3 tags presents and v4 is not exist**
**Expected result**
Config is set, application is up, and proper tags create in registry.
</issue>
<code>
[start of controller/registry/dockerclient.py]
1 # -*- coding: utf-8 -*-
2 """Support the Deis workflow by manipulating and publishing Docker images."""
3
4 from __future__ import unicode_literals
5 import io
6 import logging
7
8 from django.conf import settings
9 from rest_framework.exceptions import PermissionDenied
10 from simpleflock import SimpleFlock
11 import docker
12
13 logger = logging.getLogger(__name__)
14
15
16 class DockerClient(object):
17 """Use the Docker API to pull, tag, build, and push images to deis-registry."""
18
19 FLOCKFILE = '/tmp/controller-pull'
20
21 def __init__(self):
22 self.client = docker.Client(version='auto')
23 self.registry = settings.REGISTRY_HOST + ':' + str(settings.REGISTRY_PORT)
24
25 def publish_release(self, source, config, target, deis_registry):
26 """Update a source Docker image with environment config and publish it to deis-registry."""
27 # get the source repository name and tag
28 src_name, src_tag = docker.utils.parse_repository_tag(source)
29 # get the target repository name and tag
30 name, tag = docker.utils.parse_repository_tag(target)
31 # strip any "http://host.domain:port" prefix from the target repository name,
32 # since we always publish to the Deis registry
33 name = strip_prefix(name)
34
35 # pull the source image from the registry
36 # NOTE: this relies on an implementation detail of deis-builder, that
37 # the image has been uploaded already to deis-registry
38 if deis_registry:
39 repo = "{}/{}".format(self.registry, src_name)
40 else:
41 repo = src_name
42 self.pull(repo, src_tag)
43
44 # tag the image locally without the repository URL
45 image = "{}:{}".format(repo, src_tag)
46 self.tag(image, src_name, tag=src_tag)
47
48 # build a Docker image that adds a "last-mile" layer of environment
49 config.update({'DEIS_APP': name, 'DEIS_RELEASE': tag})
50 self.build(source, config, name, tag)
51
52 # push the image to deis-registry
53 self.push("{}/{}".format(self.registry, name), tag)
54
55 def build(self, source, config, repo, tag):
56 """Add a "last-mile" layer of environment config to a Docker image for deis-registry."""
57 check_blacklist(repo)
58 env = ' '.join("{}='{}'".format(
59 k, v.encode('unicode-escape').replace("'", "\\'")) for k, v in config.viewitems())
60 dockerfile = "FROM {}\nENV {}".format(source, env)
61 f = io.BytesIO(dockerfile.encode('utf-8'))
62 target_repo = "{}/{}:{}".format(self.registry, repo, tag)
63 logger.info("Building Docker image {}".format(target_repo))
64 with SimpleFlock(self.FLOCKFILE, timeout=1200):
65 stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)
66 log_output(stream)
67
68 def pull(self, repo, tag):
69 """Pull a Docker image into the local storage graph."""
70 check_blacklist(repo)
71 logger.info("Pulling Docker image {}:{}".format(repo, tag))
72 with SimpleFlock(self.FLOCKFILE, timeout=1200):
73 stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)
74 log_output(stream)
75
76 def push(self, repo, tag):
77 """Push a local Docker image to a registry."""
78 logger.info("Pushing Docker image {}:{}".format(repo, tag))
79 stream = self.client.push(repo, tag=tag, stream=True, insecure_registry=True)
80 log_output(stream)
81
82 def tag(self, image, repo, tag):
83 """Tag a local Docker image with a new name and tag."""
84 check_blacklist(repo)
85 logger.info("Tagging Docker image {} as {}:{}".format(image, repo, tag))
86 if not self.client.tag(image, repo, tag=tag, force=True):
87 raise docker.errors.DockerException("tagging failed")
88
89
90 def check_blacklist(repo):
91 """Check a Docker repository name for collision with deis/* components."""
92 blacklisted = [ # NOTE: keep this list up to date!
93 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',
94 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',
95 'store-gateway', 'store-metadata', 'store-monitor',
96 ]
97 if any("deis/{}".format(c) in repo for c in blacklisted):
98 raise PermissionDenied("Repository name {} is not allowed".format(repo))
99
100
101 def log_output(stream):
102 """Log a stream at DEBUG level, and raise DockerException if it contains "error"."""
103 for chunk in stream:
104 logger.debug(chunk)
105 # error handling requires looking at the response body
106 if '"error"' in chunk.lower():
107 raise docker.errors.DockerException(chunk)
108
109
110 def strip_prefix(name):
111 """Strip the schema and host:port from a Docker repository name."""
112 paths = name.split('/')
113 return '/'.join(p for p in paths if p and '.' not in p and ':' not in p)
114
115
116 def publish_release(source, config, target, deis_registry):
117
118 client = DockerClient()
119 return client.publish_release(source, config, target, deis_registry)
120
[end of controller/registry/dockerclient.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/controller/registry/dockerclient.py b/controller/registry/dockerclient.py
--- a/controller/registry/dockerclient.py
+++ b/controller/registry/dockerclient.py
@@ -55,8 +55,8 @@
def build(self, source, config, repo, tag):
"""Add a "last-mile" layer of environment config to a Docker image for deis-registry."""
check_blacklist(repo)
- env = ' '.join("{}='{}'".format(
- k, v.encode('unicode-escape').replace("'", "\\'")) for k, v in config.viewitems())
+ env = ' '.join('{}="{}"'.format(
+ k, v.encode('unicode-escape').replace('"', '\\"')) for k, v in config.viewitems())
dockerfile = "FROM {}\nENV {}".format(source, env)
f = io.BytesIO(dockerfile.encode('utf-8'))
target_repo = "{}/{}:{}".format(self.registry, repo, tag)
|
{"golden_diff": "diff --git a/controller/registry/dockerclient.py b/controller/registry/dockerclient.py\n--- a/controller/registry/dockerclient.py\n+++ b/controller/registry/dockerclient.py\n@@ -55,8 +55,8 @@\n def build(self, source, config, repo, tag):\n \"\"\"Add a \"last-mile\" layer of environment config to a Docker image for deis-registry.\"\"\"\n check_blacklist(repo)\n- env = ' '.join(\"{}='{}'\".format(\n- k, v.encode('unicode-escape').replace(\"'\", \"\\\\'\")) for k, v in config.viewitems())\n+ env = ' '.join('{}=\"{}\"'.format(\n+ k, v.encode('unicode-escape').replace('\"', '\\\\\"')) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = io.BytesIO(dockerfile.encode('utf-8'))\n target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n", "issue": "[bug] Unable to start application with ' (quote) in env variable\nOur customer's bug:\n**STR**\n\n``` bash\ngit clone https://github.com/deis/example-python-flask && cd example-python-flask\ndeis create\ngit push deis master\necho \"MAILER_SUBJECT=ISP cron can't create booklet\" > .env\ndeis config:push\n```\n\n**Actual result**\nCreating config... .o.\nClient return 504 error, after ~30 min(balancer config)\n_Controller log_\n\n``` log\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Tagging Docker image 10.21.12.123:5000/forecast:git-1c89b116 as forecast:git-1c89b116\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO Building Docker image 10.21.12.123:5000/forecast:v4\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO forecast: 500 Server Error: Internal Server Error (\"Syntax error - can't find = in \"cron\". Must be of the form: name=value\")\nDec 14 03:01:10 deis-v1121-4.novalocal sh[18564]: INFO [forecast]: 500 Server Error: Internal Server Error (\"Syntax error - can't find = in \"cron\". 
Must be of the form: name=value\")\n```\n\n_deis info_\n\n``` bash\n=== forecast Application\nupdated: 2015-12-14T02:57:36UTC\nuuid: 01eacd72-1950-4ec1-a301-4212b59572b6\ncreated: 2015-12-14T02:50:35UTC\nurl: forecast.web.2gis.local\nowner: v.reyder\nid: forecast\n\n=== forecast Processes\n--- web:\nweb.1 up (v3)\nweb.1 down (v4)\n\n=== forecast Domains\n```\n\n_Actually config is set_\n\n``` bash\n$ deis config\n=== forecast Config\nMAILER_SUBJECT ISP cron can't create booklet\n```\n\n_Registry_\n\n``` bash\ns3cmd ls s3://devregistry/registry/repositories/library/forecast/\n2015-12-14 02:57 11780 s3://devregistry/registry/repositories/library/forecast/_index_images\n2015-12-14 02:56 64 s3://devregistry/registry/repositories/library/forecast/tag_git-1c89b116\n2015-12-14 02:57 64 s3://devregistry/registry/repositories/library/forecast/tag_v3\n2015-12-14 02:56 147 s3://devregistry/registry/repositories/library/forecast/taggit-1c89b116_json\n2015-12-14 02:57 147 s3://devregistry/registry/repositories/library/forecast/tagv3_json\n```\n\n**Important: Only v3 tags presents and v4 is not exist**\n\n**Expected result**\nConfig is set, application is up, and proper tags create in registry.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Support the Deis workflow by manipulating and publishing Docker images.\"\"\"\n\nfrom __future__ import unicode_literals\nimport io\nimport logging\n\nfrom django.conf import settings\nfrom rest_framework.exceptions import PermissionDenied\nfrom simpleflock import SimpleFlock\nimport docker\n\nlogger = logging.getLogger(__name__)\n\n\nclass DockerClient(object):\n \"\"\"Use the Docker API to pull, tag, build, and push images to deis-registry.\"\"\"\n\n FLOCKFILE = '/tmp/controller-pull'\n\n def __init__(self):\n self.client = docker.Client(version='auto')\n self.registry = settings.REGISTRY_HOST + ':' + str(settings.REGISTRY_PORT)\n\n def publish_release(self, source, config, target, deis_registry):\n \"\"\"Update a source Docker image with environment config and publish it to deis-registry.\"\"\"\n # get the source repository name and tag\n src_name, src_tag = docker.utils.parse_repository_tag(source)\n # get the target repository name and tag\n name, tag = docker.utils.parse_repository_tag(target)\n # strip any \"http://host.domain:port\" prefix from the target repository name,\n # since we always publish to the Deis registry\n name = strip_prefix(name)\n\n # pull the source image from the registry\n # NOTE: this relies on an implementation detail of deis-builder, that\n # the image has been uploaded already to deis-registry\n if deis_registry:\n repo = \"{}/{}\".format(self.registry, src_name)\n else:\n repo = src_name\n self.pull(repo, src_tag)\n\n # tag the image locally without the repository URL\n image = \"{}:{}\".format(repo, src_tag)\n self.tag(image, src_name, tag=src_tag)\n\n # build a Docker image that adds a \"last-mile\" layer of environment\n config.update({'DEIS_APP': name, 'DEIS_RELEASE': tag})\n self.build(source, config, name, tag)\n\n # push the image to deis-registry\n self.push(\"{}/{}\".format(self.registry, name), tag)\n\n def build(self, source, config, repo, tag):\n \"\"\"Add a \"last-mile\" layer of environment config to a Docker image for deis-registry.\"\"\"\n check_blacklist(repo)\n env = ' '.join(\"{}='{}'\".format(\n k, v.encode('unicode-escape').replace(\"'\", \"\\\\'\")) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = io.BytesIO(dockerfile.encode('utf-8'))\n 
target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n logger.info(\"Building Docker image {}\".format(target_repo))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)\n log_output(stream)\n\n def pull(self, repo, tag):\n \"\"\"Pull a Docker image into the local storage graph.\"\"\"\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)\n\n def push(self, repo, tag):\n \"\"\"Push a local Docker image to a registry.\"\"\"\n logger.info(\"Pushing Docker image {}:{}\".format(repo, tag))\n stream = self.client.push(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)\n\n def tag(self, image, repo, tag):\n \"\"\"Tag a local Docker image with a new name and tag.\"\"\"\n check_blacklist(repo)\n logger.info(\"Tagging Docker image {} as {}:{}\".format(image, repo, tag))\n if not self.client.tag(image, repo, tag=tag, force=True):\n raise docker.errors.DockerException(\"tagging failed\")\n\n\ndef check_blacklist(repo):\n \"\"\"Check a Docker repository name for collision with deis/* components.\"\"\"\n blacklisted = [ # NOTE: keep this list up to date!\n 'builder', 'cache', 'controller', 'database', 'logger', 'logspout',\n 'publisher', 'registry', 'router', 'store-admin', 'store-daemon',\n 'store-gateway', 'store-metadata', 'store-monitor',\n ]\n if any(\"deis/{}\".format(c) in repo for c in blacklisted):\n raise PermissionDenied(\"Repository name {} is not allowed\".format(repo))\n\n\ndef log_output(stream):\n \"\"\"Log a stream at DEBUG level, and raise DockerException if it contains \"error\".\"\"\"\n for chunk in stream:\n logger.debug(chunk)\n # error handling requires looking at the response body\n if '\"error\"' in chunk.lower():\n raise docker.errors.DockerException(chunk)\n\n\ndef strip_prefix(name):\n \"\"\"Strip the schema and host:port from a Docker repository name.\"\"\"\n paths = name.split('/')\n return '/'.join(p for p in paths if p and '.' not in p and ':' not in p)\n\n\ndef publish_release(source, config, target, deis_registry):\n\n client = DockerClient()\n return client.publish_release(source, config, target, deis_registry)\n", "path": "controller/registry/dockerclient.py"}]}
| 2,775 | 215 |
gh_patches_debug_6402
|
rasdani/github-patches
|
git_diff
|
engnadeau__pybotics-425
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Strip links/badges/images from README for PyPi
- While the new PyPi Warehouse supports markdown, images/badges/links are slow to load and will not work if they use relative paths (e.g., https://test.pypi.org/project/pybotics/201803222157/#description)
- These elements should be stripped from the README prior to upload
</issue>
<code>
[start of setup.py]
1 """Setup module."""
2 import logging
3 from pathlib import Path
4
5 from setuptools import find_packages, setup # type: ignore
6
7
8 def main() -> None:
9 """Run setup."""
10 # run setup
11 setup(name='pybotics',
12 packages=find_packages(include=['pybotics']),
13 url='https://github.com/nnadeau/pybotics',
14 license='MIT',
15 author='Nicholas Nadeau',
16 author_email='[email protected]',
17 description='Python Toolbox for Robotics',
18 long_description=get_readme(),
19 long_description_content_type='text/markdown',
20 use_scm_version=True,
21 setup_requires=['setuptools_scm'],
22 install_requires=get_requirements(), # type: ignore
23 tests_require=['pytest'],
24 classifiers=[
25 'Development Status :: 4 - Beta',
26 'Intended Audience :: Developers',
27 'Intended Audience :: Education',
28 'Intended Audience :: End Users/Desktop',
29 'Intended Audience :: Manufacturing',
30 'Intended Audience :: Science/Research',
31 'Topic :: Education',
32 'Topic :: Scientific/Engineering',
33 'Topic :: Scientific/Engineering :: Artificial Intelligence',
34 'Topic :: Scientific/Engineering :: Human Machine Interfaces',
35 'Topic :: Scientific/Engineering :: Mathematics',
36 'Topic :: Scientific/Engineering :: Physics',
37 'Topic :: Utilities',
38 'License :: OSI Approved :: MIT License',
39 'Programming Language :: Python :: 3 :: Only',
40 'Programming Language :: Python :: 3',
41 'Programming Language :: Python :: 3.4',
42 'Programming Language :: Python :: 3.5',
43 'Programming Language :: Python :: 3.6',
44 ],
45 keywords='python robot robotics research '
46 'automation kinematics geometry')
47
48
49 def get_readme() -> str:
50 """Get README text."""
51 # description
52 readme_path = Path(__file__).parent / 'README.md'
53 logging.info('README path: {}'.format(readme_path.resolve()))
54 with open(str(readme_path)) as f:
55 readme = f.read()
56 return readme
57
58
59 # don't want to import typing... so ignore
60 def get_requirements(): # type: ignore
61 """Get requirements list."""
62 # requirements
63 requirements_path = Path(__file__).parent / 'requirements.txt'
64 logging.info('Requirements path: {}'.format(requirements_path.resolve()))
65 with open(str(requirements_path)) as f:
66 requirements = f.read().splitlines()
67 for i, req in enumerate(requirements):
68 requirements[i] = req.split()[0]
69 return requirements
70
71
72 if __name__ == '__main__':
73 logging.basicConfig(level=logging.INFO)
74 main()
75
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
long_description=get_readme(),
long_description_content_type='text/markdown',
use_scm_version=True,
- setup_requires=['setuptools_scm'],
+ setup_requires=['setuptools', 'setuptools_scm'],
install_requires=get_requirements(), # type: ignore
tests_require=['pytest'],
classifiers=[
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n- setup_requires=['setuptools_scm'],\n+ setup_requires=['setuptools', 'setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n", "issue": "Strip links/badges/images from README for PyPi\n- While the new PyPi Warehouse supports markdown, images/badges/links are slow to load and will not work if they use relative paths (e.g., https://test.pypi.org/project/pybotics/201803222157/#description)\r\n- These elements should be stripped from the README prior to upload\n", "before_files": [{"content": "\"\"\"Setup module.\"\"\"\nimport logging\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup # type: ignore\n\n\ndef main() -> None:\n \"\"\"Run setup.\"\"\"\n # run setup\n setup(name='pybotics',\n packages=find_packages(include=['pybotics']),\n url='https://github.com/nnadeau/pybotics',\n license='MIT',\n author='Nicholas Nadeau',\n author_email='[email protected]',\n description='Python Toolbox for Robotics',\n long_description=get_readme(),\n long_description_content_type='text/markdown',\n use_scm_version=True,\n setup_requires=['setuptools_scm'],\n install_requires=get_requirements(), # type: ignore\n tests_require=['pytest'],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: Manufacturing',\n 'Intended Audience :: Science/Research',\n 'Topic :: Education',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Human Machine Interfaces',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='python robot robotics research '\n 'automation kinematics geometry')\n\n\ndef get_readme() -> str:\n \"\"\"Get README text.\"\"\"\n # description\n readme_path = Path(__file__).parent / 'README.md'\n logging.info('README path: {}'.format(readme_path.resolve()))\n with open(str(readme_path)) as f:\n readme = f.read()\n return readme\n\n\n# don't want to import typing... so ignore\ndef get_requirements(): # type: ignore\n \"\"\"Get requirements list.\"\"\"\n # requirements\n requirements_path = Path(__file__).parent / 'requirements.txt'\n logging.info('Requirements path: {}'.format(requirements_path.resolve()))\n with open(str(requirements_path)) as f:\n requirements = f.read().splitlines()\n for i, req in enumerate(requirements):\n requirements[i] = req.split()[0]\n return requirements\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n main()\n", "path": "setup.py"}]}
| 1,316 | 101 |
gh_patches_debug_57184
|
rasdani/github-patches
|
git_diff
|
beeware__toga-410
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Menu items broken on Cocoa backend: fails with object has no attribute '_menu_items'
I believe this is a regression from 1d41d3833eb4b8785faf8eb7850f3feec4650350 / #373. The changes `appDelegate.interface` from the `App` instance to `App.interface`. Unfortunately, selecting a menu item still expects `appDelegate.interface` to be the `App` instance, see below where it calls `self.interface._menu_items`:
https://github.com/pybee/toga/blob/ad91f1a65a109b670256028e31c887dc18a4876d/src/cocoa/toga_cocoa/app.py#L71-L75
## Expected Behavior
The action successfully runs.
## Current Behavior
The action doesn't run, you get an error in the terminal:
```
Traceback (most recent call last):
File "_ctypes/callbacks.c", line 234, in 'calling callback function'
File "/Users/pcloke/.virtualenvs/toga/lib/python3.6/site-packages/rubicon/objc/runtime.py", line 1033, in _objc_method
result = f(py_self, *args)
File "/Users/pcloke/toga/src/cocoa/toga_cocoa/app.py", line 73, in selectMenuItem_
cmd = self.interface._menu_items[sender]
AttributeError: 'TogaDemo' object has no attribute '_menu_items'
```
## Steps to reproduce
1. Run the toga demo app (`cd toga && python -m toga_demo`)
2. Click on one of the "Commands" > "Action 1"
## Your Environment
* Python 3.6.
* macOS High Sierra
* Toga Target: cocoa
</issue>
<code>
[start of src/cocoa/toga_cocoa/app.py]
1 import asyncio
2 import os
3 import sys
4
5 import toga
6 from rubicon.objc.eventloop import EventLoopPolicy, CocoaLifecycle
7
8 from .libs import *
9 from .window import Window
10
11
12 class MainWindow(Window):
13 def on_close(self):
14 self.interface.app.exit()
15
16
17 class AppDelegate(NSObject):
18 @objc_method
19 def applicationDidFinishLaunching_(self, notification):
20 self.native.activateIgnoringOtherApps(True)
21
22 @objc_method
23 def applicationOpenUntitledFile_(self, sender) -> bool:
24 # FIXME This should be all we need; but for some reason, application types
25 # aren't being registered correctly..
26 # NSDocumentController.sharedDocumentController().openDocument_(None)
27
28 # ...so we do this instead.
29 panel = NSOpenPanel.openPanel()
30 # print("Open documents of type", NSDocumentController.sharedDocumentController().defaultType)
31
32 fileTypes = NSMutableArray.alloc().init()
33 for filetype in self.interface.document_types:
34 fileTypes.addObject(filetype)
35
36 NSDocumentController.sharedDocumentController.runModalOpenPanel(panel, forTypes=fileTypes)
37
38 # print("Untitled File opened?", panel.URLs)
39 self.application_openFiles_(None, panel.URLs)
40
41 return True
42
43 @objc_method
44 def addDocument_(self, document) -> None:
45 # print("Add Document", document)
46 super().addDocument_(document)
47
48 @objc_method
49 def applicationShouldOpenUntitledFile_(self, sender) -> bool:
50 return True
51
52 @objc_method
53 def application_openFiles_(self, app, filenames) -> None:
54 for i in range(0, len(filenames)):
55 filename = filenames.objectAtIndex(i)
56 if isinstance(filename, str):
57 fileURL = NSURL.fileURLWithPath(filename)
58
59 elif filename.objc_class.name == 'NSURL':
60 # This case only exists because we aren't using the
61 # DocumentController to display the file open dialog.
62 # If we were, *all* filenames passed in would be
63 # string paths.
64 fileURL = filename
65 else:
66 return
67
68 self.interface.open_document(fileURL.absoluteString)
69 # NSDocumentController.sharedDocumentController().openDocumentWithContentsOfURL_display_completionHandler_(fileURL, True, None)
70
71 @objc_method
72 def selectMenuItem_(self, sender) -> None:
73 cmd = self.interface._menu_items[sender]
74 if cmd.action:
75 cmd.action(None)
76
77
78 class App:
79 _MAIN_WINDOW_CLASS = MainWindow
80
81 def __init__(self, interface):
82 self.interface = interface
83 self.interface._impl = self
84
85 asyncio.set_event_loop_policy(EventLoopPolicy())
86 self.loop = asyncio.get_event_loop()
87
88 def create(self):
89 self.native = NSApplication.sharedApplication
90 self.native.setActivationPolicy(NSApplicationActivationPolicyRegular)
91
92 self.native.setApplicationIconImage_(self.interface.icon.bind(self.interface.factory).native)
93
94 self.resource_path = os.path.dirname(os.path.dirname(NSBundle.mainBundle.bundlePath))
95
96 appDelegate = AppDelegate.alloc().init()
97 appDelegate.interface = self.interface
98 appDelegate.native = self.native
99 self.native.setDelegate_(appDelegate)
100
101 app_name = self.interface.name
102
103 self.interface.commands.add(
104 toga.Command(None, 'About ' + app_name, group=toga.Group.APP),
105 toga.Command(None, 'Preferences', group=toga.Group.APP),
106 # Quit should always be the last item, in a section on it's own
107 toga.Command(lambda s: self.exit(), 'Quit ' + app_name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),
108
109 toga.Command(None, 'Visit homepage', group=toga.Group.HELP)
110 )
111
112 # Call user code to populate the main window
113 self.interface.startup()
114
115 # Create the lookup table of menu items,
116 # then force the creation of the menus.
117 self._menu_items = {}
118 self.create_menus()
119
120 def open_document(self, fileURL):
121 '''Add a new document to this app.'''
122 print("STUB: If you want to handle opening documents, implement App.open_document(fileURL)")
123
124 def create_menus(self):
125 # Only create the menu if the menu item index has been created.
126 if hasattr(self, '_menu_items'):
127 self._menu_items = {}
128 menubar = NSMenu.alloc().initWithTitle('MainMenu')
129 submenu = None
130 for cmd in self.interface.commands:
131 if cmd == toga.GROUP_BREAK:
132 menubar.setSubmenu(submenu, forItem=menuItem)
133 submenu = None
134 elif cmd == toga.SECTION_BREAK:
135 submenu.addItem_(NSMenuItem.separatorItem())
136 else:
137 if submenu is None:
138 menuItem = menubar.addItemWithTitle(cmd.group.label, action=None, keyEquivalent='')
139 submenu = NSMenu.alloc().initWithTitle(cmd.group.label)
140 submenu.setAutoenablesItems(False)
141
142 item = NSMenuItem.alloc().initWithTitle(
143 cmd.label,
144 action=SEL('selectMenuItem:'),
145 keyEquivalent=cmd.shortcut if cmd.shortcut else ''
146 )
147
148 cmd._widgets.append(item)
149 self._menu_items[item] = cmd
150
151 # This line may appear redundant, but it triggers the logic
152 # to force the enabled status on the underlying widgets.
153 cmd.enabled = cmd.enabled
154 submenu.addItem(item)
155
156 if submenu:
157 menubar.setSubmenu(submenu, forItem=menuItem)
158
159 # Set the menu for the app.
160 self.native.mainMenu = menubar
161
162 def main_loop(self):
163 # Stimulate the build of the app
164 self.create()
165
166 self.loop.run_forever(lifecycle=CocoaLifecycle(self.native))
167
168 def exit(self):
169 self.native.terminate(None)
170
[end of src/cocoa/toga_cocoa/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/cocoa/toga_cocoa/app.py b/src/cocoa/toga_cocoa/app.py
--- a/src/cocoa/toga_cocoa/app.py
+++ b/src/cocoa/toga_cocoa/app.py
@@ -70,7 +70,7 @@
@objc_method
def selectMenuItem_(self, sender) -> None:
- cmd = self.interface._menu_items[sender]
+ cmd = self.interface._impl._menu_items[sender]
if cmd.action:
cmd.action(None)
|
{"golden_diff": "diff --git a/src/cocoa/toga_cocoa/app.py b/src/cocoa/toga_cocoa/app.py\n--- a/src/cocoa/toga_cocoa/app.py\n+++ b/src/cocoa/toga_cocoa/app.py\n@@ -70,7 +70,7 @@\n \n @objc_method\n def selectMenuItem_(self, sender) -> None:\n- cmd = self.interface._menu_items[sender]\n+ cmd = self.interface._impl._menu_items[sender]\n if cmd.action:\n cmd.action(None)\n", "issue": "Menu items broken on Cocoa backend: fails with object has no attribute '_menu_items'\nI believe this is a regression from 1d41d3833eb4b8785faf8eb7850f3feec4650350 / #373. The changes `appDelegate.interface` from the `App` instance to `App.interface`. Unfortunately, selecting a menu item still expects `appDelegate.interface` to be the `App` instance, see below where it calls `self.interface._menu_items`:\r\n\r\nhttps://github.com/pybee/toga/blob/ad91f1a65a109b670256028e31c887dc18a4876d/src/cocoa/toga_cocoa/app.py#L71-L75\r\n\r\n## Expected Behavior\r\nThe action successfully runs.\r\n\r\n## Current Behavior\r\nThe action doesn't run, you get an error in the terminal:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"_ctypes/callbacks.c\", line 234, in 'calling callback function'\r\n File \"/Users/pcloke/.virtualenvs/toga/lib/python3.6/site-packages/rubicon/objc/runtime.py\", line 1033, in _objc_method\r\n result = f(py_self, *args)\r\n File \"/Users/pcloke/toga/src/cocoa/toga_cocoa/app.py\", line 73, in selectMenuItem_\r\n cmd = self.interface._menu_items[sender]\r\nAttributeError: 'TogaDemo' object has no attribute '_menu_items'\r\n```\r\n\r\n## Steps to reproduce\r\n\r\n1. Run the toga demo app (`cd toga && python -m toga_demo`)\r\n2. Click on one of the \"Commands\" > \"Action 1\"\r\n\r\n## Your Environment\r\n\r\n* Python 3.6.\r\n* macOS High Sierra\r\n* Toga Target: cocoa\n", "before_files": [{"content": "import asyncio\nimport os\nimport sys\n\nimport toga\nfrom rubicon.objc.eventloop import EventLoopPolicy, CocoaLifecycle\n\nfrom .libs import *\nfrom .window import Window\n\n\nclass MainWindow(Window):\n def on_close(self):\n self.interface.app.exit()\n\n\nclass AppDelegate(NSObject):\n @objc_method\n def applicationDidFinishLaunching_(self, notification):\n self.native.activateIgnoringOtherApps(True)\n\n @objc_method\n def applicationOpenUntitledFile_(self, sender) -> bool:\n # FIXME This should be all we need; but for some reason, application types\n # aren't being registered correctly..\n # NSDocumentController.sharedDocumentController().openDocument_(None)\n\n # ...so we do this instead.\n panel = NSOpenPanel.openPanel()\n # print(\"Open documents of type\", NSDocumentController.sharedDocumentController().defaultType)\n\n fileTypes = NSMutableArray.alloc().init()\n for filetype in self.interface.document_types:\n fileTypes.addObject(filetype)\n\n NSDocumentController.sharedDocumentController.runModalOpenPanel(panel, forTypes=fileTypes)\n\n # print(\"Untitled File opened?\", panel.URLs)\n self.application_openFiles_(None, panel.URLs)\n\n return True\n\n @objc_method\n def addDocument_(self, document) -> None:\n # print(\"Add Document\", document)\n super().addDocument_(document)\n\n @objc_method\n def applicationShouldOpenUntitledFile_(self, sender) -> bool:\n return True\n\n @objc_method\n def application_openFiles_(self, app, filenames) -> None:\n for i in range(0, len(filenames)):\n filename = filenames.objectAtIndex(i)\n if isinstance(filename, str):\n fileURL = NSURL.fileURLWithPath(filename)\n\n elif filename.objc_class.name == 'NSURL':\n # This case only exists because we aren't using 
the\n # DocumentController to display the file open dialog.\n # If we were, *all* filenames passed in would be\n # string paths.\n fileURL = filename\n else:\n return\n\n self.interface.open_document(fileURL.absoluteString)\n # NSDocumentController.sharedDocumentController().openDocumentWithContentsOfURL_display_completionHandler_(fileURL, True, None)\n\n @objc_method\n def selectMenuItem_(self, sender) -> None:\n cmd = self.interface._menu_items[sender]\n if cmd.action:\n cmd.action(None)\n\n\nclass App:\n _MAIN_WINDOW_CLASS = MainWindow\n\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n\n asyncio.set_event_loop_policy(EventLoopPolicy())\n self.loop = asyncio.get_event_loop()\n\n def create(self):\n self.native = NSApplication.sharedApplication\n self.native.setActivationPolicy(NSApplicationActivationPolicyRegular)\n\n self.native.setApplicationIconImage_(self.interface.icon.bind(self.interface.factory).native)\n\n self.resource_path = os.path.dirname(os.path.dirname(NSBundle.mainBundle.bundlePath))\n\n appDelegate = AppDelegate.alloc().init()\n appDelegate.interface = self.interface\n appDelegate.native = self.native\n self.native.setDelegate_(appDelegate)\n\n app_name = self.interface.name\n\n self.interface.commands.add(\n toga.Command(None, 'About ' + app_name, group=toga.Group.APP),\n toga.Command(None, 'Preferences', group=toga.Group.APP),\n # Quit should always be the last item, in a section on it's own\n toga.Command(lambda s: self.exit(), 'Quit ' + app_name, shortcut='q', group=toga.Group.APP, section=sys.maxsize),\n\n toga.Command(None, 'Visit homepage', group=toga.Group.HELP)\n )\n\n # Call user code to populate the main window\n self.interface.startup()\n\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self._menu_items = {}\n self.create_menus()\n\n def open_document(self, fileURL):\n '''Add a new document to this app.'''\n print(\"STUB: If you want to handle opening documents, implement App.open_document(fileURL)\")\n\n def create_menus(self):\n # Only create the menu if the menu item index has been created.\n if hasattr(self, '_menu_items'):\n self._menu_items = {}\n menubar = NSMenu.alloc().initWithTitle('MainMenu')\n submenu = None\n for cmd in self.interface.commands:\n if cmd == toga.GROUP_BREAK:\n menubar.setSubmenu(submenu, forItem=menuItem)\n submenu = None\n elif cmd == toga.SECTION_BREAK:\n submenu.addItem_(NSMenuItem.separatorItem())\n else:\n if submenu is None:\n menuItem = menubar.addItemWithTitle(cmd.group.label, action=None, keyEquivalent='')\n submenu = NSMenu.alloc().initWithTitle(cmd.group.label)\n submenu.setAutoenablesItems(False)\n\n item = NSMenuItem.alloc().initWithTitle(\n cmd.label,\n action=SEL('selectMenuItem:'),\n keyEquivalent=cmd.shortcut if cmd.shortcut else ''\n )\n\n cmd._widgets.append(item)\n self._menu_items[item] = cmd\n\n # This line may appear redundant, but it triggers the logic\n # to force the enabled status on the underlying widgets.\n cmd.enabled = cmd.enabled\n submenu.addItem(item)\n\n if submenu:\n menubar.setSubmenu(submenu, forItem=menuItem)\n\n # Set the menu for the app.\n self.native.mainMenu = menubar\n\n def main_loop(self):\n # Stimulate the build of the app\n self.create()\n\n self.loop.run_forever(lifecycle=CocoaLifecycle(self.native))\n\n def exit(self):\n self.native.terminate(None)\n", "path": "src/cocoa/toga_cocoa/app.py"}]}
| 2,591 | 114 |
gh_patches_debug_32678
|
rasdani/github-patches
|
git_diff
|
raspiblitz__raspiblitz-1227
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Move BlitzTUI log to RAM disk
The RAM disk (`/var/cache/raspiblitz/`) is a nice way to avoid reads and writes to either the SD card or the external disk for non-persistent data. Several things can be moved; the BlitzTUI log should be a prime example.
</issue>
<code>
[start of home.admin/BlitzTUI/blitztui/version.py]
1 """ Store the version here so:
2 # 1) we don't load dependencies by storing it in __init__.py
3 # 2) we can import it in setup.py for the same reason
4 # 3) we can import it into your module module
5 """
6
7 __version_info__ = ('0', '47', '0')
8 __version__ = '.'.join(__version_info__)
9
[end of home.admin/BlitzTUI/blitztui/version.py]
[start of home.admin/BlitzTUI/blitztui/file_logger.py]
1 import json
2 import logging
3 import logging.config
4 import os
5 import sys
6
7 IS_WIN32_ENV = sys.platform == "win32"
8
9
10 def setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level="INFO"):
11 """Setup logging configuration"""
12 path = default_path
13 if os.path.exists(path):
14 with open(path, 'rt') as f:
15 config = json.load(f)
16 logging.config.dictConfig(config)
17
18 else: # if $default_path does not exist use the following default log setup
19
20 if IS_WIN32_ENV:
21 log_file = "blitz-tui.log"
22 else:
23 log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
24
25 default_config_as_dict = dict(
26 version=1,
27 disable_existing_loggers=False,
28 formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},
29 'extended': {
30 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},
31 handlers={'console': {'class': 'logging.StreamHandler',
32 'level': 'INFO',
33 'formatter': 'extended',
34 'stream': 'ext://sys.stdout'},
35 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',
36 'level': log_level,
37 'formatter': 'extended',
38 'filename': log_file,
39 'maxBytes': 10485760,
40 'backupCount': 0,
41 'encoding': 'utf8'}},
42 loggers={'infoblitz': {'level': 'DEBUG',
43 'handlers': ['console', 'file_handler'],
44 'propagate': 'no'}},
45 root={'level': 'DEBUG', 'handlers': ['console', 'file_handler']}
46 )
47
48 logging.config.dictConfig(default_config_as_dict)
49
[end of home.admin/BlitzTUI/blitztui/file_logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/home.admin/BlitzTUI/blitztui/file_logger.py b/home.admin/BlitzTUI/blitztui/file_logger.py
--- a/home.admin/BlitzTUI/blitztui/file_logger.py
+++ b/home.admin/BlitzTUI/blitztui/file_logger.py
@@ -1,3 +1,4 @@
+import getpass
import json
import logging
import logging.config
@@ -20,7 +21,14 @@
if IS_WIN32_ENV:
log_file = "blitz-tui.log"
else:
- log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
+ if os.path.isdir('/var/cache/raspiblitz'):
+ try:
+ os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))
+ except FileExistsError:
+ pass
+ log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))
+ else:
+ log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))
default_config_as_dict = dict(
version=1,
@@ -36,8 +44,8 @@
'level': log_level,
'formatter': 'extended',
'filename': log_file,
- 'maxBytes': 10485760,
- 'backupCount': 0,
+ 'maxBytes': 2*1024*1024, # 2 MB
+ 'backupCount': 1,
'encoding': 'utf8'}},
loggers={'infoblitz': {'level': 'DEBUG',
'handlers': ['console', 'file_handler'],
diff --git a/home.admin/BlitzTUI/blitztui/version.py b/home.admin/BlitzTUI/blitztui/version.py
--- a/home.admin/BlitzTUI/blitztui/version.py
+++ b/home.admin/BlitzTUI/blitztui/version.py
@@ -4,5 +4,5 @@
# 3) we can import it into your module module
"""
-__version_info__ = ('0', '47', '0')
+__version_info__ = ('0', '48', '1')
__version__ = '.'.join(__version_info__)
|
{"golden_diff": "diff --git a/home.admin/BlitzTUI/blitztui/file_logger.py b/home.admin/BlitzTUI/blitztui/file_logger.py\n--- a/home.admin/BlitzTUI/blitztui/file_logger.py\n+++ b/home.admin/BlitzTUI/blitztui/file_logger.py\n@@ -1,3 +1,4 @@\n+import getpass\n import json\n import logging\n import logging.config\n@@ -20,7 +21,14 @@\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n- log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n+ if os.path.isdir('/var/cache/raspiblitz'):\n+ try:\n+ os.mkdir('/var/cache/raspiblitz/{}'.format(getpass.getuser()))\n+ except FileExistsError:\n+ pass\n+ log_file = os.path.abspath('/var/cache/raspiblitz/{}/blitz-tui.log'.format(getpass.getuser()))\n+ else:\n+ log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n \n default_config_as_dict = dict(\n version=1,\n@@ -36,8 +44,8 @@\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n- 'maxBytes': 10485760,\n- 'backupCount': 0,\n+ 'maxBytes': 2*1024*1024, # 2 MB\n+ 'backupCount': 1,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\ndiff --git a/home.admin/BlitzTUI/blitztui/version.py b/home.admin/BlitzTUI/blitztui/version.py\n--- a/home.admin/BlitzTUI/blitztui/version.py\n+++ b/home.admin/BlitzTUI/blitztui/version.py\n@@ -4,5 +4,5 @@\n # 3) we can import it into your module module\n \"\"\"\n \n-__version_info__ = ('0', '47', '0')\n+__version_info__ = ('0', '48', '1')\n __version__ = '.'.join(__version_info__)\n", "issue": "Move BlitzTUI log to RAM disk\nThe RAM disk (`/var/cache/raspiblitz/`) is a nice way to avoid reads and writes to either the SD card or the external disk for non-persistent data. Several things can be moved; the BlitzTUI log should be a prime example.\n", "before_files": [{"content": "\"\"\" Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module module\n\"\"\"\n\n__version_info__ = ('0', '47', '0')\n__version__ = '.'.join(__version_info__)\n", "path": "home.admin/BlitzTUI/blitztui/version.py"}, {"content": "import json\nimport logging\nimport logging.config\nimport os\nimport sys\n\nIS_WIN32_ENV = sys.platform == \"win32\"\n\n\ndef setup_logging(default_path=os.path.abspath(os.path.expanduser('~/.blitz-tui.json')), log_level=\"INFO\"):\n \"\"\"Setup logging configuration\"\"\"\n path = default_path\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n else: # if $default_path does not exist use the following default log setup\n\n if IS_WIN32_ENV:\n log_file = \"blitz-tui.log\"\n else:\n log_file = os.path.abspath(os.path.expanduser('~/blitz-tui.log'))\n\n default_config_as_dict = dict(\n version=1,\n disable_existing_loggers=False,\n formatters={'simple': {'format': '%(asctime)s - %(levelname)s - %(message)s'},\n 'extended': {\n 'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'}},\n handlers={'console': {'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'extended',\n 'stream': 'ext://sys.stdout'},\n 'file_handler': {'class': 'logging.handlers.RotatingFileHandler',\n 'level': log_level,\n 'formatter': 'extended',\n 'filename': log_file,\n 'maxBytes': 10485760,\n 'backupCount': 0,\n 'encoding': 'utf8'}},\n loggers={'infoblitz': {'level': 'DEBUG',\n 'handlers': ['console', 'file_handler'],\n 'propagate': 'no'}},\n root={'level': 
'DEBUG', 'handlers': ['console', 'file_handler']}\n )\n\n logging.config.dictConfig(default_config_as_dict)\n", "path": "home.admin/BlitzTUI/blitztui/file_logger.py"}]}
| 1,245 | 523 |