| Column | Type | Values |
|---|---|---|
| problem_id | string | lengths 18–22 |
| source | string | 1 class |
| task_type | string | 1 class |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.71k–18.9k |
| golden_diff | string | lengths 145–5.13k |
| verification_info | string | lengths 465–23.6k |
| num_tokens_prompt | int64 | 556–4.1k |
| num_tokens_diff | int64 | 47–1.02k |
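The table above describes the nine columns of each record. Assuming the rows come from a Hugging Face Hub dataset reachable under the `source` value shown in the records below (`rasdani/github-patches`) and that it exposes a `train` split (both are assumptions; the dump itself does not say), a minimal loading sketch with the `datasets` library could look like this:

```python
# Sketch only: load the dataset and peek at one record.
# The dataset ID and the split name are assumptions taken from the dump.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["prompt"][:300])        # issue statement plus partial code base
print(row["golden_diff"][:300])   # reference patch in unified diff format
```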
problem_id: gh_patches_debug_37791
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-AI__pytorch-lightning-1378

prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Training loop temporarily hangs after every 4 steps
I am porting some of my code to pytorch lightning, and everything seems to work fine. However, for some reason after every 4 training steps I see some temporary hanging (~1 second), which is severely slowing down my overall training time. Am I missing some obvious configuration? This is my Trainer configuration:
```
trainer = pl.Trainer(
gpus=8
num_nodes=1,
distributed_backend='ddp',
checkpoint_callback=False,
max_epochs=50,
max_steps=None,
progress_bar_refresh_rate=1,
check_val_every_n_epoch=1,
val_check_interval=1.0,
gradient_clip_val=0.0,
log_save_interval=0,
num_sanity_val_steps=0,
amp_level='O0',
)
```
</issue>
<code>
[start of pytorch_lightning/trainer/data_loading.py]
1 from abc import ABC, abstractmethod
2 from typing import Union, List, Tuple, Callable
3
4 import torch.distributed as torch_distrib
5 from torch.utils.data import SequentialSampler, DataLoader
6 from torch.utils.data.distributed import DistributedSampler
7
8 from pytorch_lightning.core import LightningModule
9 from pytorch_lightning.utilities.exceptions import MisconfigurationException
10
11 try:
12 from apex import amp
13 except ImportError:
14 APEX_AVAILABLE = False
15 else:
16 APEX_AVAILABLE = True
17
18 try:
19 import torch_xla
20 import torch_xla.core.xla_model as xm
21 import torch_xla.distributed.xla_multiprocessing as xmp
22 except ImportError:
23 XLA_AVAILABLE = False
24 else:
25 XLA_AVAILABLE = True
26
27
28 def _has_len(dataloader: DataLoader) -> bool:
29 """ Checks if a given Dataloader has __len__ method implemented i.e. if
30 it is a finite dataloader or infinite dataloader """
31 try:
32 # try getting the length
33 if len(dataloader) == 0:
34 raise ValueError('Dataloader returned 0 length. Please make sure'
35 ' that your Dataloader atleast returns 1 batch')
36 return True
37 except TypeError:
38 return False
39
40
41 class TrainerDataLoadingMixin(ABC):
42
43 # this is just a summary on variables used in this abstract class,
44 # the proper values/initialisation should be done in child class
45 proc_rank: int
46 use_ddp: bool
47 use_ddp2: bool
48 shown_warnings: ...
49 val_check_interval: float
50 use_tpu: bool
51 tpu_local_core_rank: int
52 train_dataloader: DataLoader
53 num_training_batches: Union[int, float]
54 val_check_batch: ...
55 val_dataloaders: List[DataLoader]
56 num_val_batches: Union[int, float]
57 test_dataloaders: List[DataLoader]
58 num_test_batches: Union[int, float]
59 train_percent_check: float
60 val_percent_check: float
61 test_percent_check: float
62
63 @abstractmethod
64 def is_overriden(self, *args):
65 """Warning: this is just empty shell for code implemented in other class."""
66
67 def _percent_range_check(self, name: str) -> None:
68 value = getattr(self, name)
69 msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'
70 if name == 'val_check_interval':
71 msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'
72
73 if not 0. <= value <= 1.:
74 raise ValueError(msg)
75
76 def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:
77
78 # don't do anything if it's not a dataloader
79 if not isinstance(dataloader, DataLoader):
80 return dataloader
81
82 need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu
83 no_sampler_added = dataloader.sampler is None
84
85 if need_dist_sampler and no_sampler_added:
86
87 skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']
88
89 dl_args = {
90 k: v for k, v in dataloader.__dict__.items() if not k.startswith('_') and k not in skip_keys
91 }
92
93 if self.use_tpu:
94 sampler = DistributedSampler(
95 dataloader.dataset,
96 num_replicas=xm.xrt_world_size(),
97 rank=xm.get_ordinal()
98 )
99 else:
100 sampler = DistributedSampler(dataloader.dataset)
101
102 dl_args['sampler'] = sampler
103 dataloader = type(dataloader)(**dl_args)
104
105 return dataloader
106
107 def reset_train_dataloader(self, model: LightningModule) -> None:
108 """Resets the train dataloader and initialises required variables
109 (number of batches, when to validate, etc.).
110
111 Args:
112 model: The current `LightningModule`
113 """
114 self.train_dataloader = self.request_dataloader(model.train_dataloader)
115 self.num_training_batches = 0
116
117 # automatically add samplers
118 self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)
119
120 self._percent_range_check('train_percent_check')
121
122 if not _has_len(self.train_dataloader):
123 self.num_training_batches = float('inf')
124 else:
125 # try getting the length
126 self.num_training_batches = len(self.train_dataloader)
127 self.num_training_batches = int(self.num_training_batches * self.train_percent_check)
128
129 # determine when to check validation
130 # if int passed in, val checks that often
131 # otherwise, it checks in [0, 1.0] % range of a training epoch
132 if isinstance(self.val_check_interval, int):
133 self.val_check_batch = self.val_check_interval
134 if self.val_check_batch > self.num_training_batches:
135 raise ValueError(
136 f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '
137 f'to the number of the training batches ({self.num_training_batches}). '
138 'If you want to disable validation set `val_percent_check` to 0.0 instead.')
139 else:
140 if not _has_len(self.train_dataloader):
141 if self.val_check_interval == 1.0:
142 self.val_check_batch = float('inf')
143 else:
144 raise MisconfigurationException(
145 'When using an infinite DataLoader (e.g. with an IterableDataset or when '
146 'DataLoader does not implement `__len__`) for `train_dataloader`, '
147 '`Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies '
148 'checking validation every k training batches.')
149 else:
150 self._percent_range_check('val_check_interval')
151
152 self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
153 self.val_check_batch = max(1, self.val_check_batch)
154
155 def _reset_eval_dataloader(self, model: LightningModule,
156 mode: str) -> Tuple[int, List[DataLoader]]:
157 """Generic method to reset a dataloader for evaluation.
158
159 Args:
160 model: The current `LightningModule`
161 mode: Either `'val'` or `'test'`
162
163 Returns:
164 Tuple (num_batches, dataloaders)
165 """
166 dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))
167
168 if not isinstance(dataloaders, list):
169 dataloaders = [dataloaders]
170
171 # add samplers
172 dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]
173
174 num_batches = 0
175
176 # determine number of batches
177 # datasets could be none, 1 or 2+
178 if len(dataloaders) != 0:
179 for dataloader in dataloaders:
180 if not _has_len(dataloader):
181 num_batches = float('inf')
182 break
183
184 percent_check = getattr(self, f'{mode}_percent_check')
185
186 if num_batches != float('inf'):
187 self._percent_range_check(f'{mode}_percent_check')
188
189 num_batches = sum(len(dataloader) for dataloader in dataloaders)
190 num_batches = int(num_batches * percent_check)
191 elif percent_check not in (0.0, 1.0):
192 raise MisconfigurationException(
193 'When using an infinite DataLoader (e.g. with an IterableDataset or when '
194 f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '
195 f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')
196 return num_batches, dataloaders
197
198 def reset_val_dataloader(self, model: LightningModule) -> None:
199 """Resets the validation dataloader and determines the number of batches.
200
201 Args:
202 model: The current `LightningModule`
203 """
204 if self.is_overriden('validation_step'):
205 self.num_val_batches, self.val_dataloaders =\
206 self._reset_eval_dataloader(model, 'val')
207
208 def reset_test_dataloader(self, model) -> None:
209 """Resets the validation dataloader and determines the number of batches.
210
211 Args:
212 model: The current `LightningModule`
213 """
214 if self.is_overriden('test_step'):
215 self.num_test_batches, self.test_dataloaders =\
216 self._reset_eval_dataloader(model, 'test')
217
218 def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:
219 """Handles downloading data in the GPU or TPU case.
220
221 Args:
222 dataloader_fx: The bound dataloader getter
223
224 Returns:
225 The dataloader
226 """
227 dataloader = dataloader_fx()
228
229 # get the function we'll use to get data
230 if self.use_ddp or self.use_ddp2:
231 # all processes wait until data download has happened
232 torch_distrib.barrier()
233
234 # data download/load on TPU
235 elif self.use_tpu and XLA_AVAILABLE:
236 # all processes wait until data download has happened
237 torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')
238
239 return dataloader
240
241 def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,
242 test_percent_check: float, overfit_pct: float) -> None:
243 """Use less data for debugging purposes
244 """
245 self.train_percent_check = train_percent_check
246 self.val_percent_check = val_percent_check
247 self.test_percent_check = test_percent_check
248 if overfit_pct > 0:
249 if overfit_pct > 1:
250 raise ValueError(
251 f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')
252
253 self.train_percent_check = overfit_pct
254 self.val_percent_check = overfit_pct
255 self.test_percent_check = overfit_pct
256
[end of pytorch_lightning/trainer/data_loading.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py
--- a/pytorch_lightning/trainer/data_loading.py
+++ b/pytorch_lightning/trainer/data_loading.py
@@ -1,8 +1,9 @@
+import warnings
from abc import ABC, abstractmethod
from typing import Union, List, Tuple, Callable
import torch.distributed as torch_distrib
-from torch.utils.data import SequentialSampler, DataLoader
+from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.core import LightningModule
@@ -73,6 +74,12 @@
if not 0. <= value <= 1.:
raise ValueError(msg)
+ def _worker_check(self, dataloader: DataLoader, name: str) -> None:
+ if isinstance(dataloader, DataLoader) and dataloader.num_workers <= 2:
+ warnings.warn(f'The dataloader, {name}, does not have many workers which may be a bottleneck.'
+ ' Consider increasing the value of the `num_workers` argument`'
+ ' in the `DataLoader` init to improve performance.')
+
def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:
# don't do anything if it's not a dataloader
@@ -112,11 +119,13 @@
model: The current `LightningModule`
"""
self.train_dataloader = self.request_dataloader(model.train_dataloader)
+
self.num_training_batches = 0
# automatically add samplers
self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)
+ self._worker_check(self.train_dataloader, 'train dataloader')
self._percent_range_check('train_percent_check')
if not _has_len(self.train_dataloader):
@@ -176,10 +185,10 @@
# determine number of batches
# datasets could be none, 1 or 2+
if len(dataloaders) != 0:
- for dataloader in dataloaders:
+ for i, dataloader in enumerate(dataloaders):
+ self._worker_check(dataloader, f'{mode} dataloader {i}')
if not _has_len(dataloader):
num_batches = float('inf')
- break
percent_check = getattr(self, f'{mode}_percent_check')
verification_info:
{"golden_diff": "diff --git a/pytorch_lightning/trainer/data_loading.py b/pytorch_lightning/trainer/data_loading.py\n--- a/pytorch_lightning/trainer/data_loading.py\n+++ b/pytorch_lightning/trainer/data_loading.py\n@@ -1,8 +1,9 @@\n+import warnings\n from abc import ABC, abstractmethod\n from typing import Union, List, Tuple, Callable\n \n import torch.distributed as torch_distrib\n-from torch.utils.data import SequentialSampler, DataLoader\n+from torch.utils.data import DataLoader\n from torch.utils.data.distributed import DistributedSampler\n \n from pytorch_lightning.core import LightningModule\n@@ -73,6 +74,12 @@\n if not 0. <= value <= 1.:\n raise ValueError(msg)\n \n+ def _worker_check(self, dataloader: DataLoader, name: str) -> None:\n+ if isinstance(dataloader, DataLoader) and dataloader.num_workers <= 2:\n+ warnings.warn(f'The dataloader, {name}, does not have many workers which may be a bottleneck.'\n+ ' Consider increasing the value of the `num_workers` argument`'\n+ ' in the `DataLoader` init to improve performance.')\n+\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n \n # don't do anything if it's not a dataloader\n@@ -112,11 +119,13 @@\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n+\n self.num_training_batches = 0\n \n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n \n+ self._worker_check(self.train_dataloader, 'train dataloader')\n self._percent_range_check('train_percent_check')\n \n if not _has_len(self.train_dataloader):\n@@ -176,10 +185,10 @@\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n- for dataloader in dataloaders:\n+ for i, dataloader in enumerate(dataloaders):\n+ self._worker_check(dataloader, f'{mode} dataloader {i}')\n if not _has_len(dataloader):\n num_batches = float('inf')\n- break\n \n percent_check = getattr(self, f'{mode}_percent_check')\n", "issue": "Training loop temporarily hangs after every 4 steps\nI am porting some of my code to pytorch lightning, and everything seems to work fine. However, for some reason after every 4 training steps I see some temporary hanging (~1 second), which is severely slowing down my overall training time. Am I missing some obvious configuration? 
This is my Trainer configuration:\r\n\r\n```\r\n trainer = pl.Trainer(\r\n gpus=8\r\n num_nodes=1,\r\n distributed_backend='ddp',\r\n checkpoint_callback=False,\r\n max_epochs=50,\r\n max_steps=None,\r\n progress_bar_refresh_rate=1,\r\n check_val_every_n_epoch=1,\r\n val_check_interval=1.0,\r\n gradient_clip_val=0.0,\r\n log_save_interval=0,\r\n num_sanity_val_steps=0,\r\n amp_level='O0',\r\n )\r\n```\r\n\r\n\n", "before_files": [{"content": "from abc import ABC, abstractmethod\nfrom typing import Union, List, Tuple, Callable\n\nimport torch.distributed as torch_distrib\nfrom torch.utils.data import SequentialSampler, DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.core import LightningModule\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\n\ndef _has_len(dataloader: DataLoader) -> bool:\n \"\"\" Checks if a given Dataloader has __len__ method implemented i.e. if\n it is a finite dataloader or infinite dataloader \"\"\"\n try:\n # try getting the length\n if len(dataloader) == 0:\n raise ValueError('Dataloader returned 0 length. Please make sure'\n ' that your Dataloader atleast returns 1 batch')\n return True\n except TypeError:\n return False\n\n\nclass TrainerDataLoadingMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n proc_rank: int\n use_ddp: bool\n use_ddp2: bool\n shown_warnings: ...\n val_check_interval: float\n use_tpu: bool\n tpu_local_core_rank: int\n train_dataloader: DataLoader\n num_training_batches: Union[int, float]\n val_check_batch: ...\n val_dataloaders: List[DataLoader]\n num_val_batches: Union[int, float]\n test_dataloaders: List[DataLoader]\n num_test_batches: Union[int, float]\n train_percent_check: float\n val_percent_check: float\n test_percent_check: float\n\n @abstractmethod\n def is_overriden(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def _percent_range_check(self, name: str) -> None:\n value = getattr(self, name)\n msg = f'`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}.'\n if name == 'val_check_interval':\n msg += ' If you want to disable validation set `val_percent_check` to 0.0 instead.'\n\n if not 0. 
<= value <= 1.:\n raise ValueError(msg)\n\n def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:\n\n # don't do anything if it's not a dataloader\n if not isinstance(dataloader, DataLoader):\n return dataloader\n\n need_dist_sampler = self.use_ddp or self.use_ddp2 or self.use_tpu\n no_sampler_added = dataloader.sampler is None\n\n if need_dist_sampler and no_sampler_added:\n\n skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']\n\n dl_args = {\n k: v for k, v in dataloader.__dict__.items() if not k.startswith('_') and k not in skip_keys\n }\n\n if self.use_tpu:\n sampler = DistributedSampler(\n dataloader.dataset,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal()\n )\n else:\n sampler = DistributedSampler(dataloader.dataset)\n\n dl_args['sampler'] = sampler\n dataloader = type(dataloader)(**dl_args)\n\n return dataloader\n\n def reset_train_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the train dataloader and initialises required variables\n (number of batches, when to validate, etc.).\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n self.train_dataloader = self.request_dataloader(model.train_dataloader)\n self.num_training_batches = 0\n\n # automatically add samplers\n self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)\n\n self._percent_range_check('train_percent_check')\n\n if not _has_len(self.train_dataloader):\n self.num_training_batches = float('inf')\n else:\n # try getting the length\n self.num_training_batches = len(self.train_dataloader)\n self.num_training_batches = int(self.num_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n if self.val_check_batch > self.num_training_batches:\n raise ValueError(\n f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '\n f'to the number of the training batches ({self.num_training_batches}). '\n 'If you want to disable validation set `val_percent_check` to 0.0 instead.')\n else:\n if not _has_len(self.train_dataloader):\n if self.val_check_interval == 1.0:\n self.val_check_batch = float('inf')\n else:\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n 'DataLoader does not implement `__len__`) for `train_dataloader`, '\n '`Trainer(val_check_interval)` must be `1.0` or an int. 
An int k specifies '\n 'checking validation every k training batches.')\n else:\n self._percent_range_check('val_check_interval')\n\n self.val_check_batch = int(self.num_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n def _reset_eval_dataloader(self, model: LightningModule,\n mode: str) -> Tuple[int, List[DataLoader]]:\n \"\"\"Generic method to reset a dataloader for evaluation.\n\n Args:\n model: The current `LightningModule`\n mode: Either `'val'` or `'test'`\n\n Returns:\n Tuple (num_batches, dataloaders)\n \"\"\"\n dataloaders = self.request_dataloader(getattr(model, f'{mode}_dataloader'))\n\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n # add samplers\n dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl]\n\n num_batches = 0\n\n # determine number of batches\n # datasets could be none, 1 or 2+\n if len(dataloaders) != 0:\n for dataloader in dataloaders:\n if not _has_len(dataloader):\n num_batches = float('inf')\n break\n\n percent_check = getattr(self, f'{mode}_percent_check')\n\n if num_batches != float('inf'):\n self._percent_range_check(f'{mode}_percent_check')\n\n num_batches = sum(len(dataloader) for dataloader in dataloaders)\n num_batches = int(num_batches * percent_check)\n elif percent_check not in (0.0, 1.0):\n raise MisconfigurationException(\n 'When using an infinite DataLoader (e.g. with an IterableDataset or when '\n f'DataLoader does not implement `__len__`) for `{mode}_dataloader`, '\n f'`Trainer({mode}_percent_check)` must be `0.0` or `1.0`.')\n return num_batches, dataloaders\n\n def reset_val_dataloader(self, model: LightningModule) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('validation_step'):\n self.num_val_batches, self.val_dataloaders =\\\n self._reset_eval_dataloader(model, 'val')\n\n def reset_test_dataloader(self, model) -> None:\n \"\"\"Resets the validation dataloader and determines the number of batches.\n\n Args:\n model: The current `LightningModule`\n \"\"\"\n if self.is_overriden('test_step'):\n self.num_test_batches, self.test_dataloaders =\\\n self._reset_eval_dataloader(model, 'test')\n\n def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:\n \"\"\"Handles downloading data in the GPU or TPU case.\n\n Args:\n dataloader_fx: The bound dataloader getter\n\n Returns:\n The dataloader\n \"\"\"\n dataloader = dataloader_fx()\n\n # get the function we'll use to get data\n if self.use_ddp or self.use_ddp2:\n # all processes wait until data download has happened\n torch_distrib.barrier()\n\n # data download/load on TPU\n elif self.use_tpu and XLA_AVAILABLE:\n # all processes wait until data download has happened\n torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')\n\n return dataloader\n\n def determine_data_use_amount(self, train_percent_check: float, val_percent_check: float,\n test_percent_check: float, overfit_pct: float) -> None:\n \"\"\"Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n if overfit_pct > 1:\n raise ValueError(\n f'`overfit_pct` must be not greater than 1.0, but got {overfit_pct:.3f}.')\n\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = 
overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading.py"}]}
num_tokens_prompt: 3,590
num_tokens_diff: 539
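Each prompt ends by asking for a patch that can be applied with `git apply`, and the `golden_diff` column holds the reference patch for the row. The dump does not show the actual grading harness, so the following is only a hedged sketch of how a patch from either field might be applied to a local checkout (the checkout itself and the dry-run step are assumptions):

```python
# Sketch only: apply a row's patch text to a local git checkout.
# The repo location and the use of a dry run are assumptions, not part of the dump.
import os
import subprocess
import tempfile

def apply_patch(repo_dir: str, patch_text: str) -> bool:
    """Dry-run the patch with `git apply --check`, then apply it if it is clean."""
    fd, patch_path = tempfile.mkstemp(suffix=".patch")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(patch_text)
        # --check leaves the working tree untouched if the patch does not apply.
        check = subprocess.run(
            ["git", "apply", "--check", patch_path],
            cwd=repo_dir, capture_output=True, text=True,
        )
        if check.returncode != 0:
            print(check.stderr)
            return False
        subprocess.run(["git", "apply", patch_path], cwd=repo_dir, check=True)
        return True
    finally:
        os.unlink(patch_path)
```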
problem_id: gh_patches_debug_26209
source: rasdani/github-patches
task_type: git_diff
in_source_id: Cog-Creators__Red-DiscordBot-3166

prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[p]announce fails if bot belongs to team
# Command bugs
#### Command name
`announce`
#### What cog is this command from?
`Admin`
#### What were you expecting to happen?
Send announcement to all enabled servers, if failed, send message to the one of owners or all owners (like an `[p]contact`)
#### What actually happened?
announcement failed almost immediately with error in console
#### How can we reproduce this issue?
1. Set bot with token belonging to team
2. Create environment, where bot cant send announcement to server
3. Announce an message
4. `[p]announce` silently fails with error:
```py
Traceback (most recent call last):
File "/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py", line 67, in announcer
await channel.send(self.message)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py", line 823, in send
data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py", line 218, in request
raise Forbidden(r, data)
discord.errors.Forbidden: 403 FORBIDDEN (error code: 50001): Missing Access
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py", line 70, in announcer
_("I could not announce to server: {server.id}").format(server=g)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py", line 823, in send
data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py", line 218, in request
raise Forbidden(r, data)
discord.errors.Forbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user
```
Caused by https://github.com/Cog-Creators/Red-DiscordBot/blob/f0836d7182d99239d1fde24cf2231c6ebf206f72/redbot/cogs/admin/announcer.py#L56
*Kinda related to #2781, i guess*
</issue>
<code>
[start of redbot/cogs/admin/announcer.py]
1 import asyncio
2
3 import discord
4 from redbot.core import commands
5 from redbot.core.i18n import Translator
6
7 _ = Translator("Announcer", __file__)
8
9
10 class Announcer:
11 def __init__(self, ctx: commands.Context, message: str, config=None):
12 """
13 :param ctx:
14 :param message:
15 :param config: Used to determine channel overrides
16 """
17 self.ctx = ctx
18 self.message = message
19 self.config = config
20
21 self.active = None
22
23 def start(self):
24 """
25 Starts an announcement.
26 :return:
27 """
28 if self.active is None:
29 self.active = True
30 self.ctx.bot.loop.create_task(self.announcer())
31
32 def cancel(self):
33 """
34 Cancels a running announcement.
35 :return:
36 """
37 self.active = False
38
39 async def _get_announce_channel(self, guild: discord.Guild) -> discord.TextChannel:
40 channel_id = await self.config.guild(guild).announce_channel()
41 channel = None
42
43 if channel_id is not None:
44 channel = guild.get_channel(channel_id)
45
46 if channel is None:
47 channel = guild.system_channel
48
49 if channel is None:
50 channel = guild.text_channels[0]
51
52 return channel
53
54 async def announcer(self):
55 guild_list = self.ctx.bot.guilds
56 bot_owner = (await self.ctx.bot.application_info()).owner
57 for g in guild_list:
58 if not self.active:
59 return
60
61 if await self.config.guild(g).announce_ignore():
62 continue
63
64 channel = await self._get_announce_channel(g)
65
66 try:
67 await channel.send(self.message)
68 except discord.Forbidden:
69 await bot_owner.send(
70 _("I could not announce to server: {server.id}").format(server=g)
71 )
72 await asyncio.sleep(0.5)
73
74 self.active = False
75
[end of redbot/cogs/admin/announcer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/redbot/cogs/admin/announcer.py b/redbot/cogs/admin/announcer.py
--- a/redbot/cogs/admin/announcer.py
+++ b/redbot/cogs/admin/announcer.py
@@ -3,6 +3,7 @@
import discord
from redbot.core import commands
from redbot.core.i18n import Translator
+from redbot.core.utils.chat_formatting import humanize_list, inline
_ = Translator("Announcer", __file__)
@@ -53,7 +54,7 @@
async def announcer(self):
guild_list = self.ctx.bot.guilds
- bot_owner = (await self.ctx.bot.application_info()).owner
+ failed = []
for g in guild_list:
if not self.active:
return
@@ -66,9 +67,14 @@
try:
await channel.send(self.message)
except discord.Forbidden:
- await bot_owner.send(
- _("I could not announce to server: {server.id}").format(server=g)
- )
+ failed.append(str(g.id))
await asyncio.sleep(0.5)
+ msg = (
+ _("I could not announce to the following server: ")
+ if len(failed) == 1
+ else _("I could not announce to the following servers: ")
+ )
+ msg += humanize_list(tuple(map(inline, failed)))
+ await self.ctx.bot.send_to_owners(msg)
self.active = False
verification_info:
{"golden_diff": "diff --git a/redbot/cogs/admin/announcer.py b/redbot/cogs/admin/announcer.py\n--- a/redbot/cogs/admin/announcer.py\n+++ b/redbot/cogs/admin/announcer.py\n@@ -3,6 +3,7 @@\n import discord\n from redbot.core import commands\n from redbot.core.i18n import Translator\n+from redbot.core.utils.chat_formatting import humanize_list, inline\n \n _ = Translator(\"Announcer\", __file__)\n \n@@ -53,7 +54,7 @@\n \n async def announcer(self):\n guild_list = self.ctx.bot.guilds\n- bot_owner = (await self.ctx.bot.application_info()).owner\n+ failed = []\n for g in guild_list:\n if not self.active:\n return\n@@ -66,9 +67,14 @@\n try:\n await channel.send(self.message)\n except discord.Forbidden:\n- await bot_owner.send(\n- _(\"I could not announce to server: {server.id}\").format(server=g)\n- )\n+ failed.append(str(g.id))\n await asyncio.sleep(0.5)\n \n+ msg = (\n+ _(\"I could not announce to the following server: \")\n+ if len(failed) == 1\n+ else _(\"I could not announce to the following servers: \")\n+ )\n+ msg += humanize_list(tuple(map(inline, failed)))\n+ await self.ctx.bot.send_to_owners(msg)\n self.active = False\n", "issue": "[p]announce fails if bot belongs to team\n# Command bugs\r\n\r\n#### Command name\r\n\r\n`announce`\r\n\r\n#### What cog is this command from?\r\n\r\n`Admin`\r\n\r\n#### What were you expecting to happen?\r\n\r\nSend announcement to all enabled servers, if failed, send message to the one of owners or all owners (like an `[p]contact`)\r\n\r\n#### What actually happened?\r\n\r\nannouncement failed almost immediately with error in console \r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Set bot with token belonging to team\r\n2. Create environment, where bot cant send announcement to server\r\n3. Announce an message\r\n4. 
`[p]announce` silently fails with error:\r\n```py\r\nTraceback (most recent call last):\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py\", line 67, in announcer\r\n await channel.send(self.message)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py\", line 823, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py\", line 218, in request\r\n raise Forbidden(r, data)\r\ndiscord.errors.Forbidden: 403 FORBIDDEN (error code: 50001): Missing Access\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py\", line 70, in announcer\r\n _(\"I could not announce to server: {server.id}\").format(server=g)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py\", line 823, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py\", line 218, in request\r\n raise Forbidden(r, data)\r\ndiscord.errors.Forbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user\r\n```\r\n\r\nCaused by https://github.com/Cog-Creators/Red-DiscordBot/blob/f0836d7182d99239d1fde24cf2231c6ebf206f72/redbot/cogs/admin/announcer.py#L56\r\n\r\n*Kinda related to #2781, i guess*\n", "before_files": [{"content": "import asyncio\n\nimport discord\nfrom redbot.core import commands\nfrom redbot.core.i18n import Translator\n\n_ = Translator(\"Announcer\", __file__)\n\n\nclass Announcer:\n def __init__(self, ctx: commands.Context, message: str, config=None):\n \"\"\"\n :param ctx:\n :param message:\n :param config: Used to determine channel overrides\n \"\"\"\n self.ctx = ctx\n self.message = message\n self.config = config\n\n self.active = None\n\n def start(self):\n \"\"\"\n Starts an announcement.\n :return:\n \"\"\"\n if self.active is None:\n self.active = True\n self.ctx.bot.loop.create_task(self.announcer())\n\n def cancel(self):\n \"\"\"\n Cancels a running announcement.\n :return:\n \"\"\"\n self.active = False\n\n async def _get_announce_channel(self, guild: discord.Guild) -> discord.TextChannel:\n channel_id = await self.config.guild(guild).announce_channel()\n channel = None\n\n if channel_id is not None:\n channel = guild.get_channel(channel_id)\n\n if channel is None:\n channel = guild.system_channel\n\n if channel is None:\n channel = guild.text_channels[0]\n\n return channel\n\n async def announcer(self):\n guild_list = self.ctx.bot.guilds\n bot_owner = (await self.ctx.bot.application_info()).owner\n for g in guild_list:\n if not self.active:\n return\n\n if await self.config.guild(g).announce_ignore():\n continue\n\n channel = await self._get_announce_channel(g)\n\n try:\n await channel.send(self.message)\n except discord.Forbidden:\n await bot_owner.send(\n _(\"I could not announce to server: {server.id}\").format(server=g)\n )\n await asyncio.sleep(0.5)\n\n self.active = False\n", "path": "redbot/cogs/admin/announcer.py"}]}
num_tokens_prompt: 1,702
num_tokens_diff: 329
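The `verification_info` cell of each record is a JSON object; the keys visible in the rows above are `golden_diff`, `issue`, and `before_files`, where `before_files` is a list of `{"path": ..., "content": ...}` entries holding the pre-patch sources. A sketch of unpacking it into a scratch directory (the directory layout and UTF-8 encoding are assumptions) might be:

```python
# Sketch: write each pre-patch file from a row's verification_info to disk.
# Key names come from the JSON visible in the rows above; everything else is assumed.
import json
from pathlib import Path

def materialize_before_files(verification_info: str, workdir: str) -> list[Path]:
    """Recreate the pre-patch source tree under `workdir` and return the written paths."""
    info = json.loads(verification_info)
    written = []
    for entry in info["before_files"]:
        target = Path(workdir) / entry["path"]
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(entry["content"], encoding="utf-8")
        written.append(target)
    return written
```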
problem_id: gh_patches_debug_41
source: rasdani/github-patches
task_type: git_diff
in_source_id: streamlit__streamlit-3038

prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dark theme does not properly adjust markdown tables
### Summary
When I load the latest streamlit in darkmode I cannot see anything in my markdown tables because the text color is changed but not the background color.
### Steps to reproduce
Code snippet:
```
md = """
| Label | Info |
| -------- | --------- |
| Row | Data |
"""
st.markdown(md)
```
**Expected behavior:**
I would expect if the text color get changed to white in the table, the background color should get changed to something dark
**Actual behavior:**
Both the text color and background are white so nothing can be seen.
### Is this a regression?
no, consequence of new theme
### Debug info
- Streamlit version: 0.79.0
- Python version: 3.7.9
- pip
- OS version: MacOS Catalina 10.15.7
- Browser version: Chrome 89.0.4389.90
### Additional information
I'm not sure why markdown tables have different background style but they seem to; perhaps other ui elements would be affected as well.
</issue>
<code>
[start of e2e/scripts/st_markdown.py]
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 st.markdown("This **markdown** is awesome! :sunglasses:")
18
19 st.markdown("This <b>HTML tag</b> is escaped!")
20
21 st.markdown("This <b>HTML tag</b> is not escaped!", unsafe_allow_html=True)
22
23 st.markdown("[text]")
24
25 st.markdown("[link](href)")
26
27 st.markdown("[][]")
28
29 st.markdown("Inline math with $\KaTeX$")
30
31 st.markdown(
32 """
33 $$
34 ax^2 + bx + c = 0
35 $$
36 """
37 )
38
[end of e2e/scripts/st_markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/e2e/scripts/st_markdown.py b/e2e/scripts/st_markdown.py
--- a/e2e/scripts/st_markdown.py
+++ b/e2e/scripts/st_markdown.py
@@ -35,3 +35,11 @@
$$
"""
)
+
+st.markdown(
+ """
+| Col1 | Col2 |
+| --------- | ----------- |
+| Some | Data |
+"""
+)
verification_info:
{"golden_diff": "diff --git a/e2e/scripts/st_markdown.py b/e2e/scripts/st_markdown.py\n--- a/e2e/scripts/st_markdown.py\n+++ b/e2e/scripts/st_markdown.py\n@@ -35,3 +35,11 @@\n $$\n \"\"\"\n )\n+\n+st.markdown(\n+ \"\"\"\n+| Col1 | Col2 |\n+| --------- | ----------- |\n+| Some | Data |\n+\"\"\"\n+)\n", "issue": "Dark theme does not properly adjust markdown tables\n### Summary\r\n\r\nWhen I load the latest streamlit in darkmode I cannot see anything in my markdown tables because the text color is changed but not the background color.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nmd = \"\"\"\r\n| Label | Info |\r\n| -------- | --------- |\r\n| Row | Data |\r\n\"\"\"\r\nst.markdown(md)\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nI would expect if the text color get changed to white in the table, the background color should get changed to something dark\r\n\r\n**Actual behavior:**\r\n\r\nBoth the text color and background are white so nothing can be seen.\r\n\r\n### Is this a regression?\r\n\r\nno, consequence of new theme\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 0.79.0\r\n- Python version: 3.7.9\r\n- pip\r\n- OS version: MacOS Catalina 10.15.7\r\n- Browser version: Chrome 89.0.4389.90\r\n\r\n### Additional information\r\n\r\nI'm not sure why markdown tables have different background style but they seem to; perhaps other ui elements would be affected as well.\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.markdown(\"This **markdown** is awesome! :sunglasses:\")\n\nst.markdown(\"This <b>HTML tag</b> is escaped!\")\n\nst.markdown(\"This <b>HTML tag</b> is not escaped!\", unsafe_allow_html=True)\n\nst.markdown(\"[text]\")\n\nst.markdown(\"[link](href)\")\n\nst.markdown(\"[][]\")\n\nst.markdown(\"Inline math with $\\KaTeX$\")\n\nst.markdown(\n \"\"\"\n$$\nax^2 + bx + c = 0\n$$\n\"\"\"\n)\n", "path": "e2e/scripts/st_markdown.py"}]}
num_tokens_prompt: 1,110
num_tokens_diff: 98
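The `num_tokens_prompt` and `num_tokens_diff` columns record token counts for the prompt and diff fields, but the tokenizer that produced them is not identified anywhere in the dump. The sketch below therefore uses "gpt2" purely as a placeholder assumption; recomputed counts will not necessarily match the stored values:

```python
# Illustrative only: recompute token counts for a record.
# "gpt2" is an assumed tokenizer; the dataset does not state which one was used.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")

def count_tokens(text: str) -> int:
    return len(tokenizer.encode(text))

# e.g. compare count_tokens(row["prompt"]) against the stored num_tokens_prompt value
```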
problem_id: gh_patches_debug_24888
source: rasdani/github-patches
task_type: git_diff
in_source_id: yt-dlp__yt-dlp-8651

prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bfmtv: Unable to extract video block
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting that yt-dlp is broken on a **supported** site
- [X] I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
France
### Provide a description that is worded well enough to be understood
yt-dlp is unable to extract replay video from bfmtv websites, from domain www.bfmtv.com. It was still working a few days ago.
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
yt-dlp -vU https://www.bfmtv.com/alsace/replay-emissions/bonsoir-l-alsace/diabete-le-groupe-lilly-investit-160-millions-d-euros-a-fegersheim_VN-202310230671.html
[debug] Command-line config: ['-vU', 'https://www.bfmtv.com/alsace/replay-emissions/bonsoir-l-alsace/diabete-le-groupe-lilly-investit-160-millions-d-euros-a-fegersheim_VN-202310230671.html']
[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version [email protected] [b634ba742] (zip)
[debug] Python 3.11.2 (CPython x86_64 64bit) - Linux-6.1.0-13-amd64-x86_64-with-glibc2.36 (OpenSSL 3.0.11 19 Sep 2023, glibc 2.36)
[debug] exe versions: ffmpeg 5.1.3-1 (setts), ffprobe 5.1.3-1, phantomjs 2.1.1, rtmpdump 2.4
[debug] Optional libraries: Cryptodome-3.11.0, brotli-1.0.9, certifi-2022.09.24, mutagen-1.46.0, pyxattr-0.8.1, sqlite3-3.40.1, websockets-10.4
[debug] Proxy map: {}
[debug] Loaded 1890 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Available version: [email protected], Current version: [email protected]
Current Build Hash: be5cfb6be8930e1a5f427533ec32f2a481276b3da7b249d0150ce2b740ccf1ce
yt-dlp is up to date ([email protected])
[bfmtv] Extracting URL: https://www.bfmtv.com/alsace/replay-emissions/bonsoir-l-alsace/diabete-le-groupe-lilly-investit-160-millions-d-euros-a-fegersheim_VN-202310230671.html
[bfmtv] 202310230671: Downloading webpage
ERROR: [bfmtv] 202310230671: Unable to extract video block; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "/usr/local/bin/yt-dlp/yt_dlp/extractor/common.py", line 715, in extract
ie_result = self._real_extract(url)
^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/bin/yt-dlp/yt_dlp/extractor/bfmtv.py", line 43, in _real_extract
video_block = extract_attributes(self._search_regex(
^^^^^^^^^^^^^^^^^^^
File "/usr/local/bin/yt-dlp/yt_dlp/extractor/common.py", line 1263, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
```
</issue>
<code>
[start of yt_dlp/extractor/bfmtv.py]
1 import re
2
3 from .common import InfoExtractor
4 from ..utils import extract_attributes
5
6
7 class BFMTVBaseIE(InfoExtractor):
8 _VALID_URL_BASE = r'https?://(?:www\.|rmc\.)?bfmtv\.com/'
9 _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html'
10 _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block"[^>]*>)'
11 BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
12
13 def _brightcove_url_result(self, video_id, video_block):
14 account_id = video_block.get('accountid') or '876450612001'
15 player_id = video_block.get('playerid') or 'I2qBTln4u'
16 return self.url_result(
17 self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),
18 'BrightcoveNew', video_id)
19
20
21 class BFMTVIE(BFMTVBaseIE):
22 IE_NAME = 'bfmtv'
23 _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'V'
24 _TESTS = [{
25 'url': 'https://www.bfmtv.com/politique/emmanuel-macron-l-islam-est-une-religion-qui-vit-une-crise-aujourd-hui-partout-dans-le-monde_VN-202010020146.html',
26 'info_dict': {
27 'id': '6196747868001',
28 'ext': 'mp4',
29 'title': 'Emmanuel Macron: "L\'Islam est une religion qui vit une crise aujourd’hui, partout dans le monde"',
30 'description': 'Le Président s\'exprime sur la question du séparatisme depuis les Mureaux, dans les Yvelines.',
31 'uploader_id': '876450610001',
32 'upload_date': '20201002',
33 'timestamp': 1601629620,
34 'duration': 44.757,
35 'tags': ['bfmactu', 'politique'],
36 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876450610001/5041f4c1-bc48-4af8-a256-1b8300ad8ef0/cf2f9114-e8e2-4494-82b4-ab794ea4bc7d/1920x1080/match/image.jpg',
37 },
38 }]
39
40 def _real_extract(self, url):
41 bfmtv_id = self._match_id(url)
42 webpage = self._download_webpage(url, bfmtv_id)
43 video_block = extract_attributes(self._search_regex(
44 self._VIDEO_BLOCK_REGEX, webpage, 'video block'))
45 return self._brightcove_url_result(video_block['videoid'], video_block)
46
47
48 class BFMTVLiveIE(BFMTVIE): # XXX: Do not subclass from concrete IE
49 IE_NAME = 'bfmtv:live'
50 _VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P<id>(?:[^/]+/)?en-direct)'
51 _TESTS = [{
52 'url': 'https://www.bfmtv.com/en-direct/',
53 'info_dict': {
54 'id': '5615950982001',
55 'ext': 'mp4',
56 'title': r're:^le direct BFMTV WEB \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
57 'uploader_id': '876450610001',
58 'upload_date': '20171018',
59 'timestamp': 1508329950,
60 },
61 'params': {
62 'skip_download': True,
63 },
64 }, {
65 'url': 'https://www.bfmtv.com/economie/en-direct/',
66 'only_matching': True,
67 }]
68
69
70 class BFMTVArticleIE(BFMTVBaseIE):
71 IE_NAME = 'bfmtv:article'
72 _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'A'
73 _TESTS = [{
74 'url': 'https://www.bfmtv.com/sante/covid-19-un-responsable-de-l-institut-pasteur-se-demande-quand-la-france-va-se-reconfiner_AV-202101060198.html',
75 'info_dict': {
76 'id': '202101060198',
77 'title': 'Covid-19: un responsable de l\'Institut Pasteur se demande "quand la France va se reconfiner"',
78 'description': 'md5:947974089c303d3ac6196670ae262843',
79 },
80 'playlist_count': 2,
81 }, {
82 'url': 'https://www.bfmtv.com/international/pour-bolsonaro-le-bresil-est-en-faillite-mais-il-ne-peut-rien-faire_AD-202101060232.html',
83 'only_matching': True,
84 }, {
85 'url': 'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_AN-202101060275.html',
86 'only_matching': True,
87 }, {
88 'url': 'https://rmc.bfmtv.com/actualites/societe/transports/ce-n-est-plus-tout-rentable-le-bioethanol-e85-depasse-1eu-le-litre-des-automobilistes-regrettent_AV-202301100268.html',
89 'info_dict': {
90 'id': '6318445464112',
91 'ext': 'mp4',
92 'title': 'Le plein de bioéthanol fait de plus en plus mal à la pompe',
93 'description': None,
94 'uploader_id': '876630703001',
95 'upload_date': '20230110',
96 'timestamp': 1673341692,
97 'duration': 109.269,
98 'tags': ['rmc', 'show', 'apolline de malherbe', 'info', 'talk', 'matinale', 'radio'],
99 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876630703001/5bef74b8-9d5e-4480-a21f-60c2e2480c46/96c88b74-f9db-45e1-8040-e199c5da216c/1920x1080/match/image.jpg'
100 }
101 }]
102
103 def _real_extract(self, url):
104 bfmtv_id = self._match_id(url)
105 webpage = self._download_webpage(url, bfmtv_id)
106
107 entries = []
108 for video_block_el in re.findall(self._VIDEO_BLOCK_REGEX, webpage):
109 video_block = extract_attributes(video_block_el)
110 video_id = video_block.get('videoid')
111 if not video_id:
112 continue
113 entries.append(self._brightcove_url_result(video_id, video_block))
114
115 return self.playlist_result(
116 entries, bfmtv_id, self._og_search_title(webpage, fatal=False),
117 self._html_search_meta(['og:description', 'description'], webpage))
118
[end of yt_dlp/extractor/bfmtv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/yt_dlp/extractor/bfmtv.py b/yt_dlp/extractor/bfmtv.py
--- a/yt_dlp/extractor/bfmtv.py
+++ b/yt_dlp/extractor/bfmtv.py
@@ -7,7 +7,7 @@
class BFMTVBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.|rmc\.)?bfmtv\.com/'
_VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html'
- _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block"[^>]*>)'
+ _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block[^"]*"[^>]*>)'
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
def _brightcove_url_result(self, video_id, video_block):
@@ -55,8 +55,11 @@
'ext': 'mp4',
'title': r're:^le direct BFMTV WEB \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
'uploader_id': '876450610001',
- 'upload_date': '20171018',
- 'timestamp': 1508329950,
+ 'upload_date': '20220926',
+ 'timestamp': 1664207191,
+ 'live_status': 'is_live',
+ 'thumbnail': r're:https://.+/image\.jpg',
+ 'tags': [],
},
'params': {
'skip_download': True,
verification_info:
{"golden_diff": "diff --git a/yt_dlp/extractor/bfmtv.py b/yt_dlp/extractor/bfmtv.py\n--- a/yt_dlp/extractor/bfmtv.py\n+++ b/yt_dlp/extractor/bfmtv.py\n@@ -7,7 +7,7 @@\n class BFMTVBaseIE(InfoExtractor):\n _VALID_URL_BASE = r'https?://(?:www\\.|rmc\\.)?bfmtv\\.com/'\n _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\\d{12})\\.html'\n- _VIDEO_BLOCK_REGEX = r'(<div[^>]+class=\"video_block\"[^>]*>)'\n+ _VIDEO_BLOCK_REGEX = r'(<div[^>]+class=\"video_block[^\"]*\"[^>]*>)'\n BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'\n \n def _brightcove_url_result(self, video_id, video_block):\n@@ -55,8 +55,11 @@\n 'ext': 'mp4',\n 'title': r're:^le direct BFMTV WEB \\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}$',\n 'uploader_id': '876450610001',\n- 'upload_date': '20171018',\n- 'timestamp': 1508329950,\n+ 'upload_date': '20220926',\n+ 'timestamp': 1664207191,\n+ 'live_status': 'is_live',\n+ 'thumbnail': r're:https://.+/image\\.jpg',\n+ 'tags': [],\n },\n 'params': {\n 'skip_download': True,\n", "issue": "bfmtv: Unable to extract video block\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I *intentionally* remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting that yt-dlp is broken on a **supported** site\n- [X] I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nFrance\n\n### Provide a description that is worded well enough to be understood\n\nyt-dlp is unable to extract replay video from bfmtv websites, from domain www.bfmtv.com. 
It was still working a few days ago.\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [ ] If using API, add `'verbose': True` to `YoutubeDL` params instead\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\nyt-dlp -vU https://www.bfmtv.com/alsace/replay-emissions/bonsoir-l-alsace/diabete-le-groupe-lilly-investit-160-millions-d-euros-a-fegersheim_VN-202310230671.html\r\n[debug] Command-line config: ['-vU', 'https://www.bfmtv.com/alsace/replay-emissions/bonsoir-l-alsace/diabete-le-groupe-lilly-investit-160-millions-d-euros-a-fegersheim_VN-202310230671.html']\r\n[debug] Encodings: locale UTF-8, fs utf-8, pref UTF-8, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version [email protected] [b634ba742] (zip)\r\n[debug] Python 3.11.2 (CPython x86_64 64bit) - Linux-6.1.0-13-amd64-x86_64-with-glibc2.36 (OpenSSL 3.0.11 19 Sep 2023, glibc 2.36)\r\n[debug] exe versions: ffmpeg 5.1.3-1 (setts), ffprobe 5.1.3-1, phantomjs 2.1.1, rtmpdump 2.4\r\n[debug] Optional libraries: Cryptodome-3.11.0, brotli-1.0.9, certifi-2022.09.24, mutagen-1.46.0, pyxattr-0.8.1, sqlite3-3.40.1, websockets-10.4\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1890 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nAvailable version: [email protected], Current version: [email protected]\r\nCurrent Build Hash: be5cfb6be8930e1a5f427533ec32f2a481276b3da7b249d0150ce2b740ccf1ce\r\nyt-dlp is up to date ([email protected])\r\n[bfmtv] Extracting URL: https://www.bfmtv.com/alsace/replay-emissions/bonsoir-l-alsace/diabete-le-groupe-lilly-investit-160-millions-d-euros-a-fegersheim_VN-202310230671.html\r\n[bfmtv] 202310230671: Downloading webpage\r\nERROR: [bfmtv] 202310230671: Unable to extract video block; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. 
Confirm you are on the latest version using yt-dlp -U\r\n File \"/usr/local/bin/yt-dlp/yt_dlp/extractor/common.py\", line 715, in extract\r\n ie_result = self._real_extract(url)\r\n ^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/bin/yt-dlp/yt_dlp/extractor/bfmtv.py\", line 43, in _real_extract\r\n video_block = extract_attributes(self._search_regex(\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"/usr/local/bin/yt-dlp/yt_dlp/extractor/common.py\", line 1263, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\n```\n\n", "before_files": [{"content": "import re\n\nfrom .common import InfoExtractor\nfrom ..utils import extract_attributes\n\n\nclass BFMTVBaseIE(InfoExtractor):\n _VALID_URL_BASE = r'https?://(?:www\\.|rmc\\.)?bfmtv\\.com/'\n _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\\d{12})\\.html'\n _VIDEO_BLOCK_REGEX = r'(<div[^>]+class=\"video_block\"[^>]*>)'\n BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'\n\n def _brightcove_url_result(self, video_id, video_block):\n account_id = video_block.get('accountid') or '876450612001'\n player_id = video_block.get('playerid') or 'I2qBTln4u'\n return self.url_result(\n self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id),\n 'BrightcoveNew', video_id)\n\n\nclass BFMTVIE(BFMTVBaseIE):\n IE_NAME = 'bfmtv'\n _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'V'\n _TESTS = [{\n 'url': 'https://www.bfmtv.com/politique/emmanuel-macron-l-islam-est-une-religion-qui-vit-une-crise-aujourd-hui-partout-dans-le-monde_VN-202010020146.html',\n 'info_dict': {\n 'id': '6196747868001',\n 'ext': 'mp4',\n 'title': 'Emmanuel Macron: \"L\\'Islam est une religion qui vit une crise aujourd\u2019hui, partout dans le monde\"',\n 'description': 'Le Pr\u00e9sident s\\'exprime sur la question du s\u00e9paratisme depuis les Mureaux, dans les Yvelines.',\n 'uploader_id': '876450610001',\n 'upload_date': '20201002',\n 'timestamp': 1601629620,\n 'duration': 44.757,\n 'tags': ['bfmactu', 'politique'],\n 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876450610001/5041f4c1-bc48-4af8-a256-1b8300ad8ef0/cf2f9114-e8e2-4494-82b4-ab794ea4bc7d/1920x1080/match/image.jpg',\n },\n }]\n\n def _real_extract(self, url):\n bfmtv_id = self._match_id(url)\n webpage = self._download_webpage(url, bfmtv_id)\n video_block = extract_attributes(self._search_regex(\n self._VIDEO_BLOCK_REGEX, webpage, 'video block'))\n return self._brightcove_url_result(video_block['videoid'], video_block)\n\n\nclass BFMTVLiveIE(BFMTVIE): # XXX: Do not subclass from concrete IE\n IE_NAME = 'bfmtv:live'\n _VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P<id>(?:[^/]+/)?en-direct)'\n _TESTS = [{\n 'url': 'https://www.bfmtv.com/en-direct/',\n 'info_dict': {\n 'id': '5615950982001',\n 'ext': 'mp4',\n 'title': r're:^le direct BFMTV WEB \\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}$',\n 'uploader_id': '876450610001',\n 'upload_date': '20171018',\n 'timestamp': 1508329950,\n },\n 'params': {\n 'skip_download': True,\n },\n }, {\n 'url': 'https://www.bfmtv.com/economie/en-direct/',\n 'only_matching': True,\n }]\n\n\nclass BFMTVArticleIE(BFMTVBaseIE):\n IE_NAME = 'bfmtv:article'\n _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'A'\n _TESTS = [{\n 'url': 'https://www.bfmtv.com/sante/covid-19-un-responsable-de-l-institut-pasteur-se-demande-quand-la-france-va-se-reconfiner_AV-202101060198.html',\n 'info_dict': {\n 'id': '202101060198',\n 'title': 'Covid-19: un responsable de l\\'Institut Pasteur se demande \"quand la France va se 
reconfiner\"',\n 'description': 'md5:947974089c303d3ac6196670ae262843',\n },\n 'playlist_count': 2,\n }, {\n 'url': 'https://www.bfmtv.com/international/pour-bolsonaro-le-bresil-est-en-faillite-mais-il-ne-peut-rien-faire_AD-202101060232.html',\n 'only_matching': True,\n }, {\n 'url': 'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_AN-202101060275.html',\n 'only_matching': True,\n }, {\n 'url': 'https://rmc.bfmtv.com/actualites/societe/transports/ce-n-est-plus-tout-rentable-le-bioethanol-e85-depasse-1eu-le-litre-des-automobilistes-regrettent_AV-202301100268.html',\n 'info_dict': {\n 'id': '6318445464112',\n 'ext': 'mp4',\n 'title': 'Le plein de bio\u00e9thanol fait de plus en plus mal \u00e0 la pompe',\n 'description': None,\n 'uploader_id': '876630703001',\n 'upload_date': '20230110',\n 'timestamp': 1673341692,\n 'duration': 109.269,\n 'tags': ['rmc', 'show', 'apolline de malherbe', 'info', 'talk', 'matinale', 'radio'],\n 'thumbnail': 'https://cf-images.eu-west-1.prod.boltdns.net/v1/static/876630703001/5bef74b8-9d5e-4480-a21f-60c2e2480c46/96c88b74-f9db-45e1-8040-e199c5da216c/1920x1080/match/image.jpg'\n }\n }]\n\n def _real_extract(self, url):\n bfmtv_id = self._match_id(url)\n webpage = self._download_webpage(url, bfmtv_id)\n\n entries = []\n for video_block_el in re.findall(self._VIDEO_BLOCK_REGEX, webpage):\n video_block = extract_attributes(video_block_el)\n video_id = video_block.get('videoid')\n if not video_id:\n continue\n entries.append(self._brightcove_url_result(video_id, video_block))\n\n return self.playlist_result(\n entries, bfmtv_id, self._og_search_title(webpage, fatal=False),\n self._html_search_meta(['og:description', 'description'], webpage))\n", "path": "yt_dlp/extractor/bfmtv.py"}]}
| 4,068 | 433 |
gh_patches_debug_15461
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-6508
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python 3.9 compatibility (resolve DeprecationWarning from collections)
### Issue Summary
Wagtail hits the warning `DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working` in various places.
- [x] Jinja2>2.10.1
- [x] html5lib>1.0.1 (see https://github.com/html5lib/html5lib-python/issues/419)
- [x] beautifulsoup4>=4.8.0 (or maybe earlier)
- [x] fix wagtail/utils/l18n/translation.py:5 (#5485)
### Steps to Reproduce
1. run `tox -e py37-dj22-sqlite-noelasticsearch -- --deprecation all`
2. note deprecation warnings:
```
site-packages/jinja2/utils.py:485: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
site-packages/jinja2/runtime.py:318: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
```
resolved by https://github.com/pallets/jinja/pull/867 - the fix is on master but not yet released (as of Jinja2==2.10.1)
```
site-packages/html5lib/_trie/_base.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
from collections import Mapping
```
Fixed in https://github.com/html5lib/html5lib-python/issues/402, but not released yet as of 1.0.1 (see https://github.com/html5lib/html5lib-python/issues/419)
```
site-packages/bs4/element.py:1134: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
if not isinstance(formatter, collections.Callable):
```
https://bugs.launchpad.net/beautifulsoup/+bug/1778909 - resolved by beautifulsoup4>=4.8.0 (and earlier I think)
I'm also seeing one in Wagtail from my own project tests; the tox run above didn't seem to hit it:
```
wagtail/utils/l18n/translation.py:5: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working
```
See #5485
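The pattern that resolves this whole class of warnings is the usual compatibility import — a minimal sketch, not taken from any of the libraries above (the `Mapping`/`Callable` names are only examples):
```python
# collections.abc has provided these ABCs since Python 3.3; importing them from
# the bare collections namespace warns on 3.3-3.9 and fails on 3.10+.
try:
    from collections.abc import Mapping, Callable
except ImportError:  # very old interpreters only
    from collections import Mapping, Callable


def accepts_formatter(formatter):
    # The same kind of check bs4 performs, written against collections.abc.
    return isinstance(formatter, Callable)
```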
* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project:
yes (on master at bb4e2fe2dfe69fc3143f38c6e34dbe6f2f2f01e0)
### Technical details
* Python version: Run `python --version`.
Python 3.7.3
* Django version: Look in your requirements.txt, or run `pip show django | grep Version`.
Django 2.2
* Wagtail version: Look at the bottom of the Settings menu in the Wagtail admin, or run `pip show wagtail | grep Version:`.
2.7.0.alpha.0
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from wagtail import __version__
4 from wagtail.utils.setup import assets, check_bdist_egg, sdist
5
6
7 try:
8 from setuptools import find_packages, setup
9 except ImportError:
10 from distutils.core import setup
11
12
13 # Hack to prevent "TypeError: 'NoneType' object is not callable" error
14 # in multiprocessing/util.py _exit_function when setup.py exits
15 # (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
16 try:
17 import multiprocessing # noqa
18 except ImportError:
19 pass
20
21
22 install_requires = [
23 "Django>=2.2,<3.2",
24 "django-modelcluster>=5.1,<6.0",
25 "django-taggit>=1.0,<2.0",
26 "django-treebeard>=4.2.0,<5.0",
27 "djangorestframework>=3.11.1,<4.0",
28 "django-filter>=2.2,<3.0",
29 "draftjs_exporter>=2.1.5,<3.0",
30 "Pillow>=4.0.0,<9.0.0",
31 "beautifulsoup4>=4.8,<4.9",
32 "html5lib>=0.999,<2",
33 "Willow>=1.4,<1.5",
34 "requests>=2.11.1,<3.0",
35 "l18n>=2018.5",
36 "xlsxwriter>=1.2.8,<2.0",
37 "tablib[xls,xlsx]>=0.14.0",
38 "anyascii>=0.1.5",
39 ]
40
41 # Testing dependencies
42 testing_extras = [
43 # Required for running the tests
44 'python-dateutil>=2.2',
45 'pytz>=2014.7',
46 'elasticsearch>=1.0.0,<3.0',
47 'Jinja2>=2.8,<3.0',
48 'boto3>=1.4,<1.5',
49 'freezegun>=0.3.8',
50 'openpyxl>=2.6.4',
51 'Unidecode>=0.04.14,<2.0',
52
53 # For coverage and PEP8 linting
54 'coverage>=3.7.0',
55 'flake8>=3.6.0',
56 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases
57 'flake8-blind-except==0.1.1',
58 'flake8-print==2.0.2',
59 'doc8==0.8.1',
60
61 # For templates linting
62 'jinjalint>=0.5',
63
64 # Pipenv hack to fix broken dependency causing CircleCI failures
65 'docutils==0.15',
66
67 # django-taggit 1.3.0 made changes to verbose_name which affect migrations;
68 # the test suite migrations correspond to >=1.3.0
69 'django-taggit>=1.3.0,<2.0',
70 ]
71
72 # Documentation dependencies
73 documentation_extras = [
74 'pyenchant>=3.1.1,<4',
75 'sphinxcontrib-spelling>=5.4.0,<6',
76 'Sphinx>=1.5.2',
77 'sphinx-autobuild>=0.6.0',
78 'sphinx_rtd_theme>=0.1.9',
79 ]
80
81 setup(
82 name='wagtail',
83 version=__version__,
84 description='A Django content management system.',
85 author='Wagtail core team + contributors',
86 author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html
87 url='https://wagtail.io/',
88 packages=find_packages(),
89 include_package_data=True,
90 license='BSD',
91 long_description="Wagtail is an open source content management \
92 system built on Django, with a strong community and commercial support. \
93 It’s focused on user experience, and offers precise control for \
94 designers and developers.\n\n\
95 For more details, see https://wagtail.io, https://docs.wagtail.io and \
96 https://github.com/wagtail/wagtail/.",
97 classifiers=[
98 'Development Status :: 5 - Production/Stable',
99 'Environment :: Web Environment',
100 'Intended Audience :: Developers',
101 'License :: OSI Approved :: BSD License',
102 'Operating System :: OS Independent',
103 'Programming Language :: Python',
104 'Programming Language :: Python :: 3',
105 'Programming Language :: Python :: 3.6',
106 'Programming Language :: Python :: 3.7',
107 'Programming Language :: Python :: 3.8',
108 'Framework :: Django',
109 'Framework :: Django :: 2.2',
110 'Framework :: Django :: 3.0',
111 'Framework :: Django :: 3.1',
112 'Framework :: Wagtail',
113 'Topic :: Internet :: WWW/HTTP :: Site Management',
114 ],
115 python_requires='>=3.6',
116 install_requires=install_requires,
117 extras_require={
118 'testing': testing_extras,
119 'docs': documentation_extras
120 },
121 entry_points="""
122 [console_scripts]
123 wagtail=wagtail.bin.wagtail:main
124 """,
125 zip_safe=False,
126 cmdclass={
127 'sdist': sdist,
128 'bdist_egg': check_bdist_egg,
129 'assets': assets,
130 },
131 )
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@
'pytz>=2014.7',
'elasticsearch>=1.0.0,<3.0',
'Jinja2>=2.8,<3.0',
- 'boto3>=1.4,<1.5',
+ 'boto3>=1.16,<1.17',
'freezegun>=0.3.8',
'openpyxl>=2.6.4',
'Unidecode>=0.04.14,<2.0',
@@ -105,6 +105,7 @@
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
+ 'Programming Language :: Python :: 3.9',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.0',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,7 @@\n 'pytz>=2014.7',\n 'elasticsearch>=1.0.0,<3.0',\n 'Jinja2>=2.8,<3.0',\n- 'boto3>=1.4,<1.5',\n+ 'boto3>=1.16,<1.17',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\n 'Unidecode>=0.04.14,<2.0',\n@@ -105,6 +105,7 @@\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n+ 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Framework :: Django :: 2.2',\n 'Framework :: Django :: 3.0',\n", "issue": "Python 3.9 compatibility (resolve DeprecationWarning from collections)\n### Issue Summary\r\n\r\nWagtail hits the warning `DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working` in various places.\r\n\r\n- [x] Jinja2>2.10.1 \r\n- [x] html5lib>1.0.1 (see https://github.com/html5lib/html5lib-python/issues/419)\r\n- [x] beautifulsoup4>=4.8.0 (or maybe earlier)\r\n- [x] fix wagtail/utils/l18n/translation.py:5 (#5485)\r\n\r\n### Steps to Reproduce\r\n\r\n1. run `tox -e py37-dj22-sqlite-noelasticsearch -- --deprecation all`\r\n2. note deprecation warnings:\r\n\r\n\r\n```\r\nsite-packages/jinja2/utils.py:485: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\nsite-packages/jinja2/runtime.py:318: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n```\r\nresolved by https://github.com/pallets/jinja/pull/867 - the fix is on master not released yet (as of Jinja2==2.10.1)\r\n\r\n```\r\nsite-packages/html5lib/_trie/_base.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import Mapping\r\n```\r\n\r\nFixed in https://github.com/html5lib/html5lib-python/issues/402 , but not released yet as of 1.0.1 (see https://github.com/html5lib/html5lib-python/issues/419 )\r\n\r\n```\r\nsite-packages/bs4/element.py:1134: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n if not isinstance(formatter, collections.Callable):\r\n```\r\nhttps://bugs.launchpad.net/beautifulsoup/+bug/1778909 - resolved by beautifulsoup4>=4.8.0 (and earlier I think)\r\n\r\nI'm also seeing one in Wagtail from my own project tests here, the above tox run didn't seem to hit it:\r\n\r\n```\r\nwagtail/utils/l18n/translation.py:5: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n```\r\n\r\nSee #5485 \r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: \r\n\r\nyes (on master at bb4e2fe2dfe69fc3143f38c6e34dbe6f2f2f01e0 )\r\n\r\n### Technical details\r\n\r\n* Python version: Run `python --version`.\r\n\r\nPython 3.7.3\r\n\r\n* Django version: Look in your requirements.txt, or run `pip show django | grep Version`.\r\n\r\nDjango 2.2\r\n\r\n* Wagtail version: Look at the bottom of the Settings menu in the Wagtail admin, or run `pip show wagtail | grep Version:`.\r\n\r\n2.7.0.alpha.0\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom wagtail import __version__\nfrom wagtail.utils.setup import assets, 
check_bdist_egg, sdist\n\n\ntry:\n from setuptools import find_packages, setup\nexcept ImportError:\n from distutils.core import setup\n\n\n# Hack to prevent \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when setup.py exits\n# (see http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing # noqa\nexcept ImportError:\n pass\n\n\ninstall_requires = [\n \"Django>=2.2,<3.2\",\n \"django-modelcluster>=5.1,<6.0\",\n \"django-taggit>=1.0,<2.0\",\n \"django-treebeard>=4.2.0,<5.0\",\n \"djangorestframework>=3.11.1,<4.0\",\n \"django-filter>=2.2,<3.0\",\n \"draftjs_exporter>=2.1.5,<3.0\",\n \"Pillow>=4.0.0,<9.0.0\",\n \"beautifulsoup4>=4.8,<4.9\",\n \"html5lib>=0.999,<2\",\n \"Willow>=1.4,<1.5\",\n \"requests>=2.11.1,<3.0\",\n \"l18n>=2018.5\",\n \"xlsxwriter>=1.2.8,<2.0\",\n \"tablib[xls,xlsx]>=0.14.0\",\n \"anyascii>=0.1.5\",\n]\n\n# Testing dependencies\ntesting_extras = [\n # Required for running the tests\n 'python-dateutil>=2.2',\n 'pytz>=2014.7',\n 'elasticsearch>=1.0.0,<3.0',\n 'Jinja2>=2.8,<3.0',\n 'boto3>=1.4,<1.5',\n 'freezegun>=0.3.8',\n 'openpyxl>=2.6.4',\n 'Unidecode>=0.04.14,<2.0',\n\n # For coverage and PEP8 linting\n 'coverage>=3.7.0',\n 'flake8>=3.6.0',\n 'isort==5.6.4', # leave this pinned - it tends to change rules between patch releases\n 'flake8-blind-except==0.1.1',\n 'flake8-print==2.0.2',\n 'doc8==0.8.1',\n\n # For templates linting\n 'jinjalint>=0.5',\n\n # Pipenv hack to fix broken dependency causing CircleCI failures\n 'docutils==0.15',\n\n # django-taggit 1.3.0 made changes to verbose_name which affect migrations;\n # the test suite migrations correspond to >=1.3.0\n 'django-taggit>=1.3.0,<2.0',\n]\n\n# Documentation dependencies\ndocumentation_extras = [\n 'pyenchant>=3.1.1,<4',\n 'sphinxcontrib-spelling>=5.4.0,<6',\n 'Sphinx>=1.5.2',\n 'sphinx-autobuild>=0.6.0',\n 'sphinx_rtd_theme>=0.1.9',\n]\n\nsetup(\n name='wagtail',\n version=__version__,\n description='A Django content management system.',\n author='Wagtail core team + contributors',\n author_email='[email protected]', # For support queries, please see https://docs.wagtail.io/en/stable/support.html\n url='https://wagtail.io/',\n packages=find_packages(),\n include_package_data=True,\n license='BSD',\n long_description=\"Wagtail is an open source content management \\\nsystem built on Django, with a strong community and commercial support. 
\\\nIt\u2019s focused on user experience, and offers precise control for \\\ndesigners and developers.\\n\\n\\\nFor more details, see https://wagtail.io, https://docs.wagtail.io and \\\nhttps://github.com/wagtail/wagtail/.\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Framework :: Django',\n 'Framework :: Django :: 2.2',\n 'Framework :: Django :: 3.0',\n 'Framework :: Django :: 3.1',\n 'Framework :: Wagtail',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require={\n 'testing': testing_extras,\n 'docs': documentation_extras\n },\n entry_points=\"\"\"\n [console_scripts]\n wagtail=wagtail.bin.wagtail:main\n \"\"\",\n zip_safe=False,\n cmdclass={\n 'sdist': sdist,\n 'bdist_egg': check_bdist_egg,\n 'assets': assets,\n },\n)\n", "path": "setup.py"}]}
| 2,821 | 239 |
gh_patches_debug_43494
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-299
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
date_resolver_dialog - test of "definite"
## Version SDK v4.5.0b3
in botbuilder-python/samples/13.core-bot/dialogs/date_resolver_dialog.py
## Describe the bug
Line : _if "definite" in Timex(timex).types:_
The goal of this line is to handle ambiguous dates such as the timex date XXXX-05-17,
so the test must be "not in" instead of "in" ("definite" is the type for an unambiguous date).
[bug]
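A quick illustration of the distinction (a sketch using the same `datatypes_date_time` package the sample imports; the exact contents of `types` may differ by version):
```python
from datatypes_date_time.timex import Timex

# Year is missing, so the date is ambiguous: "definite" is absent from its types.
print(Timex("XXXX-05-17").types)   # e.g. {"date"}

# Fully specified date: "definite" is present, so no reprompt is needed.
print(Timex("2019-05-17").types)   # e.g. {"definite", "date"}
```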
</issue>
<code>
[start of samples/13.core-bot/dialogs/date_resolver_dialog.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from botbuilder.core import MessageFactory
5 from botbuilder.dialogs import WaterfallDialog, DialogTurnResult, WaterfallStepContext
6 from botbuilder.dialogs.prompts import (
7 DateTimePrompt,
8 PromptValidatorContext,
9 PromptOptions,
10 DateTimeResolution,
11 )
12 from botbuilder.schema import InputHints
13 from .cancel_and_help_dialog import CancelAndHelpDialog
14
15 from datatypes_date_time.timex import Timex
16
17
18 class DateResolverDialog(CancelAndHelpDialog):
19 def __init__(self, dialog_id: str = None):
20 super(DateResolverDialog, self).__init__(
21 dialog_id or DateResolverDialog.__name__
22 )
23
24 self.add_dialog(
25 DateTimePrompt(
26 DateTimePrompt.__name__, DateResolverDialog.datetime_prompt_validator
27 )
28 )
29 self.add_dialog(
30 WaterfallDialog(
31 WaterfallDialog.__name__ + "2", [self.initial_step, self.final_step]
32 )
33 )
34
35 self.initial_dialog_id = WaterfallDialog.__name__ + "2"
36
37 async def initial_step(
38 self, step_context: WaterfallStepContext
39 ) -> DialogTurnResult:
40 timex = step_context.options
41
42 prompt_msg_text = "On what date would you like to travel?"
43 prompt_msg = MessageFactory.text(
44 prompt_msg_text, prompt_msg_text, InputHints.expecting_input
45 )
46
47 reprompt_msg_text = "I'm sorry, for best results, please enter your travel date including the month, day and year."
48 reprompt_msg = MessageFactory.text(
49 reprompt_msg_text, reprompt_msg_text, InputHints.expecting_input
50 )
51
52 if timex is None:
53 # We were not given any date at all so prompt the user.
54 return await step_context.prompt(
55 DateTimePrompt.__name__,
56 PromptOptions(prompt=prompt_msg, retry_prompt=reprompt_msg),
57 )
58 # We have a Date we just need to check it is unambiguous.
59 if "definite" in Timex(timex).types:
60 # This is essentially a "reprompt" of the data we were given up front.
61 return await step_context.prompt(
62 DateTimePrompt.__name__, PromptOptions(prompt=reprompt_msg)
63 )
64
65 return await step_context.next(DateTimeResolution(timex=timex))
66
67 async def final_step(self, step_context: WaterfallStepContext):
68 timex = step_context.result[0].timex
69 return await step_context.end_dialog(timex)
70
71 @staticmethod
72 async def datetime_prompt_validator(prompt_context: PromptValidatorContext) -> bool:
73 if prompt_context.recognized.succeeded:
74 timex = prompt_context.recognized.value[0].timex.split("T")[0]
75
76 # TODO: Needs TimexProperty
77 return "definite" in Timex(timex).types
78
79 return False
80
[end of samples/13.core-bot/dialogs/date_resolver_dialog.py]
[start of samples/13.core-bot/dialogs/main_dialog.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from datetime import datetime
5 from typing import Dict
6 from botbuilder.dialogs import (
7 ComponentDialog,
8 DialogSet,
9 DialogTurnStatus,
10 WaterfallDialog,
11 WaterfallStepContext,
12 DialogTurnResult,
13 )
14 from botbuilder.dialogs.prompts import TextPrompt, ConfirmPrompt, PromptOptions
15 from botbuilder.core import MessageFactory, TurnContext
16 from botbuilder.schema import InputHints
17
18 from .booking_dialog import BookingDialog
19 from booking_details import BookingDetails
20 from flight_booking_recognizer import FlightBookingRecognizer
21 from helpers.luis_helper import LuisHelper, Intent
22
23
24 class MainDialog(ComponentDialog):
25 def __init__(
26 self, luis_recognizer: FlightBookingRecognizer, booking_dialog: BookingDialog
27 ):
28 super(MainDialog, self).__init__(MainDialog.__name__)
29
30 self._luis_recognizer = luis_recognizer
31 self._booking_dialog_id = booking_dialog.id
32
33 self.add_dialog(TextPrompt(TextPrompt.__name__))
34 self.add_dialog(booking_dialog)
35 self.add_dialog(
36 WaterfallDialog(
37 "WFDialog", [self.intro_step, self.act_step, self.final_step]
38 )
39 )
40
41 self.initial_dialog_id = "WFDialog"
42
43 async def intro_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
44 if not self._luis_recognizer.is_configured:
45 await step_context.context.send_activity(
46 MessageFactory.text(
47 "NOTE: LUIS is not configured. To enable all capabilities, add 'LuisAppId', 'LuisAPIKey' and "
48 "'LuisAPIHostName' to the appsettings.json file.",
49 input_hint=InputHints.ignoring_input,
50 )
51 )
52
53 return await step_context.next(None)
54 message_text = (
55 str(step_context.options)
56 if step_context.options
57 else "What can I help you with today?"
58 )
59 prompt_message = MessageFactory.text(
60 message_text, message_text, InputHints.expecting_input
61 )
62
63 return await step_context.prompt(
64 TextPrompt.__name__, PromptOptions(prompt=prompt_message)
65 )
66
67 async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
68 if not self._luis_recognizer.is_configured:
69 # LUIS is not configured, we just run the BookingDialog path with an empty BookingDetailsInstance.
70 return await step_context.begin_dialog(
71 self._booking_dialog_id, BookingDetails()
72 )
73
74 # Call LUIS and gather any potential booking details. (Note the TurnContext has the response to the prompt.)
75 intent, luis_result = await LuisHelper.execute_luis_query(
76 self._luis_recognizer, step_context.context
77 )
78
79 # top_intent = cognitive_models_helper.top_intent(luis_result['intents'])
80
81 if intent == Intent.BOOK_FLIGHT.value and luis_result:
82 await MainDialog._show_warning_for_unsupported_cities(
83 step_context.context, luis_result
84 )
85
86 # Run the BookingDialog giving it whatever details we have from the LUIS call.
87 return await step_context.begin_dialog(self._booking_dialog_id, luis_result)
88
89 elif intent == Intent.GET_WEATHER.value:
90 get_weather_text = "TODO: get weather flow here"
91 get_weather_message = MessageFactory.text(
92 get_weather_text, get_weather_text, InputHints.ignoring_input
93 )
94 await step_context.context.send_activity(get_weather_message)
95
96 else:
97 didnt_understand_text = (
98 "Sorry, I didn't get that. Please try asking in a different way"
99 )
100 didnt_understand_message = MessageFactory.text(
101 didnt_understand_text, didnt_understand_text, InputHints.ignoring_input
102 )
103 await step_context.context.send_activity(didnt_understand_message)
104
105 return await step_context.next(None)
106
107 async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
108 # If the child dialog ("BookingDialog") was cancelled or the user failed to confirm,
109 # the Result here will be null.
110 if step_context.result is not None:
111 result = step_context.result
112
113 # Now we have all the booking details call the booking service.
114
115 # If the call to the booking service was successful tell the user.
116 # time_property = Timex(result.travel_date)
117 # travel_date_msg = time_property.to_natural_language(datetime.now())
118 msg_txt = f"I have you booked to {result.destination} from {result.origin} on {result.travel_date}"
119 message = MessageFactory.text(msg_txt, msg_txt, InputHints.ignoring_input)
120 await step_context.context.send_activity(message)
121
122 prompt_message = "What else can I do for you?"
123 return await step_context.replace_dialog(self.id, prompt_message)
124
125 @staticmethod
126 async def _show_warning_for_unsupported_cities(
127 context: TurnContext, luis_result: BookingDetails
128 ) -> None:
129 if luis_result.unsupported_airports:
130 message_text = (
131 f"Sorry but the following airports are not supported:"
132 f" {', '.join(luis_result.unsupported_airports)}"
133 )
134 message = MessageFactory.text(
135 message_text, message_text, InputHints.ignoring_input
136 )
137 await context.send_activity(message)
138
[end of samples/13.core-bot/dialogs/main_dialog.py]
[start of samples/13.core-bot/helpers/luis_helper.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 from enum import Enum
4 from typing import Dict
5 from botbuilder.ai.luis import LuisRecognizer, LuisApplication
6 from botbuilder.core import IntentScore, TopIntent, TurnContext
7
8 from booking_details import BookingDetails
9
10
11 class Intent(Enum):
12 BOOK_FLIGHT = "BookFlight"
13 CANCEL = "Cancel"
14 GET_WEATHER = "GetWeather"
15 NONE_INTENT = "NoneIntent"
16
17
18 def top_intent(intents: Dict[Intent, dict]) -> TopIntent:
19 max_intent = Intent.NONE_INTENT
20 max_value = 0.0
21
22 for intent, value in intents:
23 intent_score = IntentScore(value)
24 if intent_score.score > max_value:
25 max_intent, max_value = intent, intent_score.score
26
27 return TopIntent(max_intent, max_value)
28
29
30 class LuisHelper:
31 @staticmethod
32 async def execute_luis_query(
33 luis_recognizer: LuisRecognizer, turn_context: TurnContext
34 ) -> (Intent, object):
35 result = None
36 intent = None
37
38 try:
39 recognizer_result = await luis_recognizer.recognize(turn_context)
40
41 intent = (
42 sorted(
43 recognizer_result.intents,
44 key=recognizer_result.intents.get,
45 reverse=True,
46 )[:1][0]
47 if recognizer_result.intents
48 else None
49 )
50
51 if intent == Intent.BOOK_FLIGHT.value:
52 result = BookingDetails()
53
54 # We need to get the result from the LUIS JSON which at every level returns an array.
55 to_entities = recognizer_result.entities.get("$instance", {}).get(
56 "To", []
57 )
58 if len(to_entities) > 0:
59 if recognizer_result.entities.get("To", [{"$instance": {}}])[0][
60 "$instance"
61 ]:
62 result.destination = to_entities[0]["text"].capitalize()
63 else:
64 result.unsupported_airports.append(
65 to_entities[0]["text"].capitalize()
66 )
67
68 from_entities = recognizer_result.entities.get("$instance", {}).get(
69 "From", []
70 )
71 if len(from_entities) > 0:
72 if recognizer_result.entities.get("From", [{"$instance": {}}])[0][
73 "$instance"
74 ]:
75 result.origin = from_entities[0]["text"].capitalize()
76 else:
77 result.unsupported_airports.append(
78 from_entities[0]["text"].capitalize()
79 )
80
81 # TODO: This value will be a TIMEX. And we are only interested in a Date so grab the first result and drop the Time part.
82 # TIMEX is a format that represents DateTime expressions that include some ambiguity. e.g. missing a Year.
83 date_entities = recognizer_result.entities.get("$instance", {}).get(
84 "datetime", []
85 )
86 if len(date_entities) > 0:
87 result.travel_date = None # TODO: Set when we get a timex format
88 except Exception as e:
89 print(e)
90
91 return intent, result
92
[end of samples/13.core-bot/helpers/luis_helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/samples/13.core-bot/dialogs/date_resolver_dialog.py b/samples/13.core-bot/dialogs/date_resolver_dialog.py
--- a/samples/13.core-bot/dialogs/date_resolver_dialog.py
+++ b/samples/13.core-bot/dialogs/date_resolver_dialog.py
@@ -56,7 +56,7 @@
PromptOptions(prompt=prompt_msg, retry_prompt=reprompt_msg),
)
# We have a Date we just need to check it is unambiguous.
- if "definite" in Timex(timex).types:
+ if "definite" not in Timex(timex).types:
# This is essentially a "reprompt" of the data we were given up front.
return await step_context.prompt(
DateTimePrompt.__name__, PromptOptions(prompt=reprompt_msg)
diff --git a/samples/13.core-bot/dialogs/main_dialog.py b/samples/13.core-bot/dialogs/main_dialog.py
--- a/samples/13.core-bot/dialogs/main_dialog.py
+++ b/samples/13.core-bot/dialogs/main_dialog.py
@@ -1,17 +1,13 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
-from datetime import datetime
-from typing import Dict
from botbuilder.dialogs import (
ComponentDialog,
- DialogSet,
- DialogTurnStatus,
WaterfallDialog,
WaterfallStepContext,
DialogTurnResult,
)
-from botbuilder.dialogs.prompts import TextPrompt, ConfirmPrompt, PromptOptions
+from botbuilder.dialogs.prompts import TextPrompt, PromptOptions
from botbuilder.core import MessageFactory, TurnContext
from botbuilder.schema import InputHints
@@ -76,9 +72,8 @@
self._luis_recognizer, step_context.context
)
- # top_intent = cognitive_models_helper.top_intent(luis_result['intents'])
-
if intent == Intent.BOOK_FLIGHT.value and luis_result:
+ # Show a warning for Origin and Destination if we can't resolve them.
await MainDialog._show_warning_for_unsupported_cities(
step_context.context, luis_result
)
diff --git a/samples/13.core-bot/helpers/luis_helper.py b/samples/13.core-bot/helpers/luis_helper.py
--- a/samples/13.core-bot/helpers/luis_helper.py
+++ b/samples/13.core-bot/helpers/luis_helper.py
@@ -2,7 +2,7 @@
# Licensed under the MIT License.
from enum import Enum
from typing import Dict
-from botbuilder.ai.luis import LuisRecognizer, LuisApplication
+from botbuilder.ai.luis import LuisRecognizer
from botbuilder.core import IntentScore, TopIntent, TurnContext
from booking_details import BookingDetails
@@ -32,6 +32,9 @@
async def execute_luis_query(
luis_recognizer: LuisRecognizer, turn_context: TurnContext
) -> (Intent, object):
+ """
+ Returns an object with preformatted LUIS results for the bot's dialogs to consume.
+ """
result = None
intent = None
@@ -78,13 +81,20 @@
from_entities[0]["text"].capitalize()
)
- # TODO: This value will be a TIMEX. And we are only interested in a Date so grab the first result and drop the Time part.
+ # This value will be a TIMEX. And we are only interested in a Date so grab the first result and drop the Time part.
# TIMEX is a format that represents DateTime expressions that include some ambiguity. e.g. missing a Year.
- date_entities = recognizer_result.entities.get("$instance", {}).get(
- "datetime", []
- )
- if len(date_entities) > 0:
- result.travel_date = None # TODO: Set when we get a timex format
+ date_entities = recognizer_result.entities.get("datetime", [])
+ if date_entities:
+ timex = date_entities[0]["timex"]
+
+ if timex:
+ datetime = timex[0].split("T")[0]
+
+ result.travel_date = datetime
+
+ else:
+ result.travel_date = None
+
except Exception as e:
print(e)
|
{"golden_diff": "diff --git a/samples/13.core-bot/dialogs/date_resolver_dialog.py b/samples/13.core-bot/dialogs/date_resolver_dialog.py\n--- a/samples/13.core-bot/dialogs/date_resolver_dialog.py\n+++ b/samples/13.core-bot/dialogs/date_resolver_dialog.py\n@@ -56,7 +56,7 @@\n PromptOptions(prompt=prompt_msg, retry_prompt=reprompt_msg),\n )\n # We have a Date we just need to check it is unambiguous.\n- if \"definite\" in Timex(timex).types:\n+ if \"definite\" not in Timex(timex).types:\n # This is essentially a \"reprompt\" of the data we were given up front.\n return await step_context.prompt(\n DateTimePrompt.__name__, PromptOptions(prompt=reprompt_msg)\ndiff --git a/samples/13.core-bot/dialogs/main_dialog.py b/samples/13.core-bot/dialogs/main_dialog.py\n--- a/samples/13.core-bot/dialogs/main_dialog.py\n+++ b/samples/13.core-bot/dialogs/main_dialog.py\n@@ -1,17 +1,13 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n \n-from datetime import datetime\n-from typing import Dict\n from botbuilder.dialogs import (\n ComponentDialog,\n- DialogSet,\n- DialogTurnStatus,\n WaterfallDialog,\n WaterfallStepContext,\n DialogTurnResult,\n )\n-from botbuilder.dialogs.prompts import TextPrompt, ConfirmPrompt, PromptOptions\n+from botbuilder.dialogs.prompts import TextPrompt, PromptOptions\n from botbuilder.core import MessageFactory, TurnContext\n from botbuilder.schema import InputHints\n \n@@ -76,9 +72,8 @@\n self._luis_recognizer, step_context.context\n )\n \n- # top_intent = cognitive_models_helper.top_intent(luis_result['intents'])\n-\n if intent == Intent.BOOK_FLIGHT.value and luis_result:\n+ # Show a warning for Origin and Destination if we can't resolve them.\n await MainDialog._show_warning_for_unsupported_cities(\n step_context.context, luis_result\n )\ndiff --git a/samples/13.core-bot/helpers/luis_helper.py b/samples/13.core-bot/helpers/luis_helper.py\n--- a/samples/13.core-bot/helpers/luis_helper.py\n+++ b/samples/13.core-bot/helpers/luis_helper.py\n@@ -2,7 +2,7 @@\n # Licensed under the MIT License.\n from enum import Enum\n from typing import Dict\n-from botbuilder.ai.luis import LuisRecognizer, LuisApplication\n+from botbuilder.ai.luis import LuisRecognizer\n from botbuilder.core import IntentScore, TopIntent, TurnContext\n \n from booking_details import BookingDetails\n@@ -32,6 +32,9 @@\n async def execute_luis_query(\n luis_recognizer: LuisRecognizer, turn_context: TurnContext\n ) -> (Intent, object):\n+ \"\"\"\n+ Returns an object with preformatted LUIS results for the bot's dialogs to consume.\n+ \"\"\"\n result = None\n intent = None\n \n@@ -78,13 +81,20 @@\n from_entities[0][\"text\"].capitalize()\n )\n \n- # TODO: This value will be a TIMEX. And we are only interested in a Date so grab the first result and drop the Time part.\n+ # This value will be a TIMEX. And we are only interested in a Date so grab the first result and drop the Time part.\n # TIMEX is a format that represents DateTime expressions that include some ambiguity. e.g. 
missing a Year.\n- date_entities = recognizer_result.entities.get(\"$instance\", {}).get(\n- \"datetime\", []\n- )\n- if len(date_entities) > 0:\n- result.travel_date = None # TODO: Set when we get a timex format\n+ date_entities = recognizer_result.entities.get(\"datetime\", [])\n+ if date_entities:\n+ timex = date_entities[0][\"timex\"]\n+\n+ if timex:\n+ datetime = timex[0].split(\"T\")[0]\n+\n+ result.travel_date = datetime\n+\n+ else:\n+ result.travel_date = None\n+\n except Exception as e:\n print(e)\n", "issue": "date_resolver_dialog - test of \"definite\"\n## Version SDK v4.5.0b3\r\nin botbuilder-python/samples/13.core-bot/dialogs/date_resolver_dialog.py\r\n\r\n## Describe the bug\r\nLine : _if \"definite\" in Timex(timex).types:_\r\nThe goal of this line is to treat ambiguous date such as timex date = XXXX-05-17\r\nso the test must be \"not in\" instead of \"in\" (\"definite\" = type for an unambiguous date)\r\n\r\n[bug]\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.core import MessageFactory\nfrom botbuilder.dialogs import WaterfallDialog, DialogTurnResult, WaterfallStepContext\nfrom botbuilder.dialogs.prompts import (\n DateTimePrompt,\n PromptValidatorContext,\n PromptOptions,\n DateTimeResolution,\n)\nfrom botbuilder.schema import InputHints\nfrom .cancel_and_help_dialog import CancelAndHelpDialog\n\nfrom datatypes_date_time.timex import Timex\n\n\nclass DateResolverDialog(CancelAndHelpDialog):\n def __init__(self, dialog_id: str = None):\n super(DateResolverDialog, self).__init__(\n dialog_id or DateResolverDialog.__name__\n )\n\n self.add_dialog(\n DateTimePrompt(\n DateTimePrompt.__name__, DateResolverDialog.datetime_prompt_validator\n )\n )\n self.add_dialog(\n WaterfallDialog(\n WaterfallDialog.__name__ + \"2\", [self.initial_step, self.final_step]\n )\n )\n\n self.initial_dialog_id = WaterfallDialog.__name__ + \"2\"\n\n async def initial_step(\n self, step_context: WaterfallStepContext\n ) -> DialogTurnResult:\n timex = step_context.options\n\n prompt_msg_text = \"On what date would you like to travel?\"\n prompt_msg = MessageFactory.text(\n prompt_msg_text, prompt_msg_text, InputHints.expecting_input\n )\n\n reprompt_msg_text = \"I'm sorry, for best results, please enter your travel date including the month, day and year.\"\n reprompt_msg = MessageFactory.text(\n reprompt_msg_text, reprompt_msg_text, InputHints.expecting_input\n )\n\n if timex is None:\n # We were not given any date at all so prompt the user.\n return await step_context.prompt(\n DateTimePrompt.__name__,\n PromptOptions(prompt=prompt_msg, retry_prompt=reprompt_msg),\n )\n # We have a Date we just need to check it is unambiguous.\n if \"definite\" in Timex(timex).types:\n # This is essentially a \"reprompt\" of the data we were given up front.\n return await step_context.prompt(\n DateTimePrompt.__name__, PromptOptions(prompt=reprompt_msg)\n )\n\n return await step_context.next(DateTimeResolution(timex=timex))\n\n async def final_step(self, step_context: WaterfallStepContext):\n timex = step_context.result[0].timex\n return await step_context.end_dialog(timex)\n\n @staticmethod\n async def datetime_prompt_validator(prompt_context: PromptValidatorContext) -> bool:\n if prompt_context.recognized.succeeded:\n timex = prompt_context.recognized.value[0].timex.split(\"T\")[0]\n\n # TODO: Needs TimexProperty\n return \"definite\" in Timex(timex).types\n\n return False\n", "path": 
"samples/13.core-bot/dialogs/date_resolver_dialog.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import datetime\nfrom typing import Dict\nfrom botbuilder.dialogs import (\n ComponentDialog,\n DialogSet,\n DialogTurnStatus,\n WaterfallDialog,\n WaterfallStepContext,\n DialogTurnResult,\n)\nfrom botbuilder.dialogs.prompts import TextPrompt, ConfirmPrompt, PromptOptions\nfrom botbuilder.core import MessageFactory, TurnContext\nfrom botbuilder.schema import InputHints\n\nfrom .booking_dialog import BookingDialog\nfrom booking_details import BookingDetails\nfrom flight_booking_recognizer import FlightBookingRecognizer\nfrom helpers.luis_helper import LuisHelper, Intent\n\n\nclass MainDialog(ComponentDialog):\n def __init__(\n self, luis_recognizer: FlightBookingRecognizer, booking_dialog: BookingDialog\n ):\n super(MainDialog, self).__init__(MainDialog.__name__)\n\n self._luis_recognizer = luis_recognizer\n self._booking_dialog_id = booking_dialog.id\n\n self.add_dialog(TextPrompt(TextPrompt.__name__))\n self.add_dialog(booking_dialog)\n self.add_dialog(\n WaterfallDialog(\n \"WFDialog\", [self.intro_step, self.act_step, self.final_step]\n )\n )\n\n self.initial_dialog_id = \"WFDialog\"\n\n async def intro_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\n if not self._luis_recognizer.is_configured:\n await step_context.context.send_activity(\n MessageFactory.text(\n \"NOTE: LUIS is not configured. To enable all capabilities, add 'LuisAppId', 'LuisAPIKey' and \"\n \"'LuisAPIHostName' to the appsettings.json file.\",\n input_hint=InputHints.ignoring_input,\n )\n )\n\n return await step_context.next(None)\n message_text = (\n str(step_context.options)\n if step_context.options\n else \"What can I help you with today?\"\n )\n prompt_message = MessageFactory.text(\n message_text, message_text, InputHints.expecting_input\n )\n\n return await step_context.prompt(\n TextPrompt.__name__, PromptOptions(prompt=prompt_message)\n )\n\n async def act_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\n if not self._luis_recognizer.is_configured:\n # LUIS is not configured, we just run the BookingDialog path with an empty BookingDetailsInstance.\n return await step_context.begin_dialog(\n self._booking_dialog_id, BookingDetails()\n )\n\n # Call LUIS and gather any potential booking details. (Note the TurnContext has the response to the prompt.)\n intent, luis_result = await LuisHelper.execute_luis_query(\n self._luis_recognizer, step_context.context\n )\n\n # top_intent = cognitive_models_helper.top_intent(luis_result['intents'])\n\n if intent == Intent.BOOK_FLIGHT.value and luis_result:\n await MainDialog._show_warning_for_unsupported_cities(\n step_context.context, luis_result\n )\n\n # Run the BookingDialog giving it whatever details we have from the LUIS call.\n return await step_context.begin_dialog(self._booking_dialog_id, luis_result)\n\n elif intent == Intent.GET_WEATHER.value:\n get_weather_text = \"TODO: get weather flow here\"\n get_weather_message = MessageFactory.text(\n get_weather_text, get_weather_text, InputHints.ignoring_input\n )\n await step_context.context.send_activity(get_weather_message)\n\n else:\n didnt_understand_text = (\n \"Sorry, I didn't get that. 
Please try asking in a different way\"\n )\n didnt_understand_message = MessageFactory.text(\n didnt_understand_text, didnt_understand_text, InputHints.ignoring_input\n )\n await step_context.context.send_activity(didnt_understand_message)\n\n return await step_context.next(None)\n\n async def final_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:\n # If the child dialog (\"BookingDialog\") was cancelled or the user failed to confirm,\n # the Result here will be null.\n if step_context.result is not None:\n result = step_context.result\n\n # Now we have all the booking details call the booking service.\n\n # If the call to the booking service was successful tell the user.\n # time_property = Timex(result.travel_date)\n # travel_date_msg = time_property.to_natural_language(datetime.now())\n msg_txt = f\"I have you booked to {result.destination} from {result.origin} on {result.travel_date}\"\n message = MessageFactory.text(msg_txt, msg_txt, InputHints.ignoring_input)\n await step_context.context.send_activity(message)\n\n prompt_message = \"What else can I do for you?\"\n return await step_context.replace_dialog(self.id, prompt_message)\n\n @staticmethod\n async def _show_warning_for_unsupported_cities(\n context: TurnContext, luis_result: BookingDetails\n ) -> None:\n if luis_result.unsupported_airports:\n message_text = (\n f\"Sorry but the following airports are not supported:\"\n f\" {', '.join(luis_result.unsupported_airports)}\"\n )\n message = MessageFactory.text(\n message_text, message_text, InputHints.ignoring_input\n )\n await context.send_activity(message)\n", "path": "samples/13.core-bot/dialogs/main_dialog.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nfrom enum import Enum\nfrom typing import Dict\nfrom botbuilder.ai.luis import LuisRecognizer, LuisApplication\nfrom botbuilder.core import IntentScore, TopIntent, TurnContext\n\nfrom booking_details import BookingDetails\n\n\nclass Intent(Enum):\n BOOK_FLIGHT = \"BookFlight\"\n CANCEL = \"Cancel\"\n GET_WEATHER = \"GetWeather\"\n NONE_INTENT = \"NoneIntent\"\n\n\ndef top_intent(intents: Dict[Intent, dict]) -> TopIntent:\n max_intent = Intent.NONE_INTENT\n max_value = 0.0\n\n for intent, value in intents:\n intent_score = IntentScore(value)\n if intent_score.score > max_value:\n max_intent, max_value = intent, intent_score.score\n\n return TopIntent(max_intent, max_value)\n\n\nclass LuisHelper:\n @staticmethod\n async def execute_luis_query(\n luis_recognizer: LuisRecognizer, turn_context: TurnContext\n ) -> (Intent, object):\n result = None\n intent = None\n\n try:\n recognizer_result = await luis_recognizer.recognize(turn_context)\n\n intent = (\n sorted(\n recognizer_result.intents,\n key=recognizer_result.intents.get,\n reverse=True,\n )[:1][0]\n if recognizer_result.intents\n else None\n )\n\n if intent == Intent.BOOK_FLIGHT.value:\n result = BookingDetails()\n\n # We need to get the result from the LUIS JSON which at every level returns an array.\n to_entities = recognizer_result.entities.get(\"$instance\", {}).get(\n \"To\", []\n )\n if len(to_entities) > 0:\n if recognizer_result.entities.get(\"To\", [{\"$instance\": {}}])[0][\n \"$instance\"\n ]:\n result.destination = to_entities[0][\"text\"].capitalize()\n else:\n result.unsupported_airports.append(\n to_entities[0][\"text\"].capitalize()\n )\n\n from_entities = recognizer_result.entities.get(\"$instance\", {}).get(\n \"From\", []\n )\n if len(from_entities) > 0:\n if 
recognizer_result.entities.get(\"From\", [{\"$instance\": {}}])[0][\n \"$instance\"\n ]:\n result.origin = from_entities[0][\"text\"].capitalize()\n else:\n result.unsupported_airports.append(\n from_entities[0][\"text\"].capitalize()\n )\n\n # TODO: This value will be a TIMEX. And we are only interested in a Date so grab the first result and drop the Time part.\n # TIMEX is a format that represents DateTime expressions that include some ambiguity. e.g. missing a Year.\n date_entities = recognizer_result.entities.get(\"$instance\", {}).get(\n \"datetime\", []\n )\n if len(date_entities) > 0:\n result.travel_date = None # TODO: Set when we get a timex format\n except Exception as e:\n print(e)\n\n return intent, result\n", "path": "samples/13.core-bot/helpers/luis_helper.py"}]}
| 3,798 | 942 |
gh_patches_debug_30202
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-2831
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
gTTS needs to be upgraded to 2.2.2 on dev branch
**Describe the bug**
When using Google TTS, the audio service returns an error and never returns audio.
```
2021-02-06 02:39:24.895 | ERROR | 1 | mycroft.audio.speech:mute_and_speak:134 | TTS execution failed (gTTSError('200 (OK) from TTS API. Probable cause: Unknown'))
```
I had to upgrade `gTTS` from `2.2.0` to `2.2.2` to fix the issue.
```
pip install gTTS -U
Collecting gTTS
Downloading https://files.pythonhosted.org/packages/5f/b9/94e59337107be134b21ce395a29fc0715b707b560108d6797de2d93e1178/gTTS-2.2.2-py3-none-any.whl
Requirement already satisfied, skipping upgrade: click in /opt/mycroft-venv/lib/python3.7/site-packages (from gTTS) (7.1.2)
Requirement already satisfied, skipping upgrade: six in /opt/mycroft-venv/lib/python3.7/site-packages (from gTTS) (1.15.0)
Requirement already satisfied, skipping upgrade: requests in /opt/mycroft-venv/lib/python3.7/site-packages (from gTTS) (2.20.0)
Requirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (1.24.3)
Requirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (3.0.4)
Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (2020.12.5)
Requirement already satisfied, skipping upgrade: idna<2.8,>=2.5 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (2.7)
Installing collected packages: gTTS
Found existing installation: gTTS 2.2.0
Uninstalling gTTS-2.2.0:
Successfully uninstalled gTTS-2.2.0
Successfully installed gTTS-2.2.2
```
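A quick way to confirm the running interpreter actually picked up the new release (a sketch — it assumes `gtts` exposes `__version__`, which recent releases do):
```python
import gtts

print(gtts.__version__)  # expected to print 2.2.2 after the upgrade
```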
**Environment:**
- Device type: Raspberry Pi 4
- OS: Raspberry OS 64-bit
- Mycroft-core version: dev
</issue>
<code>
[start of mycroft/tts/google_tts.py]
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from gtts import gTTS
16 from gtts.lang import tts_langs
17
18 from .tts import TTS, TTSValidator
19
20 from mycroft.util.log import LOG
21
22 # Live list of languages
23 # Cached list of supported languages (2020-05-27)
24 _default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',
25 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',
26 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',
27 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',
28 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',
29 'fi': 'Finnish', 'fr': 'French', 'de': 'German',
30 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',
31 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',
32 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',
33 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',
34 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',
35 'ml': 'Malayalam', 'mr': 'Marathi',
36 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',
37 'no': 'Norwegian', 'pl': 'Polish', 'pt': 'Portuguese',
38 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',
39 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',
40 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
41 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',
42 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',
43 'cy': 'Welsh', 'zh-cn': 'Chinese (Mandarin/China)',
44 'zh-tw': 'Chinese (Mandarin/Taiwan)',
45 'en-us': 'English (US)', 'en-ca': 'English (Canada)',
46 'en-uk': 'English (UK)', 'en-gb': 'English (UK)',
47 'en-au': 'English (Australia)', 'en-gh': 'English (Ghana)',
48 'en-in': 'English (India)', 'en-ie': 'English (Ireland)',
49 'en-nz': 'English (New Zealand)',
50 'en-ng': 'English (Nigeria)',
51 'en-ph': 'English (Philippines)',
52 'en-za': 'English (South Africa)',
53 'en-tz': 'English (Tanzania)', 'fr-ca': 'French (Canada)',
54 'fr-fr': 'French (France)', 'pt-br': 'Portuguese (Brazil)',
55 'pt-pt': 'Portuguese (Portugal)', 'es-es': 'Spanish (Spain)',
56 'es-us': 'Spanish (United States)'
57 }
58
59
60 _supported_langs = None
61
62
63 def get_supported_langs():
64 """Get dict of supported languages.
65
66 Tries to fetch remote list, if that fails a local cache will be used.
67
68 Returns:
69 (dict): Lang code to lang name map.
70 """
71 global _supported_langs
72 if not _supported_langs:
73 try:
74 _supported_langs = tts_langs()
75 except Exception:
76 LOG.warning('Couldn\'t fetch upto date language codes')
77 return _supported_langs or _default_langs
78
79
80 class GoogleTTS(TTS):
81 """Interface to google TTS."""
82 def __init__(self, lang, config):
83 self._google_lang = None
84 super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
85 self), 'mp3')
86
87 @property
88 def google_lang(self):
89 """Property containing a converted language code suitable for gTTS."""
90 supported_langs = get_supported_langs()
91 if not self._google_lang:
92 if self.lang.lower() in supported_langs:
93 self._google_lang = self.lang.lower()
94 elif self.lang[:2].lower() in supported_langs:
95 self._google_lang = self.lang[:2]
96 return self._google_lang or self.lang.lower()
97
98 def get_tts(self, sentence, wav_file):
99 """Fetch tts audio using gTTS.
100
101 Arguments:
102 sentence (str): Sentence to generate audio for
103 wav_file (str): output file path
104 Returns:
105 Tuple ((str) written file, None)
106 """
107 tts = gTTS(text=sentence, lang=self.google_lang)
108 tts.save(wav_file)
109 return (wav_file, None) # No phonemes
110
111
112 class GoogleTTSValidator(TTSValidator):
113 def __init__(self, tts):
114 super(GoogleTTSValidator, self).__init__(tts)
115
116 def validate_lang(self):
117 lang = self.tts.google_lang
118 if lang.lower() not in get_supported_langs():
119 raise ValueError("Language not supported by gTTS: {}".format(lang))
120
121 def validate_connection(self):
122 try:
123 gTTS(text='Hi').save(self.tts.filename)
124 except Exception:
125 raise Exception(
126 'GoogleTTS server could not be verified. Please check your '
127 'internet connection.')
128
129 def get_tts_class(self):
130 return GoogleTTS
131
[end of mycroft/tts/google_tts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py
--- a/mycroft/tts/google_tts.py
+++ b/mycroft/tts/google_tts.py
@@ -20,7 +20,7 @@
from mycroft.util.log import LOG
# Live list of languages
-# Cached list of supported languages (2020-05-27)
+# Cached list of supported languages (2021-02-09)
_default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',
'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',
'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',
@@ -40,20 +40,7 @@
'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',
'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',
- 'cy': 'Welsh', 'zh-cn': 'Chinese (Mandarin/China)',
- 'zh-tw': 'Chinese (Mandarin/Taiwan)',
- 'en-us': 'English (US)', 'en-ca': 'English (Canada)',
- 'en-uk': 'English (UK)', 'en-gb': 'English (UK)',
- 'en-au': 'English (Australia)', 'en-gh': 'English (Ghana)',
- 'en-in': 'English (India)', 'en-ie': 'English (Ireland)',
- 'en-nz': 'English (New Zealand)',
- 'en-ng': 'English (Nigeria)',
- 'en-ph': 'English (Philippines)',
- 'en-za': 'English (South Africa)',
- 'en-tz': 'English (Tanzania)', 'fr-ca': 'French (Canada)',
- 'fr-fr': 'French (France)', 'pt-br': 'Portuguese (Brazil)',
- 'pt-pt': 'Portuguese (Portugal)', 'es-es': 'Spanish (Spain)',
- 'es-us': 'Spanish (United States)'
+ 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'
}
|
{"golden_diff": "diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py\n--- a/mycroft/tts/google_tts.py\n+++ b/mycroft/tts/google_tts.py\n@@ -20,7 +20,7 @@\n from mycroft.util.log import LOG\n \n # Live list of languages\n-# Cached list of supported languages (2020-05-27)\n+# Cached list of supported languages (2021-02-09)\n _default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',\n 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',\n 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',\n@@ -40,20 +40,7 @@\n 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',\n 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',\n 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',\n- 'cy': 'Welsh', 'zh-cn': 'Chinese (Mandarin/China)',\n- 'zh-tw': 'Chinese (Mandarin/Taiwan)',\n- 'en-us': 'English (US)', 'en-ca': 'English (Canada)',\n- 'en-uk': 'English (UK)', 'en-gb': 'English (UK)',\n- 'en-au': 'English (Australia)', 'en-gh': 'English (Ghana)',\n- 'en-in': 'English (India)', 'en-ie': 'English (Ireland)',\n- 'en-nz': 'English (New Zealand)',\n- 'en-ng': 'English (Nigeria)',\n- 'en-ph': 'English (Philippines)',\n- 'en-za': 'English (South Africa)',\n- 'en-tz': 'English (Tanzania)', 'fr-ca': 'French (Canada)',\n- 'fr-fr': 'French (France)', 'pt-br': 'Portuguese (Brazil)',\n- 'pt-pt': 'Portuguese (Portugal)', 'es-es': 'Spanish (Spain)',\n- 'es-us': 'Spanish (United States)'\n+ 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'\n }\n", "issue": "gTTS needs to be upgraded to 2.2.2 on dev branch\n**Describe the bug**\r\nWhen using Google TTS, the audio service returns an error and never returns audio.\r\n```\r\n2021-02-06 02:39:24.895 | ERROR | 1 | mycroft.audio.speech:mute_and_speak:134 | TTS execution failed (gTTSError('200 (OK) from TTS API. 
Probable cause: Unknown'))\r\n```\r\n\r\nI had to upgrade `gTTS` from `2.2.0` to `2.2.2` to fix the issue.\r\n\r\n```\r\npip install gTTS -U\r\nCollecting gTTS\r\n Downloading https://files.pythonhosted.org/packages/5f/b9/94e59337107be134b21ce395a29fc0715b707b560108d6797de2d93e1178/gTTS-2.2.2-py3-none-any.whl\r\nRequirement already satisfied, skipping upgrade: click in /opt/mycroft-venv/lib/python3.7/site-packages (from gTTS) (7.1.2)\r\nRequirement already satisfied, skipping upgrade: six in /opt/mycroft-venv/lib/python3.7/site-packages (from gTTS) (1.15.0)\r\nRequirement already satisfied, skipping upgrade: requests in /opt/mycroft-venv/lib/python3.7/site-packages (from gTTS) (2.20.0)\r\nRequirement already satisfied, skipping upgrade: urllib3<1.25,>=1.21.1 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (1.24.3)\r\nRequirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (3.0.4)\r\nRequirement already satisfied, skipping upgrade: certifi>=2017.4.17 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (2020.12.5)\r\nRequirement already satisfied, skipping upgrade: idna<2.8,>=2.5 in /opt/mycroft-venv/lib/python3.7/site-packages (from requests->gTTS) (2.7)\r\nInstalling collected packages: gTTS\r\n Found existing installation: gTTS 2.2.0\r\n Uninstalling gTTS-2.2.0:\r\n Successfully uninstalled gTTS-2.2.0\r\nSuccessfully installed gTTS-2.2.2\r\n```\r\n\r\n**Environment:**\r\n - Device type: Raspberry Pi 4\r\n - OS: Raspberry OS 64-bit\r\n - Mycroft-core version: dev\r\n\r\n\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\nfrom gtts.lang import tts_langs\n\nfrom .tts import TTS, TTSValidator\n\nfrom mycroft.util.log import LOG\n\n# Live list of languages\n# Cached list of supported languages (2020-05-27)\n_default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',\n 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',\n 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',\n 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',\n 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',\n 'fi': 'Finnish', 'fr': 'French', 'de': 'German',\n 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',\n 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',\n 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',\n 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',\n 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',\n 'ml': 'Malayalam', 'mr': 'Marathi',\n 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',\n 'no': 'Norwegian', 'pl': 'Polish', 'pt': 'Portuguese',\n 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',\n 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',\n 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',\n 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',\n 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',\n 'cy': 'Welsh', 'zh-cn': 'Chinese (Mandarin/China)',\n 'zh-tw': 
'Chinese (Mandarin/Taiwan)',\n 'en-us': 'English (US)', 'en-ca': 'English (Canada)',\n 'en-uk': 'English (UK)', 'en-gb': 'English (UK)',\n 'en-au': 'English (Australia)', 'en-gh': 'English (Ghana)',\n 'en-in': 'English (India)', 'en-ie': 'English (Ireland)',\n 'en-nz': 'English (New Zealand)',\n 'en-ng': 'English (Nigeria)',\n 'en-ph': 'English (Philippines)',\n 'en-za': 'English (South Africa)',\n 'en-tz': 'English (Tanzania)', 'fr-ca': 'French (Canada)',\n 'fr-fr': 'French (France)', 'pt-br': 'Portuguese (Brazil)',\n 'pt-pt': 'Portuguese (Portugal)', 'es-es': 'Spanish (Spain)',\n 'es-us': 'Spanish (United States)'\n }\n\n\n_supported_langs = None\n\n\ndef get_supported_langs():\n \"\"\"Get dict of supported languages.\n\n Tries to fetch remote list, if that fails a local cache will be used.\n\n Returns:\n (dict): Lang code to lang name map.\n \"\"\"\n global _supported_langs\n if not _supported_langs:\n try:\n _supported_langs = tts_langs()\n except Exception:\n LOG.warning('Couldn\\'t fetch upto date language codes')\n return _supported_langs or _default_langs\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n self._google_lang = None\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n\n @property\n def google_lang(self):\n \"\"\"Property containing a converted language code suitable for gTTS.\"\"\"\n supported_langs = get_supported_langs()\n if not self._google_lang:\n if self.lang.lower() in supported_langs:\n self._google_lang = self.lang.lower()\n elif self.lang[:2].lower() in supported_langs:\n self._google_lang = self.lang[:2]\n return self._google_lang or self.lang.lower()\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Arguments:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.google_lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n lang = self.tts.google_lang\n if lang.lower() not in get_supported_langs():\n raise ValueError(\"Language not supported by gTTS: {}\".format(lang))\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}]}
| 2,894 | 551 |
gh_patches_debug_830
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-4591
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adding to lists broken
Adding an item to a list no longer works as of 12-02-2021.
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...an edition, etc.
2. Do ...add item to list.
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: List link loads list page.
* Expected: Item should be added to list.
### Details
- **Logged in (Y/N)?** Y
- **Browser type/version?** Chrome Version 88.0.4324.150 (Official Build) (x86_64)
- **Operating system?** Mac Big Sur
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
@cclauss
</issue>
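The patch further down in this record narrows the regression to `json_encode` in `openlibrary/core/helpers.py`, which used to coerce any falsy payload to `{}` before serialising. Whether that coercion is exactly what broke the list-add response is an inference from the diff; the snippet below only demonstrates the behavioural difference, and `json_encode_old` / `json_encode_new` are illustrative names, not functions from the repository:

```python
import json

def json_encode_old(d, **kw):
    # Pre-fix behaviour: any falsy value (empty list, 0, False, None)
    # is silently replaced by an empty dict before encoding.
    return json.dumps(d or {}, **kw)

def json_encode_new(d, **kw):
    # Post-fix behaviour: encode exactly what the caller passed in.
    return json.dumps(d, **kw)

for payload in ([], 0, False, None, {"status": "ok"}):
    print(payload, "->", json_encode_old(payload), "vs", json_encode_new(payload))
```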
<code>
[start of openlibrary/core/helpers.py]
1 """Generic helper functions to use in the templates and the webapp.
2 """
3 import web
4 from datetime import datetime
5 import re
6
7 import six
8 from six.moves.urllib.parse import urlsplit
9
10 if six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2
11 import simplejson as json
12 else:
13 import json
14
15 import babel
16 import babel.core
17 import babel.dates
18 import babel.numbers
19
20 try:
21 import genshi
22 import genshi.filters
23 except ImportError:
24 genshi = None
25
26 try:
27 from bs4 import BeautifulSoup
28 except ImportError:
29 BeautifulSoup = None
30
31 from infogami import config
32
33 # handy utility to parse ISO date strings
34 from infogami.infobase.utils import parse_datetime
35 from infogami.utils.view import safeint
36
37 # TODO: i18n should be moved to core or infogami
38 from openlibrary.i18n import gettext as _ # noqa: F401
39
40 __all__ = [
41 "sanitize",
42 "json_encode",
43 "safesort",
44 "days_since", "datestr", "format_date",
45 "sprintf", "cond", "commify", "truncate", "datetimestr_utc",
46 "urlsafe", "texsafe",
47 "percentage", "affiliate_id", "bookreader_host",
48 "private_collections", "private_collection_in",
49
50 # functions imported from elsewhere
51 "parse_datetime", "safeint"
52 ]
53 __docformat__ = "restructuredtext en"
54
55 def sanitize(html, encoding='utf8'):
56 """Removes unsafe tags and attributes from html and adds
57 ``rel="nofollow"`` attribute to all external links.
58 Using encoding=None if passing unicode strings e.g. for Python 3.
59 encoding="utf8" matches default format for earlier versions of Genshi
60 https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version
61 """
62
63 # Can't sanitize unless genshi module is available
64 if genshi is None:
65 return html
66
67 def get_nofollow(name, event):
68 attrs = event[1][1]
69 href = attrs.get('href', '')
70
71 if href:
72 # add rel=nofollow to all absolute links
73 _, host, _, _, _ = urlsplit(href)
74 if host:
75 return 'nofollow'
76
77 try:
78 html = genshi.HTML(html, encoding=encoding)
79
80 # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:
81 # don't catch Unicode errors so we can tell if we're getting bytes
82 except genshi.ParseError:
83 if BeautifulSoup:
84 # Bad html. Tidy it up using BeautifulSoup
85 html = str(BeautifulSoup(html, "lxml"))
86 try:
87 html = genshi.HTML(html)
88 except Exception:
89 # Failed to sanitize.
90 # We can't do any better than returning the original HTML, without sanitizing.
91 return html
92 else:
93 raise
94
95 stream = html \
96 | genshi.filters.HTMLSanitizer() \
97 | genshi.filters.Transformer("//a").attr("rel", get_nofollow)
98 return stream.render()
99
100
101 def json_encode(d, **kw):
102 """Same as json.dumps.
103 """
104 return json.dumps(d or {}, **kw)
105
106
107 def safesort(iterable, key=None, reverse=False):
108     """Sorts heterogeneous objects without raising errors.
109
110 Sorting heterogeneous objects sometimes causes error. For example,
111 datetime and Nones don't go well together. This function takes special
112 care to make that work.
113 """
114 key = key or (lambda x: x)
115 def safekey(x):
116 k = key(x)
117 return (k.__class__.__name__, k)
118 return sorted(iterable, key=safekey, reverse=reverse)
119
120
121 def days_since(then, now=None):
122 delta = then - (now or datetime.now())
123 return abs(delta.days)
124
125
126 def datestr(then, now=None, lang=None, relative=True):
127 """Internationalized version of web.datestr."""
128 lang = lang or web.ctx.get('lang') or "en"
129 if relative:
130 if now is None:
131 now = datetime.now()
132 delta = then - now
133 if abs(delta.days) < 4: # Threshold from web.py
134 return babel.dates.format_timedelta(delta,
135 add_direction=True,
136 locale=_get_babel_locale(lang))
137 return format_date(then, lang=lang)
138
139
140 def datetimestr_utc(then):
141 return then.strftime("%Y-%m-%dT%H:%M:%SZ")
142
143 def format_date(date, lang=None):
144 lang = lang or web.ctx.get('lang') or "en"
145 locale = _get_babel_locale(lang)
146 return babel.dates.format_date(date, format="long", locale=locale)
147
148 def _get_babel_locale(lang):
149 try:
150 return babel.Locale(lang)
151 except babel.core.UnknownLocaleError:
152 return babel.Locale("en")
153
154
155 def sprintf(s, *a, **kw):
156 """Handy utility for string replacements.
157
158 >>> sprintf('hello %s', 'python')
159 'hello python'
160 >>> sprintf('hello %(name)s', name='python')
161 'hello python'
162 """
163 args = kw or a
164 if args:
165 return s % args
166 else:
167 return s
168
169
170 def cond(pred, true_value, false_value=""):
171 """Lisp style cond function.
172
173     Handy to use instead of an if-else expression.
174 """
175 if pred:
176 return true_value
177 else:
178 return false_value
179
180
181 def commify(number, lang=None):
182 """localized version of web.commify"""
183 try:
184 lang = lang or web.ctx.get("lang") or "en"
185 return babel.numbers.format_number(int(number), lang)
186 except:
187 return six.text_type(number)
188
189
190 def truncate(text, limit):
191 """Truncate text and add ellipses if it longer than specified limit."""
192 if not text:
193 return ''
194 if len(text) <= limit:
195 return text
196 return text[:limit] + "..."
197
198
199 def urlsafe(path):
200 """Replaces the unsafe chars from path with underscores.
201 """
202 return _get_safepath_re().sub('_', path).strip('_')[:100]
203
204 @web.memoize
205 def _get_safepath_re():
206 """Make regular expression that matches all unsafe chars."""
207 # unsafe chars according to RFC 2396
208 reserved = ";/?:@&=+$,"
209 delims = '<>#%"'
210 unwise = "{}|\\^[]`"
211 space = ' \n\r'
212
213 unsafe = reserved + delims + unwise + space
214 pattern = '[%s]+' % "".join(re.escape(c) for c in unsafe)
215 return re.compile(pattern)
216
217
218 def get_coverstore_url():
219 """Returns the base url of coverstore by looking at the config."""
220 return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')
221
222
223 _texsafe_map = {
224 '"': r'\textquotedbl{}',
225 '#': r'\#',
226 '$': r'\$',
227 '%': r'\%',
228 '&': r'\&',
229 '<': r'\textless{}',
230 '>': r'\textgreater{}',
231 '\\': r'\textbackslash{}',
232 '^': r'\^{}',
233 '_': r'\_{}',
234 '{': r'\{',
235 '}': r'\}',
236 '|': r'\textbar{}',
237 '~': r'\~{}',
238 }
239
240 _texsafe_re = None
241
242 def texsafe(text):
243 """Escapes the special characters in the given text for using it in tex type setting.
244
245 Tex (or Latex) uses some characters in the ascii character range for
246 special notations. These characters must be escaped when occur in the
247 regular text. This function escapes those special characters.
248
249 The list of special characters and the latex command to typeset them can
250 be found in `The Comprehensive LaTeX Symbol List`_.
251
252 .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf
253 """
254 global _texsafe_re
255 if _texsafe_re is None:
256 pattern = "[%s]" % re.escape("".join(list(_texsafe_map)))
257 _texsafe_re = re.compile(pattern)
258
259 return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)
260
261 def percentage(value, total):
262 """Computes percentage.
263
264 >>> percentage(1, 10)
265 10.0
266 >>> percentage(0, 0)
267 0.0
268 """
269 return (value * 100.0) / total if total else 0.0
270
271 def uniq(values, key=None):
272 """Returns the unique entries from the given values in the original order.
273
274 The value of the optional `key` parameter should be a function that takes
275 a single argument and returns a key to test the uniqueness.
276 """
277 key = key or (lambda x: x)
278 s = set()
279 result = []
280 for v in values:
281 k = key(v)
282 if k not in s:
283 s.add(k)
284 result.append(v)
285 return result
286
287 def affiliate_id(affiliate):
288 return config.get('affiliate_ids', {}).get(affiliate, '')
289
290 def bookreader_host():
291 return config.get('bookreader_host', '')
292
293 def private_collections():
294 """Collections which are lendable but should not be linked from OL
295 TODO: Remove when we can handle institutional books"""
296 return ['georgetown-university-law-library-rr']
297
298 def private_collection_in(collections):
299 return any(x in private_collections() for x in collections)
300
301 def _get_helpers():
302 _globals = globals()
303 return web.storage((k, _globals[k]) for k in __all__)
304
305
306 ## This must be at the end of this module
307 helpers = _get_helpers()
308
[end of openlibrary/core/helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py
--- a/openlibrary/core/helpers.py
+++ b/openlibrary/core/helpers.py
@@ -101,7 +101,7 @@
def json_encode(d, **kw):
"""Same as json.dumps.
"""
- return json.dumps(d or {}, **kw)
+ return json.dumps(d, **kw)
def safesort(iterable, key=None, reverse=False):
|
{"golden_diff": "diff --git a/openlibrary/core/helpers.py b/openlibrary/core/helpers.py\n--- a/openlibrary/core/helpers.py\n+++ b/openlibrary/core/helpers.py\n@@ -101,7 +101,7 @@\n def json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n- return json.dumps(d or {}, **kw)\n+ return json.dumps(d, **kw)\n \n \n def safesort(iterable, key=None, reverse=False):\n", "issue": "Adding to lists broken\nAdding an item to a list no longer works as of 12-02-2021.\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...an edition, etc.\r\n2. Do ...add item to list.\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: List link loads list page.\r\n* Expected: Item should be added to list.\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?** Y\r\n- **Browser type/version?** Chrome Version 88.0.4324.150 (Official Build) (x86_64)\r\n- **Operating system?** Mac Big Sur\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n@cclauss \n", "before_files": [{"content": "\"\"\"Generic helper functions to use in the templates and the webapp.\n\"\"\"\nimport web\nfrom datetime import datetime\nimport re\n\nimport six\nfrom six.moves.urllib.parse import urlsplit\n\nif six.PY2: # See #4525 json.dump(indent) MUST be an int on PY2\n import simplejson as json\nelse:\n import json\n\nimport babel\nimport babel.core\nimport babel.dates\nimport babel.numbers\n\ntry:\n import genshi\n import genshi.filters\nexcept ImportError:\n genshi = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\nfrom infogami import config\n\n# handy utility to parse ISO date strings\nfrom infogami.infobase.utils import parse_datetime\nfrom infogami.utils.view import safeint\n\n# TODO: i18n should be moved to core or infogami\nfrom openlibrary.i18n import gettext as _ # noqa: F401\n\n__all__ = [\n \"sanitize\",\n \"json_encode\",\n \"safesort\",\n \"days_since\", \"datestr\", \"format_date\",\n \"sprintf\", \"cond\", \"commify\", \"truncate\", \"datetimestr_utc\",\n \"urlsafe\", \"texsafe\",\n \"percentage\", \"affiliate_id\", \"bookreader_host\",\n \"private_collections\", \"private_collection_in\",\n\n # functions imported from elsewhere\n \"parse_datetime\", \"safeint\"\n]\n__docformat__ = \"restructuredtext en\"\n\ndef sanitize(html, encoding='utf8'):\n \"\"\"Removes unsafe tags and attributes from html and adds\n ``rel=\"nofollow\"`` attribute to all external links.\n Using encoding=None if passing unicode strings e.g. 
for Python 3.\n encoding=\"utf8\" matches default format for earlier versions of Genshi\n https://genshi.readthedocs.io/en/latest/upgrade/#upgrading-from-genshi-0-6-x-to-the-development-version\n \"\"\"\n\n # Can't sanitize unless genshi module is available\n if genshi is None:\n return html\n\n def get_nofollow(name, event):\n attrs = event[1][1]\n href = attrs.get('href', '')\n\n if href:\n # add rel=nofollow to all absolute links\n _, host, _, _, _ = urlsplit(href)\n if host:\n return 'nofollow'\n\n try:\n html = genshi.HTML(html, encoding=encoding)\n\n # except (genshi.ParseError, UnicodeDecodeError, UnicodeError) as e:\n # don't catch Unicode errors so we can tell if we're getting bytes\n except genshi.ParseError:\n if BeautifulSoup:\n # Bad html. Tidy it up using BeautifulSoup\n html = str(BeautifulSoup(html, \"lxml\"))\n try:\n html = genshi.HTML(html)\n except Exception:\n # Failed to sanitize.\n # We can't do any better than returning the original HTML, without sanitizing.\n return html\n else:\n raise\n\n stream = html \\\n | genshi.filters.HTMLSanitizer() \\\n | genshi.filters.Transformer(\"//a\").attr(\"rel\", get_nofollow)\n return stream.render()\n\n\ndef json_encode(d, **kw):\n \"\"\"Same as json.dumps.\n \"\"\"\n return json.dumps(d or {}, **kw)\n\n\ndef safesort(iterable, key=None, reverse=False):\n \"\"\"Sorts heterogeneous of objects without raising errors.\n\n Sorting heterogeneous objects sometimes causes error. For example,\n datetime and Nones don't go well together. This function takes special\n care to make that work.\n \"\"\"\n key = key or (lambda x: x)\n def safekey(x):\n k = key(x)\n return (k.__class__.__name__, k)\n return sorted(iterable, key=safekey, reverse=reverse)\n\n\ndef days_since(then, now=None):\n delta = then - (now or datetime.now())\n return abs(delta.days)\n\n\ndef datestr(then, now=None, lang=None, relative=True):\n \"\"\"Internationalized version of web.datestr.\"\"\"\n lang = lang or web.ctx.get('lang') or \"en\"\n if relative:\n if now is None:\n now = datetime.now()\n delta = then - now\n if abs(delta.days) < 4: # Threshold from web.py\n return babel.dates.format_timedelta(delta,\n add_direction=True,\n locale=_get_babel_locale(lang))\n return format_date(then, lang=lang)\n\n\ndef datetimestr_utc(then):\n return then.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\ndef format_date(date, lang=None):\n lang = lang or web.ctx.get('lang') or \"en\"\n locale = _get_babel_locale(lang)\n return babel.dates.format_date(date, format=\"long\", locale=locale)\n\ndef _get_babel_locale(lang):\n try:\n return babel.Locale(lang)\n except babel.core.UnknownLocaleError:\n return babel.Locale(\"en\")\n\n\ndef sprintf(s, *a, **kw):\n \"\"\"Handy utility for string replacements.\n\n >>> sprintf('hello %s', 'python')\n 'hello python'\n >>> sprintf('hello %(name)s', name='python')\n 'hello python'\n \"\"\"\n args = kw or a\n if args:\n return s % args\n else:\n return s\n\n\ndef cond(pred, true_value, false_value=\"\"):\n \"\"\"Lisp style cond function.\n\n Hanly to use instead of if-else expression.\n \"\"\"\n if pred:\n return true_value\n else:\n return false_value\n\n\ndef commify(number, lang=None):\n \"\"\"localized version of web.commify\"\"\"\n try:\n lang = lang or web.ctx.get(\"lang\") or \"en\"\n return babel.numbers.format_number(int(number), lang)\n except:\n return six.text_type(number)\n\n\ndef truncate(text, limit):\n \"\"\"Truncate text and add ellipses if it longer than specified limit.\"\"\"\n if not text:\n return ''\n if len(text) <= limit:\n return 
text\n return text[:limit] + \"...\"\n\n\ndef urlsafe(path):\n \"\"\"Replaces the unsafe chars from path with underscores.\n \"\"\"\n return _get_safepath_re().sub('_', path).strip('_')[:100]\n\[email protected]\ndef _get_safepath_re():\n \"\"\"Make regular expression that matches all unsafe chars.\"\"\"\n # unsafe chars according to RFC 2396\n reserved = \";/?:@&=+$,\"\n delims = '<>#%\"'\n unwise = \"{}|\\\\^[]`\"\n space = ' \\n\\r'\n\n unsafe = reserved + delims + unwise + space\n pattern = '[%s]+' % \"\".join(re.escape(c) for c in unsafe)\n return re.compile(pattern)\n\n\ndef get_coverstore_url():\n \"\"\"Returns the base url of coverstore by looking at the config.\"\"\"\n return config.get('coverstore_url', 'https://covers.openlibrary.org').rstrip('/')\n\n\n_texsafe_map = {\n '\"': r'\\textquotedbl{}',\n '#': r'\\#',\n '$': r'\\$',\n '%': r'\\%',\n '&': r'\\&',\n '<': r'\\textless{}',\n '>': r'\\textgreater{}',\n '\\\\': r'\\textbackslash{}',\n '^': r'\\^{}',\n '_': r'\\_{}',\n '{': r'\\{',\n '}': r'\\}',\n '|': r'\\textbar{}',\n '~': r'\\~{}',\n}\n\n_texsafe_re = None\n\ndef texsafe(text):\n \"\"\"Escapes the special characters in the given text for using it in tex type setting.\n\n Tex (or Latex) uses some characters in the ascii character range for\n special notations. These characters must be escaped when occur in the\n regular text. This function escapes those special characters.\n\n The list of special characters and the latex command to typeset them can\n be found in `The Comprehensive LaTeX Symbol List`_.\n\n .. _The Comprehensive LaTeX Symbol List: http://www.ctan.org/tex-archive/info/symbols/comprehensive/symbols-a4.pdf\n \"\"\"\n global _texsafe_re\n if _texsafe_re is None:\n pattern = \"[%s]\" % re.escape(\"\".join(list(_texsafe_map)))\n _texsafe_re = re.compile(pattern)\n\n return _texsafe_re.sub(lambda m: _texsafe_map[m.group(0)], text)\n\ndef percentage(value, total):\n \"\"\"Computes percentage.\n\n >>> percentage(1, 10)\n 10.0\n >>> percentage(0, 0)\n 0.0\n \"\"\"\n return (value * 100.0) / total if total else 0.0\n\ndef uniq(values, key=None):\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\ndef affiliate_id(affiliate):\n return config.get('affiliate_ids', {}).get(affiliate, '')\n\ndef bookreader_host():\n return config.get('bookreader_host', '')\n\ndef private_collections():\n \"\"\"Collections which are lendable but should not be linked from OL\n TODO: Remove when we can handle institutional books\"\"\"\n return ['georgetown-university-law-library-rr']\n\ndef private_collection_in(collections):\n return any(x in private_collections() for x in collections)\n\ndef _get_helpers():\n _globals = globals()\n return web.storage((k, _globals[k]) for k in __all__)\n\n\n## This must be at the end of this module\nhelpers = _get_helpers()\n", "path": "openlibrary/core/helpers.py"}]}
| 3,837 | 98 |
gh_patches_debug_8173
|
rasdani/github-patches
|
git_diff
|
google__personfinder-407
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
sms_number_to_repo should have default value
Should be initialized here:
https://github.com/google/personfinder/blob/546f238fab407145292cc81c5e5682ad952f92f6/app/setup_pf.py#L62
Otherwise it shows an error on saving the global admin page unless the user fills it in manually.
</issue>
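For orientation before the file listing: the request is to seed the global configuration with an empty mapping so that code reading `sms_number_to_repo` gets a dict rather than nothing. The snippet below is a standalone sketch of that failure mode, not Person Finder code; the dictionary-backed lookup merely mimics the config store.

```python
# Standalone sketch: a mapping-valued setting with no seeded default breaks
# callers that expect a dict (per the issue, the global admin page fails on save).
defaults = {"repo_aliases": {}, "referrer_whitelist": []}

def get_config(name):
    # Mimics a global config lookup that falls through to None.
    return defaults.get(name)

value = get_config("sms_number_to_repo")
print(value)                             # None -> downstream .items() blows up

defaults["sms_number_to_repo"] = {}      # the one-line default being asked for
print(get_config("sms_number_to_repo").items())   # dict_items([]) -> safe
```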
<code>
[start of app/setup_pf.py]
1 # Copyright 2009-2010 by Ka-Ping Yee
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from datetime import datetime
16
17 import const
18 from model import *
19 from utils import *
20
21 def setup_datastore():
22 """Sets up the subject types and translations in a datastore. (Existing
23 subject types and messages will be updated; existing Subject or Report
24 information will not be changed or deleted.)"""
25 setup_repos()
26 setup_configs()
27
28 def wipe_datastore(delete=None, keep=None):
29 """Deletes everything in the datastore. If 'delete' is given (a list of
30 kind names), deletes only those kinds of entities. If 'keep' is given,
31 skips deleting those kinds of entities."""
32 query = db.Query(keys_only=True)
33 keys = query.fetch(1000)
34 while keys:
35 db.delete([key for key in keys
36 if delete is None or key.kind() in delete
37 if keep is None or key.kind() not in keep])
38 keys = query.with_cursor(query.cursor()).fetch(1000)
39
40 def reset_datastore():
41 """Wipes everything in the datastore except Accounts,
42 then sets up the datastore for new data."""
43 wipe_datastore(keep=['Account'])
44 setup_datastore()
45
46 def setup_repos():
47 db.put([Repo(key_name='haiti'),
48 Repo(key_name='japan'),
49 Repo(key_name='pakistan')])
50 # Set some repositories active so they show on the main page.
51 config.set_for_repo('japan', launched=True)
52 config.set_for_repo('haiti', launched=True)
53
54 def setup_configs():
55 """Installs configuration settings used for testing by server_tests."""
56 COMMON_KEYWORDS = ['person', 'people', 'finder', 'person finder',
57 'people finder', 'crisis', 'survivor', 'family']
58
59 # NOTE: the following two CAPTCHA keys are dummy keys for testing only.
60 # (https://developers.google.com/recaptcha/docs/faq)
61 # They should be replaced with real keys upon launch.
62 config.set(captcha_site_key='6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI',
63 captcha_secret_key='6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe',
64 # A Google Translate API key with a very low quota, just for testing.
65 translate_api_key='AIzaSyCXdz9x7LDL3BvieEP8Wcze64CC_iqslSE',
66 repo_aliases={},
67 referrer_whitelist=[],
68 initialized=True,
69 notification_email=const.DEFAULT_NOTIFICATION_EMAIL,
70 unreviewed_notes_threshold=(
71 const.DEFAULT_UNREVIEWED_NOTES_THRESHOLD),
72 )
73
74 config.set_for_repo(
75 'haiti',
76 # Appended to "Google Person Finder" in page titles.
77 repo_titles={
78 'en': 'Haiti Earthquake',
79 'fr': u'S\xe9isme en Ha\xefti',
80 'ht': u'Tranbleman T\xe8 an Ayiti',
81 'es': u'Terremoto en Hait\xed'
82 },
83 # List of language codes that appear in the language menu.
84 language_menu_options=['en', 'ht', 'fr', 'es'],
85 # Content for the <meta name="keywords"> tag.
86 keywords=', '.join([
87 'haiti', 'earthquake', 'haiti earthquake', 'haitian',
88 u'ha\xefti', u's\xe9isme', 'tremblement', 'tremblement de terre',
89 'famille', 'recherche de personnes', 'terremoto'
90 ] + COMMON_KEYWORDS),
91 # If false, hide the family_name field and use only given_name.
92 use_family_name=True,
93 # Presentation order for the given name and family name.
94 family_name_first=False,
95 # If true, show extra fields for alternate names.
96 use_alternate_names=True,
97 # If false, hide the home_zip field.
98 use_postal_code=True,
99 # Require at least this many letters in each word of a text query.
100 min_query_word_length=2,
101 # Show input fields for profile URLs in create page.
102 show_profile_entry=True,
103 # Default list of profile websites to show in create page.
104 profile_websites=const.DEFAULT_PROFILE_WEBSITES,
105 # Default map viewport for the location field in the note form.
106 map_default_zoom=7,
107 map_default_center=[18.968637, -72.284546],
108 map_size_pixels=[400, 280],
109 # If true, the feeds and read API require an authorization key.
110 read_auth_key_required=False,
111 # If true, the search API requires an authorization key.
112 search_auth_key_required=False,
113 # If true, show "believed dead" option in the note status dropdown
114 allow_believed_dead_via_ui=True,
115 # Custom html messages to show on main page, results page, view page,
116 # and query form, keyed by language codes.
117 start_page_custom_htmls={'en': '', 'fr': ''},
118 results_page_custom_htmls={'en': '', 'fr': ''},
119 view_page_custom_htmls={'en': '', 'fr': ''},
120 seek_query_form_custom_htmls={'en': '', 'fr': ''},
121 time_zone_offset=0,
122 time_zone_abbreviation='UTC',
123 published_date=get_timestamp(datetime(2010, 1, 12)),
124 updated_date=get_timestamp(datetime(2010, 1, 12)),
125 )
126
127 config.set_for_repo(
128 'japan',
129 language_menu_options=['ja', 'en', 'ko', 'zh-CN', 'zh-TW', 'pt-BR', 'es'],
130 repo_titles={
131 'en': '2011 Japan Earthquake',
132 'zh-TW': u'2011 \u65e5\u672c\u5730\u9707',
133 'zh-CN': u'2011 \u65e5\u672c\u5730\u9707',
134 'pt-BR': u'2011 Terremoto no Jap\xe3o',
135 'ja': u'2011 \u65e5\u672c\u5730\u9707',
136 'es': u'2011 Terremoto en Jap\xf3n'
137 },
138 keywords=', '.join(COMMON_KEYWORDS),
139 use_family_name=True,
140 family_name_first=True,
141 use_alternate_names=True,
142 use_postal_code=True,
143 min_query_word_length=1,
144 show_profile_entry=True,
145 profile_websites=const.DEFAULT_PROFILE_WEBSITES,
146 map_default_zoom=7,
147 map_default_center=[38, 140.7],
148 map_size_pixels=[400, 400],
149 search_auth_key_required=True,
150 read_auth_key_required=True,
151 allow_believed_dead_via_ui=True,
152 start_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},
153 results_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},
154 view_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},
155 seek_query_form_custom_htmls={'en': '', 'fr': ''},
156 # NOTE(kpy): These two configuration settings only work for locations
157 # with a single, fixed time zone offset and no Daylight Saving Time.
158 time_zone_offset=9, # UTC+9
159 time_zone_abbreviation='JST',
160 jp_mobile_carrier_redirect=True,
161 published_date=get_timestamp(datetime(2011, 3, 11)),
162 updated_date=get_timestamp(datetime(2011, 3, 11)),
163 )
164
165 config.set_for_repo(
166 'pakistan',
167 repo_titles={
168 'en': 'Pakistan Floods',
169 'ur': u'\u067e\u0627\u06a9\u0633\u062a\u0627\u0646\u06cc \u0633\u06cc\u0644\u0627\u0628'
170 },
171 language_menu_options=['en', 'ur'],
172 keywords=', '.join([
173 'pakistan', 'flood', 'pakistan flood', 'pakistani'
174 ] + COMMON_KEYWORDS),
175 use_family_name=False,
176 family_name_first=False,
177 use_alternate_names=False,
178 use_postal_code=False,
179 min_query_word_length=1,
180 map_default_zoom=6,
181 map_default_center=[33.36, 73.26], # near Rawalpindi, Pakistan
182 map_size_pixels=[400, 500],
183 read_auth_key_required=False,
184 search_auth_key_required=False,
185 allow_believed_dead_via_ui=True,
186 start_page_custom_htmls={'en': '', 'fr': ''},
187 results_page_custom_htmls={'en': '', 'fr': ''},
188 view_page_custom_htmls={'en': '', 'fr': ''},
189 seek_query_form_custom_htmls={'en': '', 'fr': ''},
190 time_zone_offset=0,
191 time_zone_abbreviation='UTC',
192 published_date=get_timestamp(datetime(2010, 8, 6)),
193 updated_date=get_timestamp(datetime(2010, 8, 6)),
194 )
195
196
197 def setup_lang_test_config():
198 config.set_for_repo(
199 'lang-test',
200 # We set short titles to avoid exceeding the field's 500-char limit.
201 repo_titles=dict((lang, lang) for lang in const.LANGUAGE_ENDONYMS),
202 language_menu_options=list(const.LANGUAGE_ENDONYMS.keys()),
203 keywords=', '.join(COMMON_KEYWORDS),
204 use_family_name=True,
205 family_name_first=True,
206 use_alternate_names=True,
207 use_postal_code=True,
208 min_query_word_length=1,
209 map_default_zoom=6,
210 map_default_center=[0 ,0],
211 map_size_pixels=[400, 500],
212 read_auth_key_required=False,
213 search_auth_key_required=False,
214 allow_believed_dead_via_ui=True,
215 start_page_custom_htmls={'en': '', 'fr': ''},
216 results_page_custom_htmls={'en': '', 'fr': ''},
217 view_page_custom_htmls={'en': '', 'fr': ''},
218 seek_query_form_custom_htmls={'en': '', 'fr': ''},
219 )
[end of app/setup_pf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/setup_pf.py b/app/setup_pf.py
--- a/app/setup_pf.py
+++ b/app/setup_pf.py
@@ -64,6 +64,7 @@
# A Google Translate API key with a very low quota, just for testing.
translate_api_key='AIzaSyCXdz9x7LDL3BvieEP8Wcze64CC_iqslSE',
repo_aliases={},
+ sms_number_to_repo={},
referrer_whitelist=[],
initialized=True,
notification_email=const.DEFAULT_NOTIFICATION_EMAIL,
|
{"golden_diff": "diff --git a/app/setup_pf.py b/app/setup_pf.py\n--- a/app/setup_pf.py\n+++ b/app/setup_pf.py\n@@ -64,6 +64,7 @@\n # A Google Translate API key with a very low quota, just for testing.\n translate_api_key='AIzaSyCXdz9x7LDL3BvieEP8Wcze64CC_iqslSE',\n repo_aliases={},\n+ sms_number_to_repo={},\n referrer_whitelist=[],\n initialized=True,\n notification_email=const.DEFAULT_NOTIFICATION_EMAIL,\n", "issue": "sms_number_to_repo should have default value\nShould be initialized here:\r\nhttps://github.com/google/personfinder/blob/546f238fab407145292cc81c5e5682ad952f92f6/app/setup_pf.py#L62\r\n\r\nOtherwise it shows error on save of global admin page unless the user fills it manually.\n", "before_files": [{"content": "# Copyright 2009-2010 by Ka-Ping Yee\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom datetime import datetime\n\nimport const\nfrom model import *\nfrom utils import *\n\ndef setup_datastore():\n \"\"\"Sets up the subject types and translations in a datastore. (Existing\n subject types and messages will be updated; existing Subject or Report\n information will not be changed or deleted.)\"\"\"\n setup_repos()\n setup_configs()\n\ndef wipe_datastore(delete=None, keep=None):\n \"\"\"Deletes everything in the datastore. If 'delete' is given (a list of\n kind names), deletes only those kinds of entities. 
If 'keep' is given,\n skips deleting those kinds of entities.\"\"\"\n query = db.Query(keys_only=True)\n keys = query.fetch(1000)\n while keys:\n db.delete([key for key in keys\n if delete is None or key.kind() in delete\n if keep is None or key.kind() not in keep])\n keys = query.with_cursor(query.cursor()).fetch(1000)\n\ndef reset_datastore():\n \"\"\"Wipes everything in the datastore except Accounts,\n then sets up the datastore for new data.\"\"\"\n wipe_datastore(keep=['Account'])\n setup_datastore()\n\ndef setup_repos():\n db.put([Repo(key_name='haiti'),\n Repo(key_name='japan'),\n Repo(key_name='pakistan')])\n # Set some repositories active so they show on the main page.\n config.set_for_repo('japan', launched=True)\n config.set_for_repo('haiti', launched=True)\n\ndef setup_configs():\n \"\"\"Installs configuration settings used for testing by server_tests.\"\"\"\n COMMON_KEYWORDS = ['person', 'people', 'finder', 'person finder',\n 'people finder', 'crisis', 'survivor', 'family']\n\n # NOTE: the following two CAPTCHA keys are dummy keys for testing only.\n # (https://developers.google.com/recaptcha/docs/faq)\n # They should be replaced with real keys upon launch.\n config.set(captcha_site_key='6LeIxAcTAAAAAJcZVRqyHh71UMIEGNQ_MXjiZKhI',\n captcha_secret_key='6LeIxAcTAAAAAGG-vFI1TnRWxMZNFuojJ4WifJWe',\n # A Google Translate API key with a very low quota, just for testing.\n translate_api_key='AIzaSyCXdz9x7LDL3BvieEP8Wcze64CC_iqslSE',\n repo_aliases={},\n referrer_whitelist=[],\n initialized=True,\n notification_email=const.DEFAULT_NOTIFICATION_EMAIL,\n unreviewed_notes_threshold=(\n const.DEFAULT_UNREVIEWED_NOTES_THRESHOLD),\n )\n\n config.set_for_repo(\n 'haiti',\n # Appended to \"Google Person Finder\" in page titles.\n repo_titles={\n 'en': 'Haiti Earthquake',\n 'fr': u'S\\xe9isme en Ha\\xefti',\n 'ht': u'Tranbleman T\\xe8 an Ayiti',\n 'es': u'Terremoto en Hait\\xed'\n },\n # List of language codes that appear in the language menu.\n language_menu_options=['en', 'ht', 'fr', 'es'],\n # Content for the <meta name=\"keywords\"> tag.\n keywords=', '.join([\n 'haiti', 'earthquake', 'haiti earthquake', 'haitian',\n u'ha\\xefti', u's\\xe9isme', 'tremblement', 'tremblement de terre',\n 'famille', 'recherche de personnes', 'terremoto'\n ] + COMMON_KEYWORDS),\n # If false, hide the family_name field and use only given_name.\n use_family_name=True,\n # Presentation order for the given name and family name.\n family_name_first=False,\n # If true, show extra fields for alternate names.\n use_alternate_names=True,\n # If false, hide the home_zip field.\n use_postal_code=True,\n # Require at least this many letters in each word of a text query.\n min_query_word_length=2,\n # Show input fields for profile URLs in create page.\n show_profile_entry=True,\n # Default list of profile websites to show in create page.\n profile_websites=const.DEFAULT_PROFILE_WEBSITES,\n # Default map viewport for the location field in the note form.\n map_default_zoom=7,\n map_default_center=[18.968637, -72.284546],\n map_size_pixels=[400, 280],\n # If true, the feeds and read API require an authorization key.\n read_auth_key_required=False,\n # If true, the search API requires an authorization key.\n search_auth_key_required=False,\n # If true, show \"believed dead\" option in the note status dropdown\n allow_believed_dead_via_ui=True,\n # Custom html messages to show on main page, results page, view page,\n # and query form, keyed by language codes.\n start_page_custom_htmls={'en': '', 'fr': ''},\n 
results_page_custom_htmls={'en': '', 'fr': ''},\n view_page_custom_htmls={'en': '', 'fr': ''},\n seek_query_form_custom_htmls={'en': '', 'fr': ''},\n time_zone_offset=0,\n time_zone_abbreviation='UTC',\n published_date=get_timestamp(datetime(2010, 1, 12)),\n updated_date=get_timestamp(datetime(2010, 1, 12)),\n )\n\n config.set_for_repo(\n 'japan',\n language_menu_options=['ja', 'en', 'ko', 'zh-CN', 'zh-TW', 'pt-BR', 'es'],\n repo_titles={\n 'en': '2011 Japan Earthquake',\n 'zh-TW': u'2011 \\u65e5\\u672c\\u5730\\u9707',\n 'zh-CN': u'2011 \\u65e5\\u672c\\u5730\\u9707',\n 'pt-BR': u'2011 Terremoto no Jap\\xe3o',\n 'ja': u'2011 \\u65e5\\u672c\\u5730\\u9707',\n 'es': u'2011 Terremoto en Jap\\xf3n'\n },\n keywords=', '.join(COMMON_KEYWORDS),\n use_family_name=True,\n family_name_first=True,\n use_alternate_names=True,\n use_postal_code=True,\n min_query_word_length=1,\n show_profile_entry=True,\n profile_websites=const.DEFAULT_PROFILE_WEBSITES,\n map_default_zoom=7,\n map_default_center=[38, 140.7],\n map_size_pixels=[400, 400],\n search_auth_key_required=True,\n read_auth_key_required=True,\n allow_believed_dead_via_ui=True,\n start_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},\n results_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},\n view_page_custom_htmls={'en': 'Custom message', 'fr': 'French'},\n seek_query_form_custom_htmls={'en': '', 'fr': ''},\n # NOTE(kpy): These two configuration settings only work for locations\n # with a single, fixed time zone offset and no Daylight Saving Time.\n time_zone_offset=9, # UTC+9\n time_zone_abbreviation='JST',\n jp_mobile_carrier_redirect=True,\n published_date=get_timestamp(datetime(2011, 3, 11)),\n updated_date=get_timestamp(datetime(2011, 3, 11)),\n )\n\n config.set_for_repo(\n 'pakistan',\n repo_titles={\n 'en': 'Pakistan Floods',\n 'ur': u'\\u067e\\u0627\\u06a9\\u0633\\u062a\\u0627\\u0646\\u06cc \\u0633\\u06cc\\u0644\\u0627\\u0628'\n },\n language_menu_options=['en', 'ur'],\n keywords=', '.join([\n 'pakistan', 'flood', 'pakistan flood', 'pakistani'\n ] + COMMON_KEYWORDS),\n use_family_name=False,\n family_name_first=False,\n use_alternate_names=False,\n use_postal_code=False,\n min_query_word_length=1,\n map_default_zoom=6,\n map_default_center=[33.36, 73.26], # near Rawalpindi, Pakistan\n map_size_pixels=[400, 500],\n read_auth_key_required=False,\n search_auth_key_required=False,\n allow_believed_dead_via_ui=True,\n start_page_custom_htmls={'en': '', 'fr': ''},\n results_page_custom_htmls={'en': '', 'fr': ''},\n view_page_custom_htmls={'en': '', 'fr': ''},\n seek_query_form_custom_htmls={'en': '', 'fr': ''},\n time_zone_offset=0,\n time_zone_abbreviation='UTC',\n published_date=get_timestamp(datetime(2010, 8, 6)),\n updated_date=get_timestamp(datetime(2010, 8, 6)),\n )\n\n\ndef setup_lang_test_config():\n config.set_for_repo(\n 'lang-test',\n # We set short titles to avoid exceeding the field's 500-char limit.\n repo_titles=dict((lang, lang) for lang in const.LANGUAGE_ENDONYMS),\n language_menu_options=list(const.LANGUAGE_ENDONYMS.keys()),\n keywords=', '.join(COMMON_KEYWORDS),\n use_family_name=True,\n family_name_first=True,\n use_alternate_names=True,\n use_postal_code=True,\n min_query_word_length=1,\n map_default_zoom=6,\n map_default_center=[0 ,0],\n map_size_pixels=[400, 500],\n read_auth_key_required=False,\n search_auth_key_required=False,\n allow_believed_dead_via_ui=True,\n start_page_custom_htmls={'en': '', 'fr': ''},\n results_page_custom_htmls={'en': '', 'fr': ''},\n view_page_custom_htmls={'en': '', 
'fr': ''},\n seek_query_form_custom_htmls={'en': '', 'fr': ''},\n )", "path": "app/setup_pf.py"}]}
| 3,593 | 122 |
gh_patches_debug_6084
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-107
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkov fails to start in Windows environments
**Describe the bug**
After you install Checkov on Windows, running Checkov does nothing.
**To Reproduce**
Steps to reproduce the behavior:
1. Open Powershell/cmd
2. Run cli command 'checkov'
3. Does nothing
**Expected behavior**
The tool running. Magic.
**Screenshots**
I'm not sure showing nothing would help.
**Desktop (please complete the following information):**
- OS: Windows 10
- Checkov Version 1.0.173
**Additional context**
I know Windows! Like who cares and tbh I've got WSL2 and it works a dream but customers, customers and their awful locked down... anyway.
I'm using Python37 where I've installed .
If you look in your c:/Python37/scripts folder there is a "checkov" bash script. This is the nub of it: this doesn't run! However, if you add a batch file "checkov-scan.bat" [or call it whatever] with this content:
```cmd
C:\Python37\python C:\Python37\Lib\site-packages\checkov\main.py %1 %2
```
Then when you run "checkov-scan" at your shell, it works! So is there any way you could package up something similar in a release? Please?
Also, I made a Python-based pre-commit hook for checkov called checkov-scan - here: <https://github.com/JamesWoolfenden/pre-commit>
</issue>
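The reporter's workaround above wraps `checkov\main.py` in a hand-written batch file. A common packaging-side way to get the same effect is a setuptools `console_scripts` entry point, which pip turns into a native `checkov.exe` shim on Windows; the sketch below assumes a `checkov.main:run` callable purely for illustration, and the patch that actually landed (further down in this record) instead ships a `bin/checkov.bat` next to the existing launcher.

```python
# Hypothetical setup.py fragment using an entry point instead of a bin/ script.
# "checkov.main:run" is an assumed import path, used only to illustrate the idea.
from setuptools import find_packages, setup

setup(
    name="checkov",
    packages=find_packages(exclude=["tests*"]),
    entry_points={
        "console_scripts": [
            "checkov = checkov.main:run",
        ]
    },
)
```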
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "alabaster==0.7.12",
28 "attrs==19.3.0",
29 "babel==2.7.0",
30 "certifi==2019.11.28",
31 "chardet==3.0.4",
32 "coverage==4.5.4",
33 "coverage-badge==1.0.1",
34 "detect-secrets==0.13.0",
35 "docopt==0.6.2",
36 "docutils==0.15.2",
37 "idna==2.8",
38 "imagesize==1.1.0",
39 "importlib-metadata==1.1.0; python_version < '3.8'",
40 "jinja2==2.10.3",
41 "lark-parser==0.7.8",
42 "markupsafe==1.1.1",
43 "more-itertools==8.0.0",
44 "packaging==19.2",
45 "pluggy==0.13.1",
46 "py==1.8.0",
47 "pygments==2.5.2",
48 "pyparsing==2.4.5",
49 "pytest==5.3.1",
50 "python-hcl2==0.2.0",
51 "pytz==2019.3",
52 "pyyaml==5.1.2",
53 "requests==2.22.0",
54 "six==1.13.0",
55 "snowballstemmer==2.0.0",
56 "sphinx==2.2.1",
57 "sphinxcontrib-applehelp==1.0.1",
58 "sphinxcontrib-devhelp==1.0.1",
59 "sphinxcontrib-htmlhelp==1.0.2",
60 "sphinxcontrib-jsmath==1.0.1",
61 "sphinxcontrib-qthelp==1.0.2",
62 "sphinxcontrib-serializinghtml==1.1.3",
63 "urllib3==1.25.7",
64 "wcwidth==0.1.7",
65 "zipp==0.6.0",
66 ]
67 },
68 install_requires=[
69 "chardet==3.0.4",
70 "colorama==0.4.3",
71 "docopt==0.6.2",
72 "idna==2.8",
73 "junit-xml==1.8",
74 "lark-parser==0.7.8",
75 "python-hcl2==0.2.0",
76 "pyyaml==5.2",
77 "requests==2.22.0",
78 "six==1.13.0",
79 "tabulate==0.8.6",
80 "termcolor==1.1.0",
81 "urllib3==1.25.7",
82 "dpath==1.5.0"
83 ],
84 license="Apache License 2.0",
85 name="checkov",
86 version=version,
87 description="Infrastructure as code static analysis",
88 author="bridgecrew",
89 author_email="[email protected]",
90 url="https://github.com/bridgecrewio/checkov",
91 packages=setuptools.find_packages(exclude=["tests*"]),
92 scripts=["bin/checkov"],
93 long_description=long_description,
94 long_description_content_type="text/markdown",
95 classifiers=[
96 'Environment :: Console',
97 'Intended Audience :: Developers',
98 'Intended Audience :: System Administrators',
99 'Programming Language :: Python :: 3.6',
100 'Programming Language :: Python :: 3.7',
101 'Topic :: Security',
102 'Topic :: Software Development :: Build Tools'
103 ]
104 )
105
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -89,7 +89,7 @@
author_email="[email protected]",
url="https://github.com/bridgecrewio/checkov",
packages=setuptools.find_packages(exclude=["tests*"]),
- scripts=["bin/checkov"],
+ scripts=["bin/checkov","bin/checkov.bat"],
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
|
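The patch above ships a `bin/checkov.bat` wrapper next to the existing bash launcher so the CLI becomes runnable from cmd/PowerShell. A common alternative, sketched below as an assumption rather than the project's actual approach, is a `console_scripts` entry point: setuptools and pip then generate a native `checkov.exe` shim on Windows, so no batch file is needed. The `checkov.main:run` target is hypothetical; the real callable inside `checkov/main.py` may have a different name.

```python
# Hypothetical alternative to the .bat wrapper: a console_scripts entry point.
# pip/setuptools build a checkov.exe shim on Windows for entries declared here.
from setuptools import find_packages, setup

setup(
    name="checkov",
    packages=find_packages(exclude=["tests*"]),
    entry_points={
        "console_scripts": [
            # "module:function" is an assumed target; point it at the real CLI entry.
            "checkov=checkov.main:run",
        ],
    },
)
```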
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -89,7 +89,7 @@\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n- scripts=[\"bin/checkov\"],\n+ scripts=[\"bin/checkov\",\"bin/checkov.bat\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n", "issue": "Checkov fails to start in Windows environments \n**Describe the bug**\r\nAfter you install Checkov on Windows, running Checkov does nothing.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open Powershell/cmd\r\n2. Run cli command 'checkov'\r\n3. Does nothing\r\n\r\n**Expected behavior**\r\nThe tool running. Magic.\r\n\r\n**Screenshots**\r\nI'm not sure showing nothing would help.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Checkov Version 1.0.173\r\n\r\n**Additional context**\r\nI know Windows! Like who cares and tbh ive got WSL2 and it works a dream but customers, customers and their awful locked down... anyway.\r\nI'm using Python37 where i've installed .\r\nIf you look in your c:/Python37/scripts folder there is a \"checkov\" bash script. This is the nub of it this doesn't run! However if you add a batch file \"checkov-scan.bat\" [or call whatever} with this content:\r\n```cmd\r\nC:\\Python37\\python C:\\Python37\\Lib\\site-packages\\checkov\\main.py %1 %2\r\n```\r\nThen when you run \"checkov-scan\" at your shell, it works! So is there anyway you could package up something similar in a release? please? \r\nAlso I made a python based pre-commit for checkov called checkov-scan - here <https://github.com/JamesWoolfenden/pre-commit>\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"detect-secrets==0.13.0\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"python-hcl2==0.2.0\",\n \"pytz==2019.3\",\n \"pyyaml==5.1.2\",\n \"requests==2.22.0\",\n \"six==1.13.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.7\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n ]\n },\n 
install_requires=[\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"python-hcl2==0.2.0\",\n \"pyyaml==5.2\",\n \"requests==2.22.0\",\n \"six==1.13.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.7\",\n \"dpath==1.5.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n scripts=[\"bin/checkov\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]}
| 2,029 | 109 |
gh_patches_debug_37157 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-230 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Listener waits too long and too often for more sound
Often, when a short query has not produced enough total noise, the listener sits there waiting for more sound even though you have actually finished speaking. To resolve this we should decrease the minimum seconds of noise required per phrase and cap how long the listener will wait for it (perhaps 3-4 seconds).
</issue>
<code>
[start of mycroft/client/speech/mic.py]
1 # Copyright 2016 Mycroft AI, Inc.
2 #
3 # This file is part of Mycroft Core.
4 #
5 # Mycroft Core is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Mycroft Core is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
17
18
19 import collections
20 import audioop
21 from time import sleep
22
23 import pyaudio
24 from speech_recognition import (
25 Microphone,
26 AudioSource,
27 WaitTimeoutError,
28 AudioData
29 )
30 import speech_recognition
31 from mycroft.util.log import getLogger
32
33 logger = getLogger(__name__)
34 __author__ = 'seanfitz'
35
36
37 class MutableStream(object):
38 def __init__(self, wrapped_stream, format, muted=False):
39 assert wrapped_stream is not None
40 self.wrapped_stream = wrapped_stream
41 self.muted = muted
42 self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)
43 self.muted_buffer = b''.join([b'\x00' * self.SAMPLE_WIDTH])
44
45 def mute(self):
46 self.muted = True
47
48 def unmute(self):
49 self.muted = False
50
51 def read(self, size):
52 frames = collections.deque()
53 remaining = size
54 while remaining > 0:
55 to_read = min(self.wrapped_stream.get_read_available(), remaining)
56 if to_read == 0:
57 sleep(.01)
58 continue
59 result = self.wrapped_stream.read(to_read)
60 frames.append(result)
61 remaining -= to_read
62
63 if self.muted:
64 return self.muted_buffer
65 input_latency = self.wrapped_stream.get_input_latency()
66 if input_latency > 0.2:
67 logger.warn("High input latency: %f" % input_latency)
68 audio = b"".join(list(frames))
69 return audio
70
71 def close(self):
72 self.wrapped_stream.close()
73 self.wrapped_stream = None
74
75 def is_stopped(self):
76 return self.wrapped_stream.is_stopped()
77
78 def stop_stream(self):
79 return self.wrapped_stream.stop_stream()
80
81
82 class MutableMicrophone(Microphone):
83 def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024):
84 Microphone.__init__(
85 self, device_index=device_index, sample_rate=sample_rate,
86 chunk_size=chunk_size)
87 self.muted = False
88
89 def __enter__(self):
90 assert self.stream is None, \
91 "This audio source is already inside a context manager"
92 self.audio = pyaudio.PyAudio()
93 self.stream = MutableStream(self.audio.open(
94 input_device_index=self.device_index, channels=1,
95 format=self.format, rate=self.SAMPLE_RATE,
96 frames_per_buffer=self.CHUNK,
97 input=True, # stream is an input stream
98 ), self.format, self.muted)
99 return self
100
101 def __exit__(self, exc_type, exc_value, traceback):
102 if not self.stream.is_stopped():
103 self.stream.stop_stream()
104 self.stream.close()
105 self.stream = None
106 self.audio.terminate()
107
108 def mute(self):
109 self.muted = True
110 if self.stream:
111 self.stream.mute()
112
113 def unmute(self):
114 self.muted = False
115 if self.stream:
116 self.stream.unmute()
117
118
119 class ResponsiveRecognizer(speech_recognition.Recognizer):
120 # The maximum audio in seconds to keep for transcribing a phrase
121 # The wake word must fit in this time
122 SAVED_WW_SEC = 1.0
123
124 # Padding of silence when feeding to pocketsphinx
125 SILENCE_SEC = 0.01
126
127 # The minimum seconds of noise before a
128 # phrase can be considered complete
129 MIN_LOUD_SEC_PER_PHRASE = 0.2
130
131 # The maximum length a phrase can be recorded,
132 # provided there is noise the entire time
133 RECORDING_TIMEOUT = 30.0
134
135 # Time between pocketsphinx checks for the wake word
136 SEC_BETWEEN_WW_CHECKS = 0.2
137
138 def __init__(self, wake_word_recognizer):
139 speech_recognition.Recognizer.__init__(self)
140 self.wake_word_recognizer = wake_word_recognizer
141 self.audio = pyaudio.PyAudio()
142
143 @staticmethod
144 def record_sound_chunk(source):
145 return source.stream.read(source.CHUNK)
146
147 @staticmethod
148 def calc_energy(sound_chunk, sample_width):
149 return audioop.rms(sound_chunk, sample_width)
150
151 def wake_word_in_audio(self, frame_data):
152 hyp = self.wake_word_recognizer.transcribe(frame_data)
153 return self.wake_word_recognizer.found_wake_word(hyp)
154
155 def record_phrase(self, source, sec_per_buffer):
156 """
157 This attempts to record an entire spoken phrase. Essentially,
158 this waits for a period of silence and then returns the audio
159
160 :rtype: bytearray
161 :param source: AudioSource
162 :param sec_per_buffer: Based on source.SAMPLE_RATE
163 :return: bytearray representing the frame_data of the recorded phrase
164 """
165 num_loud_chunks = 0
166 noise = 0
167
168 max_noise = 20
169 min_noise = 0
170
171 def increase_noise(level):
172 if level < max_noise:
173 return level + 2
174 return level
175
176 def decrease_noise(level):
177 if level > min_noise:
178 return level - 1
179 return level
180
181 # Smallest number of loud chunks required to return
182 min_loud_chunks = int(self.MIN_LOUD_SEC_PER_PHRASE / sec_per_buffer)
183
184 # Maximum number of chunks to record before timing out
185 max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)
186 num_chunks = 0
187
188 # bytearray to store audio in
189 byte_data = '\0' * source.SAMPLE_WIDTH
190
191 phrase_complete = False
192 while num_chunks < max_chunks and not phrase_complete:
193 chunk = self.record_sound_chunk(source)
194 byte_data += chunk
195 num_chunks += 1
196
197 energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
198 is_loud = energy > self.energy_threshold
199 if is_loud:
200 noise = increase_noise(noise)
201 num_loud_chunks += 1
202 else:
203 noise = decrease_noise(noise)
204 self.adjust_threshold(energy, sec_per_buffer)
205
206 if noise <= min_noise and num_loud_chunks > min_loud_chunks:
207 phrase_complete = True
208
209 return byte_data
210
211 @staticmethod
212 def sec_to_bytes(sec, source):
213 return sec * source.SAMPLE_RATE * source.SAMPLE_WIDTH
214
215 def wait_until_wake_word(self, source, sec_per_buffer):
216 num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *
217 source.SAMPLE_WIDTH)
218
219 silence = '\0' * num_silent_bytes
220
221 # bytearray to store audio in
222 byte_data = silence
223
224 buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer
225 buffers_since_check = 0.0
226
227 # Max bytes for byte_data before audio is removed from the front
228 max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)
229
230 said_wake_word = False
231 while not said_wake_word:
232 chunk = self.record_sound_chunk(source)
233
234 energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)
235 if energy < self.energy_threshold:
236 self.adjust_threshold(energy, sec_per_buffer)
237
238 needs_to_grow = len(byte_data) < max_size
239 if needs_to_grow:
240 byte_data += chunk
241 else: # Remove beginning of audio and add new chunk to end
242 byte_data = byte_data[len(chunk):] + chunk
243
244 buffers_since_check += 1.0
245 if buffers_since_check < buffers_per_check:
246 buffers_since_check -= buffers_per_check
247 said_wake_word = self.wake_word_in_audio(byte_data + silence)
248
249 @staticmethod
250 def create_audio_data(raw_data, source):
251 """
252 Constructs an AudioData instance with the same parameters
253 as the source and the specified frame_data
254 """
255 return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
256
257 def listen(self, source, emitter):
258 """
259 Listens for audio that Mycroft should respond to
260
261 :param source: an ``AudioSource`` instance for reading from
262 :param emitter: a pyee EventEmitter for sending when the wakeword
263 has been found
264 """
265 assert isinstance(source, AudioSource), "Source must be an AudioSource"
266
267 bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH
268 sec_per_buffer = float(source.CHUNK) / bytes_per_sec
269
270 logger.debug("Waiting for wake word...")
271 self.wait_until_wake_word(source, sec_per_buffer)
272
273 logger.debug("Recording...")
274 emitter.emit("recognizer_loop:record_begin")
275 frame_data = self.record_phrase(source, sec_per_buffer)
276 audio_data = self.create_audio_data(frame_data, source)
277 emitter.emit("recognizer_loop:record_end")
278 logger.debug("Thinking...")
279
280 return audio_data
281
282 def adjust_threshold(self, energy, seconds_per_buffer):
283 if self.dynamic_energy_threshold and energy > 0:
284 # account for different chunk sizes and rates
285 damping = (
286 self.dynamic_energy_adjustment_damping ** seconds_per_buffer)
287 target_energy = energy * self.dynamic_energy_ratio
288 self.energy_threshold = (
289 self.energy_threshold * damping +
290 target_energy * (1 - damping))
291
[end of mycroft/client/speech/mic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mycroft/client/speech/mic.py b/mycroft/client/speech/mic.py
--- a/mycroft/client/speech/mic.py
+++ b/mycroft/client/speech/mic.py
@@ -126,12 +126,16 @@
# The minimum seconds of noise before a
# phrase can be considered complete
- MIN_LOUD_SEC_PER_PHRASE = 0.2
+ MIN_LOUD_SEC_PER_PHRASE = 0.1
# The maximum length a phrase can be recorded,
# provided there is noise the entire time
RECORDING_TIMEOUT = 30.0
+ # The maximum time it will continue to record silence
+ # when not enough noise has been detected
+ RECORDING_TIMEOUT_WITH_SILENCE = 3.0
+
# Time between pocketsphinx checks for the wake word
SEC_BETWEEN_WW_CHECKS = 0.2
@@ -167,17 +171,17 @@
num_loud_chunks = 0
noise = 0
- max_noise = 20
+ max_noise = 25
min_noise = 0
def increase_noise(level):
if level < max_noise:
- return level + 2
+ return level + 200 * sec_per_buffer
return level
def decrease_noise(level):
if level > min_noise:
- return level - 1
+ return level - 100 * sec_per_buffer
return level
# Smallest number of loud chunks required to return
@@ -187,6 +191,10 @@
max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)
num_chunks = 0
+ # Will return if exceeded this even if there's not enough loud chunks
+ max_chunks_of_silence = int(self.RECORDING_TIMEOUT_WITH_SILENCE /
+ sec_per_buffer)
+
# bytearray to store audio in
byte_data = '\0' * source.SAMPLE_WIDTH
@@ -205,7 +213,10 @@
noise = decrease_noise(noise)
self.adjust_threshold(energy, sec_per_buffer)
- if noise <= min_noise and num_loud_chunks > min_loud_chunks:
+ was_loud_enough = num_loud_chunks > min_loud_chunks
+ quiet_enough = noise <= min_noise
+ recorded_too_much_silence = num_chunks > max_chunks_of_silence
+ if quiet_enough and (was_loud_enough or recorded_too_much_silence):
phrase_complete = True
return byte_data
|
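The patch above stops the recording loop once the source has gone quiet and either a real phrase was heard or roughly three seconds of silence have elapsed. The snippet below is a minimal, self-contained sketch of that stop condition, using illustrative constants rather than the actual Mycroft class attributes.

```python
# Minimal sketch of the stop condition introduced by the patch (illustrative only).
def should_stop_recording(noise, num_loud_chunks, num_chunks, sec_per_buffer,
                          min_loud_sec=0.1, silence_timeout_sec=3.0):
    """Return True when a phrase recording should end."""
    min_loud_chunks = int(min_loud_sec / sec_per_buffer)
    max_chunks_of_silence = int(silence_timeout_sec / sec_per_buffer)

    was_loud_enough = num_loud_chunks > min_loud_chunks
    quiet_enough = noise <= 0  # the decaying "noise" level has drained back to zero
    waited_too_long = num_chunks > max_chunks_of_silence

    # Stop on silence if we heard a full phrase, or if we have simply waited
    # long enough without one (the fix for short queries).
    return quiet_enough and (was_loud_enough or waited_too_long)


# Example: 0.064 s per buffer, quiet again after 50 chunks with only 1 loud chunk.
print(should_stop_recording(noise=0, num_loud_chunks=1, num_chunks=50,
                            sec_per_buffer=0.064))
# True: not enough loud chunks, but the ~3 s silence cap was exceeded.
```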
{"golden_diff": "diff --git a/mycroft/client/speech/mic.py b/mycroft/client/speech/mic.py\n--- a/mycroft/client/speech/mic.py\n+++ b/mycroft/client/speech/mic.py\n@@ -126,12 +126,16 @@\n \n # The minimum seconds of noise before a\n # phrase can be considered complete\n- MIN_LOUD_SEC_PER_PHRASE = 0.2\n+ MIN_LOUD_SEC_PER_PHRASE = 0.1\n \n # The maximum length a phrase can be recorded,\n # provided there is noise the entire time\n RECORDING_TIMEOUT = 30.0\n \n+ # The maximum time it will continue to record silence\n+ # when not enough noise has been detected\n+ RECORDING_TIMEOUT_WITH_SILENCE = 3.0\n+\n # Time between pocketsphinx checks for the wake word\n SEC_BETWEEN_WW_CHECKS = 0.2\n \n@@ -167,17 +171,17 @@\n num_loud_chunks = 0\n noise = 0\n \n- max_noise = 20\n+ max_noise = 25\n min_noise = 0\n \n def increase_noise(level):\n if level < max_noise:\n- return level + 2\n+ return level + 200 * sec_per_buffer\n return level\n \n def decrease_noise(level):\n if level > min_noise:\n- return level - 1\n+ return level - 100 * sec_per_buffer\n return level\n \n # Smallest number of loud chunks required to return\n@@ -187,6 +191,10 @@\n max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)\n num_chunks = 0\n \n+ # Will return if exceeded this even if there's not enough loud chunks\n+ max_chunks_of_silence = int(self.RECORDING_TIMEOUT_WITH_SILENCE /\n+ sec_per_buffer)\n+\n # bytearray to store audio in\n byte_data = '\\0' * source.SAMPLE_WIDTH\n \n@@ -205,7 +213,10 @@\n noise = decrease_noise(noise)\n self.adjust_threshold(energy, sec_per_buffer)\n \n- if noise <= min_noise and num_loud_chunks > min_loud_chunks:\n+ was_loud_enough = num_loud_chunks > min_loud_chunks\n+ quiet_enough = noise <= min_noise\n+ recorded_too_much_silence = num_chunks > max_chunks_of_silence\n+ if quiet_enough and (was_loud_enough or recorded_too_much_silence):\n phrase_complete = True\n \n return byte_data\n", "issue": "Listener waits too long and too often for more sound\nOften when saying short queries in which the listener hasn't detected enough total noise it sits there waiting for more when in fact you are actually done speaking. To resolve this we should decrease the minimum seconds of noise and put a limit to how long it will wait (perhaps 3-4 seconds).\n\n", "before_files": [{"content": "# Copyright 2016 Mycroft AI, Inc.\n#\n# This file is part of Mycroft Core.\n#\n# Mycroft Core is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Mycroft Core is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Mycroft Core. 
If not, see <http://www.gnu.org/licenses/>.\n\n\nimport collections\nimport audioop\nfrom time import sleep\n\nimport pyaudio\nfrom speech_recognition import (\n Microphone,\n AudioSource,\n WaitTimeoutError,\n AudioData\n)\nimport speech_recognition\nfrom mycroft.util.log import getLogger\n\nlogger = getLogger(__name__)\n__author__ = 'seanfitz'\n\n\nclass MutableStream(object):\n def __init__(self, wrapped_stream, format, muted=False):\n assert wrapped_stream is not None\n self.wrapped_stream = wrapped_stream\n self.muted = muted\n self.SAMPLE_WIDTH = pyaudio.get_sample_size(format)\n self.muted_buffer = b''.join([b'\\x00' * self.SAMPLE_WIDTH])\n\n def mute(self):\n self.muted = True\n\n def unmute(self):\n self.muted = False\n\n def read(self, size):\n frames = collections.deque()\n remaining = size\n while remaining > 0:\n to_read = min(self.wrapped_stream.get_read_available(), remaining)\n if to_read == 0:\n sleep(.01)\n continue\n result = self.wrapped_stream.read(to_read)\n frames.append(result)\n remaining -= to_read\n\n if self.muted:\n return self.muted_buffer\n input_latency = self.wrapped_stream.get_input_latency()\n if input_latency > 0.2:\n logger.warn(\"High input latency: %f\" % input_latency)\n audio = b\"\".join(list(frames))\n return audio\n\n def close(self):\n self.wrapped_stream.close()\n self.wrapped_stream = None\n\n def is_stopped(self):\n return self.wrapped_stream.is_stopped()\n\n def stop_stream(self):\n return self.wrapped_stream.stop_stream()\n\n\nclass MutableMicrophone(Microphone):\n def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024):\n Microphone.__init__(\n self, device_index=device_index, sample_rate=sample_rate,\n chunk_size=chunk_size)\n self.muted = False\n\n def __enter__(self):\n assert self.stream is None, \\\n \"This audio source is already inside a context manager\"\n self.audio = pyaudio.PyAudio()\n self.stream = MutableStream(self.audio.open(\n input_device_index=self.device_index, channels=1,\n format=self.format, rate=self.SAMPLE_RATE,\n frames_per_buffer=self.CHUNK,\n input=True, # stream is an input stream\n ), self.format, self.muted)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self.stream.is_stopped():\n self.stream.stop_stream()\n self.stream.close()\n self.stream = None\n self.audio.terminate()\n\n def mute(self):\n self.muted = True\n if self.stream:\n self.stream.mute()\n\n def unmute(self):\n self.muted = False\n if self.stream:\n self.stream.unmute()\n\n\nclass ResponsiveRecognizer(speech_recognition.Recognizer):\n # The maximum audio in seconds to keep for transcribing a phrase\n # The wake word must fit in this time\n SAVED_WW_SEC = 1.0\n\n # Padding of silence when feeding to pocketsphinx\n SILENCE_SEC = 0.01\n\n # The minimum seconds of noise before a\n # phrase can be considered complete\n MIN_LOUD_SEC_PER_PHRASE = 0.2\n\n # The maximum length a phrase can be recorded,\n # provided there is noise the entire time\n RECORDING_TIMEOUT = 30.0\n\n # Time between pocketsphinx checks for the wake word\n SEC_BETWEEN_WW_CHECKS = 0.2\n\n def __init__(self, wake_word_recognizer):\n speech_recognition.Recognizer.__init__(self)\n self.wake_word_recognizer = wake_word_recognizer\n self.audio = pyaudio.PyAudio()\n\n @staticmethod\n def record_sound_chunk(source):\n return source.stream.read(source.CHUNK)\n\n @staticmethod\n def calc_energy(sound_chunk, sample_width):\n return audioop.rms(sound_chunk, sample_width)\n\n def wake_word_in_audio(self, frame_data):\n hyp = 
self.wake_word_recognizer.transcribe(frame_data)\n return self.wake_word_recognizer.found_wake_word(hyp)\n\n def record_phrase(self, source, sec_per_buffer):\n \"\"\"\n This attempts to record an entire spoken phrase. Essentially,\n this waits for a period of silence and then returns the audio\n\n :rtype: bytearray\n :param source: AudioSource\n :param sec_per_buffer: Based on source.SAMPLE_RATE\n :return: bytearray representing the frame_data of the recorded phrase\n \"\"\"\n num_loud_chunks = 0\n noise = 0\n\n max_noise = 20\n min_noise = 0\n\n def increase_noise(level):\n if level < max_noise:\n return level + 2\n return level\n\n def decrease_noise(level):\n if level > min_noise:\n return level - 1\n return level\n\n # Smallest number of loud chunks required to return\n min_loud_chunks = int(self.MIN_LOUD_SEC_PER_PHRASE / sec_per_buffer)\n\n # Maximum number of chunks to record before timing out\n max_chunks = int(self.RECORDING_TIMEOUT / sec_per_buffer)\n num_chunks = 0\n\n # bytearray to store audio in\n byte_data = '\\0' * source.SAMPLE_WIDTH\n\n phrase_complete = False\n while num_chunks < max_chunks and not phrase_complete:\n chunk = self.record_sound_chunk(source)\n byte_data += chunk\n num_chunks += 1\n\n energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)\n is_loud = energy > self.energy_threshold\n if is_loud:\n noise = increase_noise(noise)\n num_loud_chunks += 1\n else:\n noise = decrease_noise(noise)\n self.adjust_threshold(energy, sec_per_buffer)\n\n if noise <= min_noise and num_loud_chunks > min_loud_chunks:\n phrase_complete = True\n\n return byte_data\n\n @staticmethod\n def sec_to_bytes(sec, source):\n return sec * source.SAMPLE_RATE * source.SAMPLE_WIDTH\n\n def wait_until_wake_word(self, source, sec_per_buffer):\n num_silent_bytes = int(self.SILENCE_SEC * source.SAMPLE_RATE *\n source.SAMPLE_WIDTH)\n\n silence = '\\0' * num_silent_bytes\n\n # bytearray to store audio in\n byte_data = silence\n\n buffers_per_check = self.SEC_BETWEEN_WW_CHECKS / sec_per_buffer\n buffers_since_check = 0.0\n\n # Max bytes for byte_data before audio is removed from the front\n max_size = self.sec_to_bytes(self.SAVED_WW_SEC, source)\n\n said_wake_word = False\n while not said_wake_word:\n chunk = self.record_sound_chunk(source)\n\n energy = self.calc_energy(chunk, source.SAMPLE_WIDTH)\n if energy < self.energy_threshold:\n self.adjust_threshold(energy, sec_per_buffer)\n\n needs_to_grow = len(byte_data) < max_size\n if needs_to_grow:\n byte_data += chunk\n else: # Remove beginning of audio and add new chunk to end\n byte_data = byte_data[len(chunk):] + chunk\n\n buffers_since_check += 1.0\n if buffers_since_check < buffers_per_check:\n buffers_since_check -= buffers_per_check\n said_wake_word = self.wake_word_in_audio(byte_data + silence)\n\n @staticmethod\n def create_audio_data(raw_data, source):\n \"\"\"\n Constructs an AudioData instance with the same parameters\n as the source and the specified frame_data\n \"\"\"\n return AudioData(raw_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)\n\n def listen(self, source, emitter):\n \"\"\"\n Listens for audio that Mycroft should respond to\n\n :param source: an ``AudioSource`` instance for reading from\n :param emitter: a pyee EventEmitter for sending when the wakeword\n has been found\n \"\"\"\n assert isinstance(source, AudioSource), \"Source must be an AudioSource\"\n\n bytes_per_sec = source.SAMPLE_RATE * source.SAMPLE_WIDTH\n sec_per_buffer = float(source.CHUNK) / bytes_per_sec\n\n logger.debug(\"Waiting for wake word...\")\n 
self.wait_until_wake_word(source, sec_per_buffer)\n\n logger.debug(\"Recording...\")\n emitter.emit(\"recognizer_loop:record_begin\")\n frame_data = self.record_phrase(source, sec_per_buffer)\n audio_data = self.create_audio_data(frame_data, source)\n emitter.emit(\"recognizer_loop:record_end\")\n logger.debug(\"Thinking...\")\n\n return audio_data\n\n def adjust_threshold(self, energy, seconds_per_buffer):\n if self.dynamic_energy_threshold and energy > 0:\n # account for different chunk sizes and rates\n damping = (\n self.dynamic_energy_adjustment_damping ** seconds_per_buffer)\n target_energy = energy * self.dynamic_energy_ratio\n self.energy_threshold = (\n self.energy_threshold * damping +\n target_energy * (1 - damping))\n", "path": "mycroft/client/speech/mic.py"}]}
| 3,574 | 597 |
gh_patches_debug_4777 | rasdani/github-patches | git_diff | fidals__shopelectro-665 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
500 error in the admin panel
https://www.shopelectro.ru/admin/shopelectro/productpage/?has_category=no
</issue>
<code>
[start of shopelectro/management/commands/_update_catalog/update_products.py]
1 import logging
2 import typing
3 from collections import defaultdict
4 from copy import deepcopy
5 from functools import reduce
6 from itertools import chain
7 from typing import Dict, Iterator, List
8 from xml.etree.ElementTree import Element
9
10 from django.conf import settings
11 from django.contrib.auth.models import User
12 from django.core.mail import send_mail
13 from django.db import transaction
14 from django.db.models import QuerySet
15 from django.template.loader import render_to_string
16
17 from shopelectro.management.commands._update_catalog.utils import (
18 XmlFile, is_correct_uuid, NOT_SAVE_TEMPLATE, UUID, Data, floor
19 )
20 from shopelectro.models import Product, ProductPage, Tag
21
22
23 logger = logging.getLogger(__name__)
24
25
26 def fetch_products(root: Element, config: XmlFile) -> Iterator:
27 product_els = root.findall(config.xpaths['products'])
28 for product_el in product_els:
29 name = product_el.find(config.xpaths['name']).text
30 uuid = product_el.find(config.xpaths['uuid']).text
31 vendor_code = product_el.find(
32 config.xpaths['vendor_code']
33 ).text.lstrip('0')
34 content = product_el.find(config.xpaths['page_content']).text or ''
35
36 tag_value_els = (
37 tag_el.find(config.xpaths['tag_value_uuid'])
38 for tag_el in product_el.findall(config.xpaths['tags'])
39 if tag_el is not None
40 )
41
42 tag_uuids = list(filter(is_correct_uuid, (
43 tag_value.text
44 for tag_value in tag_value_els
45 # should use 'is not None', because __bool__ does not defined
46 if tag_value is not None
47 )))
48
49 tags = Tag.objects.filter(uuid__in=tag_uuids)
50
51 yield uuid, {
52 'name': name,
53 'vendor_code': vendor_code,
54 'page': {
55 'content': content
56 },
57 'tags': tags
58 }
59
60
61 def fetch_prices(root: Element, config) -> typing.Iterator:
62 def get_price_values(prices_el):
63 return list(sorted(
64 float(price_el.find(config.xpaths['price']).text)
65 for price_el in prices_el.findall(config.xpaths['prices'])
66 ))
67
68 def multiply(prices: typing.List[float]):
69 def floor_prices(prices, precision: floor):
70 return [
71 floor(price * multiplier, precision)
72 for price, multiplier in zip(prices, settings.PRICE_MULTIPLIERS)
73 ]
74 *wholesale_prices, retail_price = prices
75 return (
76 floor_prices(wholesale_prices, precision=2) +
77 floor_prices([retail_price], precision=0)
78 )
79
80 product_price_els = root.findall(config.xpaths['product_prices'])
81 for prices_el in product_price_els:
82 product_uuid = prices_el.find(config.xpaths['product_uuid']).text
83 prices = dict(zip(
84 config.extra_options['price_types'],
85 multiply(get_price_values(prices_el))
86 ))
87 yield product_uuid, prices
88
89
90 def fetch_in_stock(root: Element, config: XmlFile) -> Iterator:
91 product_els = root.findall(config.xpaths['products'])
92 for product_el in product_els:
93 uuid = product_el.find(config.xpaths['product_uuid']).text
94 in_stock = product_el.find(config.xpaths['in_stock']).text
95
96 if not (in_stock.isdigit() and int(in_stock) >= 0):
97 in_stock = 0
98
99 yield uuid, {
100 'in_stock': in_stock,
101 }
102
103
104 product_file = XmlFile(
105 fetch_callback=fetch_products,
106 xml_path_pattern='**/webdata/**/goods/**/import*.xml',
107 xpath_queries={
108 'products': './/{}Товары/',
109 'name': '.{}Наименование',
110 'uuid': '.{}Ид',
111 'page_content': '.{}Описание',
112 'tags': '.{}ЗначенияСвойств/',
113 'tag_value_uuid': '.{}Значение',
114 'vendor_code': '.{0}ЗначенияРеквизитов/{0}ЗначениеРеквизита'
115 '[{0}Наименование="Код"]/{0}Значение',
116 },
117 )
118
119 price_file = XmlFile(
120 fetch_callback=fetch_prices,
121 xml_path_pattern='**/webdata/**/goods/**/prices*.xml',
122 xpath_queries={
123 'product_prices': './/{}Предложения/',
124 'product_uuid': '.{}Ид',
125 'prices': '.{}Цены/',
126 'price': '.{}ЦенаЗаЕдиницу',
127 },
128 extra_options={
129 'price_types': [
130 'purchase_price', 'wholesale_large', 'wholesale_medium',
131 'wholesale_small', 'price',
132 ],
133 },
134 )
135
136
137 in_stock_file = XmlFile(
138 fetch_callback=fetch_in_stock,
139 xml_path_pattern='**/webdata/**/goods/**/rests*.xml',
140 xpath_queries={
141 'products': './/{}Предложения/',
142 'product_uuid': '.{}Ид',
143 'in_stock': './/{}Количество',
144 },
145 )
146
147
148 def merge_data(*data) -> Dict[UUID, Data]:
149 """
150 Merge data from xml files with different structure.
151
152 Example: files with product names and prices.
153 """
154 product_data = defaultdict(dict)
155 for key, data in chain.from_iterable(filter(None, data)):
156 product_data[key].update(data)
157
158 return product_data
159
160
161 def clean_data(data: Dict[UUID, Data]):
162 def has_all_prices(_, product_data):
163 price_types = price_file.extra_options['price_types']
164 has = all(
165 product_data.get(price_type)
166 for price_type in price_types
167 )
168 if not has:
169 logger.info(NOT_SAVE_TEMPLATE.format(
170 entity='Product',
171 name=product_data['name'],
172 field='price'
173 ))
174 return has
175
176 def has_vendor_code(_, product_data):
177 has = bool(product_data['vendor_code'])
178
179 if not has:
180 logger.info(NOT_SAVE_TEMPLATE.format(
181 entity='Product',
182 name=product_data['name'],
183 field='vendor_code'
184 ))
185
186 return has
187
188 def has_uuid(uuid, product_data):
189 has = is_correct_uuid(uuid)
190 if not has:
191 logger.info(NOT_SAVE_TEMPLATE.format(
192 entity='Product',
193 name=product_data['name'],
194 field='uuid'
195 ))
196 return has
197
198 def filter_(product_data):
199 return all(
200 f(*product_data)
201 for f in [has_all_prices, has_uuid, has_vendor_code]
202 )
203
204 cleaned_data = dict(
205 product_data
206 for product_data in data.items()
207 if filter_(product_data)
208 )
209
210 return cleaned_data
211
212
213 def report(recipients=None, message=None):
214 message = message or render_to_string('report.html')
215
216 user_query = (
217 User.objects
218 .filter(is_staff=True, is_superuser=False, is_active=True, email__isnull=False)
219 )
220
221 recipient_list = recipients or [user.email for user in user_query]
222
223 if recipient_list:
224 send_mail(
225 subject='Обновления каталога товаров',
226 message=message,
227 from_email=settings.EMAIL_SENDER,
228 recipient_list=recipient_list,
229 html_message=message,
230 )
231
232 logger.info('Sent message to {}'.format(
233 reduce(lambda x, y: '{}, {}'.format(x, y), recipient_list)
234 ))
235
236
237 @transaction.atomic
238 def delete(data: Dict[UUID, Data]):
239 uuids = list(data)
240 pages_to_deactivate = ProductPage.objects.exclude(
241 shopelectro_product__uuid__in=uuids)
242 pages_to_deactivate.update(is_active=False)
243 deactivated_count = pages_to_deactivate.count()
244 logger.info(f'{deactivated_count} products and {deactivated_count} pages were deleted.')
245
246
247 @transaction.atomic
248 def update(data: Dict[UUID, Data]) -> QuerySet:
249 def save(product, field, value):
250 if field == 'name' and getattr(product, field, None):
251 return
252 elif field == 'page':
253 for page_field, page_value in value.items():
254 if not getattr(product.page, page_field, ''):
255 setattr(product.page, page_field, page_value)
256 elif field == 'tags':
257 product.tags = merge(list(product.tags.all()), value)
258 else:
259 setattr(product, field, value)
260
261 def merge(left: List, right: List) -> List:
262 """Merge two arrays with order preserving."""
263 # Dirty patch for preserving tags, appended from admin.
264 # Still waiting 1C throwing out.
265 return left + [e for e in right if e not in left]
266
267 products = Product.objects.filter(uuid__in=data)
268
269 for product in products:
270 product_data = data[str(product.uuid)]
271 for field, value in product_data.items():
272 save(product, field, value)
273 product.save()
274
275 logger.info('{} products were updated.'.format(products.count()))
276 return products
277
278
279 @transaction.atomic
280 def create(data: Dict[UUID, Data], updated_products: QuerySet) -> QuerySet:
281 data = deepcopy(data)
282 uuids_for_create = (
283 set(data) - set(str(product.uuid) for product in updated_products)
284 )
285
286 for uuid in uuids_for_create:
287 product_data = data.get(uuid)
288 tags = product_data.pop('tags', {})
289 page_data = product_data.pop('page', {})
290
291 new_product = Product.objects.create(**product_data, uuid=uuid)
292 new_product.tags.set(tags)
293 for field, value in page_data.items():
294 setattr(new_product.page, field, value)
295 new_product.page.save()
296
297 created_products = Product.objects.filter(uuid__in=uuids_for_create)
298
299 logger.info('{} products were created.'.format(created_products.count()))
300 return created_products
301
302
303 class UpdateProductError(Exception):
304 pass
305
306
307 def main(*args, **kwargs):
308 cleaned_product_data = clean_data(merge_data(
309 product_file.get_data(),
310 price_file.get_data(),
311 in_stock_file.get_data(),
312 ))
313
314 if not cleaned_product_data:
315
316 parsed_files = {
317 'product_files': list(product_file.parsed_files),
318 'price_files': list(price_file.parsed_files),
319 'in_stock_files': list(in_stock_file.parsed_files),
320 }
321
322 if not any(parsed_files.values()):
323 message = 'Files does not exist: {}'.format(parsed_files)
324 else:
325 # file structure is unstable.
326 # You should adapt code for it if you got this error
327 message = (
328 'The file structure has changed'
329 ' or it does not contain the required data.'
330 )
331
332 raise UpdateProductError(message)
333
334 delete(cleaned_product_data)
335 updated_products = update(cleaned_product_data)
336 created_products = create(cleaned_product_data, updated_products)
337
338 if created_products.exists():
339 report(kwargs['recipients'])
340
[end of shopelectro/management/commands/_update_catalog/update_products.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shopelectro/management/commands/_update_catalog/update_products.py b/shopelectro/management/commands/_update_catalog/update_products.py
--- a/shopelectro/management/commands/_update_catalog/update_products.py
+++ b/shopelectro/management/commands/_update_catalog/update_products.py
@@ -236,6 +236,11 @@
@transaction.atomic
def delete(data: Dict[UUID, Data]):
+ """
+ Deactivate stale pages.
+
+ Deactivate all pages that are still in db, but already not in `data`.
+ """
uuids = list(data)
pages_to_deactivate = ProductPage.objects.exclude(
shopelectro_product__uuid__in=uuids)
|
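The patch above only documents what `delete` really does: product pages that disappear from the import are deactivated, not removed, so the admin can still list pages whose product or category relation is missing. The filter sketch below is a hypothetical illustration of handling such rows without a server error; the `category` lookup and the admin wiring are assumptions, while `shopelectro_product` follows the relation already used in `delete` above.

```python
# Hypothetical null-safe admin filter for the "?has_category=no" listing.
# The "category" field name is an assumption, not taken from the code shown here.
from django.contrib import admin


class HasCategoryFilter(admin.SimpleListFilter):
    title = "has category"
    parameter_name = "has_category"

    def lookups(self, request, model_admin):
        return (("yes", "Has category"), ("no", "No category"))

    def queryset(self, request, queryset):
        if self.value() == "yes":
            return queryset.filter(shopelectro_product__category__isnull=False)
        if self.value() == "no":
            # Also matches deactivated pages whose product or category is gone.
            return queryset.filter(shopelectro_product__category__isnull=True)
        return queryset
```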
{"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_products.py b/shopelectro/management/commands/_update_catalog/update_products.py\n--- a/shopelectro/management/commands/_update_catalog/update_products.py\n+++ b/shopelectro/management/commands/_update_catalog/update_products.py\n@@ -236,6 +236,11 @@\n \n @transaction.atomic\n def delete(data: Dict[UUID, Data]):\n+ \"\"\"\n+ Deactivate stale pages.\n+\n+ Deactivate all pages that are still in db, but already not in `data`.\n+ \"\"\"\n uuids = list(data)\n pages_to_deactivate = ProductPage.objects.exclude(\n shopelectro_product__uuid__in=uuids)\n", "issue": "500 \u043e\u0448\u0438\u0431\u043a\u0430 \u0432 \u0430\u0434\u043c\u0438\u043d\u043a\u0435\nhttps://www.shopelectro.ru/admin/shopelectro/productpage/?has_category=no\n", "before_files": [{"content": "import logging\nimport typing\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom functools import reduce\nfrom itertools import chain\nfrom typing import Dict, Iterator, List\nfrom xml.etree.ElementTree import Element\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.mail import send_mail\nfrom django.db import transaction\nfrom django.db.models import QuerySet\nfrom django.template.loader import render_to_string\n\nfrom shopelectro.management.commands._update_catalog.utils import (\n XmlFile, is_correct_uuid, NOT_SAVE_TEMPLATE, UUID, Data, floor\n)\nfrom shopelectro.models import Product, ProductPage, Tag\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef fetch_products(root: Element, config: XmlFile) -> Iterator:\n product_els = root.findall(config.xpaths['products'])\n for product_el in product_els:\n name = product_el.find(config.xpaths['name']).text\n uuid = product_el.find(config.xpaths['uuid']).text\n vendor_code = product_el.find(\n config.xpaths['vendor_code']\n ).text.lstrip('0')\n content = product_el.find(config.xpaths['page_content']).text or ''\n\n tag_value_els = (\n tag_el.find(config.xpaths['tag_value_uuid'])\n for tag_el in product_el.findall(config.xpaths['tags'])\n if tag_el is not None\n )\n\n tag_uuids = list(filter(is_correct_uuid, (\n tag_value.text\n for tag_value in tag_value_els\n # should use 'is not None', because __bool__ does not defined\n if tag_value is not None\n )))\n\n tags = Tag.objects.filter(uuid__in=tag_uuids)\n\n yield uuid, {\n 'name': name,\n 'vendor_code': vendor_code,\n 'page': {\n 'content': content\n },\n 'tags': tags\n }\n\n\ndef fetch_prices(root: Element, config) -> typing.Iterator:\n def get_price_values(prices_el):\n return list(sorted(\n float(price_el.find(config.xpaths['price']).text)\n for price_el in prices_el.findall(config.xpaths['prices'])\n ))\n\n def multiply(prices: typing.List[float]):\n def floor_prices(prices, precision: floor):\n return [\n floor(price * multiplier, precision)\n for price, multiplier in zip(prices, settings.PRICE_MULTIPLIERS)\n ]\n *wholesale_prices, retail_price = prices\n return (\n floor_prices(wholesale_prices, precision=2) +\n floor_prices([retail_price], precision=0)\n )\n\n product_price_els = root.findall(config.xpaths['product_prices'])\n for prices_el in product_price_els:\n product_uuid = prices_el.find(config.xpaths['product_uuid']).text\n prices = dict(zip(\n config.extra_options['price_types'],\n multiply(get_price_values(prices_el))\n ))\n yield product_uuid, prices\n\n\ndef fetch_in_stock(root: Element, config: XmlFile) -> Iterator:\n product_els = root.findall(config.xpaths['products'])\n 
for product_el in product_els:\n uuid = product_el.find(config.xpaths['product_uuid']).text\n in_stock = product_el.find(config.xpaths['in_stock']).text\n\n if not (in_stock.isdigit() and int(in_stock) >= 0):\n in_stock = 0\n\n yield uuid, {\n 'in_stock': in_stock,\n }\n\n\nproduct_file = XmlFile(\n fetch_callback=fetch_products,\n xml_path_pattern='**/webdata/**/goods/**/import*.xml',\n xpath_queries={\n 'products': './/{}\u0422\u043e\u0432\u0430\u0440\u044b/',\n 'name': '.{}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435',\n 'uuid': '.{}\u0418\u0434',\n 'page_content': '.{}\u041e\u043f\u0438\u0441\u0430\u043d\u0438\u0435',\n 'tags': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f\u0421\u0432\u043e\u0439\u0441\u0442\u0432/',\n 'tag_value_uuid': '.{}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n 'vendor_code': '.{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u044f\u0420\u0435\u043a\u0432\u0438\u0437\u0438\u0442\u043e\u0432/{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435\u0420\u0435\u043a\u0432\u0438\u0437\u0438\u0442\u0430'\n '[{0}\u041d\u0430\u0438\u043c\u0435\u043d\u043e\u0432\u0430\u043d\u0438\u0435=\"\u041a\u043e\u0434\"]/{0}\u0417\u043d\u0430\u0447\u0435\u043d\u0438\u0435',\n },\n)\n\nprice_file = XmlFile(\n fetch_callback=fetch_prices,\n xml_path_pattern='**/webdata/**/goods/**/prices*.xml',\n xpath_queries={\n 'product_prices': './/{}\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f/',\n 'product_uuid': '.{}\u0418\u0434',\n 'prices': '.{}\u0426\u0435\u043d\u044b/',\n 'price': '.{}\u0426\u0435\u043d\u0430\u0417\u0430\u0415\u0434\u0438\u043d\u0438\u0446\u0443',\n },\n extra_options={\n 'price_types': [\n 'purchase_price', 'wholesale_large', 'wholesale_medium',\n 'wholesale_small', 'price',\n ],\n },\n)\n\n\nin_stock_file = XmlFile(\n fetch_callback=fetch_in_stock,\n xml_path_pattern='**/webdata/**/goods/**/rests*.xml',\n xpath_queries={\n 'products': './/{}\u041f\u0440\u0435\u0434\u043b\u043e\u0436\u0435\u043d\u0438\u044f/',\n 'product_uuid': '.{}\u0418\u0434',\n 'in_stock': './/{}\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e',\n },\n)\n\n\ndef merge_data(*data) -> Dict[UUID, Data]:\n \"\"\"\n Merge data from xml files with different structure.\n\n Example: files with product names and prices.\n \"\"\"\n product_data = defaultdict(dict)\n for key, data in chain.from_iterable(filter(None, data)):\n product_data[key].update(data)\n\n return product_data\n\n\ndef clean_data(data: Dict[UUID, Data]):\n def has_all_prices(_, product_data):\n price_types = price_file.extra_options['price_types']\n has = all(\n product_data.get(price_type)\n for price_type in price_types\n )\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='price'\n ))\n return has\n\n def has_vendor_code(_, product_data):\n has = bool(product_data['vendor_code'])\n\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='vendor_code'\n ))\n\n return has\n\n def has_uuid(uuid, product_data):\n has = is_correct_uuid(uuid)\n if not has:\n logger.info(NOT_SAVE_TEMPLATE.format(\n entity='Product',\n name=product_data['name'],\n field='uuid'\n ))\n return has\n\n def filter_(product_data):\n return all(\n f(*product_data)\n for f in [has_all_prices, has_uuid, has_vendor_code]\n )\n\n cleaned_data = dict(\n product_data\n for product_data in data.items()\n if filter_(product_data)\n )\n\n return cleaned_data\n\n\ndef report(recipients=None, 
message=None):\n message = message or render_to_string('report.html')\n\n user_query = (\n User.objects\n .filter(is_staff=True, is_superuser=False, is_active=True, email__isnull=False)\n )\n\n recipient_list = recipients or [user.email for user in user_query]\n\n if recipient_list:\n send_mail(\n subject='\u041e\u0431\u043d\u043e\u0432\u043b\u0435\u043d\u0438\u044f \u043a\u0430\u0442\u0430\u043b\u043e\u0433\u0430 \u0442\u043e\u0432\u0430\u0440\u043e\u0432',\n message=message,\n from_email=settings.EMAIL_SENDER,\n recipient_list=recipient_list,\n html_message=message,\n )\n\n logger.info('Sent message to {}'.format(\n reduce(lambda x, y: '{}, {}'.format(x, y), recipient_list)\n ))\n\n\[email protected]\ndef delete(data: Dict[UUID, Data]):\n uuids = list(data)\n pages_to_deactivate = ProductPage.objects.exclude(\n shopelectro_product__uuid__in=uuids)\n pages_to_deactivate.update(is_active=False)\n deactivated_count = pages_to_deactivate.count()\n logger.info(f'{deactivated_count} products and {deactivated_count} pages were deleted.')\n\n\[email protected]\ndef update(data: Dict[UUID, Data]) -> QuerySet:\n def save(product, field, value):\n if field == 'name' and getattr(product, field, None):\n return\n elif field == 'page':\n for page_field, page_value in value.items():\n if not getattr(product.page, page_field, ''):\n setattr(product.page, page_field, page_value)\n elif field == 'tags':\n product.tags = merge(list(product.tags.all()), value)\n else:\n setattr(product, field, value)\n\n def merge(left: List, right: List) -> List:\n \"\"\"Merge two arrays with order preserving.\"\"\"\n # Dirty patch for preserving tags, appended from admin.\n # Still waiting 1C throwing out.\n return left + [e for e in right if e not in left]\n\n products = Product.objects.filter(uuid__in=data)\n\n for product in products:\n product_data = data[str(product.uuid)]\n for field, value in product_data.items():\n save(product, field, value)\n product.save()\n\n logger.info('{} products were updated.'.format(products.count()))\n return products\n\n\[email protected]\ndef create(data: Dict[UUID, Data], updated_products: QuerySet) -> QuerySet:\n data = deepcopy(data)\n uuids_for_create = (\n set(data) - set(str(product.uuid) for product in updated_products)\n )\n\n for uuid in uuids_for_create:\n product_data = data.get(uuid)\n tags = product_data.pop('tags', {})\n page_data = product_data.pop('page', {})\n\n new_product = Product.objects.create(**product_data, uuid=uuid)\n new_product.tags.set(tags)\n for field, value in page_data.items():\n setattr(new_product.page, field, value)\n new_product.page.save()\n\n created_products = Product.objects.filter(uuid__in=uuids_for_create)\n\n logger.info('{} products were created.'.format(created_products.count()))\n return created_products\n\n\nclass UpdateProductError(Exception):\n pass\n\n\ndef main(*args, **kwargs):\n cleaned_product_data = clean_data(merge_data(\n product_file.get_data(),\n price_file.get_data(),\n in_stock_file.get_data(),\n ))\n\n if not cleaned_product_data:\n\n parsed_files = {\n 'product_files': list(product_file.parsed_files),\n 'price_files': list(price_file.parsed_files),\n 'in_stock_files': list(in_stock_file.parsed_files),\n }\n\n if not any(parsed_files.values()):\n message = 'Files does not exist: {}'.format(parsed_files)\n else:\n # file structure is unstable.\n # You should adapt code for it if you got this error\n message = (\n 'The file structure has changed'\n ' or it does not contain the required data.'\n )\n\n raise 
UpdateProductError(message)\n\n delete(cleaned_product_data)\n updated_products = update(cleaned_product_data)\n created_products = create(cleaned_product_data, updated_products)\n\n if created_products.exists():\n report(kwargs['recipients'])\n", "path": "shopelectro/management/commands/_update_catalog/update_products.py"}]}
| 3,850 | 163 |
gh_patches_debug_8917 | rasdani/github-patches | git_diff | beeware__toga-585 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Canvas Dashed Line Support for Gtk+
Hey PyCon AU 2018 sprinters, and other new contributors - here is a great way to contribute for someone who runs Linux:
Recently @bryall implemented dashed line support for Canvas in the Cocoa backend in #578. It would be great to implement support for dashed lines in Gtk+ as well.
</issue>
<code>
[start of src/gtk/toga_gtk/widgets/canvas.py]
1 import gi
2
3 gi.require_version("Gtk", "3.0")
4 from gi.repository import Gtk
5
6 try:
7 import cairo
8 except ImportError:
9 cairo = None
10 try:
11 gi.require_version("Pango", "1.0")
12 from gi.repository import Pango
13
14 SCALE = Pango.SCALE
15 except ImportError:
16 SCALE = 1024
17
18 from .base import Widget
19 from ..color import native_color
20
21
22 class Canvas(Widget):
23 def create(self):
24 if cairo is None:
25 raise RuntimeError(
26 "'import cairo' failed; may need to install python-gi-cairo."
27 )
28
29 self.native = Gtk.DrawingArea()
30 self.native.interface = self.interface
31 self.native.connect("draw", self.gtk_draw_callback)
32
33 def gtk_draw_callback(self, canvas, gtk_context):
34 """Creates a draw callback
35
36 Gtk+ uses a drawing callback to draw on a DrawingArea. Assignment of the
37 callback function creates a Gtk+ canvas and Gtk+ context automatically
38 using the canvas and gtk_context function arguments. This method calls
39 the draw method on the interface Canvas to draw the objects.
40
41 """
42 self.interface._draw(self, draw_context=gtk_context)
43
44 def redraw(self):
45 pass
46
47 # Basic paths
48
49 def new_path(self, draw_context, *args, **kwargs):
50 draw_context.new_path()
51
52 def closed_path(self, x, y, draw_context, *args, **kwargs):
53 draw_context.close_path()
54
55 def move_to(self, x, y, draw_context, *args, **kwargs):
56 draw_context.move_to(x, y)
57
58 def line_to(self, x, y, draw_context, *args, **kwargs):
59 draw_context.line_to(x, y)
60
61 # Basic shapes
62
63 def bezier_curve_to(
64 self, cp1x, cp1y, cp2x, cp2y, x, y, draw_context, *args, **kwargs
65 ):
66 draw_context.curve_to(cp1x, cp1y, cp2x, cp2y, x, y)
67
68 def quadratic_curve_to(self, cpx, cpy, x, y, draw_context, *args, **kwargs):
69 draw_context.curve_to(cpx, cpy, cpx, cpy, x, y)
70
71 def arc(
72 self,
73 x,
74 y,
75 radius,
76 startangle,
77 endangle,
78 anticlockwise,
79 draw_context,
80 *args,
81 **kwargs
82 ):
83 if anticlockwise:
84 draw_context.arc_negative(x, y, radius, startangle, endangle)
85 else:
86 draw_context.arc(x, y, radius, startangle, endangle)
87
88 def ellipse(
89 self,
90 x,
91 y,
92 radiusx,
93 radiusy,
94 rotation,
95 startangle,
96 endangle,
97 anticlockwise,
98 draw_context,
99 *args,
100 **kwargs
101 ):
102 draw_context.save()
103 draw_context.translate(x, y)
104 if radiusx >= radiusy:
105 draw_context.scale(1, radiusy / radiusx)
106 self.arc(0, 0, radiusx, startangle, endangle, anticlockwise, draw_context)
107 else:
108 draw_context.scale(radiusx / radiusy, 1)
109 self.arc(0, 0, radiusy, startangle, endangle, anticlockwise, draw_context)
110 draw_context.rotate(rotation)
111 draw_context.identity_matrix()
112 draw_context.restore()
113
114 def rect(self, x, y, width, height, draw_context, *args, **kwargs):
115 draw_context.rectangle(x, y, width, height)
116
117 # Drawing Paths
118
119 def apply_color(self, color, draw_context, *args, **kwargs):
120 if color is not None:
121 draw_context.set_source_rgba(*native_color(color))
122 else:
123 # set color to black
124 draw_context.set_source_rgba(0, 0, 0, 1.0)
125
126 def fill(self, color, fill_rule, preserve, draw_context, *args, **kwargs):
127 self.apply_color(color, draw_context)
128 if fill_rule is "evenodd":
129 draw_context.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)
130 else:
131 draw_context.set_fill_rule(cairo.FILL_RULE_WINDING)
132 if preserve:
133 draw_context.fill_preserve()
134 else:
135 draw_context.fill()
136
137 def stroke(self, color, line_width, draw_context, *args, **kwargs):
138 self.apply_color(color, draw_context)
139 draw_context.set_line_width(line_width)
140 draw_context.stroke()
141
142 # Transformations
143
144 def rotate(self, radians, draw_context, *args, **kwargs):
145 draw_context.rotate(radians)
146
147 def scale(self, sx, sy, draw_context, *args, **kwargs):
148 draw_context.scale(sx, sy)
149
150 def translate(self, tx, ty, draw_context, *args, **kwargs):
151 draw_context.translate(tx, ty)
152
153 def reset_transform(self, draw_context, *args, **kwargs):
154 draw_context.identity_matrix()
155
156 # Text
157
158 def write_text(self, text, x, y, font, draw_context, *args, **kwargs):
159 # Set font family and size
160 if font:
161 write_font = font
162 elif self.native.font:
163 write_font = self.native.font
164 write_font.family = self.native.font.get_family()
165 write_font.size = self.native.font.get_size() / SCALE
166 draw_context.select_font_face(write_font.family)
167 draw_context.set_font_size(write_font.size)
168
169 # Support writing multiline text
170 for line in text.splitlines():
171 width, height = write_font.measure(line)
172 draw_context.move_to(x, y)
173 draw_context.text_path(line)
174 y += height
175
176 def measure_text(self, text, font, draw_context, *args, **kwargs):
177 # Set font family and size
178 if font:
179 draw_context.select_font_face(font.family)
180 draw_context.set_font_size(font.size)
181 elif self.native.font:
182 draw_context.select_font_face(self.native.font.get_family())
183 draw_context.set_font_size(self.native.font.get_size() / SCALE)
184
185 x_bearing, y_bearing, width, height, x_advance, y_advance = draw_context.text_extents(
186 text
187 )
188 return width, height
189
190 # Rehint
191
192 def rehint(self):
193 # print("REHINT", self, self.native.get_preferred_width(), self.native.get_preferred_height())
194 width = self.native.get_preferred_width()
195 height = self.native.get_preferred_height()
196
[end of src/gtk/toga_gtk/widgets/canvas.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/gtk/toga_gtk/widgets/canvas.py b/src/gtk/toga_gtk/widgets/canvas.py
--- a/src/gtk/toga_gtk/widgets/canvas.py
+++ b/src/gtk/toga_gtk/widgets/canvas.py
@@ -134,10 +134,13 @@
else:
draw_context.fill()
- def stroke(self, color, line_width, draw_context, *args, **kwargs):
+ def stroke(self, color, line_width, line_dash, draw_context, *args, **kwargs):
self.apply_color(color, draw_context)
draw_context.set_line_width(line_width)
+ if line_dash is not None:
+ draw_context.set_dash(line_dash)
draw_context.stroke()
+ draw_context.set_dash([])
# Transformations
|
{"golden_diff": "diff --git a/src/gtk/toga_gtk/widgets/canvas.py b/src/gtk/toga_gtk/widgets/canvas.py\n--- a/src/gtk/toga_gtk/widgets/canvas.py\n+++ b/src/gtk/toga_gtk/widgets/canvas.py\n@@ -134,10 +134,13 @@\n else:\n draw_context.fill()\n \n- def stroke(self, color, line_width, draw_context, *args, **kwargs):\n+ def stroke(self, color, line_width, line_dash, draw_context, *args, **kwargs):\n self.apply_color(color, draw_context)\n draw_context.set_line_width(line_width)\n+ if line_dash is not None:\n+ draw_context.set_dash(line_dash)\n draw_context.stroke()\n+ draw_context.set_dash([])\n \n # Transformations\n", "issue": "Add Canvas Dashed Line Support for Gtk+\nHey PyCon AU 2018 sprinters, and other new contributors - here is a great way to contribute for someone who runs Linux:\r\n\r\nRecently @bryall implemented dashed line support for Canvas in the Cocoa backend in #578. It would be great to implement support for dashed lines in Gtk+ as well.\n", "before_files": [{"content": "import gi\n\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gtk\n\ntry:\n import cairo\nexcept ImportError:\n cairo = None\ntry:\n gi.require_version(\"Pango\", \"1.0\")\n from gi.repository import Pango\n\n SCALE = Pango.SCALE\nexcept ImportError:\n SCALE = 1024\n\nfrom .base import Widget\nfrom ..color import native_color\n\n\nclass Canvas(Widget):\n def create(self):\n if cairo is None:\n raise RuntimeError(\n \"'import cairo' failed; may need to install python-gi-cairo.\"\n )\n\n self.native = Gtk.DrawingArea()\n self.native.interface = self.interface\n self.native.connect(\"draw\", self.gtk_draw_callback)\n\n def gtk_draw_callback(self, canvas, gtk_context):\n \"\"\"Creates a draw callback\n\n Gtk+ uses a drawing callback to draw on a DrawingArea. Assignment of the\n callback function creates a Gtk+ canvas and Gtk+ context automatically\n using the canvas and gtk_context function arguments. 
This method calls\n the draw method on the interface Canvas to draw the objects.\n\n \"\"\"\n self.interface._draw(self, draw_context=gtk_context)\n\n def redraw(self):\n pass\n\n # Basic paths\n\n def new_path(self, draw_context, *args, **kwargs):\n draw_context.new_path()\n\n def closed_path(self, x, y, draw_context, *args, **kwargs):\n draw_context.close_path()\n\n def move_to(self, x, y, draw_context, *args, **kwargs):\n draw_context.move_to(x, y)\n\n def line_to(self, x, y, draw_context, *args, **kwargs):\n draw_context.line_to(x, y)\n\n # Basic shapes\n\n def bezier_curve_to(\n self, cp1x, cp1y, cp2x, cp2y, x, y, draw_context, *args, **kwargs\n ):\n draw_context.curve_to(cp1x, cp1y, cp2x, cp2y, x, y)\n\n def quadratic_curve_to(self, cpx, cpy, x, y, draw_context, *args, **kwargs):\n draw_context.curve_to(cpx, cpy, cpx, cpy, x, y)\n\n def arc(\n self,\n x,\n y,\n radius,\n startangle,\n endangle,\n anticlockwise,\n draw_context,\n *args,\n **kwargs\n ):\n if anticlockwise:\n draw_context.arc_negative(x, y, radius, startangle, endangle)\n else:\n draw_context.arc(x, y, radius, startangle, endangle)\n\n def ellipse(\n self,\n x,\n y,\n radiusx,\n radiusy,\n rotation,\n startangle,\n endangle,\n anticlockwise,\n draw_context,\n *args,\n **kwargs\n ):\n draw_context.save()\n draw_context.translate(x, y)\n if radiusx >= radiusy:\n draw_context.scale(1, radiusy / radiusx)\n self.arc(0, 0, radiusx, startangle, endangle, anticlockwise, draw_context)\n else:\n draw_context.scale(radiusx / radiusy, 1)\n self.arc(0, 0, radiusy, startangle, endangle, anticlockwise, draw_context)\n draw_context.rotate(rotation)\n draw_context.identity_matrix()\n draw_context.restore()\n\n def rect(self, x, y, width, height, draw_context, *args, **kwargs):\n draw_context.rectangle(x, y, width, height)\n\n # Drawing Paths\n\n def apply_color(self, color, draw_context, *args, **kwargs):\n if color is not None:\n draw_context.set_source_rgba(*native_color(color))\n else:\n # set color to black\n draw_context.set_source_rgba(0, 0, 0, 1.0)\n\n def fill(self, color, fill_rule, preserve, draw_context, *args, **kwargs):\n self.apply_color(color, draw_context)\n if fill_rule is \"evenodd\":\n draw_context.set_fill_rule(cairo.FILL_RULE_EVEN_ODD)\n else:\n draw_context.set_fill_rule(cairo.FILL_RULE_WINDING)\n if preserve:\n draw_context.fill_preserve()\n else:\n draw_context.fill()\n\n def stroke(self, color, line_width, draw_context, *args, **kwargs):\n self.apply_color(color, draw_context)\n draw_context.set_line_width(line_width)\n draw_context.stroke()\n\n # Transformations\n\n def rotate(self, radians, draw_context, *args, **kwargs):\n draw_context.rotate(radians)\n\n def scale(self, sx, sy, draw_context, *args, **kwargs):\n draw_context.scale(sx, sy)\n\n def translate(self, tx, ty, draw_context, *args, **kwargs):\n draw_context.translate(tx, ty)\n\n def reset_transform(self, draw_context, *args, **kwargs):\n draw_context.identity_matrix()\n\n # Text\n\n def write_text(self, text, x, y, font, draw_context, *args, **kwargs):\n # Set font family and size\n if font:\n write_font = font\n elif self.native.font:\n write_font = self.native.font\n write_font.family = self.native.font.get_family()\n write_font.size = self.native.font.get_size() / SCALE\n draw_context.select_font_face(write_font.family)\n draw_context.set_font_size(write_font.size)\n\n # Support writing multiline text\n for line in text.splitlines():\n width, height = write_font.measure(line)\n draw_context.move_to(x, y)\n draw_context.text_path(line)\n y += 
height\n\n def measure_text(self, text, font, draw_context, *args, **kwargs):\n # Set font family and size\n if font:\n draw_context.select_font_face(font.family)\n draw_context.set_font_size(font.size)\n elif self.native.font:\n draw_context.select_font_face(self.native.font.get_family())\n draw_context.set_font_size(self.native.font.get_size() / SCALE)\n\n x_bearing, y_bearing, width, height, x_advance, y_advance = draw_context.text_extents(\n text\n )\n return width, height\n\n # Rehint\n\n def rehint(self):\n # print(\"REHINT\", self, self.native.get_preferred_width(), self.native.get_preferred_height())\n width = self.native.get_preferred_width()\n height = self.native.get_preferred_height()\n", "path": "src/gtk/toga_gtk/widgets/canvas.py"}]}
| 2,537 | 178 |
gh_patches_debug_21881
|
rasdani/github-patches
|
git_diff
|
google__TensorNetwork-263
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ncon_interface tests fail
</issue>
<code>
[start of conftest.py]
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18 import pytest
19
20
21 @pytest.fixture(name="backend", params=["numpy", "tensorflow",
22 "jax", "pytorch"])
23 def backend_fixure(request):
24 return request.param
25
[end of conftest.py]
[start of tensornetwork/__init__.py]
1 from __future__ import absolute_import
2 from tensornetwork.network import TensorNetwork
3 from tensornetwork.network_components import Node, Edge, CopyNode
4 from tensornetwork.ncon_interface import ncon, ncon_network
5 from tensornetwork.version import __version__
6 from tensornetwork.visualization.graphviz import to_graphviz
7 from tensornetwork import contractors
8 from tensornetwork import config
9 from typing import Text, Optional, Type
10 from tensornetwork.utils import load
11
12
13 def set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:
14 config.default_backend = backend
15 config.default_dype = dtype
16
[end of tensornetwork/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -16,9 +16,33 @@
from __future__ import division
from __future__ import print_function
import pytest
+import jax
+import tensornetwork
+import tensorflow as tf
@pytest.fixture(name="backend", params=["numpy", "tensorflow",
"jax", "pytorch"])
def backend_fixure(request):
return request.param
+
+
[email protected](autouse=True)
+def reset_default_backend():
+ tensornetwork.set_default_backend("numpy")
+ yield
+ tensornetwork.set_default_backend("numpy")
+
+
[email protected](autouse=True)
+def enable_jax_64():
+ jax.config.update("jax_enable_x64", True)
+ yield
+ jax.config.update("jax_enable_x64", True)
+
+
[email protected](autouse=True)
+def tf_enable_v2_behaviour():
+ tf.compat.v1.enable_v2_behavior()
+ yield
+ tf.compat.v1.enable_v2_behavior()
diff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py
--- a/tensornetwork/__init__.py
+++ b/tensornetwork/__init__.py
@@ -12,4 +12,4 @@
def set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:
config.default_backend = backend
- config.default_dype = dtype
+ config.default_dtype = dtype
|
{"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -16,9 +16,33 @@\n from __future__ import division\n from __future__ import print_function\n import pytest\n+import jax\n+import tensornetwork\n+import tensorflow as tf\n \n \n @pytest.fixture(name=\"backend\", params=[\"numpy\", \"tensorflow\",\n \"jax\", \"pytorch\"])\n def backend_fixure(request):\n return request.param\n+\n+\[email protected](autouse=True)\n+def reset_default_backend():\n+ tensornetwork.set_default_backend(\"numpy\")\n+ yield\n+ tensornetwork.set_default_backend(\"numpy\")\n+\n+\[email protected](autouse=True)\n+def enable_jax_64():\n+ jax.config.update(\"jax_enable_x64\", True)\n+ yield\n+ jax.config.update(\"jax_enable_x64\", True)\n+\n+\[email protected](autouse=True)\n+def tf_enable_v2_behaviour():\n+ tf.compat.v1.enable_v2_behavior()\n+ yield\n+ tf.compat.v1.enable_v2_behavior()\ndiff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py\n--- a/tensornetwork/__init__.py\n+++ b/tensornetwork/__init__.py\n@@ -12,4 +12,4 @@\n \n def set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:\n config.default_backend = backend\n- config.default_dype = dtype\n+ config.default_dtype = dtype\n", "issue": "ncon_interface tests fail \n\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport pytest\n\n\[email protected](name=\"backend\", params=[\"numpy\", \"tensorflow\",\n \"jax\", \"pytorch\"])\ndef backend_fixure(request):\n return request.param\n", "path": "conftest.py"}, {"content": "from __future__ import absolute_import\nfrom tensornetwork.network import TensorNetwork\nfrom tensornetwork.network_components import Node, Edge, CopyNode\nfrom tensornetwork.ncon_interface import ncon, ncon_network\nfrom tensornetwork.version import __version__\nfrom tensornetwork.visualization.graphviz import to_graphviz\nfrom tensornetwork import contractors\nfrom tensornetwork import config\nfrom typing import Text, Optional, Type\nfrom tensornetwork.utils import load\n\n\ndef set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:\n config.default_backend = backend\n config.default_dype = dtype\n", "path": "tensornetwork/__init__.py"}]}
| 954 | 355 |
gh_patches_debug_5526
|
rasdani/github-patches
|
git_diff
|
readthedocs__readthedocs.org-11354
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Project version filter returns 500
It seems the `ProjectVersionListFilterSet.get_visibility()` is receiving the wrong number of arguments.
To reproduce, just hit https://beta.readthedocs.org/projects/bigo-live-hack/?privacy=&sort=&visibility=hidden
Sentry issue: https://read-the-docs.sentry.io/issues/4721614191/?project=148442&query=is%3Aunresolved&referrer=issue-stream&statsPeriod=7d&stream_index=7
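
A minimal sketch of one possible fix, assuming django-filter invokes custom filter methods positionally as `method(queryset, field_name, value)`:

```python
# Sketch only: accept the positional field_name argument rather than a
# keyword-only `value`, so the three-argument call from django-filter works.
def get_visibility(self, queryset, field_name, value):
    if value == self.VISIBILITY_HIDDEN:
        return queryset.filter(hidden=True)
    if value == self.VISIBILITY_VISIBLE:
        return queryset.filter(hidden=False)
    return queryset
```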
</issue>
<code>
[start of readthedocs/projects/filters.py]
1 """Filters used in project dashboard."""
2
3 import structlog
4 from django.db.models import Count, F, Max
5 from django.utils.translation import gettext_lazy as _
6 from django_filters import ChoiceFilter, OrderingFilter
7
8 from readthedocs.core.filters import FilteredModelChoiceFilter, ModelFilterSet
9 from readthedocs.projects.models import Project
10
11 log = structlog.get_logger(__name__)
12
13
14 class VersionSortOrderingFilter(OrderingFilter):
15
16 """
17 Version list sort ordering django_filters filter.
18
19 Django-filter is highly opionated, and the default model filters do not work
20 well with empty/null values in the filter choices. In our case, empty/null
21 values are used for a default query. So, to make this work, we will use a
22 custom filter, instead of an automated model filter.
23
24 The empty/None value is used to provide both a default value to the filter
25 (when there is no ``sort`` query param), but also provide an option that is
26 manually selectable (``?sort=relevance``). We can't do this with the default
27 filter, because result would be params like ``?sort=None``.
28 """
29
30 SORT_BUILD_COUNT = "build_count"
31 SORT_BUILD_DATE = "build_date"
32 SORT_NAME = "name"
33
34 def __init__(self, *args, **kwargs):
35 # The default filtering operation will be `-recent`, so we omit it
36 # from choices to avoid showing it on the list twice.
37 kwargs.setdefault("empty_label", _("Recently built"))
38 kwargs.setdefault(
39 "choices",
40 (
41 ("-" + self.SORT_BUILD_DATE, _("Least recently built")),
42 ("-" + self.SORT_BUILD_COUNT, _("Frequently built")),
43 (self.SORT_BUILD_COUNT, _("Least frequently built")),
44 (self.SORT_NAME, _("Name")),
45 ("-" + self.SORT_NAME, _("Name (descending)")),
46 ),
47 )
48 super().__init__(*args, **kwargs)
49
50 def filter(self, qs, value):
51 # This is where we use the None value for this custom filter. This
52 # doesn't work with a standard model filter. Note: ``value`` is always
53 # an iterable, but can be empty.
54
55 if not value:
56 value = [self.SORT_BUILD_DATE]
57
58 annotations = {}
59 order_bys = []
60 for field_ordered in value:
61 field = field_ordered.lstrip("-")
62
63 if field == self.SORT_BUILD_DATE:
64 annotations[self.SORT_BUILD_DATE] = Max("builds__date")
65 elif field == self.SORT_BUILD_COUNT:
66 annotations[self.SORT_BUILD_COUNT] = Count("builds")
67 elif field == self.SORT_NAME:
68 # Alias field name here, as ``OrderingFilter`` was having trouble
69 # doing this with it's native field mapping
70 annotations[self.SORT_NAME] = F("verbose_name")
71
72 if field_ordered == self.SORT_BUILD_DATE:
73 order_bys.append(F(field).desc(nulls_last=True))
74 elif field_ordered == "-" + self.SORT_BUILD_DATE:
75 order_bys.append(F(field).asc(nulls_first=True))
76 else:
77 order_bys.append(field_ordered)
78
79 return qs.annotate(**annotations).order_by(*order_bys)
80
81
82 class ProjectSortOrderingFilter(OrderingFilter):
83
84 """
85 Project list sort ordering django_filters filter.
86
87 Django-filter is highly opionated, and the default model filters do not work
88 well with empty/null values in the filter choices. In our case, empty/null
89 values are used for a default query. So, to make this work, we will use a
90 custom filter, instead of an automated model filter.
91 """
92
93 SORT_NAME = "name"
94 SORT_MODIFIED_DATE = "modified_date"
95 SORT_BUILD_DATE = "build_date"
96 SORT_BUILD_COUNT = "build_count"
97
98 def __init__(self, *args, **kwargs):
99 # The default filtering operation will be `name`, so we omit it
100 # from choices to avoid showing it on the list twice.
101 kwargs.setdefault("empty_label", _("Recently built"))
102 kwargs.setdefault(
103 "choices",
104 (
105 ("-" + self.SORT_BUILD_DATE, _("Least recently built")),
106 ("-" + self.SORT_BUILD_COUNT, _("Frequently built")),
107 (self.SORT_BUILD_COUNT, _("Least frequently built")),
108 ("-" + self.SORT_MODIFIED_DATE, _("Recently modified")),
109 (self.SORT_MODIFIED_DATE, _("Least recently modified")),
110 (self.SORT_NAME, _("Name")),
111 ("-" + self.SORT_NAME, _("Name (descending)")),
112 ),
113 )
114 super().__init__(*args, **kwargs)
115
116 def filter(self, qs, value):
117 # This is where we use the None value from the custom filter
118 if not value:
119 value = [self.SORT_BUILD_DATE]
120
121 annotations = {}
122 order_bys = []
123 for field_ordered in value:
124 field = field_ordered.lstrip("-")
125
126 if field == self.SORT_BUILD_DATE:
127 annotations[self.SORT_BUILD_DATE] = Max("builds__date")
128 elif field == self.SORT_BUILD_COUNT:
129 annotations[self.SORT_BUILD_COUNT] = Count("builds")
130
131 if field_ordered == self.SORT_BUILD_DATE:
132 order_bys.append(F(field).desc(nulls_last=True))
133 elif field_ordered == "-" + self.SORT_BUILD_DATE:
134 order_bys.append(F(field).asc(nulls_first=True))
135 else:
136 order_bys.append(field_ordered)
137
138 return qs.annotate(**annotations).order_by(*order_bys)
139
140
141 class ProjectListFilterSet(ModelFilterSet):
142
143 """
144 Project list filter set for project list view.
145
146 This filter set enables list view sorting using a custom filter, and
147 provides search-as-you-type lookup filter as well.
148 """
149
150 slug = FilteredModelChoiceFilter(
151 label=_("Project"),
152 empty_label=_("All projects"),
153 to_field_name="slug",
154 queryset_method="get_project_queryset",
155 method="get_project",
156 label_attribute="name",
157 )
158
159 sort = ProjectSortOrderingFilter(
160 field_name="sort",
161 label=_("Sort by"),
162 )
163
164 def get_project_queryset(self):
165 return Project.objects.for_user(user=self.request.user)
166
167 def get_project(self, queryset, field_name, project):
168 return queryset.filter(slug=project.slug)
169
170
171 class ProjectVersionListFilterSet(ModelFilterSet):
172
173 """
174 Filter and sorting for project version listing page.
175
176 This is used from the project versions list view page to provide filtering
177 and sorting to the version list and search UI. It is normally instantiated
178 with an included queryset, which provides user project authorization.
179 """
180
181 VISIBILITY_HIDDEN = "hidden"
182 VISIBILITY_VISIBLE = "visible"
183
184 VISIBILITY_CHOICES = (
185 ("hidden", _("Hidden versions")),
186 ("visible", _("Visible versions")),
187 )
188
189 PRIVACY_CHOICES = (
190 ("public", _("Public versions")),
191 ("private", _("Private versions")),
192 )
193
194 # Attribute filter fields
195 slug = FilteredModelChoiceFilter(
196 label=_("Version"),
197 empty_label=_("All versions"),
198 to_field_name="slug",
199 queryset_method="get_version_queryset",
200 method="get_version",
201 label_attribute="verbose_name",
202 )
203
204 privacy = ChoiceFilter(
205 field_name="privacy_level",
206 label=_("Privacy"),
207 choices=PRIVACY_CHOICES,
208 empty_label=_("Any"),
209 )
210 # This field looks better as ``visibility=hidden`` than it does
211 # ``hidden=true``, otherwise we could use a BooleanFilter instance here
212 # instead
213 visibility = ChoiceFilter(
214 field_name="hidden",
215 label=_("Visibility"),
216 choices=VISIBILITY_CHOICES,
217 method="get_visibility",
218 empty_label=_("Any"),
219 )
220
221 sort = VersionSortOrderingFilter(
222 field_name="sort",
223 label=_("Sort by"),
224 )
225
226 def __init__(self, *args, project=None, **kwargs):
227 self.project = project
228 super().__init__(*args, **kwargs)
229
230 def get_version(self, queryset, field_name, version):
231 return queryset.filter(slug=version.slug)
232
233 def get_version_queryset(self):
234 # This query is passed in at instantiation
235 return self.queryset
236
237 def get_visibility(self, queryset, *, value):
238 if value == self.VISIBILITY_HIDDEN:
239 return queryset.filter(hidden=True)
240 if value == self.VISIBILITY_VISIBLE:
241 return queryset.filter(hidden=False)
242 return queryset
243
[end of readthedocs/projects/filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py
--- a/readthedocs/projects/filters.py
+++ b/readthedocs/projects/filters.py
@@ -234,7 +234,7 @@
# This query is passed in at instantiation
return self.queryset
- def get_visibility(self, queryset, *, value):
+ def get_visibility(self, queryset, field_name, value):
if value == self.VISIBILITY_HIDDEN:
return queryset.filter(hidden=True)
if value == self.VISIBILITY_VISIBLE:
|
{"golden_diff": "diff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py\n--- a/readthedocs/projects/filters.py\n+++ b/readthedocs/projects/filters.py\n@@ -234,7 +234,7 @@\n # This query is passed in at instantiation\n return self.queryset\n \n- def get_visibility(self, queryset, *, value):\n+ def get_visibility(self, queryset, field_name, value):\n if value == self.VISIBILITY_HIDDEN:\n return queryset.filter(hidden=True)\n if value == self.VISIBILITY_VISIBLE:\n", "issue": "Project version filter returns 500\nIt seems the `ProjectVersionListFilterSet.get_visibility()` is receiving the wrong amount of arguments.\r\n\r\nTo reproduce, just hit https://beta.readthedocs.org/projects/bigo-live-hack/?privacy=&sort=&visibility=hidden\r\n\r\nSentry issue: https://read-the-docs.sentry.io/issues/4721614191/?project=148442&query=is%3Aunresolved&referrer=issue-stream&statsPeriod=7d&stream_index=7\n", "before_files": [{"content": "\"\"\"Filters used in project dashboard.\"\"\"\n\nimport structlog\nfrom django.db.models import Count, F, Max\nfrom django.utils.translation import gettext_lazy as _\nfrom django_filters import ChoiceFilter, OrderingFilter\n\nfrom readthedocs.core.filters import FilteredModelChoiceFilter, ModelFilterSet\nfrom readthedocs.projects.models import Project\n\nlog = structlog.get_logger(__name__)\n\n\nclass VersionSortOrderingFilter(OrderingFilter):\n\n \"\"\"\n Version list sort ordering django_filters filter.\n\n Django-filter is highly opionated, and the default model filters do not work\n well with empty/null values in the filter choices. In our case, empty/null\n values are used for a default query. So, to make this work, we will use a\n custom filter, instead of an automated model filter.\n\n The empty/None value is used to provide both a default value to the filter\n (when there is no ``sort`` query param), but also provide an option that is\n manually selectable (``?sort=relevance``). We can't do this with the default\n filter, because result would be params like ``?sort=None``.\n \"\"\"\n\n SORT_BUILD_COUNT = \"build_count\"\n SORT_BUILD_DATE = \"build_date\"\n SORT_NAME = \"name\"\n\n def __init__(self, *args, **kwargs):\n # The default filtering operation will be `-recent`, so we omit it\n # from choices to avoid showing it on the list twice.\n kwargs.setdefault(\"empty_label\", _(\"Recently built\"))\n kwargs.setdefault(\n \"choices\",\n (\n (\"-\" + self.SORT_BUILD_DATE, _(\"Least recently built\")),\n (\"-\" + self.SORT_BUILD_COUNT, _(\"Frequently built\")),\n (self.SORT_BUILD_COUNT, _(\"Least frequently built\")),\n (self.SORT_NAME, _(\"Name\")),\n (\"-\" + self.SORT_NAME, _(\"Name (descending)\")),\n ),\n )\n super().__init__(*args, **kwargs)\n\n def filter(self, qs, value):\n # This is where we use the None value for this custom filter. This\n # doesn't work with a standard model filter. 
Note: ``value`` is always\n # an iterable, but can be empty.\n\n if not value:\n value = [self.SORT_BUILD_DATE]\n\n annotations = {}\n order_bys = []\n for field_ordered in value:\n field = field_ordered.lstrip(\"-\")\n\n if field == self.SORT_BUILD_DATE:\n annotations[self.SORT_BUILD_DATE] = Max(\"builds__date\")\n elif field == self.SORT_BUILD_COUNT:\n annotations[self.SORT_BUILD_COUNT] = Count(\"builds\")\n elif field == self.SORT_NAME:\n # Alias field name here, as ``OrderingFilter`` was having trouble\n # doing this with it's native field mapping\n annotations[self.SORT_NAME] = F(\"verbose_name\")\n\n if field_ordered == self.SORT_BUILD_DATE:\n order_bys.append(F(field).desc(nulls_last=True))\n elif field_ordered == \"-\" + self.SORT_BUILD_DATE:\n order_bys.append(F(field).asc(nulls_first=True))\n else:\n order_bys.append(field_ordered)\n\n return qs.annotate(**annotations).order_by(*order_bys)\n\n\nclass ProjectSortOrderingFilter(OrderingFilter):\n\n \"\"\"\n Project list sort ordering django_filters filter.\n\n Django-filter is highly opionated, and the default model filters do not work\n well with empty/null values in the filter choices. In our case, empty/null\n values are used for a default query. So, to make this work, we will use a\n custom filter, instead of an automated model filter.\n \"\"\"\n\n SORT_NAME = \"name\"\n SORT_MODIFIED_DATE = \"modified_date\"\n SORT_BUILD_DATE = \"build_date\"\n SORT_BUILD_COUNT = \"build_count\"\n\n def __init__(self, *args, **kwargs):\n # The default filtering operation will be `name`, so we omit it\n # from choices to avoid showing it on the list twice.\n kwargs.setdefault(\"empty_label\", _(\"Recently built\"))\n kwargs.setdefault(\n \"choices\",\n (\n (\"-\" + self.SORT_BUILD_DATE, _(\"Least recently built\")),\n (\"-\" + self.SORT_BUILD_COUNT, _(\"Frequently built\")),\n (self.SORT_BUILD_COUNT, _(\"Least frequently built\")),\n (\"-\" + self.SORT_MODIFIED_DATE, _(\"Recently modified\")),\n (self.SORT_MODIFIED_DATE, _(\"Least recently modified\")),\n (self.SORT_NAME, _(\"Name\")),\n (\"-\" + self.SORT_NAME, _(\"Name (descending)\")),\n ),\n )\n super().__init__(*args, **kwargs)\n\n def filter(self, qs, value):\n # This is where we use the None value from the custom filter\n if not value:\n value = [self.SORT_BUILD_DATE]\n\n annotations = {}\n order_bys = []\n for field_ordered in value:\n field = field_ordered.lstrip(\"-\")\n\n if field == self.SORT_BUILD_DATE:\n annotations[self.SORT_BUILD_DATE] = Max(\"builds__date\")\n elif field == self.SORT_BUILD_COUNT:\n annotations[self.SORT_BUILD_COUNT] = Count(\"builds\")\n\n if field_ordered == self.SORT_BUILD_DATE:\n order_bys.append(F(field).desc(nulls_last=True))\n elif field_ordered == \"-\" + self.SORT_BUILD_DATE:\n order_bys.append(F(field).asc(nulls_first=True))\n else:\n order_bys.append(field_ordered)\n\n return qs.annotate(**annotations).order_by(*order_bys)\n\n\nclass ProjectListFilterSet(ModelFilterSet):\n\n \"\"\"\n Project list filter set for project list view.\n\n This filter set enables list view sorting using a custom filter, and\n provides search-as-you-type lookup filter as well.\n \"\"\"\n\n slug = FilteredModelChoiceFilter(\n label=_(\"Project\"),\n empty_label=_(\"All projects\"),\n to_field_name=\"slug\",\n queryset_method=\"get_project_queryset\",\n method=\"get_project\",\n label_attribute=\"name\",\n )\n\n sort = ProjectSortOrderingFilter(\n field_name=\"sort\",\n label=_(\"Sort by\"),\n )\n\n def get_project_queryset(self):\n return 
Project.objects.for_user(user=self.request.user)\n\n def get_project(self, queryset, field_name, project):\n return queryset.filter(slug=project.slug)\n\n\nclass ProjectVersionListFilterSet(ModelFilterSet):\n\n \"\"\"\n Filter and sorting for project version listing page.\n\n This is used from the project versions list view page to provide filtering\n and sorting to the version list and search UI. It is normally instantiated\n with an included queryset, which provides user project authorization.\n \"\"\"\n\n VISIBILITY_HIDDEN = \"hidden\"\n VISIBILITY_VISIBLE = \"visible\"\n\n VISIBILITY_CHOICES = (\n (\"hidden\", _(\"Hidden versions\")),\n (\"visible\", _(\"Visible versions\")),\n )\n\n PRIVACY_CHOICES = (\n (\"public\", _(\"Public versions\")),\n (\"private\", _(\"Private versions\")),\n )\n\n # Attribute filter fields\n slug = FilteredModelChoiceFilter(\n label=_(\"Version\"),\n empty_label=_(\"All versions\"),\n to_field_name=\"slug\",\n queryset_method=\"get_version_queryset\",\n method=\"get_version\",\n label_attribute=\"verbose_name\",\n )\n\n privacy = ChoiceFilter(\n field_name=\"privacy_level\",\n label=_(\"Privacy\"),\n choices=PRIVACY_CHOICES,\n empty_label=_(\"Any\"),\n )\n # This field looks better as ``visibility=hidden`` than it does\n # ``hidden=true``, otherwise we could use a BooleanFilter instance here\n # instead\n visibility = ChoiceFilter(\n field_name=\"hidden\",\n label=_(\"Visibility\"),\n choices=VISIBILITY_CHOICES,\n method=\"get_visibility\",\n empty_label=_(\"Any\"),\n )\n\n sort = VersionSortOrderingFilter(\n field_name=\"sort\",\n label=_(\"Sort by\"),\n )\n\n def __init__(self, *args, project=None, **kwargs):\n self.project = project\n super().__init__(*args, **kwargs)\n\n def get_version(self, queryset, field_name, version):\n return queryset.filter(slug=version.slug)\n\n def get_version_queryset(self):\n # This query is passed in at instantiation\n return self.queryset\n\n def get_visibility(self, queryset, *, value):\n if value == self.VISIBILITY_HIDDEN:\n return queryset.filter(hidden=True)\n if value == self.VISIBILITY_VISIBLE:\n return queryset.filter(hidden=False)\n return queryset\n", "path": "readthedocs/projects/filters.py"}]}
| 3,103 | 123 |
gh_patches_debug_14998
|
rasdani/github-patches
|
git_diff
|
ManageIQ__integration_tests-8406
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Catalog exists property throws "CandidateNotFound" Exception
When we call <catalog_obj>.exists, it throws "CandidateNotFound" Exception, whereas in our test cases we expect a Boolean value "False".
>> https://github.com/ManageIQ/integration_tests/blob/master/cfme/services/catalogs/catalog.py#L119
Steps to Reproduce: <catalog_obj>.exists
Actual Result: Trace-back of "CandidateNotFound" Exception
Expected Result: False
</issue>
<code>
[start of cfme/services/catalogs/catalog.py]
1 import attr
2
3 from widgetastic.utils import Parameter
4 from widgetastic.widget import Text
5 from widgetastic_manageiq import MultiBoxSelect
6 from widgetastic_patternfly import Button, Input
7 from navmazing import NavigateToAttribute, NavigateToSibling
8
9 from cfme.common import Taggable
10 from cfme.modeling.base import BaseCollection, BaseEntity
11 from cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to
12 from cfme.utils.pretty import Pretty
13 from cfme.utils.update import Updateable
14 from cfme.utils.wait import wait_for
15
16 from . import ServicesCatalogView
17
18
19 class CatalogsMultiBoxSelect(MultiBoxSelect):
20 move_into_button = Button(title=Parameter("@move_into"))
21 move_from_button = Button(title=Parameter("@move_from"))
22
23
24 class CatalogForm(ServicesCatalogView):
25 title = Text('#explorer_title_text')
26
27 name = Input(name='name')
28 description = Input(name="description")
29 assign_catalog_items = CatalogsMultiBoxSelect(
30 move_into="Move Selected buttons right",
31 move_from="Move Selected buttons left",
32 available_items="available_fields",
33 chosen_items="selected_fields"
34 )
35
36 save_button = Button('Save')
37 cancel_button = Button('Cancel')
38
39
40 class CatalogsView(ServicesCatalogView):
41 title = Text("#explorer_title_text")
42
43 @property
44 def is_displayed(self):
45 return (
46 self.in_explorer and
47 self.catalogs.is_opened and
48 self.catalogs.tree.currently_selected == ["All Catalogs"])
49
50
51 class DetailsCatalogView(ServicesCatalogView):
52 title = Text("#explorer_title_text")
53
54 @property
55 def is_displayed(self):
56 return (
57 self.in_explorer and self.catalogs.is_opened and
58 self.title.text == 'Catalog "{}"'.format(self.context["object"].name)
59 )
60
61
62 class AddCatalogView(CatalogForm):
63
64 add_button = Button("Add")
65
66 @property
67 def is_displayed(self):
68 return (
69 self.in_explorer and self.catalogs.is_opened and
70 self.title.text == 'Adding a new Catalog'
71 )
72
73
74 class EditCatalogView(CatalogForm):
75
76 save_button = Button('Save')
77 reset_button = Button('Reset')
78
79 @property
80 def is_displayed(self):
81 return (
82 self.in_explorer and self.catalogs.is_opened and
83 self.title.text == 'Editing Catalog "{}"'.format(self.context["object"].name)
84 )
85
86
87 @attr.s
88 class Catalog(BaseEntity, Updateable, Pretty, Taggable):
89
90 name = attr.ib()
91 description = attr.ib()
92 items = attr.ib(default=None)
93
94 def update(self, updates):
95 view = navigate_to(self, 'Edit')
96 changed = view.fill(updates)
97 if changed:
98 view.save_button.click()
99 else:
100 view.cancel_button.click()
101 view = self.create_view(DetailsCatalogView, override=updates, wait='10s')
102 view.flash.assert_no_error()
103 if changed:
104 view.flash.assert_message(
105 'Catalog "{}" was saved'.format(updates.get('name', self.name)))
106 else:
107 view.flash.assert_message(
108 'Edit of Catalog "{}" was cancelled by the user'.format(self.name))
109
110 def delete(self):
111 view = navigate_to(self, "Details")
112 view.configuration.item_select('Remove Catalog', handle_alert=True)
113 view = self.create_view(CatalogsView, wait='10s')
114 view.flash.assert_no_error()
115 view.flash.assert_success_message(
116 'Catalog "{}": Delete successful'.format(self.description or self.name))
117
118 @property
119 def exists(self):
120 try:
121 navigate_to(self, 'Details')
122 return True
123 except NameError:
124 return False
125
126
127 @attr.s
128 class CatalogCollection(BaseCollection):
129 """A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`"""
130 ENTITY = Catalog
131
132 def create(self, name, description, items=None):
133 """Create a catalog.
134
135 Args:
136 name: The name of the catalog
137 description: The description of the catalog
138 items: Items in the catalog
139 """
140 view = navigate_to(self, 'Add')
141 view.fill({
142 'name': name,
143 'description': description,
144 'assign_catalog_items': items
145 })
146 view.add_button.click()
147 catalog = self.instantiate(name=name, description=description, items=items)
148 view = self.create_view(CatalogsView)
149 assert view.is_displayed
150 view.flash.assert_no_error()
151 return catalog
152
153
154 @navigator.register(CatalogCollection)
155 class All(CFMENavigateStep):
156 VIEW = CatalogsView
157 prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
158
159 def step(self):
160 self.prerequisite_view.navigation.select('Services', 'Catalogs')
161 self.view.catalogs.tree.click_path("All Catalogs")
162
163
164 @navigator.register(CatalogCollection)
165 class Add(CFMENavigateStep):
166 VIEW = AddCatalogView
167 prerequisite = NavigateToSibling('All')
168
169 def step(self):
170 self.prerequisite_view.configuration.item_select('Add a New Catalog')
171
172
173 @navigator.register(Catalog)
174 class Details(CFMENavigateStep):
175 VIEW = DetailsCatalogView
176 prerequisite = NavigateToAttribute('parent', 'All')
177
178 def step(self):
179 self.prerequisite_view.catalogs.tree.click_path("All Catalogs", self.obj.name)
180
181
182 @navigator.register(Catalog)
183 class Edit(CFMENavigateStep):
184 VIEW = EditCatalogView
185 prerequisite = NavigateToSibling('Details')
186
187 def step(self):
188 self.prerequisite_view.configuration.item_select('Edit this Item')
189
[end of cfme/services/catalogs/catalog.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py
--- a/cfme/services/catalogs/catalog.py
+++ b/cfme/services/catalogs/catalog.py
@@ -3,7 +3,7 @@
from widgetastic.utils import Parameter
from widgetastic.widget import Text
from widgetastic_manageiq import MultiBoxSelect
-from widgetastic_patternfly import Button, Input
+from widgetastic_patternfly import Button, CandidateNotFound, Input
from navmazing import NavigateToAttribute, NavigateToSibling
from cfme.common import Taggable
@@ -120,7 +120,7 @@
try:
navigate_to(self, 'Details')
return True
- except NameError:
+ except (NameError, CandidateNotFound):
return False
|
{"golden_diff": "diff --git a/cfme/services/catalogs/catalog.py b/cfme/services/catalogs/catalog.py\n--- a/cfme/services/catalogs/catalog.py\n+++ b/cfme/services/catalogs/catalog.py\n@@ -3,7 +3,7 @@\n from widgetastic.utils import Parameter\n from widgetastic.widget import Text\n from widgetastic_manageiq import MultiBoxSelect\n-from widgetastic_patternfly import Button, Input\n+from widgetastic_patternfly import Button, CandidateNotFound, Input\n from navmazing import NavigateToAttribute, NavigateToSibling\n \n from cfme.common import Taggable\n@@ -120,7 +120,7 @@\n try:\n navigate_to(self, 'Details')\n return True\n- except NameError:\n+ except (NameError, CandidateNotFound):\n return False\n", "issue": "Catalog exists property throws \"CandidateNotFound\" Exception\nWhen we call <catalog_obj>.exists , it throws \"CandidateNotFound\" Exception, where as in our test cases we expect a Boolean value \"False\"\r\n>> https://github.com/ManageIQ/integration_tests/blob/master/cfme/services/catalogs/catalog.py#L119 \r\n\r\nSteps to Reproduce: <catalog_obj>.exists\r\nActual Result: Trace-back of \"CandidateNotFound\" Exception\r\nExpected Result: False \n", "before_files": [{"content": "import attr\n\nfrom widgetastic.utils import Parameter\nfrom widgetastic.widget import Text\nfrom widgetastic_manageiq import MultiBoxSelect\nfrom widgetastic_patternfly import Button, Input\nfrom navmazing import NavigateToAttribute, NavigateToSibling\n\nfrom cfme.common import Taggable\nfrom cfme.modeling.base import BaseCollection, BaseEntity\nfrom cfme.utils.appliance.implementations.ui import navigator, CFMENavigateStep, navigate_to\nfrom cfme.utils.pretty import Pretty\nfrom cfme.utils.update import Updateable\nfrom cfme.utils.wait import wait_for\n\nfrom . import ServicesCatalogView\n\n\nclass CatalogsMultiBoxSelect(MultiBoxSelect):\n move_into_button = Button(title=Parameter(\"@move_into\"))\n move_from_button = Button(title=Parameter(\"@move_from\"))\n\n\nclass CatalogForm(ServicesCatalogView):\n title = Text('#explorer_title_text')\n\n name = Input(name='name')\n description = Input(name=\"description\")\n assign_catalog_items = CatalogsMultiBoxSelect(\n move_into=\"Move Selected buttons right\",\n move_from=\"Move Selected buttons left\",\n available_items=\"available_fields\",\n chosen_items=\"selected_fields\"\n )\n\n save_button = Button('Save')\n cancel_button = Button('Cancel')\n\n\nclass CatalogsView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and\n self.catalogs.is_opened and\n self.catalogs.tree.currently_selected == [\"All Catalogs\"])\n\n\nclass DetailsCatalogView(ServicesCatalogView):\n title = Text(\"#explorer_title_text\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\nclass AddCatalogView(CatalogForm):\n\n add_button = Button(\"Add\")\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Adding a new Catalog'\n )\n\n\nclass EditCatalogView(CatalogForm):\n\n save_button = Button('Save')\n reset_button = Button('Reset')\n\n @property\n def is_displayed(self):\n return (\n self.in_explorer and self.catalogs.is_opened and\n self.title.text == 'Editing Catalog \"{}\"'.format(self.context[\"object\"].name)\n )\n\n\[email protected]\nclass Catalog(BaseEntity, Updateable, Pretty, Taggable):\n\n name = 
attr.ib()\n description = attr.ib()\n items = attr.ib(default=None)\n\n def update(self, updates):\n view = navigate_to(self, 'Edit')\n changed = view.fill(updates)\n if changed:\n view.save_button.click()\n else:\n view.cancel_button.click()\n view = self.create_view(DetailsCatalogView, override=updates, wait='10s')\n view.flash.assert_no_error()\n if changed:\n view.flash.assert_message(\n 'Catalog \"{}\" was saved'.format(updates.get('name', self.name)))\n else:\n view.flash.assert_message(\n 'Edit of Catalog \"{}\" was cancelled by the user'.format(self.name))\n\n def delete(self):\n view = navigate_to(self, \"Details\")\n view.configuration.item_select('Remove Catalog', handle_alert=True)\n view = self.create_view(CatalogsView, wait='10s')\n view.flash.assert_no_error()\n view.flash.assert_success_message(\n 'Catalog \"{}\": Delete successful'.format(self.description or self.name))\n\n @property\n def exists(self):\n try:\n navigate_to(self, 'Details')\n return True\n except NameError:\n return False\n\n\[email protected]\nclass CatalogCollection(BaseCollection):\n \"\"\"A collection for the :py:class:`cfme.services.catalogs.catalog.Catalog`\"\"\"\n ENTITY = Catalog\n\n def create(self, name, description, items=None):\n \"\"\"Create a catalog.\n\n Args:\n name: The name of the catalog\n description: The description of the catalog\n items: Items in the catalog\n \"\"\"\n view = navigate_to(self, 'Add')\n view.fill({\n 'name': name,\n 'description': description,\n 'assign_catalog_items': items\n })\n view.add_button.click()\n catalog = self.instantiate(name=name, description=description, items=items)\n view = self.create_view(CatalogsView)\n assert view.is_displayed\n view.flash.assert_no_error()\n return catalog\n\n\[email protected](CatalogCollection)\nclass All(CFMENavigateStep):\n VIEW = CatalogsView\n prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')\n\n def step(self):\n self.prerequisite_view.navigation.select('Services', 'Catalogs')\n self.view.catalogs.tree.click_path(\"All Catalogs\")\n\n\[email protected](CatalogCollection)\nclass Add(CFMENavigateStep):\n VIEW = AddCatalogView\n prerequisite = NavigateToSibling('All')\n\n def step(self):\n self.prerequisite_view.configuration.item_select('Add a New Catalog')\n\n\[email protected](Catalog)\nclass Details(CFMENavigateStep):\n VIEW = DetailsCatalogView\n prerequisite = NavigateToAttribute('parent', 'All')\n\n def step(self):\n self.prerequisite_view.catalogs.tree.click_path(\"All Catalogs\", self.obj.name)\n\n\[email protected](Catalog)\nclass Edit(CFMENavigateStep):\n VIEW = EditCatalogView\n prerequisite = NavigateToSibling('Details')\n\n def step(self):\n self.prerequisite_view.configuration.item_select('Edit this Item')\n", "path": "cfme/services/catalogs/catalog.py"}]}
| 2,325 | 172 |
gh_patches_debug_33591
|
rasdani/github-patches
|
git_diff
|
litestar-org__litestar-2777
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
refactor: improve typing in CSRF middleware
https://github.com/litestar-org/litestar/blob/7414f7fd7d4782223502895e6a23b77ed635cd2d/litestar/middleware/csrf.py#L87-L127
At line 105, we use `dict.get()` to set the value of `existing_csrf_token` so it can be `None` if the header doesn't exist.
At line 123, that block is guarded by `self._csrf_tokens_match()`, which will return `False` if it is `None`, so actually `existing_csrf_token` cannot be falsy in this block; it's just that its value is not narrowed appropriately.
Fixing this would probably be as simple as using `request.cookies.get(..., "")` and `request.headers.get(..., "")` on lines 104 and 105 respectively, and re-type downstream methods to only accept `str` instead of `str | None`.
_Originally posted by @peterschutt in https://github.com/litestar-org/litestar/pull/2751#discussion_r1405515256_
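
A minimal sketch of one way to do the narrowing, written against the `__call__` body in the code below:

```python
# Sketch only: inside CSRFMiddleware.__call__, replace the existing
# `elif self._csrf_tokens_match(existing_csrf_token, csrf_cookie):` branch
# with explicit narrowing; _csrf_tokens_match() can then be re-typed to
# accept plain `str` arguments.
elif (
    existing_csrf_token is not None
    and csrf_cookie is not None
    and self._csrf_tokens_match(existing_csrf_token, csrf_cookie)
):
    connection_state.csrf_token = existing_csrf_token
    await self.app(scope, receive, send)
```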
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2770">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2770/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2770/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
</issue>
<code>
[start of litestar/middleware/csrf.py]
1 from __future__ import annotations
2
3 import hashlib
4 import hmac
5 import secrets
6 from secrets import compare_digest
7 from typing import TYPE_CHECKING, Any
8
9 from litestar.datastructures import MutableScopeHeaders
10 from litestar.datastructures.cookie import Cookie
11 from litestar.enums import RequestEncodingType, ScopeType
12 from litestar.exceptions import PermissionDeniedException
13 from litestar.middleware._utils import (
14 build_exclude_path_pattern,
15 should_bypass_middleware,
16 )
17 from litestar.middleware.base import MiddlewareProtocol
18 from litestar.utils.scope.state import ScopeState
19
20 if TYPE_CHECKING:
21 from litestar.config.csrf import CSRFConfig
22 from litestar.connection import Request
23 from litestar.types import (
24 ASGIApp,
25 HTTPSendMessage,
26 Message,
27 Receive,
28 Scope,
29 Scopes,
30 Send,
31 )
32
33 __all__ = ("CSRFMiddleware",)
34
35
36 CSRF_SECRET_BYTES = 32
37 CSRF_SECRET_LENGTH = CSRF_SECRET_BYTES * 2
38
39
40 def generate_csrf_hash(token: str, secret: str) -> str:
41 """Generate an HMAC that signs the CSRF token.
42
43 Args:
44 token: A hashed token.
45 secret: A secret value.
46
47 Returns:
48 A CSRF hash.
49 """
50 return hmac.new(secret.encode(), token.encode(), hashlib.sha256).hexdigest()
51
52
53 def generate_csrf_token(secret: str) -> str:
54 """Generate a CSRF token that includes a randomly generated string signed by an HMAC.
55
56 Args:
57 secret: A secret string.
58
59 Returns:
60 A unique CSRF token.
61 """
62 token = secrets.token_hex(CSRF_SECRET_BYTES)
63 token_hash = generate_csrf_hash(token=token, secret=secret)
64 return token + token_hash
65
66
67 class CSRFMiddleware(MiddlewareProtocol):
68 """CSRF Middleware class.
69
70 This Middleware protects against attacks by setting a CSRF cookie with a token and verifying it in request headers.
71 """
72
73 scopes: Scopes = {ScopeType.HTTP}
74
75 def __init__(self, app: ASGIApp, config: CSRFConfig) -> None:
76 """Initialize ``CSRFMiddleware``.
77
78 Args:
79 app: The ``next`` ASGI app to call.
80 config: The CSRFConfig instance.
81 """
82 self.app = app
83 self.config = config
84 self.exclude = build_exclude_path_pattern(exclude=config.exclude)
85
86 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
87 """ASGI callable.
88
89 Args:
90 scope: The ASGI connection scope.
91 receive: The ASGI receive function.
92 send: The ASGI send function.
93
94 Returns:
95 None
96 """
97 if scope["type"] != ScopeType.HTTP:
98 await self.app(scope, receive, send)
99 return
100
101 request: Request[Any, Any, Any] = scope["app"].request_class(scope=scope, receive=receive)
102 content_type, _ = request.content_type
103 csrf_cookie = request.cookies.get(self.config.cookie_name)
104 existing_csrf_token = request.headers.get(self.config.header_name)
105
106 if not existing_csrf_token and content_type in {
107 RequestEncodingType.URL_ENCODED,
108 RequestEncodingType.MULTI_PART,
109 }:
110 form = await request.form()
111 existing_csrf_token = form.get("_csrf_token", None)
112
113 connection_state = ScopeState.from_scope(scope)
114 if request.method in self.config.safe_methods or should_bypass_middleware(
115 scope=scope,
116 scopes=self.scopes,
117 exclude_opt_key=self.config.exclude_from_csrf_key,
118 exclude_path_pattern=self.exclude,
119 ):
120 token = connection_state.csrf_token = csrf_cookie or generate_csrf_token(secret=self.config.secret)
121 await self.app(scope, receive, self.create_send_wrapper(send=send, csrf_cookie=csrf_cookie, token=token))
122 elif self._csrf_tokens_match(existing_csrf_token, csrf_cookie):
123 # we haven't properly narrowed the type of `existing_csrf_token` to be non-None, but we know it is
124 connection_state.csrf_token = existing_csrf_token # type: ignore[assignment]
125 await self.app(scope, receive, send)
126 else:
127 raise PermissionDeniedException("CSRF token verification failed")
128
129 def create_send_wrapper(self, send: Send, token: str, csrf_cookie: str | None) -> Send:
130 """Wrap ``send`` to handle CSRF validation.
131
132 Args:
133 token: The CSRF token.
134 send: The ASGI send function.
135 csrf_cookie: CSRF cookie.
136
137 Returns:
138 An ASGI send function.
139 """
140
141 async def send_wrapper(message: Message) -> None:
142 """Send function that wraps the original send to inject a cookie.
143
144 Args:
145 message: An ASGI ``Message``
146
147 Returns:
148 None
149 """
150 if csrf_cookie is None and message["type"] == "http.response.start":
151 message.setdefault("headers", [])
152 self._set_cookie_if_needed(message=message, token=token)
153 await send(message)
154
155 return send_wrapper
156
157 def _set_cookie_if_needed(self, message: HTTPSendMessage, token: str) -> None:
158 headers = MutableScopeHeaders.from_message(message)
159 cookie = Cookie(
160 key=self.config.cookie_name,
161 value=token,
162 path=self.config.cookie_path,
163 secure=self.config.cookie_secure,
164 httponly=self.config.cookie_httponly,
165 samesite=self.config.cookie_samesite,
166 domain=self.config.cookie_domain,
167 )
168 headers.add("set-cookie", cookie.to_header(header=""))
169
170 def _decode_csrf_token(self, token: str) -> str | None:
171 """Decode a CSRF token and validate its HMAC."""
172 if len(token) < CSRF_SECRET_LENGTH + 1:
173 return None
174
175 token_secret = token[:CSRF_SECRET_LENGTH]
176 existing_hash = token[CSRF_SECRET_LENGTH:]
177 expected_hash = generate_csrf_hash(token=token_secret, secret=self.config.secret)
178 return token_secret if compare_digest(existing_hash, expected_hash) else None
179
180 def _csrf_tokens_match(self, request_csrf_token: str | None, cookie_csrf_token: str | None) -> bool:
181 """Take the CSRF tokens from the request and the cookie and verify both are valid and identical."""
182 if not (request_csrf_token and cookie_csrf_token):
183 return False
184
185 decoded_request_token = self._decode_csrf_token(request_csrf_token)
186 decoded_cookie_token = self._decode_csrf_token(cookie_csrf_token)
187 if decoded_request_token is None or decoded_cookie_token is None:
188 return False
189
190 return compare_digest(decoded_request_token, decoded_cookie_token)
191
[end of litestar/middleware/csrf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/litestar/middleware/csrf.py b/litestar/middleware/csrf.py
--- a/litestar/middleware/csrf.py
+++ b/litestar/middleware/csrf.py
@@ -119,9 +119,12 @@
):
token = connection_state.csrf_token = csrf_cookie or generate_csrf_token(secret=self.config.secret)
await self.app(scope, receive, self.create_send_wrapper(send=send, csrf_cookie=csrf_cookie, token=token))
- elif self._csrf_tokens_match(existing_csrf_token, csrf_cookie):
- # we haven't properly narrowed the type of `existing_csrf_token` to be non-None, but we know it is
- connection_state.csrf_token = existing_csrf_token # type: ignore[assignment]
+ elif (
+ existing_csrf_token is not None
+ and csrf_cookie is not None
+ and self._csrf_tokens_match(existing_csrf_token, csrf_cookie)
+ ):
+ connection_state.csrf_token = existing_csrf_token
await self.app(scope, receive, send)
else:
raise PermissionDeniedException("CSRF token verification failed")
@@ -177,11 +180,8 @@
expected_hash = generate_csrf_hash(token=token_secret, secret=self.config.secret)
return token_secret if compare_digest(existing_hash, expected_hash) else None
- def _csrf_tokens_match(self, request_csrf_token: str | None, cookie_csrf_token: str | None) -> bool:
+ def _csrf_tokens_match(self, request_csrf_token: str, cookie_csrf_token: str) -> bool:
"""Take the CSRF tokens from the request and the cookie and verify both are valid and identical."""
- if not (request_csrf_token and cookie_csrf_token):
- return False
-
decoded_request_token = self._decode_csrf_token(request_csrf_token)
decoded_cookie_token = self._decode_csrf_token(cookie_csrf_token)
if decoded_request_token is None or decoded_cookie_token is None:
|
{"golden_diff": "diff --git a/litestar/middleware/csrf.py b/litestar/middleware/csrf.py\n--- a/litestar/middleware/csrf.py\n+++ b/litestar/middleware/csrf.py\n@@ -119,9 +119,12 @@\n ):\n token = connection_state.csrf_token = csrf_cookie or generate_csrf_token(secret=self.config.secret)\n await self.app(scope, receive, self.create_send_wrapper(send=send, csrf_cookie=csrf_cookie, token=token))\n- elif self._csrf_tokens_match(existing_csrf_token, csrf_cookie):\n- # we haven't properly narrowed the type of `existing_csrf_token` to be non-None, but we know it is\n- connection_state.csrf_token = existing_csrf_token # type: ignore[assignment]\n+ elif (\n+ existing_csrf_token is not None\n+ and csrf_cookie is not None\n+ and self._csrf_tokens_match(existing_csrf_token, csrf_cookie)\n+ ):\n+ connection_state.csrf_token = existing_csrf_token\n await self.app(scope, receive, send)\n else:\n raise PermissionDeniedException(\"CSRF token verification failed\")\n@@ -177,11 +180,8 @@\n expected_hash = generate_csrf_hash(token=token_secret, secret=self.config.secret)\n return token_secret if compare_digest(existing_hash, expected_hash) else None\n \n- def _csrf_tokens_match(self, request_csrf_token: str | None, cookie_csrf_token: str | None) -> bool:\n+ def _csrf_tokens_match(self, request_csrf_token: str, cookie_csrf_token: str) -> bool:\n \"\"\"Take the CSRF tokens from the request and the cookie and verify both are valid and identical.\"\"\"\n- if not (request_csrf_token and cookie_csrf_token):\n- return False\n-\n decoded_request_token = self._decode_csrf_token(request_csrf_token)\n decoded_cookie_token = self._decode_csrf_token(cookie_csrf_token)\n if decoded_request_token is None or decoded_cookie_token is None:\n", "issue": "refactor: improve typing in CSRF middleware\nhttps://github.com/litestar-org/litestar/blob/7414f7fd7d4782223502895e6a23b77ed635cd2d/litestar/middleware/csrf.py#L87-L127\r\n\r\nAt line 105, we use `dict.get()` to set the value of `existing_csrf_token` so it can be `None` if the header doesn't exist.\r\n\r\nAt line 123, that block is guarded by `self._csrf_tokens_match()` which will return `False` if it is `None`, so actually `existing_csrf_token` cannot be falsy in this block, its just that its value is not narrowed appropriately.\r\n\r\nFixing this would probably be as simple as using `request.cookies.get(..., \"\")` and `request.headers.get(..., \"\")` on lines 104 and 105 respectively, and re-type downstream methods to only accept `str` instead of `str | None`.\r\n\r\n_Originally posted by @peterschutt in https://github.com/litestar-org/litestar/pull/2751#discussion_r1405515256_\r\n \n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/2770\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2770/pledge.svg?darkmode=1\">\n 
<img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2770/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport hashlib\nimport hmac\nimport secrets\nfrom secrets import compare_digest\nfrom typing import TYPE_CHECKING, Any\n\nfrom litestar.datastructures import MutableScopeHeaders\nfrom litestar.datastructures.cookie import Cookie\nfrom litestar.enums import RequestEncodingType, ScopeType\nfrom litestar.exceptions import PermissionDeniedException\nfrom litestar.middleware._utils import (\n build_exclude_path_pattern,\n should_bypass_middleware,\n)\nfrom litestar.middleware.base import MiddlewareProtocol\nfrom litestar.utils.scope.state import ScopeState\n\nif TYPE_CHECKING:\n from litestar.config.csrf import CSRFConfig\n from litestar.connection import Request\n from litestar.types import (\n ASGIApp,\n HTTPSendMessage,\n Message,\n Receive,\n Scope,\n Scopes,\n Send,\n )\n\n__all__ = (\"CSRFMiddleware\",)\n\n\nCSRF_SECRET_BYTES = 32\nCSRF_SECRET_LENGTH = CSRF_SECRET_BYTES * 2\n\n\ndef generate_csrf_hash(token: str, secret: str) -> str:\n \"\"\"Generate an HMAC that signs the CSRF token.\n\n Args:\n token: A hashed token.\n secret: A secret value.\n\n Returns:\n A CSRF hash.\n \"\"\"\n return hmac.new(secret.encode(), token.encode(), hashlib.sha256).hexdigest()\n\n\ndef generate_csrf_token(secret: str) -> str:\n \"\"\"Generate a CSRF token that includes a randomly generated string signed by an HMAC.\n\n Args:\n secret: A secret string.\n\n Returns:\n A unique CSRF token.\n \"\"\"\n token = secrets.token_hex(CSRF_SECRET_BYTES)\n token_hash = generate_csrf_hash(token=token, secret=secret)\n return token + token_hash\n\n\nclass CSRFMiddleware(MiddlewareProtocol):\n \"\"\"CSRF Middleware class.\n\n This Middleware protects against attacks by setting a CSRF cookie with a token and verifying it in request headers.\n \"\"\"\n\n scopes: Scopes = {ScopeType.HTTP}\n\n def __init__(self, app: ASGIApp, config: CSRFConfig) -> None:\n \"\"\"Initialize ``CSRFMiddleware``.\n\n Args:\n app: The ``next`` ASGI app to call.\n config: The CSRFConfig instance.\n \"\"\"\n self.app = app\n self.config = config\n self.exclude = build_exclude_path_pattern(exclude=config.exclude)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"ASGI callable.\n\n Args:\n scope: The ASGI connection scope.\n receive: The ASGI receive function.\n send: The ASGI send function.\n\n Returns:\n None\n \"\"\"\n if scope[\"type\"] != ScopeType.HTTP:\n await self.app(scope, receive, send)\n return\n\n request: Request[Any, Any, Any] = scope[\"app\"].request_class(scope=scope, receive=receive)\n content_type, _ = request.content_type\n csrf_cookie = request.cookies.get(self.config.cookie_name)\n existing_csrf_token = request.headers.get(self.config.header_name)\n\n if not existing_csrf_token and content_type in {\n RequestEncodingType.URL_ENCODED,\n RequestEncodingType.MULTI_PART,\n }:\n form = await request.form()\n existing_csrf_token = form.get(\"_csrf_token\", None)\n\n connection_state = ScopeState.from_scope(scope)\n if request.method in self.config.safe_methods or should_bypass_middleware(\n scope=scope,\n scopes=self.scopes,\n exclude_opt_key=self.config.exclude_from_csrf_key,\n exclude_path_pattern=self.exclude,\n ):\n token = connection_state.csrf_token = csrf_cookie or generate_csrf_token(secret=self.config.secret)\n await self.app(scope, receive, 
self.create_send_wrapper(send=send, csrf_cookie=csrf_cookie, token=token))\n elif self._csrf_tokens_match(existing_csrf_token, csrf_cookie):\n # we haven't properly narrowed the type of `existing_csrf_token` to be non-None, but we know it is\n connection_state.csrf_token = existing_csrf_token # type: ignore[assignment]\n await self.app(scope, receive, send)\n else:\n raise PermissionDeniedException(\"CSRF token verification failed\")\n\n def create_send_wrapper(self, send: Send, token: str, csrf_cookie: str | None) -> Send:\n \"\"\"Wrap ``send`` to handle CSRF validation.\n\n Args:\n token: The CSRF token.\n send: The ASGI send function.\n csrf_cookie: CSRF cookie.\n\n Returns:\n An ASGI send function.\n \"\"\"\n\n async def send_wrapper(message: Message) -> None:\n \"\"\"Send function that wraps the original send to inject a cookie.\n\n Args:\n message: An ASGI ``Message``\n\n Returns:\n None\n \"\"\"\n if csrf_cookie is None and message[\"type\"] == \"http.response.start\":\n message.setdefault(\"headers\", [])\n self._set_cookie_if_needed(message=message, token=token)\n await send(message)\n\n return send_wrapper\n\n def _set_cookie_if_needed(self, message: HTTPSendMessage, token: str) -> None:\n headers = MutableScopeHeaders.from_message(message)\n cookie = Cookie(\n key=self.config.cookie_name,\n value=token,\n path=self.config.cookie_path,\n secure=self.config.cookie_secure,\n httponly=self.config.cookie_httponly,\n samesite=self.config.cookie_samesite,\n domain=self.config.cookie_domain,\n )\n headers.add(\"set-cookie\", cookie.to_header(header=\"\"))\n\n def _decode_csrf_token(self, token: str) -> str | None:\n \"\"\"Decode a CSRF token and validate its HMAC.\"\"\"\n if len(token) < CSRF_SECRET_LENGTH + 1:\n return None\n\n token_secret = token[:CSRF_SECRET_LENGTH]\n existing_hash = token[CSRF_SECRET_LENGTH:]\n expected_hash = generate_csrf_hash(token=token_secret, secret=self.config.secret)\n return token_secret if compare_digest(existing_hash, expected_hash) else None\n\n def _csrf_tokens_match(self, request_csrf_token: str | None, cookie_csrf_token: str | None) -> bool:\n \"\"\"Take the CSRF tokens from the request and the cookie and verify both are valid and identical.\"\"\"\n if not (request_csrf_token and cookie_csrf_token):\n return False\n\n decoded_request_token = self._decode_csrf_token(request_csrf_token)\n decoded_cookie_token = self._decode_csrf_token(cookie_csrf_token)\n if decoded_request_token is None or decoded_cookie_token is None:\n return False\n\n return compare_digest(decoded_request_token, decoded_cookie_token)\n", "path": "litestar/middleware/csrf.py"}]}
| 2,990 | 442 |
gh_patches_debug_57271
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-984
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'async for' requires an object with __aiter__ method, got AIOTracedCursor
## Problem
Using ddtrace and aiopg, if I do:
```python
await cur.execute(query)
async for value in cur:
yield value
```
If my connection is patched, I get:
```
TypeError: 'async for' requires an object with __aiter__ method, got AIOTracedCursor
(...)
File "path/to/my/file.py", line 241, in get_many
async for value in cur:
```
(if my connection is not patched, it works)
## Analysis
The cursor class is replaced with `AIOTracedCursor`, which inherits from `wrapt.ObjectProxy`.
The problem is that, although `ObjectProxy` makes `AIOTracedCursor().__aiter__()` resolve and return whatever the wrapped cursor would return, this is not enough for Python to accept the cursor in an `async for` loop: implicit special-method lookup happens directly on the type and bypasses `__getattr__`.
A small example with simple objects:
```python
class A():
    def iter(self):
        return iter([])

    async def aiter(self):
        return iter([])

    def __getattr__(self, attr):
        if attr.endswith("iter__"):
            return getattr(self, attr.strip("_"))


a = A()
```
We implement `a.__iter__()` and `a.__aiter__()` but Python doesn't see it:
```
In [6]: a.__iter__()
Out[6]: <list_iterator at 0x7fdff00de860>
In [7]: a.__aiter__()
Out[7]: <coroutine object A.aiter at 0x7fdff00ddba0>
In [8]: async for e in a: print(e)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
cell_name in async-def-wrapper()
TypeError: 'async for' requires an object with __aiter__ method, got A
In [9]: iter(a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-9-2b64cb055077> in <module>
----> 1 iter(a)
TypeError: 'A' object is not iterable
```
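A minimal sketch of the workaround this analysis points to — defining `__aiter__` explicitly on the proxy class so the lookup succeeds on the type, and simply delegating to the wrapped cursor (the patch below takes the same shape):
```python
# Sketch only: explicit dunder delegation on the wrapt proxy.
# 'async for' looks __aiter__ up on the type, bypassing __getattr__,
# so the method has to exist on AIOTracedCursor itself.
from ddtrace.vendor import wrapt


class AIOTracedCursor(wrapt.ObjectProxy):
    ...  # existing traced execute()/executemany()/callproc() methods

    def __aiter__(self):
        return self.__wrapped__.__aiter__()
```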
</issue>
<code>
[start of ddtrace/contrib/aiopg/connection.py]
1 import asyncio
2 from ddtrace.vendor import wrapt
3
4 from aiopg.utils import _ContextManager
5
6 from .. import dbapi
7 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
8 from ...ext import sql, AppTypes
9 from ...pin import Pin
10 from ...settings import config
11
12
13 class AIOTracedCursor(wrapt.ObjectProxy):
14 """ TracedCursor wraps a psql cursor and traces its queries. """
15
16 def __init__(self, cursor, pin):
17 super(AIOTracedCursor, self).__init__(cursor)
18 pin.onto(self)
19 name = pin.app or 'sql'
20 self._datadog_name = '%s.query' % name
21
22 @asyncio.coroutine
23 def _trace_method(self, method, resource, extra_tags, *args, **kwargs):
24 pin = Pin.get_from(self)
25 if not pin or not pin.enabled():
26 result = yield from method(*args, **kwargs)
27 return result
28 service = pin.service
29
30 with pin.tracer.trace(self._datadog_name, service=service,
31 resource=resource) as s:
32 s.span_type = sql.TYPE
33 s.set_tag(sql.QUERY, resource)
34 s.set_tags(pin.tags)
35 s.set_tags(extra_tags)
36
37 # set analytics sample rate
38 s.set_tag(
39 ANALYTICS_SAMPLE_RATE_KEY,
40 config.aiopg.get_analytics_sample_rate()
41 )
42
43 try:
44 result = yield from method(*args, **kwargs)
45 return result
46 finally:
47 s.set_metric('db.rowcount', self.rowcount)
48
49 @asyncio.coroutine
50 def executemany(self, query, *args, **kwargs):
51 # FIXME[matt] properly handle kwargs here. arg names can be different
52 # with different libs.
53 result = yield from self._trace_method(
54 self.__wrapped__.executemany, query, {'sql.executemany': 'true'},
55 query, *args, **kwargs)
56 return result
57
58 @asyncio.coroutine
59 def execute(self, query, *args, **kwargs):
60 result = yield from self._trace_method(
61 self.__wrapped__.execute, query, {}, query, *args, **kwargs)
62 return result
63
64 @asyncio.coroutine
65 def callproc(self, proc, args):
66 result = yield from self._trace_method(
67 self.__wrapped__.callproc, proc, {}, proc, args)
68 return result
69
70
71 class AIOTracedConnection(wrapt.ObjectProxy):
72 """ TracedConnection wraps a Connection with tracing code. """
73
74 def __init__(self, conn, pin=None, cursor_cls=AIOTracedCursor):
75 super(AIOTracedConnection, self).__init__(conn)
76 name = dbapi._get_vendor(conn)
77 db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)
78 db_pin.onto(self)
79 # wrapt requires prefix of `_self` for attributes that are only in the
80 # proxy (since some of our source objects will use `__slots__`)
81 self._self_cursor_cls = cursor_cls
82
83 def cursor(self, *args, **kwargs):
84 # unfortunately we also need to patch this method as otherwise "self"
85 # ends up being the aiopg connection object
86 coro = self._cursor(*args, **kwargs)
87 return _ContextManager(coro)
88
89 @asyncio.coroutine
90 def _cursor(self, *args, **kwargs):
91 cursor = yield from self.__wrapped__._cursor(*args, **kwargs)
92 pin = Pin.get_from(self)
93 if not pin:
94 return cursor
95 return self._self_cursor_cls(cursor, pin)
96
[end of ddtrace/contrib/aiopg/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py
--- a/ddtrace/contrib/aiopg/connection.py
+++ b/ddtrace/contrib/aiopg/connection.py
@@ -67,6 +67,9 @@
self.__wrapped__.callproc, proc, {}, proc, args)
return result
+ def __aiter__(self):
+ return self.__wrapped__.__aiter__()
+
class AIOTracedConnection(wrapt.ObjectProxy):
""" TracedConnection wraps a Connection with tracing code. """
|
{"golden_diff": "diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py\n--- a/ddtrace/contrib/aiopg/connection.py\n+++ b/ddtrace/contrib/aiopg/connection.py\n@@ -67,6 +67,9 @@\n self.__wrapped__.callproc, proc, {}, proc, args)\n return result\n \n+ def __aiter__(self):\n+ return self.__wrapped__.__aiter__()\n+\n \n class AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n", "issue": "'async for' requires an object with __aiter__ method, got AIOTracedCursor\n## Problem\r\nUsing ddtrace and aiopg, if I do:\r\n\r\n```python\r\nawait cur.execute(query)\r\nasync for value in cur:\r\n yield value\r\n```\r\nIf my connection is not patched, I get:\r\n```\r\nTypeError: 'async for' requires an object with __aiter__ method, got AIOTracedCursor\r\n(...)\r\n File \"path/to/my/file.py\", line 241, in get_many\r\n async for value in cur:\r\n```\r\n(if my connection is not patched, it works)\r\n\r\n## Analysis\r\n\r\nThe cursor class is replaced with `AIOTracedCursor` which inherits `wrapt.ObjectProxy`.\r\n\r\nProblem is, while thanks to `ObjectProxy`, `AIOTracedCursor().__aiter__()` would most probably work and return whatever the real proxy would return, this is not enough for Python to accept that the cursor is an iterator.\r\n\r\nA small example with simple objects:\r\n```python\r\nclass A():\r\n def iter(self):\r\n return iter([])\r\n\r\n async def aiter(self):\r\n return iter([])\r\n\r\n def __getattr__(self, attr):\r\n if attr.endswith(\"iter__\"):\r\n return getattr(self, attr.strip(\"_\"))\r\na = A()\r\n```\r\nWe implement `a.__iter__()` and `a.__aiter__()` but Python doesn't see it:\r\n```\r\nIn [6]: a.__iter__() \r\nOut[6]: <list_iterator at 0x7fdff00de860>\r\n\r\nIn [7]: a.__aiter__() \r\nOut[7]: <coroutine object A.aiter at 0x7fdff00ddba0>\r\n\r\nIn [8]: async for e in a: print(e) \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\ncell_name in async-def-wrapper()\r\n\r\nTypeError: 'async for' requires an object with __aiter__ method, got A \r\n\r\nIn [9]: iter(a) \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-9-2b64cb055077> in <module>\r\n----> 1 iter(a)\r\n\r\nTypeError: 'A' object is not iterable\r\n\r\n```\n", "before_files": [{"content": "import asyncio\nfrom ddtrace.vendor import wrapt\n\nfrom aiopg.utils import _ContextManager\n\nfrom .. import dbapi\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...ext import sql, AppTypes\nfrom ...pin import Pin\nfrom ...settings import config\n\n\nclass AIOTracedCursor(wrapt.ObjectProxy):\n \"\"\" TracedCursor wraps a psql cursor and traces its queries. 
\"\"\"\n\n def __init__(self, cursor, pin):\n super(AIOTracedCursor, self).__init__(cursor)\n pin.onto(self)\n name = pin.app or 'sql'\n self._datadog_name = '%s.query' % name\n\n @asyncio.coroutine\n def _trace_method(self, method, resource, extra_tags, *args, **kwargs):\n pin = Pin.get_from(self)\n if not pin or not pin.enabled():\n result = yield from method(*args, **kwargs)\n return result\n service = pin.service\n\n with pin.tracer.trace(self._datadog_name, service=service,\n resource=resource) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, resource)\n s.set_tags(pin.tags)\n s.set_tags(extra_tags)\n\n # set analytics sample rate\n s.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n config.aiopg.get_analytics_sample_rate()\n )\n\n try:\n result = yield from method(*args, **kwargs)\n return result\n finally:\n s.set_metric('db.rowcount', self.rowcount)\n\n @asyncio.coroutine\n def executemany(self, query, *args, **kwargs):\n # FIXME[matt] properly handle kwargs here. arg names can be different\n # with different libs.\n result = yield from self._trace_method(\n self.__wrapped__.executemany, query, {'sql.executemany': 'true'},\n query, *args, **kwargs)\n return result\n\n @asyncio.coroutine\n def execute(self, query, *args, **kwargs):\n result = yield from self._trace_method(\n self.__wrapped__.execute, query, {}, query, *args, **kwargs)\n return result\n\n @asyncio.coroutine\n def callproc(self, proc, args):\n result = yield from self._trace_method(\n self.__wrapped__.callproc, proc, {}, proc, args)\n return result\n\n\nclass AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n\n def __init__(self, conn, pin=None, cursor_cls=AIOTracedCursor):\n super(AIOTracedConnection, self).__init__(conn)\n name = dbapi._get_vendor(conn)\n db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)\n db_pin.onto(self)\n # wrapt requires prefix of `_self` for attributes that are only in the\n # proxy (since some of our source objects will use `__slots__`)\n self._self_cursor_cls = cursor_cls\n\n def cursor(self, *args, **kwargs):\n # unfortunately we also need to patch this method as otherwise \"self\"\n # ends up being the aiopg connection object\n coro = self._cursor(*args, **kwargs)\n return _ContextManager(coro)\n\n @asyncio.coroutine\n def _cursor(self, *args, **kwargs):\n cursor = yield from self.__wrapped__._cursor(*args, **kwargs)\n pin = Pin.get_from(self)\n if not pin:\n return cursor\n return self._self_cursor_cls(cursor, pin)\n", "path": "ddtrace/contrib/aiopg/connection.py"}]}
| 2,016 | 129 |
gh_patches_debug_29199
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1361
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IN-AP has changed its data url
The new link is https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx (same page layout I think). Old link returns 404.
</issue>
<code>
[start of parsers/IN_AP.py]
1 #!/usr/bin/env python3
2
3 from requests import Session
4 from .lib import zonekey, IN, web
5
6
7 def fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):
8 """Fetch Andhra Pradesh production"""
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 zonekey.assert_zone_key(zone_key, 'IN-AP')
13
14 html = web.get_response_soup(zone_key,
15 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
16 india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
17
18 hydro_value = IN.read_value_from_span_id(html, 'lblHydel')
19 gas_value = IN.read_value_from_span_id(html, 'lblGas')
20 wind_value = IN.read_value_from_span_id(html, 'lblWind')
21 solar_value = IN.read_value_from_span_id(html, 'lblSolar')
22
23 # All thermal centrals are considered coal based production
24 # https://en.wikipedia.org/wiki/Power_sector_of_Andhra_Pradesh
25 thermal_value = IN.read_value_from_span_id(html, 'lblThermal')
26
27 cgs_value = IN.read_value_from_span_id(html, 'lblCGS')
28 ipp_value = IN.read_value_from_span_id(html, 'lblIPPS')
29
30 data = {
31 'zoneKey': zone_key,
32 'datetime': india_date.datetime,
33 'production': {
34 'biomass': 0.0,
35 'coal': thermal_value,
36 'gas': gas_value,
37 'hydro': hydro_value,
38 'nuclear': 0.0,
39 'oil': 0.0,
40 'solar': solar_value,
41 'wind': wind_value,
42 'geothermal': 0.0,
43 'unknown': round(cgs_value + ipp_value, 2)
44 },
45 'storage': {
46 'hydro': 0.0
47 },
48 'source': 'core.ap.gov.in',
49 }
50
51 return data
52
53
54 def fetch_consumption(zone_key='IN-AP', session=None, target_datetime=None, logger=None):
55 """Fetch Andhra Pradesh consumption"""
56 if target_datetime:
57 raise NotImplementedError('This parser is not yet able to parse past dates')
58
59 zonekey.assert_zone_key(zone_key, 'IN-AP')
60
61 html = web.get_response_soup(zone_key,
62 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
63 india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
64
65 demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')
66
67 data = {
68 'zoneKey': zone_key,
69 'datetime': india_date.datetime,
70 'consumption': demand_value,
71 'source': 'core.ap.gov.in'
72 }
73
74 return data
75
76
77 if __name__ == '__main__':
78 session = Session()
79 print(fetch_production('IN-AP', session))
80 print(fetch_consumption('IN-AP', session))
81
[end of parsers/IN_AP.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/IN_AP.py b/parsers/IN_AP.py
--- a/parsers/IN_AP.py
+++ b/parsers/IN_AP.py
@@ -3,7 +3,6 @@
from requests import Session
from .lib import zonekey, IN, web
-
def fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):
"""Fetch Andhra Pradesh production"""
if target_datetime:
@@ -12,7 +11,7 @@
zonekey.assert_zone_key(zone_key, 'IN-AP')
html = web.get_response_soup(zone_key,
- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
hydro_value = IN.read_value_from_span_id(html, 'lblHydel')
@@ -59,7 +58,7 @@
zonekey.assert_zone_key(zone_key, 'IN-AP')
html = web.get_response_soup(zone_key,
- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')
|
{"golden_diff": "diff --git a/parsers/IN_AP.py b/parsers/IN_AP.py\n--- a/parsers/IN_AP.py\n+++ b/parsers/IN_AP.py\n@@ -3,7 +3,6 @@\n from requests import Session\n from .lib import zonekey, IN, web\n \n-\n def fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):\n \"\"\"Fetch Andhra Pradesh production\"\"\"\n if target_datetime:\n@@ -12,7 +11,7 @@\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n \n html = web.get_response_soup(zone_key,\n- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n \n hydro_value = IN.read_value_from_span_id(html, 'lblHydel')\n@@ -59,7 +58,7 @@\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n \n html = web.get_response_soup(zone_key,\n- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n \n demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')\n", "issue": "IN-AP has changed its data url\nThe new link is https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx (same page layout I think). Old link returns 404.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom requests import Session\nfrom .lib import zonekey, IN, web\n\n\ndef fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):\n \"\"\"Fetch Andhra Pradesh production\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n\n html = web.get_response_soup(zone_key,\n 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n\n hydro_value = IN.read_value_from_span_id(html, 'lblHydel')\n gas_value = IN.read_value_from_span_id(html, 'lblGas')\n wind_value = IN.read_value_from_span_id(html, 'lblWind')\n solar_value = IN.read_value_from_span_id(html, 'lblSolar')\n\n # All thermal centrals are considered coal based production\n # https://en.wikipedia.org/wiki/Power_sector_of_Andhra_Pradesh\n thermal_value = IN.read_value_from_span_id(html, 'lblThermal')\n\n cgs_value = IN.read_value_from_span_id(html, 'lblCGS')\n ipp_value = IN.read_value_from_span_id(html, 'lblIPPS')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': india_date.datetime,\n 'production': {\n 'biomass': 0.0,\n 'coal': thermal_value,\n 'gas': gas_value,\n 'hydro': hydro_value,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': solar_value,\n 'wind': wind_value,\n 'geothermal': 0.0,\n 'unknown': round(cgs_value + ipp_value, 2)\n },\n 'storage': {\n 'hydro': 0.0\n },\n 'source': 'core.ap.gov.in',\n }\n\n return data\n\n\ndef fetch_consumption(zone_key='IN-AP', session=None, target_datetime=None, logger=None):\n \"\"\"Fetch Andhra Pradesh consumption\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n\n html = web.get_response_soup(zone_key,\n 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n\n 
demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': india_date.datetime,\n 'consumption': demand_value,\n 'source': 'core.ap.gov.in'\n }\n\n return data\n\n\nif __name__ == '__main__':\n session = Session()\n print(fetch_production('IN-AP', session))\n print(fetch_consumption('IN-AP', session))\n", "path": "parsers/IN_AP.py"}]}
| 1,418 | 357 |
gh_patches_debug_16923
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-1130
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary assignment on `HOST_WEBMAIL`
We came across another piece of garbage:
https://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L19
https://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L22
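Concretely, the redundancy those two lines point at goes away if the defaults are passed straight to `args.get()` instead of being pre-assigned — a sketch of the relevant lines in `core/nginx/config.py` (the patch below does exactly this):
```python
# Sketch only: inline the defaults, dropping the HOST_WEBMAIL/HOST_WEBDAV pre-assignments.
if args["WEBMAIL"] != "none":
    args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL", "webmail"))
if args["WEBDAV"] != "none":
    args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV", "webdav:5232"))
```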
</issue>
<code>
[start of core/nginx/config.py]
1 #!/usr/bin/python3
2
3 import os
4 import logging as log
5 import sys
6 from socrate import system, conf
7
8 args = os.environ.copy()
9
10 log.basicConfig(stream=sys.stderr, level=args.get("LOG_LEVEL", "WARNING"))
11
12 # Get the first DNS server
13 with open("/etc/resolv.conf") as handle:
14 content = handle.read().split()
15 args["RESOLVER"] = content[content.index("nameserver") + 1]
16
17 args["ADMIN_ADDRESS"] = system.resolve_address(args.get("HOST_ADMIN", "admin"))
18 args["ANTISPAM_ADDRESS"] = system.resolve_address(args.get("HOST_ANTISPAM", "antispam:11334"))
19 args["HOST_WEBMAIL"] = args.get("HOST_WEBMAIL", "webmail")
20 if args["WEBMAIL"] != "none":
21 args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL"))
22 args["HOST_WEBDAV"] = args.get("HOST_WEBDAV", "webdav:5232")
23 if args["WEBDAV"] != "none":
24 args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV"))
25
26 # TLS configuration
27 cert_name = os.getenv("TLS_CERT_FILENAME", default="cert.pem")
28 keypair_name = os.getenv("TLS_KEYPAIR_FILENAME", default="key.pem")
29 args["TLS"] = {
30 "cert": ("/certs/%s" % cert_name, "/certs/%s" % keypair_name),
31 "letsencrypt": ("/certs/letsencrypt/live/mailu/fullchain.pem",
32 "/certs/letsencrypt/live/mailu/privkey.pem"),
33 "mail": ("/certs/%s" % cert_name, "/certs/%s" % keypair_name),
34 "mail-letsencrypt": ("/certs/letsencrypt/live/mailu/fullchain.pem",
35 "/certs/letsencrypt/live/mailu/privkey.pem"),
36 "notls": None
37 }[args["TLS_FLAVOR"]]
38
39 if args["TLS"] and not all(os.path.exists(file_path) for file_path in args["TLS"]):
40 print("Missing cert or key file, disabling TLS")
41 args["TLS_ERROR"] = "yes"
42
43 # Build final configuration paths
44 conf.jinja("/conf/tls.conf", args, "/etc/nginx/tls.conf")
45 conf.jinja("/conf/proxy.conf", args, "/etc/nginx/proxy.conf")
46 conf.jinja("/conf/nginx.conf", args, "/etc/nginx/nginx.conf")
47 if os.path.exists("/var/run/nginx.pid"):
48 os.system("nginx -s reload")
49
[end of core/nginx/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/nginx/config.py b/core/nginx/config.py
--- a/core/nginx/config.py
+++ b/core/nginx/config.py
@@ -16,12 +16,10 @@
args["ADMIN_ADDRESS"] = system.resolve_address(args.get("HOST_ADMIN", "admin"))
args["ANTISPAM_ADDRESS"] = system.resolve_address(args.get("HOST_ANTISPAM", "antispam:11334"))
-args["HOST_WEBMAIL"] = args.get("HOST_WEBMAIL", "webmail")
if args["WEBMAIL"] != "none":
- args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL"))
-args["HOST_WEBDAV"] = args.get("HOST_WEBDAV", "webdav:5232")
+ args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL", "webmail"))
if args["WEBDAV"] != "none":
- args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV"))
+ args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV", "webdav:5232"))
# TLS configuration
cert_name = os.getenv("TLS_CERT_FILENAME", default="cert.pem")
|
{"golden_diff": "diff --git a/core/nginx/config.py b/core/nginx/config.py\n--- a/core/nginx/config.py\n+++ b/core/nginx/config.py\n@@ -16,12 +16,10 @@\n \n args[\"ADMIN_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ADMIN\", \"admin\"))\n args[\"ANTISPAM_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\n-args[\"HOST_WEBMAIL\"] = args.get(\"HOST_WEBMAIL\", \"webmail\")\n if args[\"WEBMAIL\"] != \"none\":\n- args[\"WEBMAIL_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBMAIL\"))\n-args[\"HOST_WEBDAV\"] = args.get(\"HOST_WEBDAV\", \"webdav:5232\")\n+ args[\"WEBMAIL_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBMAIL\", \"webmail\"))\n if args[\"WEBDAV\"] != \"none\":\n- args[\"WEBDAV_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBDAV\"))\n+ args[\"WEBDAV_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBDAV\", \"webdav:5232\"))\n \n # TLS configuration\n cert_name = os.getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\")\n", "issue": "Unnecessary assignment on `HOST_WEBMAIL`\nWe came across another piece of garbage:\r\n\r\nhttps://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L19\r\n\r\nhttps://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L22\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\nfrom socrate import system, conf\n\nargs = os.environ.copy()\n\nlog.basicConfig(stream=sys.stderr, level=args.get(\"LOG_LEVEL\", \"WARNING\"))\n\n# Get the first DNS server\nwith open(\"/etc/resolv.conf\") as handle:\n content = handle.read().split()\n args[\"RESOLVER\"] = content[content.index(\"nameserver\") + 1]\n\nargs[\"ADMIN_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ADMIN\", \"admin\"))\nargs[\"ANTISPAM_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\nargs[\"HOST_WEBMAIL\"] = args.get(\"HOST_WEBMAIL\", \"webmail\")\nif args[\"WEBMAIL\"] != \"none\":\n args[\"WEBMAIL_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBMAIL\"))\nargs[\"HOST_WEBDAV\"] = args.get(\"HOST_WEBDAV\", \"webdav:5232\")\nif args[\"WEBDAV\"] != \"none\":\n args[\"WEBDAV_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBDAV\"))\n\n# TLS configuration\ncert_name = os.getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\")\nkeypair_name = os.getenv(\"TLS_KEYPAIR_FILENAME\", default=\"key.pem\")\nargs[\"TLS\"] = {\n \"cert\": (\"/certs/%s\" % cert_name, \"/certs/%s\" % keypair_name),\n \"letsencrypt\": (\"/certs/letsencrypt/live/mailu/fullchain.pem\",\n \"/certs/letsencrypt/live/mailu/privkey.pem\"),\n \"mail\": (\"/certs/%s\" % cert_name, \"/certs/%s\" % keypair_name),\n \"mail-letsencrypt\": (\"/certs/letsencrypt/live/mailu/fullchain.pem\",\n \"/certs/letsencrypt/live/mailu/privkey.pem\"),\n \"notls\": None\n}[args[\"TLS_FLAVOR\"]]\n\nif args[\"TLS\"] and not all(os.path.exists(file_path) for file_path in args[\"TLS\"]):\n print(\"Missing cert or key file, disabling TLS\")\n args[\"TLS_ERROR\"] = \"yes\"\n\n# Build final configuration paths\nconf.jinja(\"/conf/tls.conf\", args, \"/etc/nginx/tls.conf\")\nconf.jinja(\"/conf/proxy.conf\", args, \"/etc/nginx/proxy.conf\")\nconf.jinja(\"/conf/nginx.conf\", args, \"/etc/nginx/nginx.conf\")\nif os.path.exists(\"/var/run/nginx.pid\"):\n os.system(\"nginx -s reload\")\n", "path": "core/nginx/config.py"}]}
| 1,302 | 272 |
gh_patches_debug_15711
|
rasdani/github-patches
|
git_diff
|
translate__pootle-6087
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete a TP from an old style project and the Project page stays cached
1. Create a new TP
2. TP is available
3. Delete TP
4. Project page still shows project listed - though it should be gone
5. Going to supposedly deleted TP and we get 404
We're not expiring cache when a TP is deleted.
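A minimal sketch of the missing hook — a `pre_delete` receiver for `TranslationProject` that bumps the revision on its directory, mirroring the existing `Directory` receivers (the patch below adds exactly this):
```python
# Sketch only: expire cached stats/checks when a TranslationProject is deleted,
# following the same pattern as the existing Directory receivers.
from django.db.models.signals import pre_delete
from django.dispatch import receiver

from pootle.core.delegate import revision_updater
from pootle_app.models import Directory
from pootle_translationproject.models import TranslationProject


@receiver(pre_delete, sender=TranslationProject)
def handle_tp_delete(**kwargs):
    revision_updater.get(Directory)(
        context=kwargs["instance"].directory).update(keys=["stats", "checks"])
```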
</issue>
<code>
[start of pootle/apps/pootle_revision/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models.signals import post_save, pre_delete
10 from django.dispatch import receiver
11
12 from pootle.core.delegate import revision_updater
13 from pootle_app.models import Directory
14 from pootle_data.models import StoreData
15 from pootle_store.models import Store
16
17
18 @receiver(post_save, sender=StoreData)
19 def handle_storedata_save(**kwargs):
20 revision_updater.get(Store)(
21 context=kwargs["instance"].store).update(keys=["stats", "checks"])
22
23
24 @receiver(post_save, sender=Directory)
25 def handle_directory_save(**kwargs):
26 context = (
27 kwargs["instance"].parent
28 if kwargs.get("created")
29 else kwargs["instance"])
30 revision_updater.get(Directory)(
31 context=context).update(keys=["stats", "checks"])
32
33
34 @receiver(pre_delete, sender=Directory)
35 def handle_directory_delete(**kwargs):
36 revision_updater.get(Directory)(
37 context=kwargs["instance"].parent).update(keys=["stats", "checks"])
38
[end of pootle/apps/pootle_revision/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py
--- a/pootle/apps/pootle_revision/receivers.py
+++ b/pootle/apps/pootle_revision/receivers.py
@@ -13,6 +13,7 @@
from pootle_app.models import Directory
from pootle_data.models import StoreData
from pootle_store.models import Store
+from pootle_translationproject.models import TranslationProject
@receiver(post_save, sender=StoreData)
@@ -35,3 +36,9 @@
def handle_directory_delete(**kwargs):
revision_updater.get(Directory)(
context=kwargs["instance"].parent).update(keys=["stats", "checks"])
+
+
+@receiver(pre_delete, sender=TranslationProject)
+def handle_tp_delete(**kwargs):
+ revision_updater.get(Directory)(
+ context=kwargs["instance"].directory).update(keys=["stats", "checks"])
|
{"golden_diff": "diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py\n--- a/pootle/apps/pootle_revision/receivers.py\n+++ b/pootle/apps/pootle_revision/receivers.py\n@@ -13,6 +13,7 @@\n from pootle_app.models import Directory\n from pootle_data.models import StoreData\n from pootle_store.models import Store\n+from pootle_translationproject.models import TranslationProject\n \n \n @receiver(post_save, sender=StoreData)\n@@ -35,3 +36,9 @@\n def handle_directory_delete(**kwargs):\n revision_updater.get(Directory)(\n context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n+\n+\n+@receiver(pre_delete, sender=TranslationProject)\n+def handle_tp_delete(**kwargs):\n+ revision_updater.get(Directory)(\n+ context=kwargs[\"instance\"].directory).update(keys=[\"stats\", \"checks\"])\n", "issue": "Delete a TP from an old style project and the Project page stays cached\n1. Create a new TP\r\n2. TP is available\r\n3. Delete TP\r\n4. Project page still shows project listed - though it should be gone\r\n5. Going to supposedly deleted TP and we get 404\r\n\r\nWe're not expiring cache when a TP is deleted.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\n\nfrom pootle.core.delegate import revision_updater\nfrom pootle_app.models import Directory\nfrom pootle_data.models import StoreData\nfrom pootle_store.models import Store\n\n\n@receiver(post_save, sender=StoreData)\ndef handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n\n\n@receiver(post_save, sender=Directory)\ndef handle_directory_save(**kwargs):\n context = (\n kwargs[\"instance\"].parent\n if kwargs.get(\"created\")\n else kwargs[\"instance\"])\n revision_updater.get(Directory)(\n context=context).update(keys=[\"stats\", \"checks\"])\n\n\n@receiver(pre_delete, sender=Directory)\ndef handle_directory_delete(**kwargs):\n revision_updater.get(Directory)(\n context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n", "path": "pootle/apps/pootle_revision/receivers.py"}]}
| 969 | 215 |
gh_patches_debug_41037
|
rasdani/github-patches
|
git_diff
|
pyca__cryptography-3716
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Yet another building on PyPy(5.8) issue
Hi,
I'm trying to build cryptography (tried both 1.9 and master) against PyPy 5.8beta0.
I'm running on osx sierra.
I've looked at several issues here, and have tried several suggested solutions, but none worked for me 😞
I'm trying to link against brew openssl.
`brew list --versions openssl` -> `openssl 1.0.2j 1.0.2k 1.0.2l`
I've built pypy's openssl module without any special problem
```
env PYTHONPATH=/Users/omerba/Workspace/pypy \
DYLD_LIBRARY_PATH="/Users/omerba/anaconda/lib" \
CFLAGS="-I/usr/local/opt/openssl/include" \
LDFLAGS="-L/usr/local/opt/openssl/lib" \
/Users/omerba/Workspace/pypy/pypy3-c pypy/tool/build_cffi_imports.py
```
But when i try to build cryptography (in cryptography's dir):
```
env PYTHONPATH=/Users/omerba/Workspace/pypy \
DYLD_LIBRARY_PATH="/Users/omerba/anaconda/lib" \
CFLAGS="-I/usr/local/opt/openssl/include" \
LDFLAGS="-L/usr/local/opt/openssl/lib" \
/Users/omerba/Workspace/pypy/pypy3-c setup.py install
```
I get:
```
running install
running bdist_egg
running egg_info
writing src/cryptography.egg-info/PKG-INFO
writing dependency_links to src/cryptography.egg-info/dependency_links.txt
writing entry points to src/cryptography.egg-info/entry_points.txt
writing requirements to src/cryptography.egg-info/requires.txt
writing top-level names to src/cryptography.egg-info/top_level.txt
reading manifest file 'src/cryptography.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
no previously-included directories found matching 'docs/_build'
warning: no previously-included files matching '*' found under directory 'vectors'
writing manifest file 'src/cryptography.egg-info/SOURCES.txt'
installing library code to build/bdist.macosx-10.12-x86_64/egg
running install_lib
running build_py
running build_ext
generating cffi module 'build/temp.macosx-10.12-x86_64-3.5/_padding.c'
already up-to-date
generating cffi module 'build/temp.macosx-10.12-x86_64-3.5/_constant_time.c'
already up-to-date
generating cffi module 'build/temp.macosx-10.12-x86_64-3.5/_openssl.c'
building '_openssl' extension
cc -pthread -DNDEBUG -O2 -I/usr/local/opt/openssl/include -fPIC -I/Users/omerba/Workspace/pypy/include -c build/temp.macosx-10.12-x86_64-3.5/_openssl.c -o build/temp.macosx-10.12-x86_64-3.5/build/temp.macosx-10.12-x86_64-3.5/_openssl.o
build/temp.macosx-10.12-x86_64-3.5/_openssl.c:2503:9: warning: comparison of function 'getentropy' not equal to a null pointer is always true [-Wtautological-pointer-compare]
if (getentropy != NULL) {
^~~~~~~~~~ ~~~~
build/temp.macosx-10.12-x86_64-3.5/_openssl.c:2503:9: note: prefix with the address-of operator to silence this warning
if (getentropy != NULL) {
^
&
build/temp.macosx-10.12-x86_64-3.5/_openssl.c:3454:22: warning: comparison of constant 1152921504606846975 with expression of type 'unsigned int' is always false [-Wtautological-constant-out-of-range-compare]
_ssl_locks = PyMem_New(PyThread_type_lock, _ssl_locks_count);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/Users/omerba/Workspace/pypy/include/pymem.h:38:10: note: expanded from macro 'PyMem_New'
( ((n) > PY_SSIZE_T_MAX / sizeof(type)) ? NULL : \
~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
build/temp.macosx-10.12-x86_64-3.5/_openssl.c:74852:1: warning: control reaches end of non-void function [-Wreturn-type]
}
^
3 warnings generated.
cc -pthread -shared -L/usr/local/opt/openssl/lib -I/usr/local/opt/openssl/include build/temp.macosx-10.12-x86_64-3.5/build/temp.macosx-10.12-x86_64-3.5/_openssl.o -lssl -lcrypto -o build/lib.macosx-10.12-x86_64-3.5/cryptography/hazmat/bindings/_openssl.pypy3-58-x86_64-darwin.so
clang: warning: argument unused during compilation: '-pthread' [-Wunused-command-line-argument]
Undefined symbols for architecture x86_64:
"_PyPyErr_NoMemory", referenced from:
__setup_ssl_threads in _openssl.o
"_PyPyMem_Free", referenced from:
__setup_ssl_threads in _openssl.o
"_PyPyMem_Malloc", referenced from:
__setup_ssl_threads in _openssl.o
"_PyPyThread_acquire_lock", referenced from:
__ssl_thread_locking_function in _openssl.o
"_PyPyThread_allocate_lock", referenced from:
__setup_ssl_threads in _openssl.o
"_PyPyThread_free_lock", referenced from:
__setup_ssl_threads in _openssl.o
"_PyPyThread_release_lock", referenced from:
__ssl_thread_locking_function in _openssl.o
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
error: command 'cc' failed with exit status 1
```
Googling the issue only leads to [this](https://bitbucket.org/pypy/pypy/issues/2538/_ssl_buildpy-fails-on-macos-with-the-py35) exact same issue on the PyPy repo.
But the PyPy team seemed to have resolved it by making some changes to their cffi backend.
I'll admit that I tried to shamelessly copy these changes to cryptography's _cffi_src dir - which made the package install successfully, but then when I actually tried to use it:
`from cryptography.hazmat.backends import default_backend, openssl`
it blows up..
```
AttributeErrorTraceback (most recent call last)
<ipython-input-5-cc19c86b2edb> in <module>()
1 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
----> 2 from cryptography.hazmat.backends import default_backend, openssl
~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/backends/openssl/__init__.py in <module>()
5 from __future__ import absolute_import, division, print_function
6
----> 7 from cryptography.hazmat.backends.openssl.backend import backend
8
9
~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/backends/openssl/backend.py in <module>()
47 _CertificateSigningRequest, _RevokedCertificate
48 )
---> 49 from cryptography.hazmat.bindings.openssl import binding
50 from cryptography.hazmat.primitives import hashes, serialization
51 from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in <module>()
154 # condition registering the OpenSSL locks. On Python 3.4+ the import lock
155 # is per module so this approach will not work.
--> 156 Binding.init_static_locks()
~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in init_static_locks(cls)
135 def init_static_locks(cls):
136 with cls._lock_init_lock:
--> 137 cls._ensure_ffi_initialized()
138 # Use Python's implementation if available, importing _ssl triggers
139 # the setup for this.
~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in _ensure_ffi_initialized(cls)
122 with cls._init_lock:
123 if not cls._lib_loaded:
--> 124 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)
125 cls._lib_loaded = True
126 # initialize the SSL library
~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in build_conditional_library(lib, conditional_names)
82 excluded_names = set()
83 for condition, names in conditional_names.items():
---> 84 if not getattr(lib, condition):
85 excluded_names |= set(names)
86
AttributeError: cffi library 'cryptography.hazmat.bindings._openssl' has no function, constant or global variable named 'Cryptography_HAS_DTLS'
```
Thanks for your work guys!
</issue>
<code>
[start of src/_cffi_src/openssl/callbacks.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 INCLUDES = """
8 #include <openssl/ssl.h>
9 #include <openssl/x509.h>
10 #include <openssl/x509_vfy.h>
11 #include <openssl/crypto.h>
12
13 #include <pythread.h>
14 """
15
16 TYPES = """
17 typedef struct {
18 char *password;
19 int length;
20 int called;
21 int error;
22 int maxsize;
23 } CRYPTOGRAPHY_PASSWORD_DATA;
24 """
25
26 FUNCTIONS = """
27 int _setup_ssl_threads(void);
28 int Cryptography_pem_password_cb(char *, int, int, void *);
29 """
30
31 MACROS = """
32 """
33
34 CUSTOMIZATIONS = """
35 /* This code is derived from the locking code found in the Python _ssl module's
36 locking callback for OpenSSL.
37
38 Copyright 2001-2016 Python Software Foundation; All Rights Reserved.
39 */
40
41 static unsigned int _ssl_locks_count = 0;
42 static PyThread_type_lock *_ssl_locks = NULL;
43
44 static void _ssl_thread_locking_function(int mode, int n, const char *file,
45 int line) {
46 /* this function is needed to perform locking on shared data
47 structures. (Note that OpenSSL uses a number of global data
48 structures that will be implicitly shared whenever multiple
49 threads use OpenSSL.) Multi-threaded applications will
50 crash at random if it is not set.
51
52 locking_function() must be able to handle up to
53 CRYPTO_num_locks() different mutex locks. It sets the n-th
54 lock if mode & CRYPTO_LOCK, and releases it otherwise.
55
56 file and line are the file number of the function setting the
57 lock. They can be useful for debugging.
58 */
59
60 if ((_ssl_locks == NULL) ||
61 (n < 0) || ((unsigned)n >= _ssl_locks_count)) {
62 return;
63 }
64
65 if (mode & CRYPTO_LOCK) {
66 PyThread_acquire_lock(_ssl_locks[n], 1);
67 } else {
68 PyThread_release_lock(_ssl_locks[n]);
69 }
70 }
71
72 int _setup_ssl_threads(void) {
73 unsigned int i;
74
75 if (_ssl_locks == NULL) {
76 _ssl_locks_count = CRYPTO_num_locks();
77 _ssl_locks = PyMem_New(PyThread_type_lock, _ssl_locks_count);
78 if (_ssl_locks == NULL) {
79 PyErr_NoMemory();
80 return 0;
81 }
82 memset(_ssl_locks, 0, sizeof(PyThread_type_lock) * _ssl_locks_count);
83 for (i = 0; i < _ssl_locks_count; i++) {
84 _ssl_locks[i] = PyThread_allocate_lock();
85 if (_ssl_locks[i] == NULL) {
86 unsigned int j;
87 for (j = 0; j < i; j++) {
88 PyThread_free_lock(_ssl_locks[j]);
89 }
90 PyMem_Free(_ssl_locks);
91 return 0;
92 }
93 }
94 CRYPTO_set_locking_callback(_ssl_thread_locking_function);
95 }
96 return 1;
97 }
98
99 typedef struct {
100 char *password;
101 int length;
102 int called;
103 int error;
104 int maxsize;
105 } CRYPTOGRAPHY_PASSWORD_DATA;
106
107 int Cryptography_pem_password_cb(char *buf, int size,
108 int rwflag, void *userdata) {
109 /* The password cb is only invoked if OpenSSL decides the private
110 key is encrypted. So this path only occurs if it needs a password */
111 CRYPTOGRAPHY_PASSWORD_DATA *st = (CRYPTOGRAPHY_PASSWORD_DATA *)userdata;
112 st->called += 1;
113 st->maxsize = size;
114 if (st->length == 0) {
115 st->error = -1;
116 return 0;
117 } else if (st->length < size) {
118 memcpy(buf, st->password, st->length);
119 return st->length;
120 } else {
121 st->error = -2;
122 return 0;
123 }
124 }
125 """
126
[end of src/_cffi_src/openssl/callbacks.py]
</code>
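As a side note on the file above: the behaviour of `Cryptography_pem_password_cb` (lines 107–124 of the listing) is easier to follow outside of C. Below is a rough Python transcription of the same branches, purely illustrative — the struct becomes a dataclass and the output buffer copy is simulated with slicing:

```python
from dataclasses import dataclass


@dataclass
class PasswordData:
    """Python analogue of CRYPTOGRAPHY_PASSWORD_DATA."""
    password: bytes = b""
    length: int = 0
    called: int = 0
    error: int = 0
    maxsize: int = 0


def pem_password_cb(buf: bytearray, size: int, st: PasswordData) -> int:
    """Mirror of the C callback: return bytes written, or 0 on error."""
    st.called += 1
    st.maxsize = size
    if st.length == 0:
        st.error = -1          # no password was supplied
        return 0
    elif st.length < size:
        buf[: st.length] = st.password[: st.length]
        return st.length
    else:
        st.error = -2          # password too long for OpenSSL's buffer
        return 0


st = PasswordData(password=b"hunter2", length=7)
buf = bytearray(1024)
assert pem_password_cb(buf, len(buf), st) == 7
```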
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/_cffi_src/openssl/callbacks.py b/src/_cffi_src/openssl/callbacks.py
--- a/src/_cffi_src/openssl/callbacks.py
+++ b/src/_cffi_src/openssl/callbacks.py
@@ -10,7 +10,13 @@
#include <openssl/x509_vfy.h>
#include <openssl/crypto.h>
-#include <pythread.h>
+#ifdef _WIN32
+#include <Windows.h>
+#else
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#endif
"""
TYPES = """
@@ -36,10 +42,47 @@
locking callback for OpenSSL.
Copyright 2001-2016 Python Software Foundation; All Rights Reserved.
+
+ It has been subsequently modified to use cross platform locking without
+ using CPython APIs by Armin Rigo of the PyPy project.
*/
+#ifdef _WIN32
+typedef CRITICAL_SECTION Cryptography_mutex;
+static __inline void cryptography_mutex_init(Cryptography_mutex *mutex) {
+ InitializeCriticalSection(mutex);
+}
+static __inline void cryptography_mutex_lock(Cryptography_mutex *mutex) {
+ EnterCriticalSection(mutex);
+}
+static __inline void cryptography_mutex_unlock(Cryptography_mutex *mutex) {
+ LeaveCriticalSection(mutex);
+}
+#else
+typedef pthread_mutex_t Cryptography_mutex;
+#define ASSERT_STATUS(call) \
+ if ((call) != 0) { \
+ perror("Fatal error in callback initialization: " #call); \
+ abort(); \
+ }
+static inline void cryptography_mutex_init(Cryptography_mutex *mutex) {
+#if !defined(pthread_mutexattr_default)
+# define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)
+#endif
+ ASSERT_STATUS(pthread_mutex_init(mutex, pthread_mutexattr_default));
+}
+static inline void cryptography_mutex_lock(Cryptography_mutex *mutex) {
+ ASSERT_STATUS(pthread_mutex_lock(mutex));
+}
+static inline void cryptography_mutex_unlock(Cryptography_mutex *mutex) {
+ ASSERT_STATUS(pthread_mutex_unlock(mutex));
+}
+#endif
+
+
+
static unsigned int _ssl_locks_count = 0;
-static PyThread_type_lock *_ssl_locks = NULL;
+static Cryptography_mutex *_ssl_locks = NULL;
static void _ssl_thread_locking_function(int mode, int n, const char *file,
int line) {
@@ -63,35 +106,32 @@
}
if (mode & CRYPTO_LOCK) {
- PyThread_acquire_lock(_ssl_locks[n], 1);
+ cryptography_mutex_lock(_ssl_locks + n);
} else {
- PyThread_release_lock(_ssl_locks[n]);
+ cryptography_mutex_unlock(_ssl_locks + n);
+ }
+}
+
+static void init_mutexes(void) {
+ int i;
+ for (i = 0; i < _ssl_locks_count; i++) {
+ cryptography_mutex_init(_ssl_locks + i);
}
}
-int _setup_ssl_threads(void) {
- unsigned int i;
+int _setup_ssl_threads(void) {
if (_ssl_locks == NULL) {
_ssl_locks_count = CRYPTO_num_locks();
- _ssl_locks = PyMem_New(PyThread_type_lock, _ssl_locks_count);
+ _ssl_locks = calloc(_ssl_locks_count, sizeof(Cryptography_mutex));
if (_ssl_locks == NULL) {
- PyErr_NoMemory();
return 0;
}
- memset(_ssl_locks, 0, sizeof(PyThread_type_lock) * _ssl_locks_count);
- for (i = 0; i < _ssl_locks_count; i++) {
- _ssl_locks[i] = PyThread_allocate_lock();
- if (_ssl_locks[i] == NULL) {
- unsigned int j;
- for (j = 0; j < i; j++) {
- PyThread_free_lock(_ssl_locks[j]);
- }
- PyMem_Free(_ssl_locks);
- return 0;
- }
- }
+ init_mutexes();
CRYPTO_set_locking_callback(_ssl_thread_locking_function);
+#ifndef _WIN32
+ pthread_atfork(NULL, NULL, &init_mutexes);
+#endif
}
return 1;
}
|
{"golden_diff": "diff --git a/src/_cffi_src/openssl/callbacks.py b/src/_cffi_src/openssl/callbacks.py\n--- a/src/_cffi_src/openssl/callbacks.py\n+++ b/src/_cffi_src/openssl/callbacks.py\n@@ -10,7 +10,13 @@\n #include <openssl/x509_vfy.h>\n #include <openssl/crypto.h>\n \n-#include <pythread.h>\n+#ifdef _WIN32\n+#include <Windows.h>\n+#else\n+#include <stdio.h>\n+#include <stdlib.h>\n+#include <pthread.h>\n+#endif\n \"\"\"\n \n TYPES = \"\"\"\n@@ -36,10 +42,47 @@\n locking callback for OpenSSL.\n \n Copyright 2001-2016 Python Software Foundation; All Rights Reserved.\n+\n+ It has been subsequently modified to use cross platform locking without\n+ using CPython APIs by Armin Rigo of the PyPy project.\n */\n \n+#ifdef _WIN32\n+typedef CRITICAL_SECTION Cryptography_mutex;\n+static __inline void cryptography_mutex_init(Cryptography_mutex *mutex) {\n+ InitializeCriticalSection(mutex);\n+}\n+static __inline void cryptography_mutex_lock(Cryptography_mutex *mutex) {\n+ EnterCriticalSection(mutex);\n+}\n+static __inline void cryptography_mutex_unlock(Cryptography_mutex *mutex) {\n+ LeaveCriticalSection(mutex);\n+}\n+#else\n+typedef pthread_mutex_t Cryptography_mutex;\n+#define ASSERT_STATUS(call) \\\n+ if ((call) != 0) { \\\n+ perror(\"Fatal error in callback initialization: \" #call); \\\n+ abort(); \\\n+ }\n+static inline void cryptography_mutex_init(Cryptography_mutex *mutex) {\n+#if !defined(pthread_mutexattr_default)\n+# define pthread_mutexattr_default ((pthread_mutexattr_t *)NULL)\n+#endif\n+ ASSERT_STATUS(pthread_mutex_init(mutex, pthread_mutexattr_default));\n+}\n+static inline void cryptography_mutex_lock(Cryptography_mutex *mutex) {\n+ ASSERT_STATUS(pthread_mutex_lock(mutex));\n+}\n+static inline void cryptography_mutex_unlock(Cryptography_mutex *mutex) {\n+ ASSERT_STATUS(pthread_mutex_unlock(mutex));\n+}\n+#endif\n+\n+\n+\n static unsigned int _ssl_locks_count = 0;\n-static PyThread_type_lock *_ssl_locks = NULL;\n+static Cryptography_mutex *_ssl_locks = NULL;\n \n static void _ssl_thread_locking_function(int mode, int n, const char *file,\n int line) {\n@@ -63,35 +106,32 @@\n }\n \n if (mode & CRYPTO_LOCK) {\n- PyThread_acquire_lock(_ssl_locks[n], 1);\n+ cryptography_mutex_lock(_ssl_locks + n);\n } else {\n- PyThread_release_lock(_ssl_locks[n]);\n+ cryptography_mutex_unlock(_ssl_locks + n);\n+ }\n+}\n+\n+static void init_mutexes(void) {\n+ int i;\n+ for (i = 0; i < _ssl_locks_count; i++) {\n+ cryptography_mutex_init(_ssl_locks + i);\n }\n }\n \n-int _setup_ssl_threads(void) {\n- unsigned int i;\n \n+int _setup_ssl_threads(void) {\n if (_ssl_locks == NULL) {\n _ssl_locks_count = CRYPTO_num_locks();\n- _ssl_locks = PyMem_New(PyThread_type_lock, _ssl_locks_count);\n+ _ssl_locks = calloc(_ssl_locks_count, sizeof(Cryptography_mutex));\n if (_ssl_locks == NULL) {\n- PyErr_NoMemory();\n return 0;\n }\n- memset(_ssl_locks, 0, sizeof(PyThread_type_lock) * _ssl_locks_count);\n- for (i = 0; i < _ssl_locks_count; i++) {\n- _ssl_locks[i] = PyThread_allocate_lock();\n- if (_ssl_locks[i] == NULL) {\n- unsigned int j;\n- for (j = 0; j < i; j++) {\n- PyThread_free_lock(_ssl_locks[j]);\n- }\n- PyMem_Free(_ssl_locks);\n- return 0;\n- }\n- }\n+ init_mutexes();\n CRYPTO_set_locking_callback(_ssl_thread_locking_function);\n+#ifndef _WIN32\n+ pthread_atfork(NULL, NULL, &init_mutexes);\n+#endif\n }\n return 1;\n }\n", "issue": "Yet another building on PyPy(5.8) issue\nHi,\r\n\r\nI'm trying to build cryptography (tried both 1.9 and master) against PyPy 5.8beta0.\r\nI'm running on osx sierra.\r\n\r\nI've looked 
at several issues here, and have tried several suggested solutions, but none worked for me \ud83d\ude1e \r\n\r\nI'm trying to link against brew openssl.\r\n`brew list --versions openssl` -> `openssl 1.0.2j 1.0.2k 1.0.2l`\r\n\r\nI've built pypy's openssl module without any special problem\r\n\r\n```\r\nenv PYTHONPATH=/Users/omerba/Workspace/pypy \\ \r\n DYLD_LIBRARY_PATH=\"/Users/omerba/anaconda/lib\" \\\r\n CFLAGS=\"-I/usr/local/opt/openssl/include\" \\\r\n LDFLAGS=\"-L/usr/local/opt/openssl/lib\" \\\r\n /Users/omerba/Workspace/pypy/pypy3-c pypy/tool/build_cffi_imports.py\r\n```\r\nBut when i try to build cryptography (in cryptography's dir):\r\n```\r\n env PYTHONPATH=/Users/omerba/Workspace/pypy \\ \r\n DYLD_LIBRARY_PATH=\"/Users/omerba/anaconda/lib\" \\\r\n CFLAGS=\"-I/usr/local/opt/openssl/include\" \\\r\n LDFLAGS=\"-L/usr/local/opt/openssl/lib\" \\\r\n /Users/omerba/Workspace/pypy/pypy3-c setup.py install\r\n```\r\nI get:\r\n```\r\nrunning install\r\nrunning bdist_egg\r\nrunning egg_info\r\nwriting src/cryptography.egg-info/PKG-INFO\r\nwriting dependency_links to src/cryptography.egg-info/dependency_links.txt\r\nwriting entry points to src/cryptography.egg-info/entry_points.txt\r\nwriting requirements to src/cryptography.egg-info/requires.txt\r\nwriting top-level names to src/cryptography.egg-info/top_level.txt\r\nreading manifest file 'src/cryptography.egg-info/SOURCES.txt'\r\nreading manifest template 'MANIFEST.in'\r\nno previously-included directories found matching 'docs/_build'\r\nwarning: no previously-included files matching '*' found under directory 'vectors'\r\nwriting manifest file 'src/cryptography.egg-info/SOURCES.txt'\r\ninstalling library code to build/bdist.macosx-10.12-x86_64/egg\r\nrunning install_lib\r\nrunning build_py\r\nrunning build_ext\r\ngenerating cffi module 'build/temp.macosx-10.12-x86_64-3.5/_padding.c'\r\nalready up-to-date\r\ngenerating cffi module 'build/temp.macosx-10.12-x86_64-3.5/_constant_time.c'\r\nalready up-to-date\r\ngenerating cffi module 'build/temp.macosx-10.12-x86_64-3.5/_openssl.c'\r\nbuilding '_openssl' extension\r\ncc -pthread -DNDEBUG -O2 -I/usr/local/opt/openssl/include -fPIC -I/Users/omerba/Workspace/pypy/include -c build/temp.macosx-10.12-x86_64-3.5/_openssl.c -o build/temp.macosx-10.12-x86_64-3.5/build/temp.macosx-10.12-x86_64-3.5/_openssl.o\r\nbuild/temp.macosx-10.12-x86_64-3.5/_openssl.c:2503:9: warning: comparison of function 'getentropy' not equal to a null pointer is always true [-Wtautological-pointer-compare]\r\n if (getentropy != NULL) {\r\n ^~~~~~~~~~ ~~~~\r\nbuild/temp.macosx-10.12-x86_64-3.5/_openssl.c:2503:9: note: prefix with the address-of operator to silence this warning\r\n if (getentropy != NULL) {\r\n ^\r\n &\r\nbuild/temp.macosx-10.12-x86_64-3.5/_openssl.c:3454:22: warning: comparison of constant 1152921504606846975 with expression of type 'unsigned int' is always false [-Wtautological-constant-out-of-range-compare]\r\n _ssl_locks = PyMem_New(PyThread_type_lock, _ssl_locks_count);\r\n ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n/Users/omerba/Workspace/pypy/include/pymem.h:38:10: note: expanded from macro 'PyMem_New'\r\n ( ((n) > PY_SSIZE_T_MAX / sizeof(type)) ? 
NULL : \\\r\n ~~~ ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\nbuild/temp.macosx-10.12-x86_64-3.5/_openssl.c:74852:1: warning: control reaches end of non-void function [-Wreturn-type]\r\n}\r\n^\r\n3 warnings generated.\r\ncc -pthread -shared -L/usr/local/opt/openssl/lib -I/usr/local/opt/openssl/include build/temp.macosx-10.12-x86_64-3.5/build/temp.macosx-10.12-x86_64-3.5/_openssl.o -lssl -lcrypto -o build/lib.macosx-10.12-x86_64-3.5/cryptography/hazmat/bindings/_openssl.pypy3-58-x86_64-darwin.so\r\nclang: warning: argument unused during compilation: '-pthread' [-Wunused-command-line-argument]\r\nUndefined symbols for architecture x86_64:\r\n \"_PyPyErr_NoMemory\", referenced from:\r\n __setup_ssl_threads in _openssl.o\r\n \"_PyPyMem_Free\", referenced from:\r\n __setup_ssl_threads in _openssl.o\r\n \"_PyPyMem_Malloc\", referenced from:\r\n __setup_ssl_threads in _openssl.o\r\n \"_PyPyThread_acquire_lock\", referenced from:\r\n __ssl_thread_locking_function in _openssl.o\r\n \"_PyPyThread_allocate_lock\", referenced from:\r\n __setup_ssl_threads in _openssl.o\r\n \"_PyPyThread_free_lock\", referenced from:\r\n __setup_ssl_threads in _openssl.o\r\n \"_PyPyThread_release_lock\", referenced from:\r\n __ssl_thread_locking_function in _openssl.o\r\nld: symbol(s) not found for architecture x86_64\r\nclang: error: linker command failed with exit code 1 (use -v to see invocation)\r\nerror: command 'cc' failed with exit status 1\r\n```\r\n\r\nGoogling the issue only leads to [this](https://bitbucket.org/pypy/pypy/issues/2538/_ssl_buildpy-fails-on-macos-with-the-py35) exact same issue on the PyPy repo.\r\n\r\nBut the PyPy team seemed to have resolved it by making some changes to their cffi backend.\r\nI'll admit that I tried to shamelessly copy these changes to cryptography's _cffi_src dir - which made the package install successfully, but then when I actually tried to use it:\r\n\r\n`from cryptography.hazmat.backends import default_backend, openssl`\r\n\r\nit blows up.. \r\n\r\n```\r\nAttributeErrorTraceback (most recent call last)\r\n<ipython-input-5-cc19c86b2edb> in <module>()\r\n 1 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\r\n----> 2 from cryptography.hazmat.backends import default_backend, openssl\r\n\r\n~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/backends/openssl/__init__.py in <module>()\r\n 5 from __future__ import absolute_import, division, print_function\r\n 6 \r\n----> 7 from cryptography.hazmat.backends.openssl.backend import backend\r\n 8 \r\n 9 \r\n\r\n~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/backends/openssl/backend.py in <module>()\r\n 47 _CertificateSigningRequest, _RevokedCertificate\r\n 48 )\r\n---> 49 from cryptography.hazmat.bindings.openssl import binding\r\n 50 from cryptography.hazmat.primitives import hashes, serialization\r\n 51 from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa\r\n\r\n~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in <module>()\r\n 154 # condition registering the OpenSSL locks. 
On Python 3.4+ the import lock\r\n 155 # is per module so this approach will not work.\r\n--> 156 Binding.init_static_locks()\r\n\r\n~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in init_static_locks(cls)\r\n 135 def init_static_locks(cls):\r\n 136 with cls._lock_init_lock:\r\n--> 137 cls._ensure_ffi_initialized()\r\n 138 # Use Python's implementation if available, importing _ssl triggers\r\n 139 # the setup for this.\r\n\r\n~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in _ensure_ffi_initialized(cls)\r\n 122 with cls._init_lock:\r\n 123 if not cls._lib_loaded:\r\n--> 124 cls.lib = build_conditional_library(lib, CONDITIONAL_NAMES)\r\n 125 cls._lib_loaded = True\r\n 126 # initialize the SSL library\r\n\r\n~/Workspace/pypy/site-packages/cryptography-1.9-py3.5-macosx-10.12-x86_64.egg/cryptography/hazmat/bindings/openssl/binding.py in build_conditional_library(lib, conditional_names)\r\n 82 excluded_names = set()\r\n 83 for condition, names in conditional_names.items():\r\n---> 84 if not getattr(lib, condition):\r\n 85 excluded_names |= set(names)\r\n 86 \r\n\r\nAttributeError: cffi library 'cryptography.hazmat.bindings._openssl' has no function, constant or global variable named 'Cryptography_HAS_DTLS'\r\n```\r\n\r\nThanks for your work guys!\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nINCLUDES = \"\"\"\n#include <openssl/ssl.h>\n#include <openssl/x509.h>\n#include <openssl/x509_vfy.h>\n#include <openssl/crypto.h>\n\n#include <pythread.h>\n\"\"\"\n\nTYPES = \"\"\"\ntypedef struct {\n char *password;\n int length;\n int called;\n int error;\n int maxsize;\n} CRYPTOGRAPHY_PASSWORD_DATA;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nint _setup_ssl_threads(void);\nint Cryptography_pem_password_cb(char *, int, int, void *);\n\"\"\"\n\nMACROS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n/* This code is derived from the locking code found in the Python _ssl module's\n locking callback for OpenSSL.\n\n Copyright 2001-2016 Python Software Foundation; All Rights Reserved.\n*/\n\nstatic unsigned int _ssl_locks_count = 0;\nstatic PyThread_type_lock *_ssl_locks = NULL;\n\nstatic void _ssl_thread_locking_function(int mode, int n, const char *file,\n int line) {\n /* this function is needed to perform locking on shared data\n structures. (Note that OpenSSL uses a number of global data\n structures that will be implicitly shared whenever multiple\n threads use OpenSSL.) Multi-threaded applications will\n crash at random if it is not set.\n\n locking_function() must be able to handle up to\n CRYPTO_num_locks() different mutex locks. It sets the n-th\n lock if mode & CRYPTO_LOCK, and releases it otherwise.\n\n file and line are the file number of the function setting the\n lock. 
They can be useful for debugging.\n */\n\n if ((_ssl_locks == NULL) ||\n (n < 0) || ((unsigned)n >= _ssl_locks_count)) {\n return;\n }\n\n if (mode & CRYPTO_LOCK) {\n PyThread_acquire_lock(_ssl_locks[n], 1);\n } else {\n PyThread_release_lock(_ssl_locks[n]);\n }\n}\n\nint _setup_ssl_threads(void) {\n unsigned int i;\n\n if (_ssl_locks == NULL) {\n _ssl_locks_count = CRYPTO_num_locks();\n _ssl_locks = PyMem_New(PyThread_type_lock, _ssl_locks_count);\n if (_ssl_locks == NULL) {\n PyErr_NoMemory();\n return 0;\n }\n memset(_ssl_locks, 0, sizeof(PyThread_type_lock) * _ssl_locks_count);\n for (i = 0; i < _ssl_locks_count; i++) {\n _ssl_locks[i] = PyThread_allocate_lock();\n if (_ssl_locks[i] == NULL) {\n unsigned int j;\n for (j = 0; j < i; j++) {\n PyThread_free_lock(_ssl_locks[j]);\n }\n PyMem_Free(_ssl_locks);\n return 0;\n }\n }\n CRYPTO_set_locking_callback(_ssl_thread_locking_function);\n }\n return 1;\n}\n\ntypedef struct {\n char *password;\n int length;\n int called;\n int error;\n int maxsize;\n} CRYPTOGRAPHY_PASSWORD_DATA;\n\nint Cryptography_pem_password_cb(char *buf, int size,\n int rwflag, void *userdata) {\n /* The password cb is only invoked if OpenSSL decides the private\n key is encrypted. So this path only occurs if it needs a password */\n CRYPTOGRAPHY_PASSWORD_DATA *st = (CRYPTOGRAPHY_PASSWORD_DATA *)userdata;\n st->called += 1;\n st->maxsize = size;\n if (st->length == 0) {\n st->error = -1;\n return 0;\n } else if (st->length < size) {\n memcpy(buf, st->password, st->length);\n return st->length;\n } else {\n st->error = -2;\n return 0;\n }\n}\n\"\"\"\n", "path": "src/_cffi_src/openssl/callbacks.py"}]}
| 4,068 | 962 |
gh_patches_debug_54708
|
rasdani/github-patches
|
git_diff
|
qutebrowser__qutebrowser-4743
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Launching keyhint widget causes 100% usage of one CPU core
That's how it has been for as long as I can remember, and it's reproducible on all of my hardware (pressing _g_ or _;_ is enough). I don't think this is intended behavior.
</issue>
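Some context that may help when reading the code below: `KeyHintView` drives its delayed display through `self._show_timer` (a `usertypes.Timer`, which behaves like a `QTimer`), and a `QTimer` repeats by default. If the timer is never made single-shot, `show()` keeps firing every `keyhint.delay` milliseconds for as long as a partial keychain is active, re-triggering `showEvent` and geometry updates on every tick — which is consistent with the constant CPU load described above; the accepted fix further down simply adds `setSingleShot(True)`. A tiny, self-contained PyQt5 sketch of the difference (the widget and interval here are invented for illustration):

```python
import sys
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import QApplication, QLabel

app = QApplication(sys.argv)
hint = QLabel("keyhint stand-in")

show_timer = QTimer()
show_timer.setInterval(500)            # stand-in for config.val.keyhint.delay
show_timer.timeout.connect(hint.show)  # same wiring as KeyHintView.__init__

# Without the next line the timer repeats: hint.show() runs on every tick
# until stop() is called, needlessly re-running showEvent each time.
show_timer.setSingleShot(True)         # the one-line fix from the patch
show_timer.start()

QTimer.singleShot(2000, app.quit)      # end the demo after two seconds
sys.exit(app.exec_())
```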
<code>
[start of qutebrowser/misc/keyhintwidget.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2016-2019 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Small window that pops up to show hints for possible keystrings.
21
22 When a user inputs a key that forms a partial match, this shows a small window
23 with each possible completion of that keystring and the corresponding command.
24 It is intended to help discoverability of keybindings.
25 """
26
27 import html
28 import fnmatch
29 import re
30
31 from PyQt5.QtWidgets import QLabel, QSizePolicy
32 from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt
33
34 from qutebrowser.config import config
35 from qutebrowser.utils import utils, usertypes
36 from qutebrowser.misc import objects
37 from qutebrowser.keyinput import keyutils
38
39
40 class KeyHintView(QLabel):
41
42 """The view showing hints for key bindings based on the current key string.
43
44 Attributes:
45 _win_id: Window ID of parent.
46
47 Signals:
48 update_geometry: Emitted when this widget should be resized/positioned.
49 """
50
51 STYLESHEET = """
52 QLabel {
53 font: {{ conf.fonts.keyhint }};
54 color: {{ conf.colors.keyhint.fg }};
55 background-color: {{ conf.colors.keyhint.bg }};
56 padding: 6px;
57 {% if conf.statusbar.position == 'top' %}
58 border-bottom-right-radius: {{ conf.keyhint.radius }}px;
59 {% else %}
60 border-top-right-radius: {{ conf.keyhint.radius }}px;
61 {% endif %}
62 }
63 """
64 update_geometry = pyqtSignal()
65
66 def __init__(self, win_id, parent=None):
67 super().__init__(parent)
68 self.setTextFormat(Qt.RichText)
69 self._win_id = win_id
70 self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
71 self.hide()
72 self._show_timer = usertypes.Timer(self, 'keyhint_show')
73 self._show_timer.timeout.connect(self.show)
74 config.set_register_stylesheet(self)
75
76 def __repr__(self):
77 return utils.get_repr(self, win_id=self._win_id)
78
79 def showEvent(self, e):
80 """Adjust the keyhint size when it's freshly shown."""
81 self.update_geometry.emit()
82 super().showEvent(e)
83
84 @pyqtSlot(str)
85 def update_keyhint(self, modename, prefix):
86 """Show hints for the given prefix (or hide if prefix is empty).
87
88 Args:
89 prefix: The current partial keystring.
90 """
91 countstr, prefix = re.fullmatch(r'(\d*)(.*)', prefix).groups()
92 if not prefix:
93 self._show_timer.stop()
94 self.hide()
95 return
96
97 def blacklisted(keychain):
98 return any(fnmatch.fnmatchcase(keychain, glob)
99 for glob in config.val.keyhint.blacklist)
100
101 def takes_count(cmdstr):
102 """Return true iff this command can take a count argument."""
103 cmdname = cmdstr.split(' ')[0]
104 cmd = objects.commands.get(cmdname)
105 return cmd and cmd.takes_count()
106
107 bindings_dict = config.key_instance.get_bindings_for(modename)
108 bindings = [(k, v) for (k, v) in sorted(bindings_dict.items())
109 if keyutils.KeySequence.parse(prefix).matches(k) and
110 not blacklisted(str(k)) and
111 (takes_count(v) or not countstr)]
112
113 if not bindings:
114 self._show_timer.stop()
115 return
116
117 # delay so a quickly typed keychain doesn't display hints
118 self._show_timer.setInterval(config.val.keyhint.delay)
119 self._show_timer.start()
120 suffix_color = html.escape(config.val.colors.keyhint.suffix.fg)
121
122 text = ''
123 for seq, cmd in bindings:
124 text += (
125 "<tr>"
126 "<td>{}</td>"
127 "<td style='color: {}'>{}</td>"
128 "<td style='padding-left: 2ex'>{}</td>"
129 "</tr>"
130 ).format(
131 html.escape(prefix),
132 suffix_color,
133 html.escape(str(seq)[len(prefix):]),
134 html.escape(cmd)
135 )
136 text = '<table>{}</table>'.format(text)
137
138 self.setText(text)
139 self.adjustSize()
140 self.update_geometry.emit()
141
[end of qutebrowser/misc/keyhintwidget.py]
</code>
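One more reading aid for `update_keyhint` above: the `blacklisted()` helper filters candidate keychains with `fnmatch.fnmatchcase` globs taken from `keyhint.blacklist`. A standalone illustration with invented glob values:

```python
import fnmatch

blacklist = ["gg", "g?", ";*"]   # hypothetical keyhint.blacklist entries


def blacklisted(keychain: str) -> bool:
    # Same test as in KeyHintView.update_keyhint: any matching glob
    # suppresses the hint row for that keychain.
    return any(fnmatch.fnmatchcase(keychain, glob) for glob in blacklist)


assert blacklisted("gg") is True      # literal match
assert blacklisted("gt") is True      # matched by "g?"
assert blacklisted(";w") is True      # matched by ";*"
assert blacklisted("wi") is False
```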
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/qutebrowser/misc/keyhintwidget.py b/qutebrowser/misc/keyhintwidget.py
--- a/qutebrowser/misc/keyhintwidget.py
+++ b/qutebrowser/misc/keyhintwidget.py
@@ -71,6 +71,7 @@
self.hide()
self._show_timer = usertypes.Timer(self, 'keyhint_show')
self._show_timer.timeout.connect(self.show)
+ self._show_timer.setSingleShot(True)
config.set_register_stylesheet(self)
def __repr__(self):
|
{"golden_diff": "diff --git a/qutebrowser/misc/keyhintwidget.py b/qutebrowser/misc/keyhintwidget.py\n--- a/qutebrowser/misc/keyhintwidget.py\n+++ b/qutebrowser/misc/keyhintwidget.py\n@@ -71,6 +71,7 @@\n self.hide()\n self._show_timer = usertypes.Timer(self, 'keyhint_show')\n self._show_timer.timeout.connect(self.show)\n+ self._show_timer.setSingleShot(True)\n config.set_register_stylesheet(self)\n \n def __repr__(self):\n", "issue": "Launching keyhint widget causes 100% usage of one CPU core\nThat's how it was for as long as I can remember, reproducible with all of my hardware (pressing _g_ or _;_ is enough). I don't think that's an intended behavior.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2016-2019 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Small window that pops up to show hints for possible keystrings.\n\nWhen a user inputs a key that forms a partial match, this shows a small window\nwith each possible completion of that keystring and the corresponding command.\nIt is intended to help discoverability of keybindings.\n\"\"\"\n\nimport html\nimport fnmatch\nimport re\n\nfrom PyQt5.QtWidgets import QLabel, QSizePolicy\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import utils, usertypes\nfrom qutebrowser.misc import objects\nfrom qutebrowser.keyinput import keyutils\n\n\nclass KeyHintView(QLabel):\n\n \"\"\"The view showing hints for key bindings based on the current key string.\n\n Attributes:\n _win_id: Window ID of parent.\n\n Signals:\n update_geometry: Emitted when this widget should be resized/positioned.\n \"\"\"\n\n STYLESHEET = \"\"\"\n QLabel {\n font: {{ conf.fonts.keyhint }};\n color: {{ conf.colors.keyhint.fg }};\n background-color: {{ conf.colors.keyhint.bg }};\n padding: 6px;\n {% if conf.statusbar.position == 'top' %}\n border-bottom-right-radius: {{ conf.keyhint.radius }}px;\n {% else %}\n border-top-right-radius: {{ conf.keyhint.radius }}px;\n {% endif %}\n }\n \"\"\"\n update_geometry = pyqtSignal()\n\n def __init__(self, win_id, parent=None):\n super().__init__(parent)\n self.setTextFormat(Qt.RichText)\n self._win_id = win_id\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)\n self.hide()\n self._show_timer = usertypes.Timer(self, 'keyhint_show')\n self._show_timer.timeout.connect(self.show)\n config.set_register_stylesheet(self)\n\n def __repr__(self):\n return utils.get_repr(self, win_id=self._win_id)\n\n def showEvent(self, e):\n \"\"\"Adjust the keyhint size when it's freshly shown.\"\"\"\n self.update_geometry.emit()\n super().showEvent(e)\n\n @pyqtSlot(str)\n def update_keyhint(self, modename, prefix):\n \"\"\"Show hints for the given prefix (or hide if prefix is empty).\n\n Args:\n prefix: The current partial keystring.\n \"\"\"\n 
countstr, prefix = re.fullmatch(r'(\\d*)(.*)', prefix).groups()\n if not prefix:\n self._show_timer.stop()\n self.hide()\n return\n\n def blacklisted(keychain):\n return any(fnmatch.fnmatchcase(keychain, glob)\n for glob in config.val.keyhint.blacklist)\n\n def takes_count(cmdstr):\n \"\"\"Return true iff this command can take a count argument.\"\"\"\n cmdname = cmdstr.split(' ')[0]\n cmd = objects.commands.get(cmdname)\n return cmd and cmd.takes_count()\n\n bindings_dict = config.key_instance.get_bindings_for(modename)\n bindings = [(k, v) for (k, v) in sorted(bindings_dict.items())\n if keyutils.KeySequence.parse(prefix).matches(k) and\n not blacklisted(str(k)) and\n (takes_count(v) or not countstr)]\n\n if not bindings:\n self._show_timer.stop()\n return\n\n # delay so a quickly typed keychain doesn't display hints\n self._show_timer.setInterval(config.val.keyhint.delay)\n self._show_timer.start()\n suffix_color = html.escape(config.val.colors.keyhint.suffix.fg)\n\n text = ''\n for seq, cmd in bindings:\n text += (\n \"<tr>\"\n \"<td>{}</td>\"\n \"<td style='color: {}'>{}</td>\"\n \"<td style='padding-left: 2ex'>{}</td>\"\n \"</tr>\"\n ).format(\n html.escape(prefix),\n suffix_color,\n html.escape(str(seq)[len(prefix):]),\n html.escape(cmd)\n )\n text = '<table>{}</table>'.format(text)\n\n self.setText(text)\n self.adjustSize()\n self.update_geometry.emit()\n", "path": "qutebrowser/misc/keyhintwidget.py"}]}
| 1,998 | 113 |
gh_patches_debug_64526
|
rasdani/github-patches
|
git_diff
|
kartoza__prj.app-342
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Order sponsors in their groups
From @andreasneumann:
```For the sponsors listing - is there a clear order within the same level at http://changelog.qgis.org/en/qgis/version/2.16.0/ ?
In my opinion, it should either be ordered alphabetically or by date. Neither seems to be the case. I would prefer alphabetic ordering within each sponsorship level.```
I think it is actually better to order them with the most recently added sponsors first and the oldest sponsors last. That way they get the most visibility when they are new, degrading over time to the bottom of the list. What do you think @andreasneumann?
</issue>
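The "no clear order within the same level" observation has a concrete explanation in the model code below: `Version.sponsors()` chains `.order_by('start_date').order_by('-sponsorship_level__value')`, and in Django each `order_by()` call replaces the previous ordering rather than adding to it, so only the level ordering is applied and ties within a level come back in whatever order the database happens to return. The sketch below shows the corrected queryset (it assumes the field names from the model and a configured Django project, so it is illustrative rather than directly runnable), followed by a plain-Python analogue of the resulting sort key:

```python
# Before: the second order_by() silently discards 'start_date'.
sponsors = (
    SponsorshipPeriod.approved_objects
    .filter(end_date__gte=release_date, start_date__lte=release_date)
    .filter(project=project)
    .order_by('start_date')
    .order_by('-sponsorship_level__value')    # only this ordering survives
)

# After (matching the accepted patch): one order_by() with both keys —
# highest sponsorship level first, then sponsors alphabetically.
sponsors = (
    SponsorshipPeriod.approved_objects
    .filter(end_date__gte=release_date, start_date__lte=release_date)
    .filter(project=project)
    .order_by('-sponsorship_level__value', 'sponsor__name')
)

# Equivalent sort key in plain Python, runnable on any list of dicts:
periods = [
    {"sponsor": "Zebra", "level": 9},
    {"sponsor": "Acme", "level": 9},
    {"sponsor": "Beta", "level": 3},
]
ordered = sorted(periods, key=lambda p: (-p["level"], p["sponsor"]))
assert [p["sponsor"] for p in ordered] == ["Acme", "Zebra", "Beta"]
```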
<code>
[start of django_project/changes/models/version.py]
1 # coding=utf-8
2 from django.core.urlresolvers import reverse
3 # from django.utils.text import slugify
4 from common.utilities import version_slugify
5 import os
6 import logging
7 from core.settings.contrib import STOP_WORDS
8 from django.conf.global_settings import MEDIA_ROOT
9 from django.db import models
10 from .entry import Entry
11 from .sponsorship_period import SponsorshipPeriod
12 from django.contrib.auth.models import User
13 from django.utils.translation import ugettext_lazy as _
14
15 logger = logging.getLogger(__name__)
16
17
18 class ApprovedVersionManager(models.Manager):
19 """Custom version manager that shows only approved records."""
20
21 def get_queryset(self):
22 """Query set generator"""
23 return super(
24 ApprovedVersionManager, self).get_queryset().filter(
25 approved=True)
26
27
28 class UnapprovedVersionManager(models.Manager):
29 """Custom version manager that shows only unapproved records."""
30
31 def get_queryset(self):
32 """Query set generator"""
33 return super(
34 UnapprovedVersionManager, self).get_queryset().filter(
35 approved=False)
36
37
38 # noinspection PyUnresolvedReferences
39 class Version(models.Model):
40 """A version model that the changelog is associated with.."""
41
42 name = models.CharField(
43 help_text='Name of this release e.g. 1.0.1.',
44 max_length=255,
45 null=False,
46 blank=False,
47 unique=False)
48
49 padded_version = models.CharField(
50 help_text=(
51 'Numeric version for this release e.g. 001000001 for 1.0.1 '
52 'calculated by zero padding each component of maj/minor/bugfix '
53 'elements from name.'),
54 max_length=9,
55 null=False,
56 blank=True,
57 unique=False)
58
59 approved = models.BooleanField(
60 help_text=(
61 'Whether this version has been approved for use by the '
62 'project owner.'),
63 default=False)
64
65 image_file = models.ImageField(
66 help_text=(
67 'An optional image for this version e.g. a splashscreen. '
68 'Most browsers support dragging the image directly on to the '
69 '"Choose File" button above.'),
70 upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),
71 blank=True)
72
73 description = models.TextField(
74 null=True,
75 blank=True,
76 help_text='Describe the new version. Markdown is supported.')
77
78 release_date = models.DateField(
79 _('Release date (yyyy-mm-dd)'),
80 help_text='Date of official release',
81 null=True,
82 blank=True)
83
84 author = models.ForeignKey(User)
85 slug = models.SlugField()
86 project = models.ForeignKey('base.Project')
87 objects = models.Manager()
88 approved_objects = ApprovedVersionManager()
89 unapproved_objects = UnapprovedVersionManager()
90
91 # noinspection PyClassicStyleClass
92 class Meta:
93 """Meta options for the version class."""
94 unique_together = (
95 ('name', 'project'),
96 ('slug', 'project'),
97 )
98 app_label = 'changes'
99 # ordering = ['-datetime_created']
100
101 def save(self, *args, **kwargs):
102 if not self.pk:
103 words = self.name.split()
104 filtered_words = [t for t in words if t.lower() not in STOP_WORDS]
105 new_list = ' '.join(filtered_words)
106 self.slug = version_slugify(new_list)[:50]
107 self.padded_version = self.pad_name(self.name)
108 super(Version, self).save(*args, **kwargs)
109
110 def pad_name(self, version):
111 """Create a 0 padded version of the version name.
112
113 e.g. input: 2.10.1
114 e.g. output: 002010100
115
116 This will ensure we have sortable version names.
117
118 :param version: A text version in the form 0.0.0 - if the version is
119 not in this form, we return the version unaltered.
120 :type version: str
121
122 :returns: Zero padded representation of the version e.g. 001010100
123 :rtype: str
124
125 """
126 tokens = version.split('.')
127 if len(tokens) != 3:
128 return version
129 result = ''
130 for token in tokens:
131 result += token.zfill(3)
132 return result
133
134 def __unicode__(self):
135 return u'%s : %s' % (self.project.name, self.name)
136
137 def get_absolute_url(self):
138 return reverse('version-detail', kwargs={
139 'slug': self.slug,
140 'project_slug': self.project.slug
141 })
142
143 def entries(self):
144 """Get the entries for this version."""
145 qs = Entry.objects.filter(version=self).order_by('category__sort_number')
146 return qs
147
148 def _entries_for_category(self, category):
149 """All entries for this version and filtered by the given category.
150
151 :param category: Category to filter by.
152 :type category: Category
153
154 .. note:: only approved entries returned.
155 """
156 qs = Entry.objects.filter(
157 version=self,
158 category=category,
159 approved=True)
160 return qs
161
162 def categories(self):
163 """Get a list of categories where there are one or more entries.
164
165 Example use in template::
166 {% for row in version.categories %}
167 <h2 class="text-muted">{{ row.category.name }}</h2>
168 <ul>
169 {% for entry in row.entries %}
170 <li>{{ entry.name }}</li>
171 {% endfor %}
172 </ul>
173 {% endfor %}
174 """
175 qs = self.entries()
176 used = []
177 categories = []
178 for entry in qs:
179 category = entry.category
180 if category not in used:
181 row = {
182 'category': category,
183 'entries': self._entries_for_category(category)
184 }
185 categories.append(row)
186 used.append(category)
187 return categories
188
189 def sponsors(self):
190 """Return a list of sponsors current at time of this version release.
191
192 :returns: A list of SponsorPeriod objects for current project
193 whose release date coincides with the version release date.
194 Only approved sponsors are returned.
195 Returns None if the release date (which is optional) is not set.
196 :rtype: Queryset, None
197 """
198 if self.release_date is None:
199 return None
200 sponsors = SponsorshipPeriod.approved_objects.filter(
201 end_date__gte=self.release_date).filter(
202 start_date__lte=self.release_date).filter(
203 project=self.project).order_by(
204 'start_date').order_by(
205 '-sponsorship_level__value')
206 return sponsors
207
208 def formatted_release_date(self):
209 """"Return a long formatted released date e.g. 24 June 2016.
210
211 :returns: A string containing the long formatted date, or an empty
212 string if the date is not set.
213 :rtype: str
214 """
215 long_date = None
216 if self.release_date:
217 # %-d Day of the month as a decimal number. (Platform specific)
218 # %B Month as locale’s full name.
219 # %Y Year e.g. 2016
220 long_date = self.release_date.strftime('%-d %B, %Y')
221 return long_date
222
[end of django_project/changes/models/version.py]
</code>
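For context on the listing above: `Version.pad_name()` is what makes version names sortable as plain strings. It is re-implemented standalone below so it can be run without Django — note that the method's own docstring examples ('002010100' / '001010100') don't quite match what `zfill` produces; the actual result for '2.10.1' is '002010001':

```python
def pad_name(version: str) -> str:
    """Zero-pad each maj/minor/bugfix component, as Version.pad_name() does."""
    tokens = version.split('.')
    if len(tokens) != 3:
        return version                  # e.g. '2.16' comes back unaltered
    return ''.join(token.zfill(3) for token in tokens)


assert pad_name('2.10.1') == '002010001'
assert pad_name('2.16.0') == '002016000'
assert pad_name('master') == 'master'
# String comparison now agrees with numeric version ordering:
assert pad_name('2.2.0') < pad_name('2.10.1')
```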
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/django_project/changes/models/version.py b/django_project/changes/models/version.py
--- a/django_project/changes/models/version.py
+++ b/django_project/changes/models/version.py
@@ -202,7 +202,7 @@
start_date__lte=self.release_date).filter(
project=self.project).order_by(
'start_date').order_by(
- '-sponsorship_level__value')
+ '-sponsorship_level__value', 'sponsor__name')
return sponsors
def formatted_release_date(self):
|
{"golden_diff": "diff --git a/django_project/changes/models/version.py b/django_project/changes/models/version.py\n--- a/django_project/changes/models/version.py\n+++ b/django_project/changes/models/version.py\n@@ -202,7 +202,7 @@\n start_date__lte=self.release_date).filter(\n project=self.project).order_by(\n 'start_date').order_by(\n- '-sponsorship_level__value')\n+ '-sponsorship_level__value', 'sponsor__name')\n return sponsors\n \n def formatted_release_date(self):\n", "issue": "Order sponsors in their groups\nFrom @andreasneumann: \n\n```For the sponsors listing - is there a clear order within the same level at http://changelog.qgis.org/en/qgis/version/2.16.0/ ?\n\nIn my opinion, it should either be ordered alphabetically or by date. Neither seems to be the case. I would prefer alphabetic ordering with in each sponsorship level.```\n\nI think it is actually better to order them with most recently added sponsors first to oldest sponsors last. That we they get the most visibility when they are new, degrading over time to the bottom of the list. What do you think @andreasneumann ?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom django.core.urlresolvers import reverse\n# from django.utils.text import slugify\nfrom common.utilities import version_slugify\nimport os\nimport logging\nfrom core.settings.contrib import STOP_WORDS\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.db import models\nfrom .entry import Entry\nfrom .sponsorship_period import SponsorshipPeriod\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\nclass ApprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedVersionManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedVersionManager(models.Manager):\n \"\"\"Custom version manager that shows only unapproved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedVersionManager, self).get_queryset().filter(\n approved=False)\n\n\n# noinspection PyUnresolvedReferences\nclass Version(models.Model):\n \"\"\"A version model that the changelog is associated with..\"\"\"\n\n name = models.CharField(\n help_text='Name of this release e.g. 1.0.1.',\n max_length=255,\n null=False,\n blank=False,\n unique=False)\n\n padded_version = models.CharField(\n help_text=(\n 'Numeric version for this release e.g. 001000001 for 1.0.1 '\n 'calculated by zero padding each component of maj/minor/bugfix '\n 'elements from name.'),\n max_length=9,\n null=False,\n blank=True,\n unique=False)\n\n approved = models.BooleanField(\n help_text=(\n 'Whether this version has been approved for use by the '\n 'project owner.'),\n default=False)\n\n image_file = models.ImageField(\n help_text=(\n 'An optional image for this version e.g. a splashscreen. '\n 'Most browsers support dragging the image directly on to the '\n '\"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/projects'),\n blank=True)\n\n description = models.TextField(\n null=True,\n blank=True,\n help_text='Describe the new version. 
Markdown is supported.')\n\n release_date = models.DateField(\n _('Release date (yyyy-mm-dd)'),\n help_text='Date of official release',\n null=True,\n blank=True)\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedVersionManager()\n unapproved_objects = UnapprovedVersionManager()\n\n # noinspection PyClassicStyleClass\n class Meta:\n \"\"\"Meta options for the version class.\"\"\"\n unique_together = (\n ('name', 'project'),\n ('slug', 'project'),\n )\n app_label = 'changes'\n # ordering = ['-datetime_created']\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = version_slugify(new_list)[:50]\n self.padded_version = self.pad_name(self.name)\n super(Version, self).save(*args, **kwargs)\n\n def pad_name(self, version):\n \"\"\"Create a 0 padded version of the version name.\n\n e.g. input: 2.10.1\n e.g. output: 002010100\n\n This will ensure we have sortable version names.\n\n :param version: A text version in the form 0.0.0 - if the version is\n not in this form, we return the version unaltered.\n :type version: str\n\n :returns: Zero padded representation of the version e.g. 001010100\n :rtype: str\n\n \"\"\"\n tokens = version.split('.')\n if len(tokens) != 3:\n return version\n result = ''\n for token in tokens:\n result += token.zfill(3)\n return result\n\n def __unicode__(self):\n return u'%s : %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n return reverse('version-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def entries(self):\n \"\"\"Get the entries for this version.\"\"\"\n qs = Entry.objects.filter(version=self).order_by('category__sort_number')\n return qs\n\n def _entries_for_category(self, category):\n \"\"\"All entries for this version and filtered by the given category.\n\n :param category: Category to filter by.\n :type category: Category\n\n .. 
note:: only approved entries returned.\n \"\"\"\n qs = Entry.objects.filter(\n version=self,\n category=category,\n approved=True)\n return qs\n\n def categories(self):\n \"\"\"Get a list of categories where there are one or more entries.\n\n Example use in template::\n {% for row in version.categories %}\n <h2 class=\"text-muted\">{{ row.category.name }}</h2>\n <ul>\n {% for entry in row.entries %}\n <li>{{ entry.name }}</li>\n {% endfor %}\n </ul>\n {% endfor %}\n \"\"\"\n qs = self.entries()\n used = []\n categories = []\n for entry in qs:\n category = entry.category\n if category not in used:\n row = {\n 'category': category,\n 'entries': self._entries_for_category(category)\n }\n categories.append(row)\n used.append(category)\n return categories\n\n def sponsors(self):\n \"\"\"Return a list of sponsors current at time of this version release.\n\n :returns: A list of SponsorPeriod objects for current project\n whose release date coincides with the version release date.\n Only approved sponsors are returned.\n Returns None if the release date (which is optional) is not set.\n :rtype: Queryset, None\n \"\"\"\n if self.release_date is None:\n return None\n sponsors = SponsorshipPeriod.approved_objects.filter(\n end_date__gte=self.release_date).filter(\n start_date__lte=self.release_date).filter(\n project=self.project).order_by(\n 'start_date').order_by(\n '-sponsorship_level__value')\n return sponsors\n\n def formatted_release_date(self):\n \"\"\"\"Return a long formatted released date e.g. 24 June 2016.\n\n :returns: A string containing the long formatted date, or an empty\n string if the date is not set.\n :rtype: str\n \"\"\"\n long_date = None\n if self.release_date:\n # %-d Day of the month as a decimal number. (Platform specific)\n # %B Month as locale\u2019s full name.\n # %Y Year e.g. 2016\n long_date = self.release_date.strftime('%-d %B, %Y')\n return long_date\n", "path": "django_project/changes/models/version.py"}]}
| 2,788 | 119 |
gh_patches_debug_34428
|
rasdani/github-patches
|
git_diff
|
koxudaxi__datamodel-code-generator-1829
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Black 24.1.0 breaks code formatting if wrap-string-literal is set
**Describe the bug**
Black [24.1.0](https://github.com/psf/black/releases/tag/24.1.0) was just released and removes support for the deprecated `--experimental-string-processing` flag (psf/black#4096). This breaks the code in [`format.py`](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/datamodel_code_generator/format.py#L146) that uses this option:
```
Traceback (most recent call last):
File ".../python3.11/site-packages/datamodel_code_generator/__main__.py", line 429, in main
generate(
File ".../python3.11/site-packages/datamodel_code_generator/__init__.py", line 463, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File ".../python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1156, in parse
code_formatter: Optional[CodeFormatter] = CodeFormatter(
^^^^^^^^^^^^^^
File ".../python3.11/site-packages/datamodel_code_generator/format.py", line 152, in __init__
self.black_mode = black.FileMode(
^^^^^^^^^^^^^^^
TypeError: Mode.__init__() got an unexpected keyword argument 'experimental_string_processing'
```
**Expected behavior**
No crash.
**Version:**
- OS: Linux
- Python version: 3.11
- datamodel-code-generator version: 0.25.2
- black version: 24.1.0
**Additional context**
Possible mitigation:
- add a temporary upper bound to the `black` version spec in [pyproject.toml](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/pyproject.toml#L54)
- same, but in user environment definitions
- use `--preview --enable-unstable-feature string_processing` instead (as suggested by the black release notes).
</issue>
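For background on the mitigation options: the usual way to stay compatible on both sides of the 24.1.0 boundary is to gate the `FileMode` kwargs on `black.__version__`, which is also the direction the accepted diff further down takes. The sketch below is illustrative only — the pre-24.1.0 branch uses the old keyword, while the names in the 24.1.0+ branch (`preview` / `enabled_features` / `Preview.string_processing`) are my reading of the new API and should be verified against the installed black release:

```python
import black


def string_wrapping_kwargs(wrap_string_literal: bool) -> dict:
    """Build the black.FileMode kwargs that enable string-literal wrapping."""
    if not wrap_string_literal:
        return {}
    if black.__version__ < '24.1.0':
        # Deprecated keyword, removed in 24.1.0 (psf/black#4096).
        return {'experimental_string_processing': True}
    import black.mode
    # 24.1.0+ moved string processing behind --preview /
    # --enable-unstable-feature string_processing.
    return {
        'preview': True,
        'enabled_features': {black.mode.Preview.string_processing},
    }


mode = black.FileMode(**string_wrapping_kwargs(True))
```

On the command line the equivalent is `black --preview --enable-unstable-feature string_processing`, as the last mitigation bullet notes.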
<code>
[start of datamodel_code_generator/format.py]
1 from __future__ import annotations
2
3 from enum import Enum
4 from importlib import import_module
5 from pathlib import Path
6 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
7 from warnings import warn
8
9 import black
10 import isort
11
12 from datamodel_code_generator.util import cached_property, load_toml
13
14
15 class PythonVersion(Enum):
16 PY_36 = '3.6'
17 PY_37 = '3.7'
18 PY_38 = '3.8'
19 PY_39 = '3.9'
20 PY_310 = '3.10'
21 PY_311 = '3.11'
22 PY_312 = '3.12'
23
24 @cached_property
25 def _is_py_38_or_later(self) -> bool: # pragma: no cover
26 return self.value not in {self.PY_36.value, self.PY_37.value} # type: ignore
27
28 @cached_property
29 def _is_py_39_or_later(self) -> bool: # pragma: no cover
30 return self.value not in {self.PY_36.value, self.PY_37.value, self.PY_38.value} # type: ignore
31
32 @cached_property
33 def _is_py_310_or_later(self) -> bool: # pragma: no cover
34 return self.value not in {
35 self.PY_36.value,
36 self.PY_37.value,
37 self.PY_38.value,
38 self.PY_39.value,
39 } # type: ignore
40
41 @cached_property
42 def _is_py_311_or_later(self) -> bool: # pragma: no cover
43 return self.value not in {
44 self.PY_36.value,
45 self.PY_37.value,
46 self.PY_38.value,
47 self.PY_39.value,
48 self.PY_310.value,
49 } # type: ignore
50
51 @property
52 def has_literal_type(self) -> bool:
53 return self._is_py_38_or_later
54
55 @property
56 def has_union_operator(self) -> bool: # pragma: no cover
57 return self._is_py_310_or_later
58
59 @property
60 def has_annotated_type(self) -> bool:
61 return self._is_py_39_or_later
62
63 @property
64 def has_typed_dict(self) -> bool:
65 return self._is_py_38_or_later
66
67 @property
68 def has_typed_dict_non_required(self) -> bool:
69 return self._is_py_311_or_later
70
71
72 if TYPE_CHECKING:
73
74 class _TargetVersion(Enum):
75 ...
76
77 BLACK_PYTHON_VERSION: Dict[PythonVersion, _TargetVersion]
78 else:
79 BLACK_PYTHON_VERSION: Dict[PythonVersion, black.TargetVersion] = {
80 v: getattr(black.TargetVersion, f'PY{v.name.split("_")[-1]}')
81 for v in PythonVersion
82 if hasattr(black.TargetVersion, f'PY{v.name.split("_")[-1]}')
83 }
84
85
86 def is_supported_in_black(python_version: PythonVersion) -> bool: # pragma: no cover
87 return python_version in BLACK_PYTHON_VERSION
88
89
90 def black_find_project_root(sources: Sequence[Path]) -> Path:
91 if TYPE_CHECKING:
92 from typing import Iterable, Tuple, Union
93
94 def _find_project_root(
95 srcs: Union[Sequence[str], Iterable[str]],
96 ) -> Union[Tuple[Path, str], Path]:
97 ...
98
99 else:
100 from black import find_project_root as _find_project_root
101 project_root = _find_project_root(tuple(str(s) for s in sources))
102 if isinstance(project_root, tuple):
103 return project_root[0]
104 else: # pragma: no cover
105 return project_root
106
107
108 class CodeFormatter:
109 def __init__(
110 self,
111 python_version: PythonVersion,
112 settings_path: Optional[Path] = None,
113 wrap_string_literal: Optional[bool] = None,
114 skip_string_normalization: bool = True,
115 known_third_party: Optional[List[str]] = None,
116 custom_formatters: Optional[List[str]] = None,
117 custom_formatters_kwargs: Optional[Dict[str, Any]] = None,
118 ) -> None:
119 if not settings_path:
120 settings_path = Path().resolve()
121
122 root = black_find_project_root((settings_path,))
123 path = root / 'pyproject.toml'
124 if path.is_file():
125 pyproject_toml = load_toml(path)
126 config = pyproject_toml.get('tool', {}).get('black', {})
127 else:
128 config = {}
129
130 black_kwargs: Dict[str, Any] = {}
131 if wrap_string_literal is not None:
132 experimental_string_processing = wrap_string_literal
133 else:
134 experimental_string_processing = config.get(
135 'experimental-string-processing'
136 )
137
138 if experimental_string_processing is not None: # pragma: no cover
139 if black.__version__.startswith('19.'): # type: ignore
140 warn(
141 f"black doesn't support `experimental-string-processing` option" # type: ignore
142 f' for wrapping string literal in {black.__version__}'
143 )
144 else:
145 black_kwargs[
146 'experimental_string_processing'
147 ] = experimental_string_processing
148
149 if TYPE_CHECKING:
150 self.black_mode: black.FileMode
151 else:
152 self.black_mode = black.FileMode(
153 target_versions={BLACK_PYTHON_VERSION[python_version]},
154 line_length=config.get('line-length', black.DEFAULT_LINE_LENGTH),
155 string_normalization=not skip_string_normalization
156 or not config.get('skip-string-normalization', True),
157 **black_kwargs,
158 )
159
160 self.settings_path: str = str(settings_path)
161
162 self.isort_config_kwargs: Dict[str, Any] = {}
163 if known_third_party:
164 self.isort_config_kwargs['known_third_party'] = known_third_party
165
166 if isort.__version__.startswith('4.'):
167 self.isort_config = None
168 else:
169 self.isort_config = isort.Config(
170 settings_path=self.settings_path, **self.isort_config_kwargs
171 )
172
173 self.custom_formatters_kwargs = custom_formatters_kwargs or {}
174 self.custom_formatters = self._check_custom_formatters(custom_formatters)
175
176 def _load_custom_formatter(
177 self, custom_formatter_import: str
178 ) -> CustomCodeFormatter:
179 import_ = import_module(custom_formatter_import)
180
181 if not hasattr(import_, 'CodeFormatter'):
182 raise NameError(
183 f'Custom formatter module `{import_.__name__}` must contains object with name Formatter'
184 )
185
186 formatter_class = import_.__getattribute__('CodeFormatter')
187
188 if not issubclass(formatter_class, CustomCodeFormatter):
189 raise TypeError(
190 f'The custom module {custom_formatter_import} must inherit from `datamodel-code-generator`'
191 )
192
193 return formatter_class(formatter_kwargs=self.custom_formatters_kwargs)
194
195 def _check_custom_formatters(
196 self, custom_formatters: Optional[List[str]]
197 ) -> List[CustomCodeFormatter]:
198 if custom_formatters is None:
199 return []
200
201 return [
202 self._load_custom_formatter(custom_formatter_import)
203 for custom_formatter_import in custom_formatters
204 ]
205
206 def format_code(
207 self,
208 code: str,
209 ) -> str:
210 code = self.apply_isort(code)
211 code = self.apply_black(code)
212
213 for formatter in self.custom_formatters:
214 code = formatter.apply(code)
215
216 return code
217
218 def apply_black(self, code: str) -> str:
219 return black.format_str(
220 code,
221 mode=self.black_mode,
222 )
223
224 if TYPE_CHECKING:
225
226 def apply_isort(self, code: str) -> str:
227 ...
228
229 else:
230 if isort.__version__.startswith('4.'):
231
232 def apply_isort(self, code: str) -> str:
233 return isort.SortImports(
234 file_contents=code,
235 settings_path=self.settings_path,
236 **self.isort_config_kwargs,
237 ).output
238
239 else:
240
241 def apply_isort(self, code: str) -> str:
242 return isort.code(code, config=self.isort_config)
243
244
245 class CustomCodeFormatter:
246 def __init__(self, formatter_kwargs: Dict[str, Any]) -> None:
247 self.formatter_kwargs = formatter_kwargs
248
249 def apply(self, code: str) -> str:
250 raise NotImplementedError
251
[end of datamodel_code_generator/format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py
--- a/datamodel_code_generator/format.py
+++ b/datamodel_code_generator/format.py
@@ -7,6 +7,7 @@
from warnings import warn
import black
+import black.mode
import isort
from datamodel_code_generator.util import cached_property, load_toml
@@ -131,9 +132,15 @@
if wrap_string_literal is not None:
experimental_string_processing = wrap_string_literal
else:
- experimental_string_processing = config.get(
- 'experimental-string-processing'
- )
+ if black.__version__ < '24.1.0': # type: ignore
+ experimental_string_processing = config.get(
+ 'experimental-string-processing'
+ )
+ else:
+ experimental_string_processing = config.get('preview', False) and (
+ config.get('unstable', False)
+ or 'string_processing' in config.get('enable-unstable-feature', [])
+ )
if experimental_string_processing is not None: # pragma: no cover
if black.__version__.startswith('19.'): # type: ignore
@@ -141,10 +148,16 @@
f"black doesn't support `experimental-string-processing` option" # type: ignore
f' for wrapping string literal in {black.__version__}'
)
- else:
+ elif black.__version__ < '24.1.0': # type: ignore
black_kwargs[
'experimental_string_processing'
] = experimental_string_processing
+ elif experimental_string_processing:
+ black_kwargs['preview'] = True
+ black_kwargs['unstable'] = config.get('unstable', False)
+ black_kwargs['enabled_features'] = {
+ black.mode.Preview.string_processing
+ }
if TYPE_CHECKING:
self.black_mode: black.FileMode
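The hunks above gate the behaviour on the installed black version: releases before 24.1 still accept the old `experimental_string_processing` keyword, while newer releases opt in through black's preview/unstable feature flags (the patch additionally passes `enabled_features={black.mode.Preview.string_processing}`). A minimal, self-contained sketch of that gate — the helper name and version strings are illustrative, not part of the repository:

```python
# Sketch of the version gate used in the patch above (illustrative helper).
# It mirrors the patch's plain string comparison on black.__version__.
def string_wrap_kwargs(black_version: str, wrap: bool) -> dict:
    kwargs = {}
    if black_version < '24.1.0':
        # Older black: the keyword removed in 24.1 still exists.
        kwargs['experimental_string_processing'] = wrap
    elif wrap:
        # black >= 24.1: string processing is an unstable preview feature.
        kwargs['preview'] = True
    return kwargs

assert 'experimental_string_processing' in string_wrap_kwargs('23.12.1', True)
assert string_wrap_kwargs('24.1.0', True) == {'preview': True}
```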
|
{"golden_diff": "diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py\n--- a/datamodel_code_generator/format.py\n+++ b/datamodel_code_generator/format.py\n@@ -7,6 +7,7 @@\n from warnings import warn\n \n import black\n+import black.mode\n import isort\n \n from datamodel_code_generator.util import cached_property, load_toml\n@@ -131,9 +132,15 @@\n if wrap_string_literal is not None:\n experimental_string_processing = wrap_string_literal\n else:\n- experimental_string_processing = config.get(\n- 'experimental-string-processing'\n- )\n+ if black.__version__ < '24.1.0': # type: ignore\n+ experimental_string_processing = config.get(\n+ 'experimental-string-processing'\n+ )\n+ else:\n+ experimental_string_processing = config.get('preview', False) and (\n+ config.get('unstable', False)\n+ or 'string_processing' in config.get('enable-unstable-feature', [])\n+ )\n \n if experimental_string_processing is not None: # pragma: no cover\n if black.__version__.startswith('19.'): # type: ignore\n@@ -141,10 +148,16 @@\n f\"black doesn't support `experimental-string-processing` option\" # type: ignore\n f' for wrapping string literal in {black.__version__}'\n )\n- else:\n+ elif black.__version__ < '24.1.0': # type: ignore\n black_kwargs[\n 'experimental_string_processing'\n ] = experimental_string_processing\n+ elif experimental_string_processing:\n+ black_kwargs['preview'] = True\n+ black_kwargs['unstable'] = config.get('unstable', False)\n+ black_kwargs['enabled_features'] = {\n+ black.mode.Preview.string_processing\n+ }\n \n if TYPE_CHECKING:\n self.black_mode: black.FileMode\n", "issue": "Black 24.1.0 breaks code formatting if wrap-string-literal is set\n**Describe the bug**\r\n\r\nBlack [24.1.0](https://github.com/psf/black/releases/tag/24.1.0) was just released and removes support for the deprecated `--experimental-string-processing` flag (psf/black#4096). 
This breaks the code in [`format.py`](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/datamodel_code_generator/format.py#L146) that uses this option:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../python3.11/site-packages/datamodel_code_generator/__main__.py\", line 429, in main\r\n generate(\r\n File \".../python3.11/site-packages/datamodel_code_generator/__init__.py\", line 463, in generate\r\n results = parser.parse()\r\n ^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/datamodel_code_generator/parser/base.py\", line 1156, in parse\r\n code_formatter: Optional[CodeFormatter] = CodeFormatter(\r\n ^^^^^^^^^^^^^^\r\n File \".../python3.11/site-packages/datamodel_code_generator/format.py\", line 152, in __init__\r\n self.black_mode = black.FileMode(\r\n ^^^^^^^^^^^^^^^\r\nTypeError: Mode.__init__() got an unexpected keyword argument 'experimental_string_processing'\r\n```\r\n\r\n**Expected behavior**\r\n\r\nNo crash.\r\n\r\n**Version:**\r\n - OS: Linux\r\n - Python version: 3.11\r\n - datamodel-code-generator version: 0.25.2\r\n - black version: 0.24.1\r\n\r\n**Additional context**\r\n\r\nPossible mitigation:\r\n- add a temporary upper bound to the `black` version spec in [pyproject.toml](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/pyproject.toml#L54)\r\n- same, but in user environment definitions\r\n- use `--preview --enable-unstable-feature string_processing` instead (as suggested by the black release notes).\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom enum import Enum\nfrom importlib import import_module\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence\nfrom warnings import warn\n\nimport black\nimport isort\n\nfrom datamodel_code_generator.util import cached_property, load_toml\n\n\nclass PythonVersion(Enum):\n PY_36 = '3.6'\n PY_37 = '3.7'\n PY_38 = '3.8'\n PY_39 = '3.9'\n PY_310 = '3.10'\n PY_311 = '3.11'\n PY_312 = '3.12'\n\n @cached_property\n def _is_py_38_or_later(self) -> bool: # pragma: no cover\n return self.value not in {self.PY_36.value, self.PY_37.value} # type: ignore\n\n @cached_property\n def _is_py_39_or_later(self) -> bool: # pragma: no cover\n return self.value not in {self.PY_36.value, self.PY_37.value, self.PY_38.value} # type: ignore\n\n @cached_property\n def _is_py_310_or_later(self) -> bool: # pragma: no cover\n return self.value not in {\n self.PY_36.value,\n self.PY_37.value,\n self.PY_38.value,\n self.PY_39.value,\n } # type: ignore\n\n @cached_property\n def _is_py_311_or_later(self) -> bool: # pragma: no cover\n return self.value not in {\n self.PY_36.value,\n self.PY_37.value,\n self.PY_38.value,\n self.PY_39.value,\n self.PY_310.value,\n } # type: ignore\n\n @property\n def has_literal_type(self) -> bool:\n return self._is_py_38_or_later\n\n @property\n def has_union_operator(self) -> bool: # pragma: no cover\n return self._is_py_310_or_later\n\n @property\n def has_annotated_type(self) -> bool:\n return self._is_py_39_or_later\n\n @property\n def has_typed_dict(self) -> bool:\n return self._is_py_38_or_later\n\n @property\n def has_typed_dict_non_required(self) -> bool:\n return self._is_py_311_or_later\n\n\nif TYPE_CHECKING:\n\n class _TargetVersion(Enum):\n ...\n\n BLACK_PYTHON_VERSION: Dict[PythonVersion, _TargetVersion]\nelse:\n BLACK_PYTHON_VERSION: Dict[PythonVersion, black.TargetVersion] = {\n v: getattr(black.TargetVersion, 
f'PY{v.name.split(\"_\")[-1]}')\n for v in PythonVersion\n if hasattr(black.TargetVersion, f'PY{v.name.split(\"_\")[-1]}')\n }\n\n\ndef is_supported_in_black(python_version: PythonVersion) -> bool: # pragma: no cover\n return python_version in BLACK_PYTHON_VERSION\n\n\ndef black_find_project_root(sources: Sequence[Path]) -> Path:\n if TYPE_CHECKING:\n from typing import Iterable, Tuple, Union\n\n def _find_project_root(\n srcs: Union[Sequence[str], Iterable[str]],\n ) -> Union[Tuple[Path, str], Path]:\n ...\n\n else:\n from black import find_project_root as _find_project_root\n project_root = _find_project_root(tuple(str(s) for s in sources))\n if isinstance(project_root, tuple):\n return project_root[0]\n else: # pragma: no cover\n return project_root\n\n\nclass CodeFormatter:\n def __init__(\n self,\n python_version: PythonVersion,\n settings_path: Optional[Path] = None,\n wrap_string_literal: Optional[bool] = None,\n skip_string_normalization: bool = True,\n known_third_party: Optional[List[str]] = None,\n custom_formatters: Optional[List[str]] = None,\n custom_formatters_kwargs: Optional[Dict[str, Any]] = None,\n ) -> None:\n if not settings_path:\n settings_path = Path().resolve()\n\n root = black_find_project_root((settings_path,))\n path = root / 'pyproject.toml'\n if path.is_file():\n pyproject_toml = load_toml(path)\n config = pyproject_toml.get('tool', {}).get('black', {})\n else:\n config = {}\n\n black_kwargs: Dict[str, Any] = {}\n if wrap_string_literal is not None:\n experimental_string_processing = wrap_string_literal\n else:\n experimental_string_processing = config.get(\n 'experimental-string-processing'\n )\n\n if experimental_string_processing is not None: # pragma: no cover\n if black.__version__.startswith('19.'): # type: ignore\n warn(\n f\"black doesn't support `experimental-string-processing` option\" # type: ignore\n f' for wrapping string literal in {black.__version__}'\n )\n else:\n black_kwargs[\n 'experimental_string_processing'\n ] = experimental_string_processing\n\n if TYPE_CHECKING:\n self.black_mode: black.FileMode\n else:\n self.black_mode = black.FileMode(\n target_versions={BLACK_PYTHON_VERSION[python_version]},\n line_length=config.get('line-length', black.DEFAULT_LINE_LENGTH),\n string_normalization=not skip_string_normalization\n or not config.get('skip-string-normalization', True),\n **black_kwargs,\n )\n\n self.settings_path: str = str(settings_path)\n\n self.isort_config_kwargs: Dict[str, Any] = {}\n if known_third_party:\n self.isort_config_kwargs['known_third_party'] = known_third_party\n\n if isort.__version__.startswith('4.'):\n self.isort_config = None\n else:\n self.isort_config = isort.Config(\n settings_path=self.settings_path, **self.isort_config_kwargs\n )\n\n self.custom_formatters_kwargs = custom_formatters_kwargs or {}\n self.custom_formatters = self._check_custom_formatters(custom_formatters)\n\n def _load_custom_formatter(\n self, custom_formatter_import: str\n ) -> CustomCodeFormatter:\n import_ = import_module(custom_formatter_import)\n\n if not hasattr(import_, 'CodeFormatter'):\n raise NameError(\n f'Custom formatter module `{import_.__name__}` must contains object with name Formatter'\n )\n\n formatter_class = import_.__getattribute__('CodeFormatter')\n\n if not issubclass(formatter_class, CustomCodeFormatter):\n raise TypeError(\n f'The custom module {custom_formatter_import} must inherit from `datamodel-code-generator`'\n )\n\n return formatter_class(formatter_kwargs=self.custom_formatters_kwargs)\n\n def 
_check_custom_formatters(\n self, custom_formatters: Optional[List[str]]\n ) -> List[CustomCodeFormatter]:\n if custom_formatters is None:\n return []\n\n return [\n self._load_custom_formatter(custom_formatter_import)\n for custom_formatter_import in custom_formatters\n ]\n\n def format_code(\n self,\n code: str,\n ) -> str:\n code = self.apply_isort(code)\n code = self.apply_black(code)\n\n for formatter in self.custom_formatters:\n code = formatter.apply(code)\n\n return code\n\n def apply_black(self, code: str) -> str:\n return black.format_str(\n code,\n mode=self.black_mode,\n )\n\n if TYPE_CHECKING:\n\n def apply_isort(self, code: str) -> str:\n ...\n\n else:\n if isort.__version__.startswith('4.'):\n\n def apply_isort(self, code: str) -> str:\n return isort.SortImports(\n file_contents=code,\n settings_path=self.settings_path,\n **self.isort_config_kwargs,\n ).output\n\n else:\n\n def apply_isort(self, code: str) -> str:\n return isort.code(code, config=self.isort_config)\n\n\nclass CustomCodeFormatter:\n def __init__(self, formatter_kwargs: Dict[str, Any]) -> None:\n self.formatter_kwargs = formatter_kwargs\n\n def apply(self, code: str) -> str:\n raise NotImplementedError\n", "path": "datamodel_code_generator/format.py"}]}
| 3,587 | 425 |
| gh_patches_debug_15512 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-411 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unassigned permissions not working
Non-admin accounts are seeing an incorrect amount of spatial entries in the search results. Here are two results: 1) from my `@kitwar`e account which is an admin and one from my `@gmail` account that has no permissions. Using the changes from #401:
https://github.com/ResonantGeoData/ResonantGeoData/blob/014ce2693a0a3e899d6af0a9d7822a5f1327268c/rgd/geodata/permissions.py#L108
You can see 475 results with the admin account (which is the correct amount) and 4949 results with the non-admin account, which hits that new code (this number is wildly incorrect):
| admin | nonadmin |
| --- | --- |
|  |  |
</issue>
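The inflated non-admin count is consistent with the reverse joins used for permission filtering fanning out one result row per matching related row, so the same spatial entry is counted many times; the accepted fix (shown further down in this entry) collapses these duplicates with `.distinct()`. A minimal sketch of the idea — the function and argument names are illustrative:

```python
# Illustrative sketch: joining through related permission rows duplicates
# results, so counts overshoot until the queryset is made distinct.
def filter_unassigned(queryset, user_path):
    unassigned = queryset.filter(**{user_path + '__isnull': True})
    # Without .distinct(), each join match contributes its own row
    # (e.g. 4949 rows instead of the expected 475).
    return unassigned.distinct()
```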
<code>
[start of rgd/geodata/permissions.py]
1 from typing import Optional
2
3 from django.conf import settings
4 from django.contrib.auth.backends import BaseBackend
5 from django.core.exceptions import PermissionDenied
6 from django.db.models.functions import Coalesce
7
8 from rgd.geodata import models
9
10
11 def annotate_queryset(queryset):
12 """Annotate the queryset to include a path to a collection.
13
14 Some models don't have a direct path to `collection`
15 and must be annotated to include it.
16 """
17 model = queryset.model
18 if model == models.SpatialEntry:
19 return queryset.annotate(
20 _collection_permissions__user=Coalesce(
21 'fmventry__fmv_file__file__collection__collection_permissions__user',
22 'geometryentry__geometry_archive__file__collection__collection_permissions__user',
23 'rastermetaentry__parent_raster__image_set__images__image_file__file__collection__collection_permissions__user',
24 ),
25 _collection_permissions__role=Coalesce(
26 'fmventry__fmv_file__file__collection__collection_permissions__role',
27 'geometryentry__geometry_archive__file__collection__collection_permissions__role',
28 'rastermetaentry__parent_raster__image_set__images__image_file__file__collection__collection_permissions__role',
29 ),
30 )
31 return queryset
32
33
34 def get_collection_membership_path(model) -> Optional[str]:
35 """Get the path to the 'CollectionPermission' model.
36
37 Relationships are represented as 'dunder's ('__'). Returning `None`
38 means the model is explicitly unprotected.
39 """
40 # Collection
41 if issubclass(model, models.CollectionPermission):
42 return ''
43 if issubclass(model, models.Collection):
44 return 'collection_permissions'
45 # Common
46 if issubclass(model, models.ChecksumFile):
47 return 'collection__collection_permissions'
48 # Imagery
49 if issubclass(model, models.ImageEntry):
50 return 'image_file__file__collection__collection_permissions'
51 if issubclass(model, models.ImageSet):
52 return 'images__image_file__file__collection__collection_permissions'
53 if issubclass(model, models.RasterEntry):
54 return 'image_set__images__image_file__file__collection__collection_permissions'
55 if issubclass(model, models.RasterMetaEntry):
56 return (
57 'parent_raster__image_set__images__image_file__file__collection__collection_permissions'
58 )
59 if issubclass(model, models.BandMetaEntry):
60 return 'parent_image__image_file__file__collection__collection_permissions'
61 if issubclass(model, models.ConvertedImageFile):
62 return 'source_image__image_file__file__collection__collection_permissions'
63 if issubclass(model, models.SubsampledImage):
64 return 'source_image__image_file__file__collection__collection_permissions'
65 if issubclass(model, models.KWCOCOArchive):
66 return 'spec_file__collection__collection_permissions'
67 # Annotation
68 if issubclass(model, models.Annotation):
69 return 'image__image_file__collection__collection_permissions'
70 if issubclass(model, models.Segmentation):
71 return 'annotation__image__image_file__collection__collection_permissions'
72 # Geometry
73 if issubclass(model, models.GeometryEntry):
74 return 'geometry_archive__file__collection__collection_permissions'
75 # FMV
76 if issubclass(model, models.FMVEntry):
77 return 'fmv_file__file__collection__collection_permissions'
78 # SpatialEntry
79 if model == models.SpatialEntry:
80 return '_collection_permissions'
81
82 raise NotImplementedError
83
84
85 def filter_perm(user, queryset, role):
86 """Filter a queryset."""
87 # Called outside of view
88 if user is None:
89 return queryset
90 # Must be logged in
91 if not user.is_active or user.is_anonymous:
92 return queryset.none()
93 # Superusers can see all (not staff users)
94 if user.is_active and user.is_superuser:
95 return queryset
96 # No relationship to collection
97 path = get_collection_membership_path(queryset.model)
98 if path is None:
99 return queryset
100 # Check permissions
101 # `path` can be an empty string (meaning queryset is `CollectionPermission`)
102 user_path = (path + '__' if path != '' else path) + 'user'
103 role_path = (path + '__' if path != '' else path) + 'role'
104 queryset = annotate_queryset(queryset)
105 filtered = queryset.filter(**{user_path: user.pk}).exclude(**{role_path + '__lt': role})
106 # Check setting for unassigned permissions
107 if settings.RGD_GLOBAL_READ_ACCESS:
108 unassigned = queryset.filter(**{user_path + '__isnull': True})
109 return unassigned | filtered
110 return filtered
111
112
113 def filter_read_perm(user, queryset):
114 """Filter a queryset to what the user may read."""
115 return filter_perm(user, queryset, models.CollectionPermission.READER)
116
117
118 def filter_write_perm(user, queryset):
119 """Filter a queryset to what the user may edit."""
120 return filter_perm(user, queryset, models.CollectionPermission.OWNER)
121
122
123 def check_read_perm(user, obj):
124 """Raise 'PermissionDenied' error if user does not have read permissions."""
125 model = type(obj)
126 if not filter_read_perm(user, model.objects.filter(pk=obj.pk)).exists():
127 raise PermissionDenied
128
129
130 def check_write_perm(user, obj):
131 """Raise 'PermissionDenied' error if user does not have write permissions."""
132 # Called outside of view
133 model = type(obj)
134 if not filter_write_perm(user, model.objects.filter(pk=obj.pk)).exists():
135 raise PermissionDenied
136
137
138 class CollectionAuthorizationBackend(BaseBackend):
139 def has_perm(self, user, perm, obj=None):
140 """Supplement default Django permission backend.
141
142 Returns `True` if the user has the specified permission, where perm is in the format
143 `"<app label>.<permission codename>"`. If the user is
144 inactive, this method will always return False. For an active superuser, this method
145 will always return `True`.
146
147 https://docs.djangoproject.com/en/3.1/ref/contrib/auth/#django.contrib.auth.models.User.has_perm
148 """
149 app_label, codename = perm.split('.')
150 if app_label == 'geodata':
151 if codename.startswith('view'):
152 check_read_perm(user, obj)
153 if (
154 codename.startswith('add')
155 or codename.startswith('delete')
156 or codename.startswith('change')
157 ):
158 check_write_perm(user, obj)
159
[end of rgd/geodata/permissions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rgd/geodata/permissions.py b/rgd/geodata/permissions.py
--- a/rgd/geodata/permissions.py
+++ b/rgd/geodata/permissions.py
@@ -102,10 +102,12 @@
user_path = (path + '__' if path != '' else path) + 'user'
role_path = (path + '__' if path != '' else path) + 'role'
queryset = annotate_queryset(queryset)
- filtered = queryset.filter(**{user_path: user.pk}).exclude(**{role_path + '__lt': role})
+ filtered = (
+ queryset.filter(**{user_path: user.pk}).exclude(**{role_path + '__lt': role}).distinct()
+ )
# Check setting for unassigned permissions
if settings.RGD_GLOBAL_READ_ACCESS:
- unassigned = queryset.filter(**{user_path + '__isnull': True})
+ unassigned = queryset.filter(**{user_path + '__isnull': True}).distinct()
return unassigned | filtered
return filtered
|
{"golden_diff": "diff --git a/rgd/geodata/permissions.py b/rgd/geodata/permissions.py\n--- a/rgd/geodata/permissions.py\n+++ b/rgd/geodata/permissions.py\n@@ -102,10 +102,12 @@\n user_path = (path + '__' if path != '' else path) + 'user'\n role_path = (path + '__' if path != '' else path) + 'role'\n queryset = annotate_queryset(queryset)\n- filtered = queryset.filter(**{user_path: user.pk}).exclude(**{role_path + '__lt': role})\n+ filtered = (\n+ queryset.filter(**{user_path: user.pk}).exclude(**{role_path + '__lt': role}).distinct()\n+ )\n # Check setting for unassigned permissions\n if settings.RGD_GLOBAL_READ_ACCESS:\n- unassigned = queryset.filter(**{user_path + '__isnull': True})\n+ unassigned = queryset.filter(**{user_path + '__isnull': True}).distinct()\n return unassigned | filtered\n return filtered\n", "issue": "Unassigned permissions not working\nNon-admin accounts are seeing an incorrect amount of spatial entries in the search results. Here are two results: 1) from my `@kitwar`e account which is an admin and one from my `@gmail` account that has no permissions. Using the changes from #401: \r\n\r\nhttps://github.com/ResonantGeoData/ResonantGeoData/blob/014ce2693a0a3e899d6af0a9d7822a5f1327268c/rgd/geodata/permissions.py#L108\r\n\r\nYou can see 475 results with the admin account (which is the correct amount) and 4949 results with the nonadmin account which hits that new code (this number is wildly incorrect): \r\n\r\n| admin | nonadmin |\r\n| --- | --- |\r\n|  |  |\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nfrom django.conf import settings\nfrom django.contrib.auth.backends import BaseBackend\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models.functions import Coalesce\n\nfrom rgd.geodata import models\n\n\ndef annotate_queryset(queryset):\n \"\"\"Annotate the queryset to include a path to a collection.\n\n Some models don't have a direct path to `collection`\n and must be annotated to include it.\n \"\"\"\n model = queryset.model\n if model == models.SpatialEntry:\n return queryset.annotate(\n _collection_permissions__user=Coalesce(\n 'fmventry__fmv_file__file__collection__collection_permissions__user',\n 'geometryentry__geometry_archive__file__collection__collection_permissions__user',\n 'rastermetaentry__parent_raster__image_set__images__image_file__file__collection__collection_permissions__user',\n ),\n _collection_permissions__role=Coalesce(\n 'fmventry__fmv_file__file__collection__collection_permissions__role',\n 'geometryentry__geometry_archive__file__collection__collection_permissions__role',\n 'rastermetaentry__parent_raster__image_set__images__image_file__file__collection__collection_permissions__role',\n ),\n )\n return queryset\n\n\ndef get_collection_membership_path(model) -> Optional[str]:\n \"\"\"Get the path to the 'CollectionPermission' model.\n\n Relationships are represented as 'dunder's ('__'). 
Returning `None`\n means the model is explicitly unprotected.\n \"\"\"\n # Collection\n if issubclass(model, models.CollectionPermission):\n return ''\n if issubclass(model, models.Collection):\n return 'collection_permissions'\n # Common\n if issubclass(model, models.ChecksumFile):\n return 'collection__collection_permissions'\n # Imagery\n if issubclass(model, models.ImageEntry):\n return 'image_file__file__collection__collection_permissions'\n if issubclass(model, models.ImageSet):\n return 'images__image_file__file__collection__collection_permissions'\n if issubclass(model, models.RasterEntry):\n return 'image_set__images__image_file__file__collection__collection_permissions'\n if issubclass(model, models.RasterMetaEntry):\n return (\n 'parent_raster__image_set__images__image_file__file__collection__collection_permissions'\n )\n if issubclass(model, models.BandMetaEntry):\n return 'parent_image__image_file__file__collection__collection_permissions'\n if issubclass(model, models.ConvertedImageFile):\n return 'source_image__image_file__file__collection__collection_permissions'\n if issubclass(model, models.SubsampledImage):\n return 'source_image__image_file__file__collection__collection_permissions'\n if issubclass(model, models.KWCOCOArchive):\n return 'spec_file__collection__collection_permissions'\n # Annotation\n if issubclass(model, models.Annotation):\n return 'image__image_file__collection__collection_permissions'\n if issubclass(model, models.Segmentation):\n return 'annotation__image__image_file__collection__collection_permissions'\n # Geometry\n if issubclass(model, models.GeometryEntry):\n return 'geometry_archive__file__collection__collection_permissions'\n # FMV\n if issubclass(model, models.FMVEntry):\n return 'fmv_file__file__collection__collection_permissions'\n # SpatialEntry\n if model == models.SpatialEntry:\n return '_collection_permissions'\n\n raise NotImplementedError\n\n\ndef filter_perm(user, queryset, role):\n \"\"\"Filter a queryset.\"\"\"\n # Called outside of view\n if user is None:\n return queryset\n # Must be logged in\n if not user.is_active or user.is_anonymous:\n return queryset.none()\n # Superusers can see all (not staff users)\n if user.is_active and user.is_superuser:\n return queryset\n # No relationship to collection\n path = get_collection_membership_path(queryset.model)\n if path is None:\n return queryset\n # Check permissions\n # `path` can be an empty string (meaning queryset is `CollectionPermission`)\n user_path = (path + '__' if path != '' else path) + 'user'\n role_path = (path + '__' if path != '' else path) + 'role'\n queryset = annotate_queryset(queryset)\n filtered = queryset.filter(**{user_path: user.pk}).exclude(**{role_path + '__lt': role})\n # Check setting for unassigned permissions\n if settings.RGD_GLOBAL_READ_ACCESS:\n unassigned = queryset.filter(**{user_path + '__isnull': True})\n return unassigned | filtered\n return filtered\n\n\ndef filter_read_perm(user, queryset):\n \"\"\"Filter a queryset to what the user may read.\"\"\"\n return filter_perm(user, queryset, models.CollectionPermission.READER)\n\n\ndef filter_write_perm(user, queryset):\n \"\"\"Filter a queryset to what the user may edit.\"\"\"\n return filter_perm(user, queryset, models.CollectionPermission.OWNER)\n\n\ndef check_read_perm(user, obj):\n \"\"\"Raise 'PermissionDenied' error if user does not have read permissions.\"\"\"\n model = type(obj)\n if not filter_read_perm(user, model.objects.filter(pk=obj.pk)).exists():\n raise PermissionDenied\n\n\ndef 
check_write_perm(user, obj):\n \"\"\"Raise 'PermissionDenied' error if user does not have write permissions.\"\"\"\n # Called outside of view\n model = type(obj)\n if not filter_write_perm(user, model.objects.filter(pk=obj.pk)).exists():\n raise PermissionDenied\n\n\nclass CollectionAuthorizationBackend(BaseBackend):\n def has_perm(self, user, perm, obj=None):\n \"\"\"Supplement default Django permission backend.\n\n Returns `True` if the user has the specified permission, where perm is in the format\n `\"<app label>.<permission codename>\"`. If the user is\n inactive, this method will always return False. For an active superuser, this method\n will always return `True`.\n\n https://docs.djangoproject.com/en/3.1/ref/contrib/auth/#django.contrib.auth.models.User.has_perm\n \"\"\"\n app_label, codename = perm.split('.')\n if app_label == 'geodata':\n if codename.startswith('view'):\n check_read_perm(user, obj)\n if (\n codename.startswith('add')\n or codename.startswith('delete')\n or codename.startswith('change')\n ):\n check_write_perm(user, obj)\n", "path": "rgd/geodata/permissions.py"}]}
| 2,650 | 233 |
gh_patches_debug_28759
|
rasdani/github-patches
|
git_diff
|
numba__numba-1992
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
jitclass __doc__ passthrough to instance
Jitclass does not expose the docstring of the class or its methods.
</issue>
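For context, the Python-side wrapper for a jitclass instance is built dynamically with `type(...)`, so nothing copies the original class docstring unless it is placed into the class dictionary explicitly; a minimal sketch of that passthrough (class names are illustrative):

```python
# Minimal sketch: a dynamically created class only gets a docstring if the
# class dict provides one, which is what the fix in this entry adds.
class Original:
    """Docstring that should survive boxing."""

dct = {
    '__slots__': (),
    '__doc__': Original.__doc__,  # explicit passthrough
}
Wrapper = type('Wrapper', (object,), dct)
assert Wrapper.__doc__ == Original.__doc__
```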
<code>
[start of numba/jitclass/boxing.py]
1 """
2 Implement logic relating to wrapping (box) and unwrapping (unbox) instances
3 of jitclasses for use inside the python interpreter.
4 """
5 from __future__ import print_function, absolute_import
6
7 from functools import wraps, partial
8
9 from llvmlite import ir
10
11 from numba import types, cgutils
12 from numba.pythonapi import box, unbox, NativeValue
13 from numba import njit
14 from numba.six import exec_
15 from . import _box
16
17
18 _getter_code_template = """
19 def accessor(__numba_self_):
20 return __numba_self_.{0}
21 """
22
23 _setter_code_template = """
24 def mutator(__numba_self_, __numba_val):
25 __numba_self_.{0} = __numba_val
26 """
27
28 _method_code_template = """
29 def method(__numba_self_, *args):
30 return __numba_self_.{method}(*args)
31 """
32
33
34 def _generate_property(field, template, fname):
35 """
36 Generate simple function that get/set a field of the instance
37 """
38 source = template.format(field)
39 glbls = {}
40 exec_(source, glbls)
41 return njit(glbls[fname])
42
43
44 _generate_getter = partial(_generate_property, template=_getter_code_template,
45 fname='accessor')
46 _generate_setter = partial(_generate_property, template=_setter_code_template,
47 fname='mutator')
48
49
50 def _generate_method(name, func):
51 """
52 Generate a wrapper for calling a method. Note the wrapper will only
53 accept positional arguments.
54 """
55 source = _method_code_template.format(method=name)
56 glbls = {}
57 exec_(source, glbls)
58 method = njit(glbls['method'])
59
60 @wraps(func)
61 def wrapper(*args, **kwargs):
62 return method(*args, **kwargs)
63
64 return wrapper
65
66
67 _cache_specialized_box = {}
68
69
70 def _specialize_box(typ):
71 """
72 Create a subclass of Box that is specialized to the jitclass.
73
74 This function caches the result to avoid code bloat.
75 """
76 # Check cache
77 if typ in _cache_specialized_box:
78 return _cache_specialized_box[typ]
79 dct = {'__slots__': (),
80 '_numba_type_': typ}
81 # Inject attributes as class properties
82 for field in typ.struct:
83 getter = _generate_getter(field)
84 setter = _generate_setter(field)
85 dct[field] = property(getter, setter)
86 # Inject properties as class properties
87 for field, impdct in typ.jitprops.items():
88 getter = None
89 setter = None
90 if 'get' in impdct:
91 getter = _generate_getter(field)
92 if 'set' in impdct:
93 setter = _generate_setter(field)
94 dct[field] = property(getter, setter)
95 # Inject methods as class members
96 for name, func in typ.methods.items():
97 if not (name.startswith('__') and name.endswith('__')):
98 dct[name] = _generate_method(name, func)
99 # Create subclass
100 subcls = type(typ.classname, (_box.Box,), dct)
101 # Store to cache
102 _cache_specialized_box[typ] = subcls
103
104 # Pre-compile attribute getter.
105 # Note: This must be done after the "box" class is created because
106 # compiling the getter requires the "box" class to be defined.
107 for k, v in dct.items():
108 if isinstance(v, property):
109 prop = getattr(subcls, k)
110 if prop.fget is not None:
111 fget = prop.fget
112 fast_fget = fget.compile((typ,))
113 fget.disable_compile()
114 setattr(subcls, k,
115 property(fast_fget, prop.fset, prop.fdel))
116
117 return subcls
118
119
120 ###############################################################################
121 # Implement box/unbox for call wrapper
122
123 @box(types.ClassInstanceType)
124 def _box_class_instance(typ, val, c):
125 meminfo, dataptr = cgutils.unpack_tuple(c.builder, val)
126
127 # Create Box instance
128 box_subclassed = _specialize_box(typ)
129 # Note: the ``box_subclassed`` is kept alive by the cache
130 int_addr_boxcls = c.context.get_constant(types.uintp, id(box_subclassed))
131
132 box_cls = c.builder.inttoptr(int_addr_boxcls, c.pyapi.pyobj)
133 box = c.pyapi.call_function_objargs(box_cls, ())
134
135 # Initialize Box instance
136 llvoidptr = ir.IntType(8).as_pointer()
137 addr_meminfo = c.builder.bitcast(meminfo, llvoidptr)
138 addr_data = c.builder.bitcast(dataptr, llvoidptr)
139
140 def set_member(member_offset, value):
141 # Access member by byte offset
142 offset = c.context.get_constant(types.uintp, member_offset)
143 ptr = cgutils.pointer_add(c.builder, box, offset)
144 casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())
145 c.builder.store(value, casted)
146
147 set_member(_box.box_meminfoptr_offset, addr_meminfo)
148 set_member(_box.box_dataptr_offset, addr_data)
149 return box
150
151
152 @unbox(types.ClassInstanceType)
153 def _unbox_class_instance(typ, val, c):
154 def access_member(member_offset):
155 # Access member by byte offset
156 offset = c.context.get_constant(types.uintp, member_offset)
157 llvoidptr = ir.IntType(8).as_pointer()
158 ptr = cgutils.pointer_add(c.builder, val, offset)
159 casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())
160 return c.builder.load(casted)
161
162 struct_cls = cgutils.create_struct_proxy(typ)
163 inst = struct_cls(c.context, c.builder)
164
165 # load from Python object
166 ptr_meminfo = access_member(_box.box_meminfoptr_offset)
167 ptr_dataptr = access_member(_box.box_dataptr_offset)
168
169 # store to native structure
170 inst.meminfo = c.builder.bitcast(ptr_meminfo, inst.meminfo.type)
171 inst.data = c.builder.bitcast(ptr_dataptr, inst.data.type)
172
173 ret = inst._getvalue()
174
175 c.context.nrt.incref(c.builder, typ, ret)
176
177 return NativeValue(ret, is_error=c.pyapi.c_api_error())
178
[end of numba/jitclass/boxing.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/numba/jitclass/boxing.py b/numba/jitclass/boxing.py
--- a/numba/jitclass/boxing.py
+++ b/numba/jitclass/boxing.py
@@ -77,7 +77,9 @@
if typ in _cache_specialized_box:
return _cache_specialized_box[typ]
dct = {'__slots__': (),
- '_numba_type_': typ}
+ '_numba_type_': typ,
+ '__doc__': typ.class_type.class_def.__doc__,
+ }
# Inject attributes as class properties
for field in typ.struct:
getter = _generate_getter(field)
@@ -91,7 +93,10 @@
getter = _generate_getter(field)
if 'set' in impdct:
setter = _generate_setter(field)
- dct[field] = property(getter, setter)
+ # get docstring from either the fget or fset
+ imp = impdct.get('get') or impdct.get('set') or None
+ doc = getattr(imp, '__doc__', None)
+ dct[field] = property(getter, setter, doc=doc)
# Inject methods as class members
for name, func in typ.methods.items():
if not (name.startswith('__') and name.endswith('__')):
@@ -112,7 +117,8 @@
fast_fget = fget.compile((typ,))
fget.disable_compile()
setattr(subcls, k,
- property(fast_fget, prop.fset, prop.fdel))
+ property(fast_fget, prop.fset, prop.fdel,
+ doc=prop.__doc__))
return subcls
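A follow-up note on the property change above: a plain `property` copies `fget.__doc__` automatically when `doc` is omitted, but here the getters are generated njit wrappers with no docstring of their own, so the docstring has to be forwarded explicitly through the `doc` argument; a small self-contained illustration (function names are illustrative):

```python
# property() accepts an explicit doc argument; this mirrors the passthrough.
def original_getter(self):
    """Read the value."""
    return self._value

def generated_getter(self):  # stand-in for the njit-compiled accessor
    return self._value

prop = property(generated_getter, doc=original_getter.__doc__)
assert prop.__doc__ == "Read the value."
```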
|
{"golden_diff": "diff --git a/numba/jitclass/boxing.py b/numba/jitclass/boxing.py\n--- a/numba/jitclass/boxing.py\n+++ b/numba/jitclass/boxing.py\n@@ -77,7 +77,9 @@\n if typ in _cache_specialized_box:\n return _cache_specialized_box[typ]\n dct = {'__slots__': (),\n- '_numba_type_': typ}\n+ '_numba_type_': typ,\n+ '__doc__': typ.class_type.class_def.__doc__,\n+ }\n # Inject attributes as class properties\n for field in typ.struct:\n getter = _generate_getter(field)\n@@ -91,7 +93,10 @@\n getter = _generate_getter(field)\n if 'set' in impdct:\n setter = _generate_setter(field)\n- dct[field] = property(getter, setter)\n+ # get docstring from either the fget or fset\n+ imp = impdct.get('get') or impdct.get('set') or None\n+ doc = getattr(imp, '__doc__', None)\n+ dct[field] = property(getter, setter, doc=doc)\n # Inject methods as class members\n for name, func in typ.methods.items():\n if not (name.startswith('__') and name.endswith('__')):\n@@ -112,7 +117,8 @@\n fast_fget = fget.compile((typ,))\n fget.disable_compile()\n setattr(subcls, k,\n- property(fast_fget, prop.fset, prop.fdel))\n+ property(fast_fget, prop.fset, prop.fdel,\n+ doc=prop.__doc__))\n \n return subcls\n", "issue": "jitclass __doc__ passthrough to instance\nJitclass is not exposing the docstring of the class nor the methods.\n\n", "before_files": [{"content": "\"\"\"\nImplement logic relating to wrapping (box) and unwrapping (unbox) instances\nof jitclasses for use inside the python interpreter.\n\"\"\"\nfrom __future__ import print_function, absolute_import\n\nfrom functools import wraps, partial\n\nfrom llvmlite import ir\n\nfrom numba import types, cgutils\nfrom numba.pythonapi import box, unbox, NativeValue\nfrom numba import njit\nfrom numba.six import exec_\nfrom . import _box\n\n\n_getter_code_template = \"\"\"\ndef accessor(__numba_self_):\n return __numba_self_.{0}\n\"\"\"\n\n_setter_code_template = \"\"\"\ndef mutator(__numba_self_, __numba_val):\n __numba_self_.{0} = __numba_val\n\"\"\"\n\n_method_code_template = \"\"\"\ndef method(__numba_self_, *args):\n return __numba_self_.{method}(*args)\n\"\"\"\n\n\ndef _generate_property(field, template, fname):\n \"\"\"\n Generate simple function that get/set a field of the instance\n \"\"\"\n source = template.format(field)\n glbls = {}\n exec_(source, glbls)\n return njit(glbls[fname])\n\n\n_generate_getter = partial(_generate_property, template=_getter_code_template,\n fname='accessor')\n_generate_setter = partial(_generate_property, template=_setter_code_template,\n fname='mutator')\n\n\ndef _generate_method(name, func):\n \"\"\"\n Generate a wrapper for calling a method. 
Note the wrapper will only\n accept positional arguments.\n \"\"\"\n source = _method_code_template.format(method=name)\n glbls = {}\n exec_(source, glbls)\n method = njit(glbls['method'])\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n return method(*args, **kwargs)\n\n return wrapper\n\n\n_cache_specialized_box = {}\n\n\ndef _specialize_box(typ):\n \"\"\"\n Create a subclass of Box that is specialized to the jitclass.\n\n This function caches the result to avoid code bloat.\n \"\"\"\n # Check cache\n if typ in _cache_specialized_box:\n return _cache_specialized_box[typ]\n dct = {'__slots__': (),\n '_numba_type_': typ}\n # Inject attributes as class properties\n for field in typ.struct:\n getter = _generate_getter(field)\n setter = _generate_setter(field)\n dct[field] = property(getter, setter)\n # Inject properties as class properties\n for field, impdct in typ.jitprops.items():\n getter = None\n setter = None\n if 'get' in impdct:\n getter = _generate_getter(field)\n if 'set' in impdct:\n setter = _generate_setter(field)\n dct[field] = property(getter, setter)\n # Inject methods as class members\n for name, func in typ.methods.items():\n if not (name.startswith('__') and name.endswith('__')):\n dct[name] = _generate_method(name, func)\n # Create subclass\n subcls = type(typ.classname, (_box.Box,), dct)\n # Store to cache\n _cache_specialized_box[typ] = subcls\n\n # Pre-compile attribute getter.\n # Note: This must be done after the \"box\" class is created because\n # compiling the getter requires the \"box\" class to be defined.\n for k, v in dct.items():\n if isinstance(v, property):\n prop = getattr(subcls, k)\n if prop.fget is not None:\n fget = prop.fget\n fast_fget = fget.compile((typ,))\n fget.disable_compile()\n setattr(subcls, k,\n property(fast_fget, prop.fset, prop.fdel))\n\n return subcls\n\n\n###############################################################################\n# Implement box/unbox for call wrapper\n\n@box(types.ClassInstanceType)\ndef _box_class_instance(typ, val, c):\n meminfo, dataptr = cgutils.unpack_tuple(c.builder, val)\n\n # Create Box instance\n box_subclassed = _specialize_box(typ)\n # Note: the ``box_subclassed`` is kept alive by the cache\n int_addr_boxcls = c.context.get_constant(types.uintp, id(box_subclassed))\n\n box_cls = c.builder.inttoptr(int_addr_boxcls, c.pyapi.pyobj)\n box = c.pyapi.call_function_objargs(box_cls, ())\n\n # Initialize Box instance\n llvoidptr = ir.IntType(8).as_pointer()\n addr_meminfo = c.builder.bitcast(meminfo, llvoidptr)\n addr_data = c.builder.bitcast(dataptr, llvoidptr)\n\n def set_member(member_offset, value):\n # Access member by byte offset\n offset = c.context.get_constant(types.uintp, member_offset)\n ptr = cgutils.pointer_add(c.builder, box, offset)\n casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())\n c.builder.store(value, casted)\n\n set_member(_box.box_meminfoptr_offset, addr_meminfo)\n set_member(_box.box_dataptr_offset, addr_data)\n return box\n\n\n@unbox(types.ClassInstanceType)\ndef _unbox_class_instance(typ, val, c):\n def access_member(member_offset):\n # Access member by byte offset\n offset = c.context.get_constant(types.uintp, member_offset)\n llvoidptr = ir.IntType(8).as_pointer()\n ptr = cgutils.pointer_add(c.builder, val, offset)\n casted = c.builder.bitcast(ptr, llvoidptr.as_pointer())\n return c.builder.load(casted)\n\n struct_cls = cgutils.create_struct_proxy(typ)\n inst = struct_cls(c.context, c.builder)\n\n # load from Python object\n ptr_meminfo = 
access_member(_box.box_meminfoptr_offset)\n ptr_dataptr = access_member(_box.box_dataptr_offset)\n\n # store to native structure\n inst.meminfo = c.builder.bitcast(ptr_meminfo, inst.meminfo.type)\n inst.data = c.builder.bitcast(ptr_dataptr, inst.data.type)\n\n ret = inst._getvalue()\n\n c.context.nrt.incref(c.builder, typ, ret)\n\n return NativeValue(ret, is_error=c.pyapi.c_api_error())\n", "path": "numba/jitclass/boxing.py"}]}
| 2,374 | 384 |
| gh_patches_debug_1554 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-520 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please expose service for manual schedule refresh
As per my understanding, the current setup only refreshes the schedule once a day, at the time configured in `fetch_time`.
This may cause issues if the source is unavailable at that time, there is a connectivity problem, or a schedule change has been announced through other channels and the update needs to happen on demand.
Please expose a `waste_collection_schedule.reload` service that calls the same routine normally executed at `fetch_time`, but on demand.
</issue>
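Home Assistant integrations can expose such an on-demand action by registering a service during setup; a minimal sketch of the idea — the service name `fetch_data` matches the accepted diff further down in this entry, while the helper and argument names are illustrative:

```python
# Illustrative sketch: register a service that re-runs the scheduled fetch
# routine on demand instead of waiting for fetch_time.
def register_refresh_service(hass, api, domain="waste_collection_schedule"):
    def fetch_data(call=None):
        hass.add_job(api._fetch)

    hass.services.async_register(domain, "fetch_data", fetch_data)
```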
<code>
[start of custom_components/waste_collection_schedule/__init__.py]
1 """Waste Collection Schedule Component."""
2 import logging
3 import site
4 from pathlib import Path
5 from random import randrange
6
7 import homeassistant.helpers.config_validation as cv
8 import homeassistant.util.dt as dt_util
9 import voluptuous as vol
10 from homeassistant.core import HomeAssistant, callback
11 from homeassistant.helpers.dispatcher import dispatcher_send
12
13 from .const import DOMAIN, UPDATE_SENSORS_SIGNAL
14
15 from homeassistant.helpers.event import async_call_later # isort:skip
16 from homeassistant.helpers.event import async_track_time_change # isort:skip
17
18 # add module directory to path
19 package_dir = Path(__file__).resolve().parents[0]
20 site.addsitedir(str(package_dir))
21 from waste_collection_schedule import Customize, SourceShell # type: ignore # isort:skip # noqa: E402
22
23 _LOGGER = logging.getLogger(__name__)
24
25 CONF_SOURCES = "sources"
26 CONF_SOURCE_NAME = "name"
27 CONF_SOURCE_ARGS = "args" # source arguments
28 CONF_SOURCE_CALENDAR_TITLE = "calendar_title"
29 CONF_SEPARATOR = "separator"
30 CONF_FETCH_TIME = "fetch_time"
31 CONF_RANDOM_FETCH_TIME_OFFSET = "random_fetch_time_offset"
32 CONF_DAY_SWITCH_TIME = "day_switch_time"
33
34 CONF_CUSTOMIZE = "customize"
35 CONF_TYPE = "type"
36 CONF_ALIAS = "alias"
37 CONF_SHOW = "show"
38 CONF_ICON = "icon"
39 CONF_PICTURE = "picture"
40 CONF_USE_DEDICATED_CALENDAR = "use_dedicated_calendar"
41 CONF_DEDICATED_CALENDAR_TITLE = "dedicated_calendar_title"
42
43 CUSTOMIZE_CONFIG = vol.Schema(
44 {
45 vol.Optional(CONF_TYPE): cv.string,
46 vol.Optional(CONF_ALIAS): cv.string,
47 vol.Optional(CONF_SHOW): cv.boolean,
48 vol.Optional(CONF_ICON): cv.icon,
49 vol.Optional(CONF_PICTURE): cv.string,
50 vol.Optional(CONF_USE_DEDICATED_CALENDAR): cv.boolean,
51 vol.Optional(CONF_DEDICATED_CALENDAR_TITLE): cv.string,
52 }
53 )
54
55 SOURCE_CONFIG = vol.Schema(
56 {
57 vol.Required(CONF_SOURCE_NAME): cv.string,
58 vol.Required(CONF_SOURCE_ARGS): dict,
59 vol.Optional(CONF_CUSTOMIZE, default=[]): vol.All(
60 cv.ensure_list, [CUSTOMIZE_CONFIG]
61 ),
62 vol.Optional(CONF_SOURCE_CALENDAR_TITLE): cv.string,
63 }
64 )
65
66 CONFIG_SCHEMA = vol.Schema(
67 {
68 DOMAIN: vol.Schema(
69 {
70 vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_CONFIG]),
71 vol.Optional(CONF_SEPARATOR, default=", "): cv.string,
72 vol.Optional(CONF_FETCH_TIME, default="01:00"): cv.time,
73 vol.Optional(
74 CONF_RANDOM_FETCH_TIME_OFFSET, default=60
75 ): cv.positive_int,
76 vol.Optional(CONF_DAY_SWITCH_TIME, default="10:00"): cv.time,
77 }
78 )
79 },
80 extra=vol.ALLOW_EXTRA,
81 )
82
83
84 async def async_setup(hass: HomeAssistant, config: dict):
85 """Set up the component. config contains data from configuration.yaml."""
86 # create empty api object as singleton
87 api = WasteCollectionApi(
88 hass,
89 separator=config[DOMAIN][CONF_SEPARATOR],
90 fetch_time=config[DOMAIN][CONF_FETCH_TIME],
91 random_fetch_time_offset=config[DOMAIN][CONF_RANDOM_FETCH_TIME_OFFSET],
92 day_switch_time=config[DOMAIN][CONF_DAY_SWITCH_TIME],
93 )
94
95 # create shells for source(s)
96 for source in config[DOMAIN][CONF_SOURCES]:
97 # create customize object
98 customize = {}
99 for c in source.get(CONF_CUSTOMIZE, {}):
100 customize[c[CONF_TYPE]] = Customize(
101 waste_type=c[CONF_TYPE],
102 alias=c.get(CONF_ALIAS),
103 show=c.get(CONF_SHOW, True),
104 icon=c.get(CONF_ICON),
105 picture=c.get(CONF_PICTURE),
106 use_dedicated_calendar=c.get(CONF_USE_DEDICATED_CALENDAR, False),
107 dedicated_calendar_title=c.get(CONF_DEDICATED_CALENDAR_TITLE, False),
108 )
109 api.add_source_shell(
110 source_name=source[CONF_SOURCE_NAME],
111 customize=customize,
112 calendar_title=source.get(CONF_SOURCE_CALENDAR_TITLE),
113 source_args=source.get(CONF_SOURCE_ARGS, {}),
114 )
115
116 # store api object
117 hass.data.setdefault(DOMAIN, api)
118
119 # load calendar platform
120 await hass.helpers.discovery.async_load_platform(
121 "calendar", DOMAIN, {"api": api}, config
122 )
123
124 # initial fetch of all data
125 hass.add_job(api._fetch)
126
127 return True
128
129
130 class WasteCollectionApi:
131 def __init__(
132 self, hass, separator, fetch_time, random_fetch_time_offset, day_switch_time
133 ):
134 self._hass = hass
135 self._source_shells = []
136 self._separator = separator
137 self._fetch_time = fetch_time
138 self._random_fetch_time_offset = random_fetch_time_offset
139 self._day_switch_time = day_switch_time
140
141 # start timer to fetch date once per day
142 async_track_time_change(
143 hass,
144 self._fetch_callback,
145 self._fetch_time.hour,
146 self._fetch_time.minute,
147 self._fetch_time.second,
148 )
149
150 # start timer for day-switch time
151 if self._day_switch_time != self._fetch_time:
152 async_track_time_change(
153 hass,
154 self._update_sensors_callback,
155 self._day_switch_time.hour,
156 self._day_switch_time.minute,
157 self._day_switch_time.second,
158 )
159
160 # add a timer at midnight (if not already there) to update days-to
161 midnight = dt_util.parse_time("00:00")
162 if midnight != self._fetch_time and midnight != self._day_switch_time:
163 async_track_time_change(
164 hass,
165 self._update_sensors_callback,
166 midnight.hour,
167 midnight.minute,
168 midnight.second,
169 )
170
171 @property
172 def separator(self):
173 """Separator string, used to separator waste types."""
174 return self._separator
175
176 @property
177 def fetch_time(self):
178 """When to fetch to data."""
179 return self._fetch_time
180
181 @property
182 def day_switch_time(self):
183 """When to hide entries for today."""
184 return self._day_switch_time
185
186 def add_source_shell(
187 self,
188 source_name,
189 customize,
190 source_args,
191 calendar_title,
192 ):
193 self._source_shells.append(
194 SourceShell.create(
195 source_name=source_name,
196 customize=customize,
197 source_args=source_args,
198 calendar_title=calendar_title,
199 )
200 )
201
202 def _fetch(self, *_):
203 for shell in self._source_shells:
204 shell.fetch()
205
206 self._update_sensors_callback()
207
208 @property
209 def shells(self):
210 return self._source_shells
211
212 def get_shell(self, index):
213 return self._source_shells[index] if index < len(self._source_shells) else None
214
215 @callback
216 def _fetch_callback(self, *_):
217 async_call_later(
218 self._hass,
219 randrange(0, 60 * self._random_fetch_time_offset),
220 self._fetch_now_callback,
221 )
222
223 @callback
224 def _fetch_now_callback(self, *_):
225 self._hass.add_job(self._fetch)
226
227 @callback
228 def _update_sensors_callback(self, *_):
229 dispatcher_send(self._hass, UPDATE_SENSORS_SIGNAL)
230
[end of custom_components/waste_collection_schedule/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/__init__.py b/custom_components/waste_collection_schedule/__init__.py
--- a/custom_components/waste_collection_schedule/__init__.py
+++ b/custom_components/waste_collection_schedule/__init__.py
@@ -123,6 +123,12 @@
# initial fetch of all data
hass.add_job(api._fetch)
+
+ def fetch_data():
+ hass.add_job(api._fetch)
+
+ # Register new Service fetch_data
+ hass.services.async_register(DOMAIN, 'fetch_data', fetch_data)
return True
|
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/__init__.py b/custom_components/waste_collection_schedule/__init__.py\n--- a/custom_components/waste_collection_schedule/__init__.py\n+++ b/custom_components/waste_collection_schedule/__init__.py\n@@ -123,6 +123,12 @@\n \n # initial fetch of all data\n hass.add_job(api._fetch)\n+ \n+ def fetch_data():\n+ hass.add_job(api._fetch)\n+\n+ # Register new Service fetch_data\n+ hass.services.async_register(DOMAIN, 'fetch_data', fetch_data)\n \n return True\n", "issue": "Please expose service for manual schedule refresh\nAs per my understanding current setup allows refresh of the schedule to happen only once a day at the time configured in `fetch_time`.\r\nThis may cause issues if for some reason the source is not available at the given time, there is an issue with connectivity or a schedule change has been announced via different channels and update needs to happen on-demand.\r\n\r\nPlease expose `waste_collection_schedule.reload` service that would call the same routing that is normally executed at `fetch_time`, but on demand.\n", "before_files": [{"content": "\"\"\"Waste Collection Schedule Component.\"\"\"\nimport logging\nimport site\nfrom pathlib import Path\nfrom random import randrange\n\nimport homeassistant.helpers.config_validation as cv\nimport homeassistant.util.dt as dt_util\nimport voluptuous as vol\nfrom homeassistant.core import HomeAssistant, callback\nfrom homeassistant.helpers.dispatcher import dispatcher_send\n\nfrom .const import DOMAIN, UPDATE_SENSORS_SIGNAL\n\nfrom homeassistant.helpers.event import async_call_later # isort:skip\nfrom homeassistant.helpers.event import async_track_time_change # isort:skip\n\n# add module directory to path\npackage_dir = Path(__file__).resolve().parents[0]\nsite.addsitedir(str(package_dir))\nfrom waste_collection_schedule import Customize, SourceShell # type: ignore # isort:skip # noqa: E402\n\n_LOGGER = logging.getLogger(__name__)\n\nCONF_SOURCES = \"sources\"\nCONF_SOURCE_NAME = \"name\"\nCONF_SOURCE_ARGS = \"args\" # source arguments\nCONF_SOURCE_CALENDAR_TITLE = \"calendar_title\"\nCONF_SEPARATOR = \"separator\"\nCONF_FETCH_TIME = \"fetch_time\"\nCONF_RANDOM_FETCH_TIME_OFFSET = \"random_fetch_time_offset\"\nCONF_DAY_SWITCH_TIME = \"day_switch_time\"\n\nCONF_CUSTOMIZE = \"customize\"\nCONF_TYPE = \"type\"\nCONF_ALIAS = \"alias\"\nCONF_SHOW = \"show\"\nCONF_ICON = \"icon\"\nCONF_PICTURE = \"picture\"\nCONF_USE_DEDICATED_CALENDAR = \"use_dedicated_calendar\"\nCONF_DEDICATED_CALENDAR_TITLE = \"dedicated_calendar_title\"\n\nCUSTOMIZE_CONFIG = vol.Schema(\n {\n vol.Optional(CONF_TYPE): cv.string,\n vol.Optional(CONF_ALIAS): cv.string,\n vol.Optional(CONF_SHOW): cv.boolean,\n vol.Optional(CONF_ICON): cv.icon,\n vol.Optional(CONF_PICTURE): cv.string,\n vol.Optional(CONF_USE_DEDICATED_CALENDAR): cv.boolean,\n vol.Optional(CONF_DEDICATED_CALENDAR_TITLE): cv.string,\n }\n)\n\nSOURCE_CONFIG = vol.Schema(\n {\n vol.Required(CONF_SOURCE_NAME): cv.string,\n vol.Required(CONF_SOURCE_ARGS): dict,\n vol.Optional(CONF_CUSTOMIZE, default=[]): vol.All(\n cv.ensure_list, [CUSTOMIZE_CONFIG]\n ),\n vol.Optional(CONF_SOURCE_CALENDAR_TITLE): cv.string,\n }\n)\n\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.Schema(\n {\n vol.Required(CONF_SOURCES): vol.All(cv.ensure_list, [SOURCE_CONFIG]),\n vol.Optional(CONF_SEPARATOR, default=\", \"): cv.string,\n vol.Optional(CONF_FETCH_TIME, default=\"01:00\"): cv.time,\n vol.Optional(\n CONF_RANDOM_FETCH_TIME_OFFSET, default=60\n ): cv.positive_int,\n 
vol.Optional(CONF_DAY_SWITCH_TIME, default=\"10:00\"): cv.time,\n }\n )\n },\n extra=vol.ALLOW_EXTRA,\n)\n\n\nasync def async_setup(hass: HomeAssistant, config: dict):\n \"\"\"Set up the component. config contains data from configuration.yaml.\"\"\"\n # create empty api object as singleton\n api = WasteCollectionApi(\n hass,\n separator=config[DOMAIN][CONF_SEPARATOR],\n fetch_time=config[DOMAIN][CONF_FETCH_TIME],\n random_fetch_time_offset=config[DOMAIN][CONF_RANDOM_FETCH_TIME_OFFSET],\n day_switch_time=config[DOMAIN][CONF_DAY_SWITCH_TIME],\n )\n\n # create shells for source(s)\n for source in config[DOMAIN][CONF_SOURCES]:\n # create customize object\n customize = {}\n for c in source.get(CONF_CUSTOMIZE, {}):\n customize[c[CONF_TYPE]] = Customize(\n waste_type=c[CONF_TYPE],\n alias=c.get(CONF_ALIAS),\n show=c.get(CONF_SHOW, True),\n icon=c.get(CONF_ICON),\n picture=c.get(CONF_PICTURE),\n use_dedicated_calendar=c.get(CONF_USE_DEDICATED_CALENDAR, False),\n dedicated_calendar_title=c.get(CONF_DEDICATED_CALENDAR_TITLE, False),\n )\n api.add_source_shell(\n source_name=source[CONF_SOURCE_NAME],\n customize=customize,\n calendar_title=source.get(CONF_SOURCE_CALENDAR_TITLE),\n source_args=source.get(CONF_SOURCE_ARGS, {}),\n )\n\n # store api object\n hass.data.setdefault(DOMAIN, api)\n\n # load calendar platform\n await hass.helpers.discovery.async_load_platform(\n \"calendar\", DOMAIN, {\"api\": api}, config\n )\n\n # initial fetch of all data\n hass.add_job(api._fetch)\n\n return True\n\n\nclass WasteCollectionApi:\n def __init__(\n self, hass, separator, fetch_time, random_fetch_time_offset, day_switch_time\n ):\n self._hass = hass\n self._source_shells = []\n self._separator = separator\n self._fetch_time = fetch_time\n self._random_fetch_time_offset = random_fetch_time_offset\n self._day_switch_time = day_switch_time\n\n # start timer to fetch date once per day\n async_track_time_change(\n hass,\n self._fetch_callback,\n self._fetch_time.hour,\n self._fetch_time.minute,\n self._fetch_time.second,\n )\n\n # start timer for day-switch time\n if self._day_switch_time != self._fetch_time:\n async_track_time_change(\n hass,\n self._update_sensors_callback,\n self._day_switch_time.hour,\n self._day_switch_time.minute,\n self._day_switch_time.second,\n )\n\n # add a timer at midnight (if not already there) to update days-to\n midnight = dt_util.parse_time(\"00:00\")\n if midnight != self._fetch_time and midnight != self._day_switch_time:\n async_track_time_change(\n hass,\n self._update_sensors_callback,\n midnight.hour,\n midnight.minute,\n midnight.second,\n )\n\n @property\n def separator(self):\n \"\"\"Separator string, used to separator waste types.\"\"\"\n return self._separator\n\n @property\n def fetch_time(self):\n \"\"\"When to fetch to data.\"\"\"\n return self._fetch_time\n\n @property\n def day_switch_time(self):\n \"\"\"When to hide entries for today.\"\"\"\n return self._day_switch_time\n\n def add_source_shell(\n self,\n source_name,\n customize,\n source_args,\n calendar_title,\n ):\n self._source_shells.append(\n SourceShell.create(\n source_name=source_name,\n customize=customize,\n source_args=source_args,\n calendar_title=calendar_title,\n )\n )\n\n def _fetch(self, *_):\n for shell in self._source_shells:\n shell.fetch()\n\n self._update_sensors_callback()\n\n @property\n def shells(self):\n return self._source_shells\n\n def get_shell(self, index):\n return self._source_shells[index] if index < len(self._source_shells) else None\n\n @callback\n def _fetch_callback(self, 
*_):\n async_call_later(\n self._hass,\n randrange(0, 60 * self._random_fetch_time_offset),\n self._fetch_now_callback,\n )\n\n @callback\n def _fetch_now_callback(self, *_):\n self._hass.add_job(self._fetch)\n\n @callback\n def _update_sensors_callback(self, *_):\n dispatcher_send(self._hass, UPDATE_SENSORS_SIGNAL)\n", "path": "custom_components/waste_collection_schedule/__init__.py"}]}
| 2,833 | 133 |
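For context, the fix in the record above resolves the feature request by registering a `fetch_data` service that re-runs the component's existing `api._fetch` routine on demand, which is the same routine the daily `fetch_time` timer schedules. The sketch below shows that registration pattern in isolation; `hass.add_job` and `hass.services.async_register` are the Home Assistant calls used in the diff, while the wrapper function name and the `domain` default are illustrative assumptions.

```python
# Minimal sketch of the service-registration pattern from the diff above.
# `hass` and `api` are supplied by the integration's setup; the wrapper
# name and the default domain string are assumptions for illustration.
def register_fetch_service(hass, api, domain="waste_collection_schedule"):
    def fetch_data(call=None):
        # Re-run the same fetch routine that the fetch_time timer triggers.
        hass.add_job(api._fetch)

    # Afterwards the service can be invoked as
    # waste_collection_schedule.fetch_data from automations or scripts.
    hass.services.async_register(domain, "fetch_data", fetch_data)
```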
gh_patches_debug_11218
|
rasdani/github-patches
|
git_diff
|
openstates__openstates-scrapers-2984
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FL failing since at least 2019-06-03
FL has been failing since 2019-06-03
Based on automated runs it appears that FL has not run successfully in 2 days (2019-06-03).
```
04:01:17 CRITICAL pupa: Session(s) 2009B, 2003C, 2003B, 2002E, 2004A, 2012 Org., 2007D, 1998 Org, 2000A (Jan.), 2007C, 2007A, 2000A (Dec.), 2006 Org., 2000 Org., 2001C, 2005B, 2002D, 2008 Org., 2018 Org., 2003A, 2010 Org., 2004 Org., 2003D, 2007B, 2009A, 2001B, 2014 Org., 2002 Org., 2016 Org., 2010C, 2003E were reported by Florida.get_session_list() but were not found in Florida.legislative_sessions or Florida.ignored_scraped_sessions.
loaded Open States pupa settings...
fl (scrape, import)
bills: {}
```
Visit http://bobsled.openstates.org for more info.
</issue>
<code>
[start of openstates/fl/__init__.py]
1 # encoding=utf-8
2 import logging
3 from pupa.scrape import Jurisdiction, Organization
4 from .bills import FlBillScraper
5 from .people import FlPersonScraper
6 # from .committees import FlCommitteeScraper
7 # from .events import FlEventScraper
8 from openstates.utils import url_xpath
9
10 logging.getLogger(__name__).addHandler(logging.NullHandler())
11
12
13 class Florida(Jurisdiction):
14 division_id = "ocd-division/country:us/state:fl"
15 classification = "government"
16 name = "Florida"
17 url = "http://myflorida.com"
18
19 scrapers = {
20 "bills": FlBillScraper,
21 "people": FlPersonScraper,
22 # "committees": FlCommitteeScraper,
23 # "events": FlEventScraper,
24 }
25 legislative_sessions = [
26 {'name': '2011 Regular Session', 'identifier': '2011',
27 'classification': 'primary'},
28 {'name': '2012 Regular Session', 'identifier': '2012',
29 'classification': 'primary'},
30 {'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B',
31 'classification': 'special'},
32 {'name': '2013 Regular Session', 'identifier': '2013',
33 'classification': 'primary'},
34 {'name': '2014 Regular Session', 'identifier': '2014',
35 'classification': 'primary'},
36 {'name': '2014 Special Session A',
37 'identifier': '2014A', 'classification': 'special'},
38 # data for the below
39 {'name': '2015 Regular Session', 'identifier': '2015',
40 'classification': 'primary'},
41 {'name': '2015 Special Session A',
42 'identifier': '2015A', 'classification': 'special'},
43 {'name': '2015 Special Session B',
44 'identifier': '2015B', 'classification': 'special'},
45 {'name': '2015 Special Session C',
46 'identifier': '2015C', 'classification': 'special'},
47 {'name': '2016 Regular Session', 'identifier': '2016',
48 'classification': 'primary'},
49 {'name': '2017 Regular Session', 'identifier': '2017', 'classification': 'primary',
50 'start_date': '2017-03-07', 'end_date': '2017-05-05'},
51 {'name': '2017 Special Session A',
52 'identifier': '2017A', 'classification': 'special'},
53 {'name': '2018 Regular Session', 'identifier': '2018', 'classification': 'primary',
54 'start_date': '2018-01-08', 'end_date': '2018-03-09'},
55 {'name': '2019 Regular Session', 'identifier': '2019', 'classification': 'primary',
56 'start_date': '2019-03-05', 'end_date': '2019-05-03'},
57 ]
58 ignored_scraped_sessions = [
59 *(str(each) for each in range(1997, 2010)),
60 '2010', '2010A', '2010O',
61 '2012O',
62 '2014O',
63 '2016O',
64 '2018O',
65 ]
66
67 def get_organizations(self):
68 legis = Organization(name="Florida Legislature",
69 classification="legislature")
70
71 upper = Organization(
72 'Florida Senate', classification='upper', parent_id=legis._id)
73 lower = Organization('Florida House of Representatives', classification='lower',
74 parent_id=legis._id)
75
76 yield legis
77 yield upper
78 yield lower
79
80 def get_session_list(self):
81 return url_xpath('http://flsenate.gov', '//option/text()')
82
[end of openstates/fl/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openstates/fl/__init__.py b/openstates/fl/__init__.py
--- a/openstates/fl/__init__.py
+++ b/openstates/fl/__init__.py
@@ -62,6 +62,37 @@
'2014O',
'2016O',
'2018O',
+ '2018 Org.',
+ '2016 Org.',
+ '2014 Org.',
+ '2012 Org.',
+ '2010 Org.',
+ '2010C',
+ '2009B',
+ '2009A',
+ '2008 Org.',
+ '2007D',
+ '2007C',
+ '2007B',
+ '2007A',
+ '2006 Org.',
+ '2005B',
+ '2004A',
+ '2004 Org.',
+ '2003E',
+ '2003D',
+ '2003C',
+ '2003B',
+ '2003A',
+ '2002E',
+ '2002D',
+ '2002 Org.',
+ '2001C',
+ '2001B',
+ '2000A (Jan.)',
+ '2000A (Dec.)',
+ '2000 Org.',
+ '1998 Org',
]
def get_organizations(self):
|
{"golden_diff": "diff --git a/openstates/fl/__init__.py b/openstates/fl/__init__.py\n--- a/openstates/fl/__init__.py\n+++ b/openstates/fl/__init__.py\n@@ -62,6 +62,37 @@\n '2014O',\n '2016O',\n '2018O',\n+ '2018 Org.',\n+ '2016 Org.',\n+ '2014 Org.',\n+ '2012 Org.',\n+ '2010 Org.',\n+ '2010C',\n+ '2009B',\n+ '2009A',\n+ '2008 Org.',\n+ '2007D',\n+ '2007C',\n+ '2007B',\n+ '2007A',\n+ '2006 Org.',\n+ '2005B',\n+ '2004A',\n+ '2004 Org.',\n+ '2003E',\n+ '2003D',\n+ '2003C',\n+ '2003B',\n+ '2003A',\n+ '2002E',\n+ '2002D',\n+ '2002 Org.',\n+ '2001C',\n+ '2001B',\n+ '2000A (Jan.)',\n+ '2000A (Dec.)',\n+ '2000 Org.',\n+ '1998 Org',\n ]\n \n def get_organizations(self):\n", "issue": "FL failing since at least 2019-06-03\nFL has been failing since 2019-06-03\n\nBased on automated runs it appears that FL has not run successfully in 2 days (2019-06-03).\n\n\n```\n 04:01:17 CRITICAL pupa: Session(s) 2009B, 2003C, 2003B, 2002E, 2004A, 2012 Org., 2007D, 1998 Org, 2000A (Jan.), 2007C, 2007A, 2000A (Dec.), 2006 Org., 2000 Org., 2001C, 2005B, 2002D, 2008 Org., 2018 Org., 2003A, 2010 Org., 2004 Org., 2003D, 2007B, 2009A, 2001B, 2014 Org., 2002 Org., 2016 Org., 2010C, 2003E were reported by Florida.get_session_list() but were not found in Florida.legislative_sessions or Florida.ignored_scraped_sessions.\nloaded Open States pupa settings...\nfl (scrape, import)\n bills: {}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "# encoding=utf-8\nimport logging\nfrom pupa.scrape import Jurisdiction, Organization\nfrom .bills import FlBillScraper\nfrom .people import FlPersonScraper\n# from .committees import FlCommitteeScraper\n# from .events import FlEventScraper\nfrom openstates.utils import url_xpath\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\n\nclass Florida(Jurisdiction):\n division_id = \"ocd-division/country:us/state:fl\"\n classification = \"government\"\n name = \"Florida\"\n url = \"http://myflorida.com\"\n\n scrapers = {\n \"bills\": FlBillScraper,\n \"people\": FlPersonScraper,\n # \"committees\": FlCommitteeScraper,\n # \"events\": FlEventScraper,\n }\n legislative_sessions = [\n {'name': '2011 Regular Session', 'identifier': '2011',\n 'classification': 'primary'},\n {'name': '2012 Regular Session', 'identifier': '2012',\n 'classification': 'primary'},\n {'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B',\n 'classification': 'special'},\n {'name': '2013 Regular Session', 'identifier': '2013',\n 'classification': 'primary'},\n {'name': '2014 Regular Session', 'identifier': '2014',\n 'classification': 'primary'},\n {'name': '2014 Special Session A',\n 'identifier': '2014A', 'classification': 'special'},\n # data for the below\n {'name': '2015 Regular Session', 'identifier': '2015',\n 'classification': 'primary'},\n {'name': '2015 Special Session A',\n 'identifier': '2015A', 'classification': 'special'},\n {'name': '2015 Special Session B',\n 'identifier': '2015B', 'classification': 'special'},\n {'name': '2015 Special Session C',\n 'identifier': '2015C', 'classification': 'special'},\n {'name': '2016 Regular Session', 'identifier': '2016',\n 'classification': 'primary'},\n {'name': '2017 Regular Session', 'identifier': '2017', 'classification': 'primary',\n 'start_date': '2017-03-07', 'end_date': '2017-05-05'},\n {'name': '2017 Special Session A',\n 'identifier': '2017A', 'classification': 'special'},\n {'name': '2018 Regular Session', 'identifier': '2018', 'classification': 'primary',\n 'start_date': '2018-01-08', 
'end_date': '2018-03-09'},\n {'name': '2019 Regular Session', 'identifier': '2019', 'classification': 'primary',\n 'start_date': '2019-03-05', 'end_date': '2019-05-03'},\n ]\n ignored_scraped_sessions = [\n *(str(each) for each in range(1997, 2010)),\n '2010', '2010A', '2010O',\n '2012O',\n '2014O',\n '2016O',\n '2018O',\n ]\n\n def get_organizations(self):\n legis = Organization(name=\"Florida Legislature\",\n classification=\"legislature\")\n\n upper = Organization(\n 'Florida Senate', classification='upper', parent_id=legis._id)\n lower = Organization('Florida House of Representatives', classification='lower',\n parent_id=legis._id)\n\n yield legis\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath('http://flsenate.gov', '//option/text()')\n", "path": "openstates/fl/__init__.py"}]}
| 1,972 | 372 |
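For context on the fix above: pupa validates that every session string returned by `get_session_list()` appears either in the jurisdiction's `legislative_sessions` or in `ignored_scraped_sessions`, and raises the CRITICAL error otherwise, so listing the organizational and special sessions under `ignored_scraped_sessions` is enough to let the scrape proceed without adding scrape logic for them. A simplified illustration of that consistency check (not pupa's actual implementation) looks like this:

```python
# Simplified illustration of the session cross-check that produced the
# CRITICAL error; this is not pupa's real code, only the rule it enforces.
def check_sessions(scraped_sessions, legislative_sessions, ignored_sessions):
    known = {s["identifier"] for s in legislative_sessions}
    unaccounted = set(scraped_sessions) - known - set(ignored_sessions)
    if unaccounted:
        raise RuntimeError(
            "Session(s) {} were reported by get_session_list() but were not "
            "found in legislative_sessions or ignored_scraped_sessions.".format(
                ", ".join(sorted(unaccounted))
            )
        )
```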
gh_patches_debug_7337
|
rasdani/github-patches
|
git_diff
|
frappe__frappe-15233
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot search for keywords via Global Search
Steps:
1. Enter any keyword in global search
2. Hit enter
3. Instead of returning relevant records, the system throws an error message for the relevant DocType

```
Traceback (most recent call last):
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/app.py", line 66, in application
response = frappe.api.handle()
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/api.py", line 54, in handle
return frappe.handler.handle()
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/handler.py", line 31, in handle
data = execute_cmd(cmd)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/handler.py", line 67, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py", line 1213, in call
return fn(*args, **newargs)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/utils/global_search.py", line 422, in search
allowed_doctypes = get_doctypes_for_global_search()
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/desk/doctype/global_search_settings/global_search_settings.py", line 39, in get_doctypes_for_global_search
return frappe.cache().hget("global_search", "search_priorities", get_from_db)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/utils/redis_wrapper.py", line 194, in hget
value = generator()
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/desk/doctype/global_search_settings/global_search_settings.py", line 36, in get_from_db
doctypes = frappe.get_list("Global Search DocType", fields=["document_type"], order_by="idx ASC")
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py", line 1446, in get_list
return frappe.model.db_query.DatabaseQuery(doctype).execute(*args, **kwargs)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/model/db_query.py", line 40, in execute
not frappe.has_permission(self.doctype, "select", user=user, parent_doctype=parent_doctype) and \
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py", line 743, in has_permission
raise_exception=throw, parent_doctype=parent_doctype)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/permissions.py", line 24, in inner
result = func(*args, **kwargs)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/permissions.py", line 55, in has_permission
user, raise_exception, parent_doctype)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/permissions.py", line 585, in has_child_table_permission
), title=_("Parent DocType Required"))
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py", line 438, in throw
msgprint(msg, raise_exception=exc, title=title, indicator='red', is_minimizable=is_minimizable, wide=wide, as_list=as_list)
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py", line 417, in msgprint
_raise_exception()
File "/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py", line 371, in _raise_exception
raise raise_exception(msg)
frappe.exceptions.ValidationError: Please specify a valid parent DocType for <strong>Global Search DocType</strong>
```
ERPNext: v13.x.x-develop () (develop)
Frappe Framework: v14.x.x-develop () (develop)
</issue>
<code>
[start of frappe/desk/doctype/global_search_settings/global_search_settings.py]
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2019, Frappe Technologies and contributors
3 # License: MIT. See LICENSE
4
5 import frappe
6 from frappe.model.document import Document
7 from frappe import _
8
9 class GlobalSearchSettings(Document):
10
11 def validate(self):
12 dts, core_dts, repeated_dts = [], [], []
13
14 for dt in self.allowed_in_global_search:
15 if dt.document_type in dts:
16 repeated_dts.append(dt.document_type)
17
18 if frappe.get_meta(dt.document_type).module == "Core":
19 core_dts.append(dt.document_type)
20
21 dts.append(dt.document_type)
22
23 if core_dts:
24 core_dts = ", ".join(frappe.bold(dt) for dt in core_dts)
25 frappe.throw(_("Core Modules {0} cannot be searched in Global Search.").format(core_dts))
26
27 if repeated_dts:
28 repeated_dts = (", ".join([frappe.bold(dt) for dt in repeated_dts]))
29 frappe.throw(_("Document Type {0} has been repeated.").format(repeated_dts))
30
31 # reset cache
32 frappe.cache().hdel('global_search', 'search_priorities')
33
34 def get_doctypes_for_global_search():
35 def get_from_db():
36 doctypes = frappe.get_list("Global Search DocType", fields=["document_type"], order_by="idx ASC")
37 return [d.document_type for d in doctypes] or []
38
39 return frappe.cache().hget("global_search", "search_priorities", get_from_db)
40
41
42 @frappe.whitelist()
43 def reset_global_search_settings_doctypes():
44 update_global_search_doctypes()
45
46 def update_global_search_doctypes():
47 global_search_doctypes = []
48 show_message(1, _("Fetching default Global Search documents."))
49
50 installed_apps = [app for app in frappe.get_installed_apps() if app]
51 active_domains = [domain for domain in frappe.get_active_domains() if domain]
52 active_domains.append("Default")
53
54 for app in installed_apps:
55 search_doctypes = frappe.get_hooks(hook="global_search_doctypes", app_name=app)
56 if not search_doctypes:
57 continue
58
59 for domain in active_domains:
60 if search_doctypes.get(domain):
61 global_search_doctypes.extend(search_doctypes.get(domain))
62
63 doctype_list = {dt.name for dt in frappe.get_all("DocType")}
64 allowed_in_global_search = []
65
66 for dt in global_search_doctypes:
67 if dt.get("index") is not None:
68 allowed_in_global_search.insert(dt.get("index"), dt.get("doctype"))
69 continue
70
71 allowed_in_global_search.append(dt.get("doctype"))
72
73 show_message(2, _("Setting up Global Search documents."))
74 global_search_settings = frappe.get_single("Global Search Settings")
75 global_search_settings.allowed_in_global_search = []
76 for dt in allowed_in_global_search:
77 if dt not in doctype_list:
78 continue
79
80 global_search_settings.append("allowed_in_global_search", {
81 "document_type": dt
82 })
83 global_search_settings.save(ignore_permissions=True)
84 show_message(3, "Global Search Documents have been reset.")
85
86 def show_message(progress, msg):
87 frappe.publish_realtime('global_search_settings', {"progress":progress, "total":3, "msg": msg}, user=frappe.session.user)
88
[end of frappe/desk/doctype/global_search_settings/global_search_settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/frappe/desk/doctype/global_search_settings/global_search_settings.py b/frappe/desk/doctype/global_search_settings/global_search_settings.py
--- a/frappe/desk/doctype/global_search_settings/global_search_settings.py
+++ b/frappe/desk/doctype/global_search_settings/global_search_settings.py
@@ -33,7 +33,7 @@
def get_doctypes_for_global_search():
def get_from_db():
- doctypes = frappe.get_list("Global Search DocType", fields=["document_type"], order_by="idx ASC")
+ doctypes = frappe.get_all("Global Search DocType", fields=["document_type"], order_by="idx ASC")
return [d.document_type for d in doctypes] or []
return frappe.cache().hget("global_search", "search_priorities", get_from_db)
|
{"golden_diff": "diff --git a/frappe/desk/doctype/global_search_settings/global_search_settings.py b/frappe/desk/doctype/global_search_settings/global_search_settings.py\n--- a/frappe/desk/doctype/global_search_settings/global_search_settings.py\n+++ b/frappe/desk/doctype/global_search_settings/global_search_settings.py\n@@ -33,7 +33,7 @@\n \n def get_doctypes_for_global_search():\n \tdef get_from_db():\n-\t\tdoctypes = frappe.get_list(\"Global Search DocType\", fields=[\"document_type\"], order_by=\"idx ASC\")\n+\t\tdoctypes = frappe.get_all(\"Global Search DocType\", fields=[\"document_type\"], order_by=\"idx ASC\")\n \t\treturn [d.document_type for d in doctypes] or []\n \n \treturn frappe.cache().hget(\"global_search\", \"search_priorities\", get_from_db)\n", "issue": "Cannot search for keywords via Global Search\nSteps:\r\n\r\n1. Enter any keyword in global search\r\n2. Hit enter\r\n3. Instead of returning relevant records, system throws error message for Relevant Doctype\r\n\r\n\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/app.py\", line 66, in application\r\n response = frappe.api.handle()\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/api.py\", line 54, in handle\r\n return frappe.handler.handle()\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/handler.py\", line 31, in handle\r\n data = execute_cmd(cmd)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/handler.py\", line 67, in execute_cmd\r\n return frappe.call(method, **frappe.form_dict)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py\", line 1213, in call\r\n return fn(*args, **newargs)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/utils/global_search.py\", line 422, in search\r\n allowed_doctypes = get_doctypes_for_global_search()\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/desk/doctype/global_search_settings/global_search_settings.py\", line 39, in get_doctypes_for_global_search\r\n return frappe.cache().hget(\"global_search\", \"search_priorities\", get_from_db)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/utils/redis_wrapper.py\", line 194, in hget\r\n value = generator()\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/desk/doctype/global_search_settings/global_search_settings.py\", line 36, in get_from_db\r\n doctypes = frappe.get_list(\"Global Search DocType\", fields=[\"document_type\"], order_by=\"idx ASC\")\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py\", line 1446, in get_list\r\n return frappe.model.db_query.DatabaseQuery(doctype).execute(*args, **kwargs)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/model/db_query.py\", line 40, in execute\r\n not frappe.has_permission(self.doctype, \"select\", user=user, parent_doctype=parent_doctype) and \\\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py\", line 743, in has_permission\r\n raise_exception=throw, parent_doctype=parent_doctype)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/permissions.py\", line 24, in inner\r\n result = func(*args, **kwargs)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/permissions.py\", line 55, in has_permission\r\n user, raise_exception, parent_doctype)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/permissions.py\", line 585, in has_child_table_permission\r\n ), title=_(\"Parent DocType Required\"))\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py\", line 
438, in throw\r\n msgprint(msg, raise_exception=exc, title=title, indicator='red', is_minimizable=is_minimizable, wide=wide, as_list=as_list)\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py\", line 417, in msgprint\r\n _raise_exception()\r\n File \"/home/frappe/frappe-io-bench/apps/frappe/frappe/__init__.py\", line 371, in _raise_exception\r\n raise raise_exception(msg)\r\nfrappe.exceptions.ValidationError: Please specify a valid parent DocType for <strong>Global Search DocType</strong>\r\n```\r\n\r\nERPNext: v13.x.x-develop () (develop)\r\n\r\nFrappe Framework: v14.x.x-develop () (develop)\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2019, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\n\nclass GlobalSearchSettings(Document):\n\n\tdef validate(self):\n\t\tdts, core_dts, repeated_dts = [], [], []\n\n\t\tfor dt in self.allowed_in_global_search:\n\t\t\tif dt.document_type in dts:\n\t\t\t\trepeated_dts.append(dt.document_type)\n\n\t\t\tif frappe.get_meta(dt.document_type).module == \"Core\":\n\t\t\t\tcore_dts.append(dt.document_type)\n\n\t\t\tdts.append(dt.document_type)\n\n\t\tif core_dts:\n\t\t\tcore_dts = \", \".join(frappe.bold(dt) for dt in core_dts)\n\t\t\tfrappe.throw(_(\"Core Modules {0} cannot be searched in Global Search.\").format(core_dts))\n\n\t\tif repeated_dts:\n\t\t\trepeated_dts = (\", \".join([frappe.bold(dt) for dt in repeated_dts]))\n\t\t\tfrappe.throw(_(\"Document Type {0} has been repeated.\").format(repeated_dts))\n\n\t\t# reset cache\n\t\tfrappe.cache().hdel('global_search', 'search_priorities')\n\ndef get_doctypes_for_global_search():\n\tdef get_from_db():\n\t\tdoctypes = frappe.get_list(\"Global Search DocType\", fields=[\"document_type\"], order_by=\"idx ASC\")\n\t\treturn [d.document_type for d in doctypes] or []\n\n\treturn frappe.cache().hget(\"global_search\", \"search_priorities\", get_from_db)\n\n\[email protected]()\ndef reset_global_search_settings_doctypes():\n\tupdate_global_search_doctypes()\n\ndef update_global_search_doctypes():\n\tglobal_search_doctypes = []\n\tshow_message(1, _(\"Fetching default Global Search documents.\"))\n\n\tinstalled_apps = [app for app in frappe.get_installed_apps() if app]\n\tactive_domains = [domain for domain in frappe.get_active_domains() if domain]\n\tactive_domains.append(\"Default\")\n\n\tfor app in installed_apps:\n\t\tsearch_doctypes = frappe.get_hooks(hook=\"global_search_doctypes\", app_name=app)\n\t\tif not search_doctypes:\n\t\t\tcontinue\n\n\t\tfor domain in active_domains:\n\t\t\tif search_doctypes.get(domain):\n\t\t\t\tglobal_search_doctypes.extend(search_doctypes.get(domain))\n\n\tdoctype_list = {dt.name for dt in frappe.get_all(\"DocType\")}\n\tallowed_in_global_search = []\n\n\tfor dt in global_search_doctypes:\n\t\tif dt.get(\"index\") is not None:\n\t\t\tallowed_in_global_search.insert(dt.get(\"index\"), dt.get(\"doctype\"))\n\t\t\tcontinue\n\n\t\tallowed_in_global_search.append(dt.get(\"doctype\"))\n\n\tshow_message(2, _(\"Setting up Global Search documents.\"))\n\tglobal_search_settings = frappe.get_single(\"Global Search Settings\")\n\tglobal_search_settings.allowed_in_global_search = []\n\tfor dt in allowed_in_global_search:\n\t\tif dt not in doctype_list:\n\t\t\tcontinue\n\n\t\tglobal_search_settings.append(\"allowed_in_global_search\", {\n\t\t\t\"document_type\": 
dt\n\t\t})\n\tglobal_search_settings.save(ignore_permissions=True)\n\tshow_message(3, \"Global Search Documents have been reset.\")\n\ndef show_message(progress, msg):\n\tfrappe.publish_realtime('global_search_settings', {\"progress\":progress, \"total\":3, \"msg\": msg}, user=frappe.session.user)\n", "path": "frappe/desk/doctype/global_search_settings/global_search_settings.py"}]}
| 2,447 | 183 |
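For context on the one-line fix above: `frappe.get_list` applies user permission checks, and for a child table such as `Global Search DocType` those checks require a `parent_doctype` argument, which this internal cache helper never passes, hence the `Parent DocType Required` error in the traceback. `frappe.get_all` runs the same query with permission checks skipped, so the lookup succeeds regardless of the calling context. As a hedged sketch of the distinction:

```python
import frappe

# Sketch only: frappe.get_list enforces permissions and, for a child
# DocType, needs parent_doctype, which is what raised
# "Please specify a valid parent DocType for Global Search DocType".
# frappe.get_all runs the same query with permission checks skipped,
# which is appropriate for this internal, cached lookup.
rows = frappe.get_all(
    "Global Search DocType",
    fields=["document_type"],
    order_by="idx ASC",
)
search_priorities = [r.document_type for r in rows]
```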
gh_patches_debug_51797
|
rasdani/github-patches
|
git_diff
|
HypothesisWorks__hypothesis-1379
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ImportError: cannot import name canonical_filename
Hi, I'm getting an import error on startup:
```
File "/Users/adaszko/repos/fieldaware/fieldaware-venv/lib/python2.7/site-packages/hypothesis/core.py", line 38, in <module>
from coverage.files import canonical_filename
ImportError: cannot import name canonical_filename
```
I've downloaded https://files.pythonhosted.org/packages/4b/e4/5ebf3220993de03f2120a16d9e91cfd053f4c11ada0cf033f2bfe9683fcf/hypothesis-3.65.0-py2-none-any.whl and the `METADATA` file there specifies a dependency on `coverage` without any version number:
```
% grep coverage METADATA
Requires-Dist: coverage
```
My local `coverage` is at `3.7.1`. It works if I upgrade `coverage` to `4.4.1`, so I think there's an issue in hypothesis: it doesn't specify a version bound on `coverage`.
</issue>
<code>
[start of hypothesis-python/setup.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import os
21 import sys
22 import warnings
23
24 import setuptools
25
26
27 def local_file(name):
28 return os.path.relpath(os.path.join(os.path.dirname(__file__), name))
29
30
31 SOURCE = local_file('src')
32 README = local_file('README.rst')
33
34 setuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))
35
36 if setuptools_version < (36, 2):
37 # Warning only - very bad if uploading bdist but fine if installing sdist.
38 warnings.warn(
39 'This version of setuptools is too old to correctly store '
40 'conditional dependencies in binary wheels. For more info, see: '
41 'https://hynek.me/articles/conditional-python-dependencies/'
42 )
43
44
45 # Assignment to placate pyflakes. The actual version is from the exec that
46 # follows.
47 __version__ = None
48
49 with open(local_file('src/hypothesis/version.py')) as o:
50 exec(o.read())
51
52 assert __version__ is not None
53
54
55 extras = {
56 'datetime': ['pytz'],
57 'pytz': ['pytz'],
58 'dateutil': ['python-dateutil'],
59 'fakefactory': ['Faker>=0.7'],
60 'numpy': ['numpy>=1.9.0'],
61 'pytest': ['pytest>=2.8.0'],
62 # We only support Django versions with upstream support - see
63 # https://www.djangoproject.com/download/#supported-versions
64 'django': ['pytz', 'django>=1.11'],
65 }
66
67 extras['faker'] = extras['fakefactory']
68 extras['all'] = sorted(sum(extras.values(), []))
69
70
71 install_requires = ['attrs>=16.0.0', 'coverage']
72 # Using an environment marker on enum34 makes the dependency condition
73 # independent of the build environemnt, which is important for wheels.
74 # https://www.python.org/dev/peps/pep-0345/#environment-markers
75 if sys.version_info[0] < 3 and setuptools_version < (8, 0):
76 # Except really old systems, where we give up and install unconditionally
77 install_requires.append('enum34')
78 else:
79 install_requires.append('enum34; python_version=="2.7"')
80
81
82 setuptools.setup(
83 name='hypothesis',
84 version=__version__,
85 author='David R. MacIver',
86 author_email='[email protected]',
87 packages=setuptools.find_packages(SOURCE),
88 package_dir={'': SOURCE},
89 package_data={'hypothesis': ['py.typed']},
90 url=(
91 'https://github.com/HypothesisWorks/hypothesis/'
92 'tree/master/hypothesis-python'
93 ),
94 license='MPL v2',
95 description='A library for property based testing',
96 zip_safe=False,
97 extras_require=extras,
98 install_requires=install_requires,
99 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
100 classifiers=[
101 'Development Status :: 5 - Production/Stable',
102 'Intended Audience :: Developers',
103 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
104 'Operating System :: Unix',
105 'Operating System :: POSIX',
106 'Operating System :: Microsoft :: Windows',
107 'Programming Language :: Python',
108 'Programming Language :: Python :: 2.7',
109 'Programming Language :: Python :: 3',
110 'Programming Language :: Python :: 3.4',
111 'Programming Language :: Python :: 3.5',
112 'Programming Language :: Python :: 3.6',
113 'Programming Language :: Python :: Implementation :: CPython',
114 'Programming Language :: Python :: Implementation :: PyPy',
115 'Topic :: Software Development :: Testing',
116 'Framework :: Pytest',
117 ],
118 entry_points={
119 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],
120 },
121 long_description=open(README).read(),
122 )
123
[end of hypothesis-python/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py
--- a/hypothesis-python/setup.py
+++ b/hypothesis-python/setup.py
@@ -68,7 +68,7 @@
extras['all'] = sorted(sum(extras.values(), []))
-install_requires = ['attrs>=16.0.0', 'coverage']
+install_requires = ['attrs>=16.0.0', 'coverage>=4.0']
# Using an environment marker on enum34 makes the dependency condition
# independent of the build environemnt, which is important for wheels.
# https://www.python.org/dev/peps/pep-0345/#environment-markers
|
{"golden_diff": "diff --git a/hypothesis-python/setup.py b/hypothesis-python/setup.py\n--- a/hypothesis-python/setup.py\n+++ b/hypothesis-python/setup.py\n@@ -68,7 +68,7 @@\n extras['all'] = sorted(sum(extras.values(), []))\n \n \n-install_requires = ['attrs>=16.0.0', 'coverage']\n+install_requires = ['attrs>=16.0.0', 'coverage>=4.0']\n # Using an environment marker on enum34 makes the dependency condition\n # independent of the build environemnt, which is important for wheels.\n # https://www.python.org/dev/peps/pep-0345/#environment-markers\n", "issue": "ImportError: cannot import name canonical_filename\nHi, I'm getting an import error on startup:\r\n\r\n```\r\n File \"/Users/adaszko/repos/fieldaware/fieldaware-venv/lib/python2.7/site-packages/hypothesis/core.py\", line 38, in <module>\r\n from coverage.files import canonical_filename\r\nImportError: cannot import name canonical_filename\r\n```\r\n\r\nI've downloaded https://files.pythonhosted.org/packages/4b/e4/5ebf3220993de03f2120a16d9e91cfd053f4c11ada0cf033f2bfe9683fcf/hypothesis-3.65.0-py2-none-any.whl and the `METADATA` file there specifies dependency on `coverage` without any version number:\r\n\r\n```\r\n% grep coverage METADATA\r\nRequires-Dist: coverage\r\n```\r\n\r\nMy local `coverage` is at `3.7.1`. It works if I upgrade `coverage` to `4.4.1`, so I think there's an issue in hypothesis in that it doesn't specify the version bound on `coverage`.\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport os\nimport sys\nimport warnings\n\nimport setuptools\n\n\ndef local_file(name):\n return os.path.relpath(os.path.join(os.path.dirname(__file__), name))\n\n\nSOURCE = local_file('src')\nREADME = local_file('README.rst')\n\nsetuptools_version = tuple(map(int, setuptools.__version__.split('.')[:2]))\n\nif setuptools_version < (36, 2):\n # Warning only - very bad if uploading bdist but fine if installing sdist.\n warnings.warn(\n 'This version of setuptools is too old to correctly store '\n 'conditional dependencies in binary wheels. For more info, see: '\n 'https://hynek.me/articles/conditional-python-dependencies/'\n )\n\n\n# Assignment to placate pyflakes. 
The actual version is from the exec that\n# follows.\n__version__ = None\n\nwith open(local_file('src/hypothesis/version.py')) as o:\n exec(o.read())\n\nassert __version__ is not None\n\n\nextras = {\n 'datetime': ['pytz'],\n 'pytz': ['pytz'],\n 'dateutil': ['python-dateutil'],\n 'fakefactory': ['Faker>=0.7'],\n 'numpy': ['numpy>=1.9.0'],\n 'pytest': ['pytest>=2.8.0'],\n # We only support Django versions with upstream support - see\n # https://www.djangoproject.com/download/#supported-versions\n 'django': ['pytz', 'django>=1.11'],\n}\n\nextras['faker'] = extras['fakefactory']\nextras['all'] = sorted(sum(extras.values(), []))\n\n\ninstall_requires = ['attrs>=16.0.0', 'coverage']\n# Using an environment marker on enum34 makes the dependency condition\n# independent of the build environemnt, which is important for wheels.\n# https://www.python.org/dev/peps/pep-0345/#environment-markers\nif sys.version_info[0] < 3 and setuptools_version < (8, 0):\n # Except really old systems, where we give up and install unconditionally\n install_requires.append('enum34')\nelse:\n install_requires.append('enum34; python_version==\"2.7\"')\n\n\nsetuptools.setup(\n name='hypothesis',\n version=__version__,\n author='David R. MacIver',\n author_email='[email protected]',\n packages=setuptools.find_packages(SOURCE),\n package_dir={'': SOURCE},\n package_data={'hypothesis': ['py.typed']},\n url=(\n 'https://github.com/HypothesisWorks/hypothesis/'\n 'tree/master/hypothesis-python'\n ),\n license='MPL v2',\n description='A library for property based testing',\n zip_safe=False,\n extras_require=extras,\n install_requires=install_requires,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',\n 'Operating System :: Unix',\n 'Operating System :: POSIX',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Testing',\n 'Framework :: Pytest',\n ],\n entry_points={\n 'pytest11': ['hypothesispytest = hypothesis.extra.pytestplugin'],\n },\n long_description=open(README).read(),\n)\n", "path": "hypothesis-python/setup.py"}]}
| 2,098 | 151 |
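The hypothesis record above is purely a packaging fix: the reporter's `coverage` 3.7.1 does not provide `coverage.files.canonical_filename`, while 4.4.1 does, so declaring a lower bound makes pip resolve a compatible release instead of importing against a stale install. The resulting requirement list, shown here with an explanatory comment added for illustration:

```python
# install_requires after the fix (from hypothesis-python/setup.py); the
# inline comment is added here for illustration and is not in the source.
install_requires = [
    'attrs>=16.0.0',
    'coverage>=4.0',  # 3.7.1 lacks coverage.files.canonical_filename
]
```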
gh_patches_debug_3905
|
rasdani/github-patches
|
git_diff
|
microsoft__hi-ml-65
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add all items required for making the repository public
Ensure that all files have copyright notices, and that editors are set up to automatically insert them (PyCharm does it correctly on InnerEye)
You must run the following source code analysis tools:
- CredScan
- CodeQL (Semmle)
- Component Governance Detection

The easiest way to run these tools is to add them to your build pipeline in a Microsoft-managed Azure DevOps account.
For CodeQL, please ensure the following (detailed instructions for CodeQL can be found here):
- Select the source code language in the CodeQL task.
- If your application was developed using multiple languages, add multiple CodeQL tasks.
- Define the build variable LGTM.UploadSnapshot=true.
- Configure the build to allow scripts to access the OAuth token.
- If the code is hosted on GitHub, create an Azure DevOps PAT token with code read scope for the dev.azure.com/Microsoft (or ‘all’) organization and set the local task variable System_AccessToken with it. (Note: this only works for YAML-based pipelines.)

Review security issues by navigating to semmleportal.azurewebsites.net/lookup. It may take up to one day to process results.
</issue>
<code>
[start of src/health/azure/datasets.py]
1 import logging
2 from pathlib import Path
3 from typing import List, Optional, Union
4
5 from azureml.core import Dataset, Datastore, Workspace
6 from azureml.data import FileDataset, OutputFileDatasetConfig
7 from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
8
9
10 def get_datastore(workspace: Workspace, datastore_name: str) -> Datastore:
11 """
12 Retrieves a datastore of a given name from an AzureML workspace. The datastore_name argument can be omitted if
13 the workspace only contains a single datastore. Raises a ValueError if there is no datastore of the given name.
14 :param workspace: The AzureML workspace to read from.
15 :param datastore_name: The name of the datastore to retrieve.
16 :return: An AzureML datastore.
17 """
18 datastores = workspace.datastores
19 existing_stores = list(datastores.keys())
20 if not datastore_name:
21 if len(existing_stores) == 1:
22 return datastores[existing_stores[0]]
23 raise ValueError("No datastore name provided. This is only possible if the workspace has a single datastore. "
24 f"However, the workspace has {len(existing_stores)} datastores: {existing_stores}")
25 if datastore_name in datastores:
26 return datastores[datastore_name]
27 raise ValueError(f"Datastore {datastore_name} was not found in the workspace. Existing datastores: "
28 f"{existing_stores}")
29
30
31 def get_or_create_dataset(workspace: Workspace, datastore_name: str, dataset_name: str) -> FileDataset:
32 """
33 Looks in the AzureML datastore for a dataset of the given name. If there is no such dataset, a dataset is
34 created and registered, assuming that the files are in a folder that has the same name as the dataset.
35 For example, if dataset_name is 'foo', then the 'foo' dataset should be pointing to the folder
36 <container_root>/datasets/dataset_name/
37 """
38 if not dataset_name:
39 raise ValueError("No dataset name provided.")
40 try:
41 logging.info(f"Trying to retrieve AzureML Dataset '{dataset_name}'")
42 azureml_dataset = Dataset.get_by_name(workspace, name=dataset_name)
43 logging.info("Dataset found.")
44 except Exception:
45 logging.info(f"Retrieving datastore '{datastore_name}' from AzureML workspace")
46 datastore = get_datastore(workspace, datastore_name)
47 logging.info(f"Creating a new dataset from data in folder '{dataset_name}' in the datastore")
48 # Ensure that there is a / at the end of the file path, otherwise folder that share a prefix could create
49 # trouble (for example, folders foo and foo_bar exist, and I'm trying to create a dataset from "foo")
50 azureml_dataset = Dataset.File.from_files(path=(datastore, dataset_name + "/"))
51 logging.info("Registering the dataset for future use.")
52 azureml_dataset.register(workspace, name=dataset_name)
53 return azureml_dataset
54
55
56 def _input_dataset_key(index: int) -> str:
57 return f"INPUT_{index}"
58
59
60 def _output_dataset_key(index: int) -> str:
61 return f"OUTPUT_{index}"
62
63
64 class DatasetConfig:
65 """
66 Contains information to use AzureML datasets as inputs or outputs.
67 """
68
69 def __init__(self,
70 name: str,
71 datastore: str = "",
72 version: Optional[int] = None,
73 use_mounting: Optional[bool] = None,
74 target_folder: str = "",
75 local_folder: Optional[Path] = None):
76 """
77 Creates a new configuration for using an AzureML dataset.
78 :param name: The name of the dataset, as it was registered in the AzureML workspace. For output datasets,
79 this will be the name given to the newly created dataset.
80 :param datastore: The name of the AzureML datastore that holds the dataset. This can be empty if the AzureML
81 workspace has only a single datastore, or if the default datastore should be used.
82 :param version: The version of the dataset that should be used. This is only used for input datasets.
83 If the version is not specified, the latest version will be used.
84 :param use_mounting: If True, the dataset will be "mounted", that is, individual files will be read
85 or written on-demand over the network. If False, the dataset will be fully downloaded before the job starts,
86 respectively fully uploaded at job end for output datasets.
87 Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are script
88 outputs.
89 :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a
90 random folder on /tmp will be chosen.
91 :param local_folder: The folder on the local machine at which the dataset is available. This
92 is used only for runs outside of AzureML.
93 """
94 # This class would be a good candidate for a dataclass, but having an explicit constructor makes
95 # documentation tools in the editor work nicer.
96 name = name.strip()
97 if not name:
98 raise ValueError("The name of the dataset must be a non-empty string.")
99 self.name = name
100 self.datastore = datastore
101 self.version = version
102 self.use_mounting = use_mounting
103 self.target_folder = target_folder
104 self.local_folder = local_folder
105
106 def to_input_dataset(self,
107 workspace: Workspace,
108 dataset_index: int) -> DatasetConsumptionConfig:
109 """
110 Creates a configuration for using an AzureML dataset inside of an AzureML run. This will make the AzureML
111 dataset with given name available as a named input, using INPUT_0 as the key for dataset index 0.
112 :param workspace: The AzureML workspace to read from.
113 :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked INPUT_{index}
114 """
115 status = f"Dataset {self.name} (index {dataset_index}) will be "
116 azureml_dataset = get_or_create_dataset(workspace=workspace,
117 dataset_name=self.name,
118 datastore_name=self.datastore)
119 named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index))
120 path_on_compute = self.target_folder or None
121 use_mounting = False if self.use_mounting is None else self.use_mounting
122 if use_mounting:
123 status += "mounted at "
124 result = named_input.as_mount(path_on_compute)
125 else:
126 status += "downloaded to "
127 result = named_input.as_download(path_on_compute)
128 if path_on_compute:
129 status += f"{path_on_compute}."
130 else:
131 status += "a randomly chosen folder."
132 logging.info(status)
133 return result
134
135 def to_output_dataset(self,
136 workspace: Workspace,
137 dataset_index: int) -> OutputFileDatasetConfig:
138 """
139 Creates a configuration to write a script output to an AzureML dataset. The name and datastore of this new
140 dataset will be taken from the present object.
141 :param workspace: The AzureML workspace to read from.
142 :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked OUTPUT_{index}
143 :return:
144 """
145 status = f"Output dataset {self.name} (index {dataset_index}) will be "
146 datastore = get_datastore(workspace, self.datastore)
147 dataset = OutputFileDatasetConfig(name=_output_dataset_key(index=dataset_index),
148 destination=(datastore, self.name + "/"))
149 # TODO: Can we get tags into here too?
150 dataset = dataset.register_on_complete(name=self.name)
151 if self.target_folder:
152 raise ValueError("Output datasets can't have a target_folder set.")
153 use_mounting = True if self.use_mounting is None else self.use_mounting
154 if use_mounting:
155 status += "uploaded while the job runs."
156 result = dataset.as_mount()
157 else:
158 status += "uploaded when the job completes."
159 result = dataset.as_upload()
160 logging.info(status)
161 return result
162
163
164 StrOrDatasetConfig = Union[str, DatasetConfig]
165
166
167 def _replace_string_datasets(datasets: List[StrOrDatasetConfig],
168 default_datastore_name: str) -> List[DatasetConfig]:
169 """
170 Processes a list of input or output datasets. All entries in the list that are strings are turned into
171 DatasetConfig objects, using the string as the dataset name, and pointing to the default datastore.
172 :param datasets: A list of datasets, each given either as a string or a DatasetConfig object.
173 :param default_datastore_name: The datastore to use for all datasets that are only specified via their name.
174 :return: A list of DatasetConfig objects, in the same order as the input list.
175 """
176 return [DatasetConfig(name=d, datastore=default_datastore_name) if isinstance(d, str) else d
177 for d in datasets]
178
[end of src/health/azure/datasets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/health/azure/datasets.py b/src/health/azure/datasets.py
--- a/src/health/azure/datasets.py
+++ b/src/health/azure/datasets.py
@@ -1,3 +1,7 @@
+# ------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
+# ------------------------------------------------------------------------------------------
import logging
from pathlib import Path
from typing import List, Optional, Union
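The hunk above adds the copyright and license banner to `src/health/azure/datasets.py` only; the issue asks for notices in all files, with editors configured to insert them automatically. A lightweight guard such as the following sketch (illustrative, not part of the repository) can list files that still lack the banner:

```python
# Illustrative helper, not part of hi-ml: report Python files under src/
# that do not start with the copyright banner added in the hunk above.
from pathlib import Path

BANNER = "Copyright (c) Microsoft Corporation. All rights reserved."

def files_missing_banner(root="src"):
    for path in sorted(Path(root).rglob("*.py")):
        head = path.read_text(encoding="utf-8").splitlines()[:5]
        if not any(BANNER in line for line in head):
            yield path

if __name__ == "__main__":
    missing = list(files_missing_banner())
    for p in missing:
        print(f"Missing copyright banner: {p}")
    raise SystemExit(1 if missing else 0)
```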
|
{"golden_diff": "diff --git a/src/health/azure/datasets.py b/src/health/azure/datasets.py\n--- a/src/health/azure/datasets.py\n+++ b/src/health/azure/datasets.py\n@@ -1,3 +1,7 @@\n+# ------------------------------------------------------------------------------------------\n+# Copyright (c) Microsoft Corporation. All rights reserved.\n+# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n+# ------------------------------------------------------------------------------------------\n import logging\n from pathlib import Path\n from typing import List, Optional, Union\n", "issue": "Add all items required for making the repository public\nEnsure that all files have copyright notices, and that editors are set up to automatically insert them (PyCharm does it correctly on InnerEye)\r\n\r\nYou must run the following source code analysis tools:\r\nCredScan\r\nCodeQL (Semmle)\r\nComponent Governance Detection\r\nThe easiest way to run these tools is to add thems in your build pipeline in a Microsoft-managed Azure DevOps account.\r\n\r\nFor CodeQL, please ensure the following (detailed instructions for CodeQL can be found here):\r\nSelect the source code language in the CodeQL task.\r\nIf your application was developed using multiple languages, add multiple CodeQL tasks.\r\nDefine the build variable LGTM.UploadSnapshot=true.\r\nConfigure the build to allow scripts to access OAuth token.\r\nIf the code is hosted in Github, create Azure DevOps PAT token with code read scope for dev.azure.com/Microsoft (or \u2018all\u2019) organization and set the local task variable System_AccessToken with it. (Note: This only works for YAML-based pipelines.)\r\nReview security issues by navigating to semmleportal.azurewebsites.net/lookup. It may take up to one day to process results.\n", "before_files": [{"content": "import logging\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nfrom azureml.core import Dataset, Datastore, Workspace\nfrom azureml.data import FileDataset, OutputFileDatasetConfig\nfrom azureml.data.dataset_consumption_config import DatasetConsumptionConfig\n\n\ndef get_datastore(workspace: Workspace, datastore_name: str) -> Datastore:\n \"\"\"\n Retrieves a datastore of a given name from an AzureML workspace. The datastore_name argument can be omitted if\n the workspace only contains a single datastore. Raises a ValueError if there is no datastore of the given name.\n :param workspace: The AzureML workspace to read from.\n :param datastore_name: The name of the datastore to retrieve.\n :return: An AzureML datastore.\n \"\"\"\n datastores = workspace.datastores\n existing_stores = list(datastores.keys())\n if not datastore_name:\n if len(existing_stores) == 1:\n return datastores[existing_stores[0]]\n raise ValueError(\"No datastore name provided. This is only possible if the workspace has a single datastore. \"\n f\"However, the workspace has {len(existing_stores)} datastores: {existing_stores}\")\n if datastore_name in datastores:\n return datastores[datastore_name]\n raise ValueError(f\"Datastore {datastore_name} was not found in the workspace. Existing datastores: \"\n f\"{existing_stores}\")\n\n\ndef get_or_create_dataset(workspace: Workspace, datastore_name: str, dataset_name: str) -> FileDataset:\n \"\"\"\n Looks in the AzureML datastore for a dataset of the given name. 
If there is no such dataset, a dataset is\n created and registered, assuming that the files are in a folder that has the same name as the dataset.\n For example, if dataset_name is 'foo', then the 'foo' dataset should be pointing to the folder\n <container_root>/datasets/dataset_name/\n \"\"\"\n if not dataset_name:\n raise ValueError(\"No dataset name provided.\")\n try:\n logging.info(f\"Trying to retrieve AzureML Dataset '{dataset_name}'\")\n azureml_dataset = Dataset.get_by_name(workspace, name=dataset_name)\n logging.info(\"Dataset found.\")\n except Exception:\n logging.info(f\"Retrieving datastore '{datastore_name}' from AzureML workspace\")\n datastore = get_datastore(workspace, datastore_name)\n logging.info(f\"Creating a new dataset from data in folder '{dataset_name}' in the datastore\")\n # Ensure that there is a / at the end of the file path, otherwise folder that share a prefix could create\n # trouble (for example, folders foo and foo_bar exist, and I'm trying to create a dataset from \"foo\")\n azureml_dataset = Dataset.File.from_files(path=(datastore, dataset_name + \"/\"))\n logging.info(\"Registering the dataset for future use.\")\n azureml_dataset.register(workspace, name=dataset_name)\n return azureml_dataset\n\n\ndef _input_dataset_key(index: int) -> str:\n return f\"INPUT_{index}\"\n\n\ndef _output_dataset_key(index: int) -> str:\n return f\"OUTPUT_{index}\"\n\n\nclass DatasetConfig:\n \"\"\"\n Contains information to use AzureML datasets as inputs or outputs.\n \"\"\"\n\n def __init__(self,\n name: str,\n datastore: str = \"\",\n version: Optional[int] = None,\n use_mounting: Optional[bool] = None,\n target_folder: str = \"\",\n local_folder: Optional[Path] = None):\n \"\"\"\n Creates a new configuration for using an AzureML dataset.\n :param name: The name of the dataset, as it was registered in the AzureML workspace. For output datasets,\n this will be the name given to the newly created dataset.\n :param datastore: The name of the AzureML datastore that holds the dataset. This can be empty if the AzureML\n workspace has only a single datastore, or if the default datastore should be used.\n :param version: The version of the dataset that should be used. This is only used for input datasets.\n If the version is not specified, the latest version will be used.\n :param use_mounting: If True, the dataset will be \"mounted\", that is, individual files will be read\n or written on-demand over the network. If False, the dataset will be fully downloaded before the job starts,\n respectively fully uploaded at job end for output datasets.\n Defaults: False (downloading) for datasets that are script inputs, True (mounting) for datasets that are script\n outputs.\n :param target_folder: The folder into which the dataset should be downloaded or mounted. If left empty, a\n random folder on /tmp will be chosen.\n :param local_folder: The folder on the local machine at which the dataset is available. 
This\n is used only for runs outside of AzureML.\n \"\"\"\n # This class would be a good candidate for a dataclass, but having an explicit constructor makes\n # documentation tools in the editor work nicer.\n name = name.strip()\n if not name:\n raise ValueError(\"The name of the dataset must be a non-empty string.\")\n self.name = name\n self.datastore = datastore\n self.version = version\n self.use_mounting = use_mounting\n self.target_folder = target_folder\n self.local_folder = local_folder\n\n def to_input_dataset(self,\n workspace: Workspace,\n dataset_index: int) -> DatasetConsumptionConfig:\n \"\"\"\n Creates a configuration for using an AzureML dataset inside of an AzureML run. This will make the AzureML\n dataset with given name available as a named input, using INPUT_0 as the key for dataset index 0.\n :param workspace: The AzureML workspace to read from.\n :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked INPUT_{index}\n \"\"\"\n status = f\"Dataset {self.name} (index {dataset_index}) will be \"\n azureml_dataset = get_or_create_dataset(workspace=workspace,\n dataset_name=self.name,\n datastore_name=self.datastore)\n named_input = azureml_dataset.as_named_input(_input_dataset_key(index=dataset_index))\n path_on_compute = self.target_folder or None\n use_mounting = False if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"mounted at \"\n result = named_input.as_mount(path_on_compute)\n else:\n status += \"downloaded to \"\n result = named_input.as_download(path_on_compute)\n if path_on_compute:\n status += f\"{path_on_compute}.\"\n else:\n status += \"a randomly chosen folder.\"\n logging.info(status)\n return result\n\n def to_output_dataset(self,\n workspace: Workspace,\n dataset_index: int) -> OutputFileDatasetConfig:\n \"\"\"\n Creates a configuration to write a script output to an AzureML dataset. The name and datastore of this new\n dataset will be taken from the present object.\n :param workspace: The AzureML workspace to read from.\n :param dataset_index: Suffix for using datasets as named inputs, the dataset will be marked OUTPUT_{index}\n :return:\n \"\"\"\n status = f\"Output dataset {self.name} (index {dataset_index}) will be \"\n datastore = get_datastore(workspace, self.datastore)\n dataset = OutputFileDatasetConfig(name=_output_dataset_key(index=dataset_index),\n destination=(datastore, self.name + \"/\"))\n # TODO: Can we get tags into here too?\n dataset = dataset.register_on_complete(name=self.name)\n if self.target_folder:\n raise ValueError(\"Output datasets can't have a target_folder set.\")\n use_mounting = True if self.use_mounting is None else self.use_mounting\n if use_mounting:\n status += \"uploaded while the job runs.\"\n result = dataset.as_mount()\n else:\n status += \"uploaded when the job completes.\"\n result = dataset.as_upload()\n logging.info(status)\n return result\n\n\nStrOrDatasetConfig = Union[str, DatasetConfig]\n\n\ndef _replace_string_datasets(datasets: List[StrOrDatasetConfig],\n default_datastore_name: str) -> List[DatasetConfig]:\n \"\"\"\n Processes a list of input or output datasets. 
All entries in the list that are strings are turned into\n DatasetConfig objects, using the string as the dataset name, and pointing to the default datastore.\n :param datasets: A list of datasets, each given either as a string or a DatasetConfig object.\n :param default_datastore_name: The datastore to use for all datasets that are only specified via their name.\n :return: A list of DatasetConfig objects, in the same order as the input list.\n \"\"\"\n return [DatasetConfig(name=d, datastore=default_datastore_name) if isinstance(d, str) else d\n for d in datasets]\n", "path": "src/health/azure/datasets.py"}]}
| 3,096 | 112 |
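
A note on the patch shown above: it only prepends the standard Microsoft copyright banner to `src/health/azure/datasets.py`, which covers the "all files have copyright notices" item from the issue; the CredScan/CodeQL items are build-pipeline configuration rather than code changes. A repository-wide header check can be scripted along these lines — the sketch below is a hypothetical helper under an assumed `src/` layout, not part of the original patch:

```python
# Hypothetical helper (not part of the patch): prepend the banner to any .py
# file under an assumed "src" tree that does not already carry it.
from pathlib import Path

BANNER = (
    "# ------------------------------------------------------------------------------------------\n"
    "# Copyright (c) Microsoft Corporation. All rights reserved.\n"
    "# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n"
    "# ------------------------------------------------------------------------------------------\n"
)

def ensure_banner(root: str = "src") -> None:
    for py_file in Path(root).rglob("*.py"):
        text = py_file.read_text(encoding="utf-8")
        if "Copyright (c) Microsoft Corporation" not in text:
            py_file.write_text(BANNER + text, encoding="utf-8")

if __name__ == "__main__":
    ensure_banner()
```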
gh_patches_debug_36606
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-3442
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TW production parser down
## Description
This is an automatic error report generated for Taiwan (TW).
Issues:
- No recent data found for `production` parser
## Suggestions
- Try running the parser locally using the command `poetry run test_parser TW production`
- <a href="https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!t,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:TW,type:phrase),type:phrase,value:TW),query:(match:(extra.key:(query:TW,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))">Explore the runtime logs</a>
You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
</issue>
<code>
[start of parsers/TW.py]
1 #!/usr/bin/env python3
2 import arrow
3 import requests
4 import pandas
5 import dateutil
6
7
8 def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'
13 s = session or requests.Session()
14 response = s.get(url)
15 data = response.json()
16
17 dumpDate = data['']
18 prodData = data['aaData']
19
20 tz = 'Asia/Taipei'
21 dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
22
23 objData = pandas.DataFrame(prodData)
24
25 objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
26 'additional']
27
28 objData['fueltype'] = objData.fueltype.str.split('(').str[1]
29 objData['fueltype'] = objData.fueltype.str.split(')').str[0]
30 objData.drop('additional', axis=1, inplace=True)
31 objData.drop('percentage', axis=1, inplace=True)
32
33 objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')
34 objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')
35 production = pandas.DataFrame(objData.groupby('fueltype').sum())
36 production.columns = ['capacity', 'output']
37
38 coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity
39 gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity
40 oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity
41
42 coal_production = production.loc['Coal'].output + production.loc['IPP-Coal'].output
43 gas_production = production.loc['LNG'].output + production.loc['IPP-LNG'].output
44 oil_production = production.loc['Oil'].output + production.loc['Diesel'].output
45
46 # For storage, note that load will be negative, and generation positive.
47 # We require the opposite
48
49 returndata = {
50 'zoneKey': zone_key,
51 'datetime': dumpDate.datetime,
52 'production': {
53 'coal': coal_production,
54 'gas': gas_production,
55 'oil': oil_production,
56 'hydro': production.loc['Hydro'].output,
57 'nuclear': production.loc['Nuclear'].output,
58 'solar': production.loc['Solar'].output,
59 'wind': production.loc['Wind'].output,
60 'unknown': production.loc['Co-Gen'].output
61 },
62 'capacity': {
63 'coal': coal_capacity,
64 'gas': gas_capacity,
65 'oil': oil_capacity,
66 'hydro': production.loc['Hydro'].capacity,
67 'hydro storage':production.loc['Pumping Gen'].capacity,
68 'nuclear': production.loc['Nuclear'].capacity,
69 'solar': production.loc['Solar'].capacity,
70 'wind': production.loc['Wind'].capacity,
71 'unknown': production.loc['Co-Gen'].capacity
72 },
73 'storage': {
74 'hydro': -1 * production.loc['Pumping Load'].output - production.loc['Pumping Gen'].output
75 },
76 'source': 'taipower.com.tw'
77 }
78
79 return returndata
80
81
82 if __name__ == '__main__':
83 print(fetch_production())
84
[end of parsers/TW.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parsers/TW.py b/parsers/TW.py
--- a/parsers/TW.py
+++ b/parsers/TW.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
import arrow
-import requests
-import pandas
import dateutil
+import pandas as pd
+import requests
def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:
@@ -20,21 +20,27 @@
tz = 'Asia/Taipei'
dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
- objData = pandas.DataFrame(prodData)
+ objData = pd.DataFrame(prodData)
- objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
- 'additional']
+ columns = ['fueltype', 'additional_1', 'name', 'capacity', 'output', 'percentage', 'additional_2']
+ assert len(objData.iloc[0]) == len(columns), "number of input columns changed"
+ objData.columns = columns
objData['fueltype'] = objData.fueltype.str.split('(').str[1]
objData['fueltype'] = objData.fueltype.str.split(')').str[0]
- objData.drop('additional', axis=1, inplace=True)
- objData.drop('percentage', axis=1, inplace=True)
+ objData.loc[:,['capacity', 'output']] = objData[['capacity', 'output']].apply(pd.to_numeric, errors='coerce')
+ assert not objData.capacity.isna().all(), "capacity data is entirely NaN - input column order may have changed"
+ assert not objData.output.isna().all(), "output data is entirely NaN - input column order may have changed"
- objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')
- objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')
- production = pandas.DataFrame(objData.groupby('fueltype').sum())
+ objData.drop(columns=['additional_1', 'name', 'additional_2', 'percentage'], axis=1, inplace=True)
+ # summing because items in returned object are for each power plant and operational units
+ production = pd.DataFrame(objData.groupby('fueltype').sum())
production.columns = ['capacity', 'output']
+ # check output values coincide with total capacity by fuel type
+ check_values = production.output <= production.capacity
+ assert check_values.loc[~check_values.index.isin(["Co-Gen"])].all(), "output > capacity" # HACK: Co-Gen capacity is underestimated
+
coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity
gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity
oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity
|
{"golden_diff": "diff --git a/parsers/TW.py b/parsers/TW.py\n--- a/parsers/TW.py\n+++ b/parsers/TW.py\n@@ -1,8 +1,8 @@\n #!/usr/bin/env python3\n import arrow\n-import requests\n-import pandas\n import dateutil\n+import pandas as pd\n+import requests\n \n \n def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:\n@@ -20,21 +20,27 @@\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n \n- objData = pandas.DataFrame(prodData)\n+ objData = pd.DataFrame(prodData)\n \n- objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n- 'additional']\n+ columns = ['fueltype', 'additional_1', 'name', 'capacity', 'output', 'percentage', 'additional_2']\n+ assert len(objData.iloc[0]) == len(columns), \"number of input columns changed\"\n+ objData.columns = columns\n \n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n- objData.drop('additional', axis=1, inplace=True)\n- objData.drop('percentage', axis=1, inplace=True)\n+ objData.loc[:,['capacity', 'output']] = objData[['capacity', 'output']].apply(pd.to_numeric, errors='coerce')\n+ assert not objData.capacity.isna().all(), \"capacity data is entirely NaN - input column order may have changed\"\n+ assert not objData.output.isna().all(), \"output data is entirely NaN - input column order may have changed\"\n \n- objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')\n- objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')\n- production = pandas.DataFrame(objData.groupby('fueltype').sum())\n+ objData.drop(columns=['additional_1', 'name', 'additional_2', 'percentage'], axis=1, inplace=True)\n+ # summing because items in returned object are for each power plant and operational units\n+ production = pd.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n \n+ # check output values coincide with total capacity by fuel type\n+ check_values = production.output <= production.capacity\n+ assert check_values.loc[~check_values.index.isin([\"Co-Gen\"])].all(), \"output > capacity\" # HACK: Co-Gen capacity is underestimated\n+\n coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity\n gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity\n oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity\n", "issue": "TW production parser down\n## Description\n\nThis is an automatic error report generated for Taiwan (TW).\n\nIssues:\n- No recent data found for `production` parser\n\n## Suggestions\n- Try running the parser locally using the command `poetry run test_parser TW production`\n- <a 
href=\"https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!t,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:TW,type:phrase),type:phrase,value:TW),query:(match:(extra.key:(query:TW,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))\">Explore the runtime logs</a>\n\nYou can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'\n s = session or requests.Session()\n response = s.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')\n objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity\n gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity\n oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity\n\n coal_production = production.loc['Coal'].output + production.loc['IPP-Coal'].output\n gas_production = production.loc['LNG'].output + production.loc['IPP-LNG'].output\n oil_production = production.loc['Oil'].output + production.loc['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'zoneKey': zone_key,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.loc['Hydro'].output,\n 'nuclear': production.loc['Nuclear'].output,\n 'solar': production.loc['Solar'].output,\n 'wind': production.loc['Wind'].output,\n 'unknown': production.loc['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.loc['Hydro'].capacity,\n 'hydro storage':production.loc['Pumping Gen'].capacity,\n 'nuclear': 
production.loc['Nuclear'].capacity,\n 'solar': production.loc['Solar'].capacity,\n 'wind': production.loc['Wind'].capacity,\n 'unknown': production.loc['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.loc['Pumping Load'].output - production.loc['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/TW.py"}]}
| 1,930 | 665 |
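
The TW fix above is essentially a schema-drift repair: the upstream `genary.txt` feed gained an extra column, so the patch widens the hard-coded column list to seven names, coerces `capacity`/`output` to numeric, and adds assertions so the parser fails loudly the next time the feed changes shape. The snippet below is a self-contained sketch of that validate-then-aggregate pattern; the two sample rows are invented and far shorter than the real feed:

```python
# Invented sample rows standing in for the taipower "aaData" payload.
import pandas as pd

sample = [
    ["Thermal(Coal)", "extra", "Plant A", "800", "512", "64%", "note"],
    ["PV(Solar)", "extra", "Plant B", "100", "abc", "0%", "note"],
]
columns = ["fueltype", "additional_1", "name", "capacity", "output", "percentage", "additional_2"]

df = pd.DataFrame(sample)
assert len(df.columns) == len(columns), "number of input columns changed"
df.columns = columns

df["fueltype"] = df.fueltype.str.split("(").str[1].str.split(")").str[0]
df[["capacity", "output"]] = df[["capacity", "output"]].apply(pd.to_numeric, errors="coerce")
assert not df.capacity.isna().all(), "capacity data is entirely NaN"
assert not df.output.isna().all(), "output data is entirely NaN"

print(df.groupby("fueltype")[["capacity", "output"]].sum())
```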
gh_patches_debug_9786
|
rasdani/github-patches
|
git_diff
|
mkdocs__mkdocs-430
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to add Table of Contents to docs
When I build a markdown file containing the following information with `mkdocs build --clean`, mkdocs throws: `AttributeError: 'Markdown' object has no attribute 'toc'`. Adding `[TOC]` like this was working before, but for some reason it's throwing an exception now. I'm running version `0.11.1`.
Markdown file:
``` markdown
For api overview and usages, check out [this page](overview.md).
[TOC]
Auth
=================================================
## Check if user is registered
`POST` `/auth/is_registered`
**paramaters**
- `email`
## Login
`POST` `/auth`
**Parameters**
- `email`
- `password`
**Response**
The response will be something like this:
```
Stack Trace:
``` bash
Traceback (most recent call last):
"/usr/local/Cellar/python/2.7.8_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/watchdog/observers/api.py", line 199, in run
self.dispatch_events(self.event_queue, self.timeout)
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/watchdog/observers/api.py", line 368, in dispatch_events
handler.dispatch(event)
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/watchdog/events.py", line 322, in dispatch
self.on_any_event(event)
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/serve.py", line 28, in on_any_event
build(config, live_server=True)
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/build.py", line 223, in build
build_pages(config)
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/build.py", line 170, in build_pages
extensions=config['markdown_extensions'], strict=config['strict']
File "/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/build.py", line 36, in convert_markdown
toc_html = md.toc
AttributeError: 'Markdown' object has no attribute 'toc'
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import print_function
5 from setuptools import setup
6 import re
7 import os
8 import sys
9
10
11 name = 'mkdocs'
12 package = 'mkdocs'
13 description = 'Project documentation with Markdown.'
14 url = 'http://www.mkdocs.org'
15 author = 'Tom Christie'
16 author_email = '[email protected]'
17 license = 'BSD'
18 install_requires = [
19 'Jinja2>=2.7.1',
20 'Markdown>=2.3.1,<2.5',
21 'PyYAML>=3.10',
22 'watchdog>=0.7.0',
23 'ghp-import>=0.4.1'
24 ]
25
26 long_description = (
27 "MkDocs is a fast, simple and downright gorgeous static site generator "
28 "that's geared towards building project documentation. Documentation "
29 "source files are written in Markdown, and configured with a single YAML "
30 "configuration file."
31 )
32
33
34 def get_version(package):
35 """
36 Return package version as listed in `__version__` in `init.py`.
37 """
38 init_py = open(os.path.join(package, '__init__.py')).read()
39 return re.search("^__version__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1)
40
41
42 def get_packages(package):
43 """
44 Return root package and all sub-packages.
45 """
46 return [dirpath
47 for dirpath, dirnames, filenames in os.walk(package)
48 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
49
50
51 def get_package_data(package):
52 """
53 Return all files under the root package, that are not in a
54 package themselves.
55 """
56 walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
57 for dirpath, dirnames, filenames in os.walk(package)
58 if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
59
60 filepaths = []
61 for base, filenames in walk:
62 filepaths.extend([os.path.join(base, filename)
63 for filename in filenames])
64 return {package: filepaths}
65
66
67 if sys.argv[-1] == 'publish':
68 os.system("python setup.py sdist upload")
69 args = {'version': get_version(package)}
70 print("You probably want to also tag the version now:")
71 print(" git tag -a %(version)s -m 'version %(version)s'" % args)
72 print(" git push --tags")
73 sys.exit()
74
75
76 setup(
77 name=name,
78 version=get_version(package),
79 url=url,
80 license=license,
81 description=description,
82 long_description=long_description,
83 author=author,
84 author_email=author_email,
85 packages=get_packages(package),
86 package_data=get_package_data(package),
87 install_requires=install_requires,
88 entry_points={
89 'console_scripts': [
90 'mkdocs = mkdocs.main:run_main',
91 ],
92 },
93 classifiers=[
94 'Development Status :: 5 - Production/Stable',
95 'Environment :: Console',
96 'Environment :: Web Environment',
97 'Intended Audience :: Developers',
98 'License :: OSI Approved :: BSD License',
99 'Operating System :: OS Independent',
100 'Programming Language :: Python',
101 'Programming Language :: Python :: 2',
102 'Programming Language :: Python :: 2.6',
103 'Programming Language :: Python :: 2.7',
104 'Programming Language :: Python :: 3',
105 'Programming Language :: Python :: 3.3',
106 'Programming Language :: Python :: 3.4',
107 'Topic :: Documentation',
108 'Topic :: Text Processing',
109 ]
110 )
111
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,8 @@
import os
import sys
+PY26 = sys.version_info[:2] == (2, 6)
+
name = 'mkdocs'
package = 'mkdocs'
@@ -16,11 +18,11 @@
author_email = '[email protected]'
license = 'BSD'
install_requires = [
+ 'ghp-import>=0.4.1',
'Jinja2>=2.7.1',
- 'Markdown>=2.3.1,<2.5',
+ 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1',
'PyYAML>=3.10',
'watchdog>=0.7.0',
- 'ghp-import>=0.4.1'
]
long_description = (
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,6 +7,8 @@\n import os\n import sys\n \n+PY26 = sys.version_info[:2] == (2, 6)\n+\n \n name = 'mkdocs'\n package = 'mkdocs'\n@@ -16,11 +18,11 @@\n author_email = '[email protected]'\n license = 'BSD'\n install_requires = [\n+ 'ghp-import>=0.4.1',\n 'Jinja2>=2.7.1',\n- 'Markdown>=2.3.1,<2.5',\n+ 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'watchdog>=0.7.0',\n- 'ghp-import>=0.4.1'\n ]\n \n long_description = (\n", "issue": "Unable to add Table of Contents to docs\nWhen I build a markdown file containing the following information with, `mkdocs build --clean`, mkdocs throws: `AttributeError: 'Markdown' object has no attribute 'toc'`. Adding `[TOC]` like this was working before, but for some reason its throwing an exception now. I'm running version `0.11.1`.\n\nMarkdown file:\n\n``` markdown\nFor api overview and usages, check out [this page](overview.md).\n\n[TOC]\n\nAuth\n=================================================\n\n## Check if user is registered\n\n`POST` `/auth/is_registered`\n\n**paramaters**\n\n- `email`\n\n## Login\n\n`POST` `/auth`\n\n**Parameters**\n\n- `email`\n- `password`\n\n**Response**\n\nThe response will be something like this:\n```\n\nStack Trace:\n\n``` bash\nTraceback (most recent call last):\n\"/usr/local/Cellar/python/2.7.8_2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py\", line 810, in __bootstrap_inner\n self.run()\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/watchdog/observers/api.py\", line 199, in run\n self.dispatch_events(self.event_queue, self.timeout)\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/watchdog/observers/api.py\", line 368, in dispatch_events\n handler.dispatch(event)\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/watchdog/events.py\", line 322, in dispatch\n self.on_any_event(event)\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/serve.py\", line 28, in on_any_event\n build(config, live_server=True)\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/build.py\", line 223, in build\n build_pages(config)\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/build.py\", line 170, in build_pages\n extensions=config['markdown_extensions'], strict=config['strict']\n File \"/Users/administrator/dev/meet-web/venv/lib/python2.7/site-packages/mkdocs/build.py\", line 36, in convert_markdown\n toc_html = md.toc\nAttributeError: 'Markdown' object has no attribute 'toc'\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nname = 'mkdocs'\npackage = 'mkdocs'\ndescription = 'Project documentation with Markdown.'\nurl = 'http://www.mkdocs.org'\nauthor = 'Tom Christie'\nauthor_email = '[email protected]'\nlicense = 'BSD'\ninstall_requires = [\n 'Jinja2>=2.7.1',\n 'Markdown>=2.3.1,<2.5',\n 'PyYAML>=3.10',\n 'watchdog>=0.7.0',\n 'ghp-import>=0.4.1'\n]\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. 
Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"^__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py, re.MULTILINE).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\n\nif sys.argv[-1] == 'publish':\n os.system(\"python setup.py sdist upload\")\n args = {'version': get_version(package)}\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a %(version)s -m 'version %(version)s'\" % args)\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=name,\n version=get_version(package),\n url=url,\n license=license,\n description=description,\n long_description=long_description,\n author=author,\n author_email=author_email,\n packages=get_packages(package),\n package_data=get_package_data(package),\n install_requires=install_requires,\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.main:run_main',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ]\n)\n", "path": "setup.py"}]}
| 2,095 | 212 |
gh_patches_debug_7033
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-1942
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Organisations list gives timeout
## Test plan
The organisations list should not give a timeout. Since this only happened on Live, it is hard to debug.
## Sentry
See http://sentry.support.akvo-ops.org/rsr/live/group/742/
</issue>
<code>
[start of akvo/rsr/views/organisation.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please
7 see < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 from django.db.models import Prefetch
11 from django.db.models import Count
12 from django.shortcuts import get_object_or_404, render
13
14 from ..filters import location_choices, OrganisationFilter, remove_empty_querydict_items
15 from ..models import Employment, Organisation, Project, ProjectUpdate
16 from ...utils import pagination, filter_query_string
17 from .utils import apply_keywords, org_projects, show_filter_class
18
19 ###############################################################################
20 # Organisation directory
21 ###############################################################################
22
23
24 def _public_projects():
25 """Return all public projects."""
26 return Project.objects.public().published().select_related('partners').order_by('-id')
27
28
29 def _page_organisations(page):
30 """Dig out the list or organisations to use."""
31 projects = org_projects(page.organisation) if page.partner_projects else _public_projects()
32 keyword_projects = apply_keywords(page, projects)
33 return keyword_projects.all_partners()
34
35
36 def _organisation_directory_coll(request):
37 """Dig out and pass correct organisations to the view."""
38 page = request.rsr_page
39 if not page:
40 return Organisation.objects.all()
41 return _page_organisations(page)
42
43
44 def directory(request):
45 """The Organisation list view."""
46 qs = remove_empty_querydict_items(request.GET)
47
48 # Set show_filters to "in" if any filter is selected
49 filter_class = show_filter_class(qs, ['location', ])
50
51 # Yank Organisation collection
52 all_organisations = _organisation_directory_coll(request)
53
54 # Easter egg feature
55 creator_organisations = request.GET.get('creator', False)
56 if creator_organisations:
57 all_organisations = all_organisations.filter(can_create_projects=True)
58
59 f = OrganisationFilter(qs, queryset=all_organisations)
60
61 # Change filter options further when on an Akvo Page
62 if request.rsr_page:
63 # Filter location filter list to only populated locations
64 f.filters['location'].extra['choices'] = location_choices(all_organisations)
65
66 # Build page
67 page = request.GET.get('page')
68 page, paginator, page_range = pagination(page, f.qs.distinct(), 10)
69
70 # Get organisations to be displayed on the map
71 if request.rsr_page and request.rsr_page.all_maps:
72 map_orgs = all_organisations
73 else:
74 map_orgs = page.object_list
75 map_orgs = map_orgs
76
77 # Get related objects of page at once
78 page.object_list = page.object_list.select_related(
79 'primary_location__country',
80 ).annotate(
81 num_employees=Count('employees', distinct=True),
82 num_projects=Count('projects', distinct=True),
83 num_updates=Count('projects__project_updates', distinct=True),
84 )
85
86 return render(request, 'organisation_directory.html', {
87 'orgs_count': f.qs.distinct().count(),
88 'filter': f,
89 'page': page,
90 'paginator': paginator,
91 'page_range': page_range,
92 'show_filters': filter_class,
93 'q': filter_query_string(qs),
94 'map_organisations': map_orgs,
95 })
96
97
98 ###############################################################################
99 # Organisation main
100 ###############################################################################
101
102
103 def main(request, organisation_id):
104 """The organisation main view."""
105 return render(request, 'organisation_main.html', {
106 'organisation': get_object_or_404(Organisation, pk=organisation_id)})
107
[end of akvo/rsr/views/organisation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rsr/views/organisation.py b/akvo/rsr/views/organisation.py
--- a/akvo/rsr/views/organisation.py
+++ b/akvo/rsr/views/organisation.py
@@ -77,10 +77,6 @@
# Get related objects of page at once
page.object_list = page.object_list.select_related(
'primary_location__country',
- ).annotate(
- num_employees=Count('employees', distinct=True),
- num_projects=Count('projects', distinct=True),
- num_updates=Count('projects__project_updates', distinct=True),
)
return render(request, 'organisation_directory.html', {
|
{"golden_diff": "diff --git a/akvo/rsr/views/organisation.py b/akvo/rsr/views/organisation.py\n--- a/akvo/rsr/views/organisation.py\n+++ b/akvo/rsr/views/organisation.py\n@@ -77,10 +77,6 @@\n # Get related objects of page at once\n page.object_list = page.object_list.select_related(\n 'primary_location__country',\n- ).annotate(\n- num_employees=Count('employees', distinct=True),\n- num_projects=Count('projects', distinct=True),\n- num_updates=Count('projects__project_updates', distinct=True),\n )\n \n return render(request, 'organisation_directory.html', {\n", "issue": "Organisations list gives timeout\n## Test plan\n\nThe organisations list should not give a timeout. Since this only happened on Live, it is hard to debug.\n## Sentry\n\nSee http://sentry.support.akvo-ops.org/rsr/live/group/742/\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.db.models import Prefetch\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, render\n\nfrom ..filters import location_choices, OrganisationFilter, remove_empty_querydict_items\nfrom ..models import Employment, Organisation, Project, ProjectUpdate\nfrom ...utils import pagination, filter_query_string\nfrom .utils import apply_keywords, org_projects, show_filter_class\n\n###############################################################################\n# Organisation directory\n###############################################################################\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners').order_by('-id')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _organisation_directory_coll(request):\n \"\"\"Dig out and pass correct organisations to the view.\"\"\"\n page = request.rsr_page\n if not page:\n return Organisation.objects.all()\n return _page_organisations(page)\n\n\ndef directory(request):\n \"\"\"The Organisation list view.\"\"\"\n qs = remove_empty_querydict_items(request.GET)\n\n # Set show_filters to \"in\" if any filter is selected\n filter_class = show_filter_class(qs, ['location', ])\n\n # Yank Organisation collection\n all_organisations = _organisation_directory_coll(request)\n\n # Easter egg feature\n creator_organisations = request.GET.get('creator', False)\n if creator_organisations:\n all_organisations = all_organisations.filter(can_create_projects=True)\n\n f = OrganisationFilter(qs, queryset=all_organisations)\n\n # Change filter options further when on an Akvo Page\n if request.rsr_page:\n # Filter location filter list to only populated locations\n f.filters['location'].extra['choices'] = location_choices(all_organisations)\n\n # Build page\n page = request.GET.get('page')\n page, paginator, page_range = pagination(page, f.qs.distinct(), 10)\n\n # Get organisations to be displayed on the map\n if request.rsr_page and request.rsr_page.all_maps:\n map_orgs = all_organisations\n else:\n map_orgs = page.object_list\n map_orgs = map_orgs\n\n # Get related objects of page at 
once\n page.object_list = page.object_list.select_related(\n 'primary_location__country',\n ).annotate(\n num_employees=Count('employees', distinct=True),\n num_projects=Count('projects', distinct=True),\n num_updates=Count('projects__project_updates', distinct=True),\n )\n\n return render(request, 'organisation_directory.html', {\n 'orgs_count': f.qs.distinct().count(),\n 'filter': f,\n 'page': page,\n 'paginator': paginator,\n 'page_range': page_range,\n 'show_filters': filter_class,\n 'q': filter_query_string(qs),\n 'map_organisations': map_orgs,\n })\n\n\n###############################################################################\n# Organisation main\n###############################################################################\n\n\ndef main(request, organisation_id):\n \"\"\"The organisation main view.\"\"\"\n return render(request, 'organisation_main.html', {\n 'organisation': get_object_or_404(Organisation, pk=organisation_id)})\n", "path": "akvo/rsr/views/organisation.py"}]}
| 1,598 | 148 |
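
The akvo-rsr patch above removes the three `Count(..., distinct=True)` annotations from the directory view; annotating several reverse relations in a single queryset makes the ORM join all of them at once, and the intermediate result grows multiplicatively with the number of employees, projects and updates per organisation — plausibly what pushed the live database past the request timeout. The toy sqlite3 script below (not the project's real schema) shows the row blow-up that such aggregates have to wade through:

```python
# Toy illustration: joining two child tables multiplies the intermediate rows
# before any COUNT(DISTINCT ...) runs.
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript(
    """
    CREATE TABLE org (id INTEGER PRIMARY KEY);
    CREATE TABLE employee (id INTEGER PRIMARY KEY, org_id INTEGER);
    CREATE TABLE project (id INTEGER PRIMARY KEY, org_id INTEGER);
    INSERT INTO org VALUES (1);
    INSERT INTO employee (org_id) SELECT 1 FROM (SELECT 1 UNION SELECT 2 UNION SELECT 3);
    INSERT INTO project (org_id) SELECT 1 FROM (SELECT 1 UNION SELECT 2 UNION SELECT 3 UNION SELECT 4);
    """
)
rows = con.execute(
    "SELECT COUNT(*) FROM org "
    "JOIN employee ON employee.org_id = org.id "
    "JOIN project ON project.org_id = org.id"
).fetchone()[0]
print(rows)  # 3 employees x 4 projects = 12 intermediate rows for one organisation
```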
gh_patches_debug_16749
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-1962
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Always print warning message about min_package
**Describe the bug**
/opt/monai/monai/utils/module.py:100: UserWarning: <module 'pkg_resources' from '/opt/conda/lib/python3.8/site-packages/pkg_resources/__init__.py'> has no attribute __version__ in min_version check.
warnings.warn(f"{the_module} has no attribute __version__ in min_version check.")
</issue>
<code>
[start of monai/utils/module.py]
1 # Copyright 2020 - 2021 MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import inspect
13 import sys
14 import warnings
15 from importlib import import_module
16 from pkgutil import walk_packages
17 from re import match
18 from typing import Any, Callable, List, Sequence, Tuple, Union
19
20 import torch
21
22 from .misc import ensure_tuple
23
24 OPTIONAL_IMPORT_MSG_FMT = "{}"
25
26 __all__ = [
27 "InvalidPyTorchVersionError",
28 "OptionalImportError",
29 "exact_version",
30 "export",
31 "min_version",
32 "optional_import",
33 "load_submodules",
34 "get_full_type_name",
35 "has_option",
36 "get_package_version",
37 "get_torch_version_tuple",
38 "PT_BEFORE_1_7",
39 ]
40
41
42 def export(modname):
43 """
44 Make the decorated object a member of the named module. This will also add the object under its aliases if it has
45 a `__aliases__` member, thus this decorator should be before the `alias` decorator to pick up those names. Alias
46 names which conflict with package names or existing members will be ignored.
47 """
48
49 def _inner(obj):
50 mod = import_module(modname)
51 if not hasattr(mod, obj.__name__):
52 setattr(mod, obj.__name__, obj)
53
54 # add the aliases for `obj` to the target module
55 for alias in getattr(obj, "__aliases__", ()):
56 if not hasattr(mod, alias):
57 setattr(mod, alias, obj)
58
59 return obj
60
61 return _inner
62
63
64 def load_submodules(basemod, load_all: bool = True, exclude_pattern: str = "(.*[tT]est.*)|(_.*)"):
65 """
66 Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if
67 `load_all` is True, excluding anything whose name matches `exclude_pattern`.
68 """
69 submodules = []
70 err_mod: List[str] = []
71 for importer, name, is_pkg in walk_packages(
72 basemod.__path__, prefix=basemod.__name__ + ".", onerror=err_mod.append
73 ):
74 if (is_pkg or load_all) and name not in sys.modules and match(exclude_pattern, name) is None:
75 try:
76 mod = import_module(name)
77 importer.find_module(name).load_module(name) # type: ignore
78 submodules.append(mod)
79 except OptionalImportError:
80 pass # could not import the optional deps., they are ignored
81
82 return submodules, err_mod
83
84
85 def get_full_type_name(typeobj):
86 module = typeobj.__module__
87 if module is None or module == str.__class__.__module__:
88 return typeobj.__name__ # Avoid reporting __builtin__
89 return module + "." + typeobj.__name__
90
91
92 def min_version(the_module, min_version_str: str = "") -> bool:
93 """
94 Convert version strings into tuples of int and compare them.
95
96 Returns True if the module's version is greater or equal to the 'min_version'.
97 When min_version_str is not provided, it always returns True.
98 """
99 if not min_version_str:
100 return True # always valid version
101 if not hasattr(the_module, "__version__"):
102 warnings.warn(f"{the_module} has no attribute __version__ in min_version check.")
103 return True # min_version is the default, shouldn't be noisy
104 mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2])
105 required = tuple(int(x) for x in min_version_str.split(".")[:2])
106 return mod_version >= required
107
108
109 def exact_version(the_module, version_str: str = "") -> bool:
110 """
111 Returns True if the module's __version__ matches version_str
112 """
113 if not hasattr(the_module, "__version__"):
114 warnings.warn(f"{the_module} has no attribute __version__ in exact_version check.")
115 return False
116 return bool(the_module.__version__ == version_str)
117
118
119 class InvalidPyTorchVersionError(Exception):
120 """
121 Raised when called function or method requires a more recent
122 PyTorch version than that installed.
123 """
124
125 def __init__(self, required_version, name):
126 message = f"{name} requires PyTorch version {required_version} or later"
127 super().__init__(message)
128
129
130 class OptionalImportError(ImportError):
131 """
132 Could not import APIs from an optional dependency.
133 """
134
135
136 def optional_import(
137 module: str,
138 version: str = "",
139 version_checker: Callable[..., bool] = min_version,
140 name: str = "",
141 descriptor: str = OPTIONAL_IMPORT_MSG_FMT,
142 version_args=None,
143 allow_namespace_pkg: bool = False,
144 ) -> Tuple[Any, bool]:
145 """
146 Imports an optional module specified by `module` string.
147 Any importing related exceptions will be stored, and exceptions raise lazily
148 when attempting to use the failed-to-import module.
149
150 Args:
151 module: name of the module to be imported.
152 version: version string used by the version_checker.
153 version_checker: a callable to check the module version, Defaults to monai.utils.min_version.
154 name: a non-module attribute (such as method/class) to import from the imported module.
155 descriptor: a format string for the final error message when using a not imported module.
156 version_args: additional parameters to the version checker.
157 allow_namespace_pkg: whether importing a namespace package is allowed. Defaults to False.
158
159 Returns:
160 The imported module and a boolean flag indicating whether the import is successful.
161
162 Examples::
163
164 >>> torch, flag = optional_import('torch', '1.1')
165 >>> print(torch, flag)
166 <module 'torch' from 'python/lib/python3.6/site-packages/torch/__init__.py'> True
167
168 >>> the_module, flag = optional_import('unknown_module')
169 >>> print(flag)
170 False
171 >>> the_module.method # trying to access a module which is not imported
172 OptionalImportError: import unknown_module (No module named 'unknown_module').
173
174 >>> torch, flag = optional_import('torch', '42', exact_version)
175 >>> torch.nn # trying to access a module for which there isn't a proper version imported
176 OptionalImportError: import torch (requires version '42' by 'exact_version').
177
178 >>> conv, flag = optional_import('torch.nn.functional', '1.0', name='conv1d')
179 >>> print(conv)
180 <built-in method conv1d of type object at 0x11a49eac0>
181
182 >>> conv, flag = optional_import('torch.nn.functional', '42', name='conv1d')
183 >>> conv() # trying to use a function from the not successfully imported module (due to unmatched version)
184 OptionalImportError: from torch.nn.functional import conv1d (requires version '42' by 'min_version').
185 """
186
187 tb = None
188 exception_str = ""
189 if name:
190 actual_cmd = f"from {module} import {name}"
191 else:
192 actual_cmd = f"import {module}"
193 try:
194 pkg = __import__(module) # top level module
195 the_module = import_module(module)
196 if not allow_namespace_pkg:
197 is_namespace = getattr(the_module, "__file__", None) is None and hasattr(the_module, "__path__")
198 if is_namespace:
199 raise AssertionError
200 if name: # user specified to load class/function/... from the module
201 the_module = getattr(the_module, name)
202 except Exception as import_exception: # any exceptions during import
203 tb = import_exception.__traceback__
204 exception_str = f"{import_exception}"
205 else: # found the module
206 if version_args and version_checker(pkg, f"{version}", version_args):
207 return the_module, True
208 if not version_args and version_checker(pkg, f"{version}"):
209 return the_module, True
210
211 # preparing lazy error message
212 msg = descriptor.format(actual_cmd)
213 if version and tb is None: # a pure version issue
214 msg += f" (requires '{module} {version}' by '{version_checker.__name__}')"
215 if exception_str:
216 msg += f" ({exception_str})"
217
218 class _LazyRaise:
219 def __init__(self, *_args, **_kwargs):
220 _default_msg = (
221 f"{msg}."
222 + "\n\nFor details about installing the optional dependencies, please visit:"
223 + "\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies"
224 )
225 if tb is None:
226 self._exception = OptionalImportError(_default_msg)
227 else:
228 self._exception = OptionalImportError(_default_msg).with_traceback(tb)
229
230 def __getattr__(self, name):
231 """
232 Raises:
233 OptionalImportError: When you call this method.
234 """
235 raise self._exception
236
237 def __call__(self, *_args, **_kwargs):
238 """
239 Raises:
240 OptionalImportError: When you call this method.
241 """
242 raise self._exception
243
244 return _LazyRaise(), False
245
246
247 def has_option(obj, keywords: Union[str, Sequence[str]]) -> bool:
248 """
249 Return a boolean indicating whether the given callable `obj` has the `keywords` in its signature.
250 """
251 if not callable(obj):
252 return False
253 sig = inspect.signature(obj)
254 return all(key in sig.parameters for key in ensure_tuple(keywords))
255
256
257 def get_package_version(dep_name, default="NOT INSTALLED or UNKNOWN VERSION."):
258 """
259 Try to load package and get version. If not found, return `default`.
260 """
261 dep, has_dep = optional_import(dep_name)
262 if has_dep and hasattr(dep, "__version__"):
263 return dep.__version__
264 return default
265
266
267 def get_torch_version_tuple():
268 """
269 Returns:
270 tuple of ints represents the pytorch major/minor version.
271 """
272 return tuple((int(x) for x in torch.__version__.split(".")[:2]))
273
274
275 PT_BEFORE_1_7 = True
276 ver, has_ver = optional_import("pkg_resources", name="parse_version")
277 try:
278 if has_ver:
279 PT_BEFORE_1_7 = ver(torch.__version__) < ver("1.7")
280 else:
281 PT_BEFORE_1_7 = get_torch_version_tuple() < (1, 7)
282 except (AttributeError, TypeError):
283 pass
284
[end of monai/utils/module.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/monai/utils/module.py b/monai/utils/module.py
--- a/monai/utils/module.py
+++ b/monai/utils/module.py
@@ -96,11 +96,9 @@
Returns True if the module's version is greater or equal to the 'min_version'.
When min_version_str is not provided, it always returns True.
"""
- if not min_version_str:
+ if not min_version_str or not hasattr(the_module, "__version__"):
return True # always valid version
- if not hasattr(the_module, "__version__"):
- warnings.warn(f"{the_module} has no attribute __version__ in min_version check.")
- return True # min_version is the default, shouldn't be noisy
+
mod_version = tuple(int(x) for x in the_module.__version__.split(".")[:2])
required = tuple(int(x) for x in min_version_str.split(".")[:2])
return mod_version >= required
|
{"golden_diff": "diff --git a/monai/utils/module.py b/monai/utils/module.py\n--- a/monai/utils/module.py\n+++ b/monai/utils/module.py\n@@ -96,11 +96,9 @@\n Returns True if the module's version is greater or equal to the 'min_version'.\n When min_version_str is not provided, it always returns True.\n \"\"\"\n- if not min_version_str:\n+ if not min_version_str or not hasattr(the_module, \"__version__\"):\n return True # always valid version\n- if not hasattr(the_module, \"__version__\"):\n- warnings.warn(f\"{the_module} has no attribute __version__ in min_version check.\")\n- return True # min_version is the default, shouldn't be noisy\n+\n mod_version = tuple(int(x) for x in the_module.__version__.split(\".\")[:2])\n required = tuple(int(x) for x in min_version_str.split(\".\")[:2])\n return mod_version >= required\n", "issue": "Always print warning message about min_package\n**Describe the bug**\r\n/opt/monai/monai/utils/module.py:100: UserWarning: <module 'pkg_resources' from '/opt/conda/lib/python3.8/site-packages/pkg_resources/__init__.py'> has no attribute __version__ in min_version check.\r\n warnings.warn(f\"{the_module} has no attribute __version__ in min_version check.\")\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport sys\nimport warnings\nfrom importlib import import_module\nfrom pkgutil import walk_packages\nfrom re import match\nfrom typing import Any, Callable, List, Sequence, Tuple, Union\n\nimport torch\n\nfrom .misc import ensure_tuple\n\nOPTIONAL_IMPORT_MSG_FMT = \"{}\"\n\n__all__ = [\n \"InvalidPyTorchVersionError\",\n \"OptionalImportError\",\n \"exact_version\",\n \"export\",\n \"min_version\",\n \"optional_import\",\n \"load_submodules\",\n \"get_full_type_name\",\n \"has_option\",\n \"get_package_version\",\n \"get_torch_version_tuple\",\n \"PT_BEFORE_1_7\",\n]\n\n\ndef export(modname):\n \"\"\"\n Make the decorated object a member of the named module. This will also add the object under its aliases if it has\n a `__aliases__` member, thus this decorator should be before the `alias` decorator to pick up those names. 
Alias\n names which conflict with package names or existing members will be ignored.\n \"\"\"\n\n def _inner(obj):\n mod = import_module(modname)\n if not hasattr(mod, obj.__name__):\n setattr(mod, obj.__name__, obj)\n\n # add the aliases for `obj` to the target module\n for alias in getattr(obj, \"__aliases__\", ()):\n if not hasattr(mod, alias):\n setattr(mod, alias, obj)\n\n return obj\n\n return _inner\n\n\ndef load_submodules(basemod, load_all: bool = True, exclude_pattern: str = \"(.*[tT]est.*)|(_.*)\"):\n \"\"\"\n Traverse the source of the module structure starting with module `basemod`, loading all packages plus all files if\n `load_all` is True, excluding anything whose name matches `exclude_pattern`.\n \"\"\"\n submodules = []\n err_mod: List[str] = []\n for importer, name, is_pkg in walk_packages(\n basemod.__path__, prefix=basemod.__name__ + \".\", onerror=err_mod.append\n ):\n if (is_pkg or load_all) and name not in sys.modules and match(exclude_pattern, name) is None:\n try:\n mod = import_module(name)\n importer.find_module(name).load_module(name) # type: ignore\n submodules.append(mod)\n except OptionalImportError:\n pass # could not import the optional deps., they are ignored\n\n return submodules, err_mod\n\n\ndef get_full_type_name(typeobj):\n module = typeobj.__module__\n if module is None or module == str.__class__.__module__:\n return typeobj.__name__ # Avoid reporting __builtin__\n return module + \".\" + typeobj.__name__\n\n\ndef min_version(the_module, min_version_str: str = \"\") -> bool:\n \"\"\"\n Convert version strings into tuples of int and compare them.\n\n Returns True if the module's version is greater or equal to the 'min_version'.\n When min_version_str is not provided, it always returns True.\n \"\"\"\n if not min_version_str:\n return True # always valid version\n if not hasattr(the_module, \"__version__\"):\n warnings.warn(f\"{the_module} has no attribute __version__ in min_version check.\")\n return True # min_version is the default, shouldn't be noisy\n mod_version = tuple(int(x) for x in the_module.__version__.split(\".\")[:2])\n required = tuple(int(x) for x in min_version_str.split(\".\")[:2])\n return mod_version >= required\n\n\ndef exact_version(the_module, version_str: str = \"\") -> bool:\n \"\"\"\n Returns True if the module's __version__ matches version_str\n \"\"\"\n if not hasattr(the_module, \"__version__\"):\n warnings.warn(f\"{the_module} has no attribute __version__ in exact_version check.\")\n return False\n return bool(the_module.__version__ == version_str)\n\n\nclass InvalidPyTorchVersionError(Exception):\n \"\"\"\n Raised when called function or method requires a more recent\n PyTorch version than that installed.\n \"\"\"\n\n def __init__(self, required_version, name):\n message = f\"{name} requires PyTorch version {required_version} or later\"\n super().__init__(message)\n\n\nclass OptionalImportError(ImportError):\n \"\"\"\n Could not import APIs from an optional dependency.\n \"\"\"\n\n\ndef optional_import(\n module: str,\n version: str = \"\",\n version_checker: Callable[..., bool] = min_version,\n name: str = \"\",\n descriptor: str = OPTIONAL_IMPORT_MSG_FMT,\n version_args=None,\n allow_namespace_pkg: bool = False,\n) -> Tuple[Any, bool]:\n \"\"\"\n Imports an optional module specified by `module` string.\n Any importing related exceptions will be stored, and exceptions raise lazily\n when attempting to use the failed-to-import module.\n\n Args:\n module: name of the module to be imported.\n version: version string used 
by the version_checker.\n version_checker: a callable to check the module version, Defaults to monai.utils.min_version.\n name: a non-module attribute (such as method/class) to import from the imported module.\n descriptor: a format string for the final error message when using a not imported module.\n version_args: additional parameters to the version checker.\n allow_namespace_pkg: whether importing a namespace package is allowed. Defaults to False.\n\n Returns:\n The imported module and a boolean flag indicating whether the import is successful.\n\n Examples::\n\n >>> torch, flag = optional_import('torch', '1.1')\n >>> print(torch, flag)\n <module 'torch' from 'python/lib/python3.6/site-packages/torch/__init__.py'> True\n\n >>> the_module, flag = optional_import('unknown_module')\n >>> print(flag)\n False\n >>> the_module.method # trying to access a module which is not imported\n OptionalImportError: import unknown_module (No module named 'unknown_module').\n\n >>> torch, flag = optional_import('torch', '42', exact_version)\n >>> torch.nn # trying to access a module for which there isn't a proper version imported\n OptionalImportError: import torch (requires version '42' by 'exact_version').\n\n >>> conv, flag = optional_import('torch.nn.functional', '1.0', name='conv1d')\n >>> print(conv)\n <built-in method conv1d of type object at 0x11a49eac0>\n\n >>> conv, flag = optional_import('torch.nn.functional', '42', name='conv1d')\n >>> conv() # trying to use a function from the not successfully imported module (due to unmatched version)\n OptionalImportError: from torch.nn.functional import conv1d (requires version '42' by 'min_version').\n \"\"\"\n\n tb = None\n exception_str = \"\"\n if name:\n actual_cmd = f\"from {module} import {name}\"\n else:\n actual_cmd = f\"import {module}\"\n try:\n pkg = __import__(module) # top level module\n the_module = import_module(module)\n if not allow_namespace_pkg:\n is_namespace = getattr(the_module, \"__file__\", None) is None and hasattr(the_module, \"__path__\")\n if is_namespace:\n raise AssertionError\n if name: # user specified to load class/function/... 
from the module\n the_module = getattr(the_module, name)\n except Exception as import_exception: # any exceptions during import\n tb = import_exception.__traceback__\n exception_str = f\"{import_exception}\"\n else: # found the module\n if version_args and version_checker(pkg, f\"{version}\", version_args):\n return the_module, True\n if not version_args and version_checker(pkg, f\"{version}\"):\n return the_module, True\n\n # preparing lazy error message\n msg = descriptor.format(actual_cmd)\n if version and tb is None: # a pure version issue\n msg += f\" (requires '{module} {version}' by '{version_checker.__name__}')\"\n if exception_str:\n msg += f\" ({exception_str})\"\n\n class _LazyRaise:\n def __init__(self, *_args, **_kwargs):\n _default_msg = (\n f\"{msg}.\"\n + \"\\n\\nFor details about installing the optional dependencies, please visit:\"\n + \"\\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\"\n )\n if tb is None:\n self._exception = OptionalImportError(_default_msg)\n else:\n self._exception = OptionalImportError(_default_msg).with_traceback(tb)\n\n def __getattr__(self, name):\n \"\"\"\n Raises:\n OptionalImportError: When you call this method.\n \"\"\"\n raise self._exception\n\n def __call__(self, *_args, **_kwargs):\n \"\"\"\n Raises:\n OptionalImportError: When you call this method.\n \"\"\"\n raise self._exception\n\n return _LazyRaise(), False\n\n\ndef has_option(obj, keywords: Union[str, Sequence[str]]) -> bool:\n \"\"\"\n Return a boolean indicating whether the given callable `obj` has the `keywords` in its signature.\n \"\"\"\n if not callable(obj):\n return False\n sig = inspect.signature(obj)\n return all(key in sig.parameters for key in ensure_tuple(keywords))\n\n\ndef get_package_version(dep_name, default=\"NOT INSTALLED or UNKNOWN VERSION.\"):\n \"\"\"\n Try to load package and get version. If not found, return `default`.\n \"\"\"\n dep, has_dep = optional_import(dep_name)\n if has_dep and hasattr(dep, \"__version__\"):\n return dep.__version__\n return default\n\n\ndef get_torch_version_tuple():\n \"\"\"\n Returns:\n tuple of ints represents the pytorch major/minor version.\n \"\"\"\n return tuple((int(x) for x in torch.__version__.split(\".\")[:2]))\n\n\nPT_BEFORE_1_7 = True\nver, has_ver = optional_import(\"pkg_resources\", name=\"parse_version\")\ntry:\n if has_ver:\n PT_BEFORE_1_7 = ver(torch.__version__) < ver(\"1.7\")\n else:\n PT_BEFORE_1_7 = get_torch_version_tuple() < (1, 7)\nexcept (AttributeError, TypeError):\n pass\n", "path": "monai/utils/module.py"}]}
| 3,750 | 213 |
gh_patches_debug_23098
|
rasdani/github-patches
|
git_diff
|
easybuilders__easybuild-framework-4292
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Regression with versionsuffix types
Commit https://github.com/easybuilders/easybuild-framework/commit/0e5ba5c858
introduced a check for string-type for `versionsuffix`, while `None` used to be an accepted value for `versionsuffix`. Our hooks replace many version suffixes with `None`.
</issue>
<code>
[start of easybuild/tools/module_naming_scheme/utilities.py]
1 ##
2 # Copyright 2009-2023 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 Utility functions for implementating module naming schemes.
27
28 Authors:
29
30 * Stijn De Weirdt (Ghent University)
31 * Dries Verdegem (Ghent University)
32 * Kenneth Hoste (Ghent University)
33 * Pieter De Baets (Ghent University)
34 * Jens Timmerman (Ghent University)
35 * Fotis Georgatos (Uni.Lu, NTUA)
36 """
37 import os
38 import string
39
40 from easybuild.base import fancylogger
41 from easybuild.tools.build_log import EasyBuildError
42 from easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme
43 from easybuild.tools.py2vs3 import string_type
44 from easybuild.tools.toolchain.toolchain import SYSTEM_TOOLCHAIN_NAME, is_system_toolchain
45 from easybuild.tools.utilities import get_subclasses, import_available_modules
46
47 _log = fancylogger.getLogger('module_naming_scheme.utilities', fname=False)
48
49
50 def det_full_ec_version(ec):
51 """
52 Determine exact install version, based on supplied easyconfig.
53 e.g. 1.2.3-goalf-1.1.0-no-OFED or 1.2.3 (for system toolchains)
54 """
55
56 ecver = None
57 toolchain = ec.get('toolchain', {'name': SYSTEM_TOOLCHAIN_NAME})
58
59 # determine main install version based on toolchain
60 if is_system_toolchain(toolchain['name']):
61 ecver = ec['version']
62 else:
63 ecver = "%s-%s-%s" % (ec['version'], toolchain['name'], toolchain['version'])
64
65 # prepend/append version prefix/suffix
66 versionprefix = ec.get('versionprefix', '')
67 if not isinstance(versionprefix, string_type):
68 raise EasyBuildError("versionprefix value should be a string, found '%s': %s (full spec: %s)",
69 type(versionprefix).__name__, versionprefix, ec)
70
71 versionsuffix = ec.get('versionsuffix', '')
72 if not isinstance(versionsuffix, string_type):
73 raise EasyBuildError("versionsuffix value should be a string, found '%s': %s (full spec: %s)",
74 type(versionsuffix).__name__, versionsuffix, ec)
75
76 ecver = ''.join([x for x in [versionprefix, ecver, versionsuffix] if x])
77
78 return ecver
79
80
81 def avail_module_naming_schemes():
82 """
83 Returns a list of available module naming schemes.
84 """
85 # all ModuleNamingScheme subclasses available in easybuild.tools.module_naming_scheme namespace are eligible
86 import_available_modules('easybuild.tools.module_naming_scheme')
87
88 # construct name-to-class dict of available module naming scheme
89 avail_mnss = dict([(x.__name__, x) for x in get_subclasses(ModuleNamingScheme)])
90
91 return avail_mnss
92
93
94 def is_valid_module_name(mod_name):
95 """Check whether the specified value is a valid module name."""
96 # module name must be a string
97 if not isinstance(mod_name, string_type):
98 _log.warning("Wrong type for module name %s (%s), should be a string" % (mod_name, type(mod_name)))
99 return False
100 # module name must be relative path
101 elif mod_name.startswith(os.path.sep):
102 _log.warning("Module name (%s) should be a relative file path" % mod_name)
103 return False
104 # module name should not be empty
105 elif not len(mod_name) > 0:
106 _log.warning("Module name (%s) should have length > 0." % mod_name)
107 return False
108 else:
109 # check whether module name only contains printable characters, since it's used as a filename
110 # (except for carriage-control characters \r, \x0b and \xoc)
111 invalid_chars = [x for x in mod_name if x not in string.printable or x in '\r\x0b\x0c']
112 if len(invalid_chars) > 0:
113 _log.warning("Module name %s contains invalid characters: %s" % (mod_name, invalid_chars))
114 return False
115 _log.debug("Module name %s validated" % mod_name)
116 return True
117
118
119 def det_hidden_modname(modname):
120 """Determine the hidden equivalent of the specified module name."""
121 moddir = os.path.dirname(modname)
122 modfile = os.path.basename(modname)
123 return os.path.join(moddir, '.%s' % modfile).lstrip(os.path.sep)
124
[end of easybuild/tools/module_naming_scheme/utilities.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/easybuild/tools/module_naming_scheme/utilities.py b/easybuild/tools/module_naming_scheme/utilities.py
--- a/easybuild/tools/module_naming_scheme/utilities.py
+++ b/easybuild/tools/module_naming_scheme/utilities.py
@@ -64,16 +64,16 @@
# prepend/append version prefix/suffix
versionprefix = ec.get('versionprefix', '')
- if not isinstance(versionprefix, string_type):
+ if versionprefix and not isinstance(versionprefix, string_type):
raise EasyBuildError("versionprefix value should be a string, found '%s': %s (full spec: %s)",
type(versionprefix).__name__, versionprefix, ec)
versionsuffix = ec.get('versionsuffix', '')
- if not isinstance(versionsuffix, string_type):
+ if versionsuffix and not isinstance(versionsuffix, string_type):
raise EasyBuildError("versionsuffix value should be a string, found '%s': %s (full spec: %s)",
type(versionsuffix).__name__, versionsuffix, ec)
- ecver = ''.join([x for x in [versionprefix, ecver, versionsuffix] if x])
+ ecver = ''.join([x for x in [versionprefix or '', ecver, versionsuffix or ''] if x])
return ecver
|
{"golden_diff": "diff --git a/easybuild/tools/module_naming_scheme/utilities.py b/easybuild/tools/module_naming_scheme/utilities.py\n--- a/easybuild/tools/module_naming_scheme/utilities.py\n+++ b/easybuild/tools/module_naming_scheme/utilities.py\n@@ -64,16 +64,16 @@\n \n # prepend/append version prefix/suffix\n versionprefix = ec.get('versionprefix', '')\n- if not isinstance(versionprefix, string_type):\n+ if versionprefix and not isinstance(versionprefix, string_type):\n raise EasyBuildError(\"versionprefix value should be a string, found '%s': %s (full spec: %s)\",\n type(versionprefix).__name__, versionprefix, ec)\n \n versionsuffix = ec.get('versionsuffix', '')\n- if not isinstance(versionsuffix, string_type):\n+ if versionsuffix and not isinstance(versionsuffix, string_type):\n raise EasyBuildError(\"versionsuffix value should be a string, found '%s': %s (full spec: %s)\",\n type(versionsuffix).__name__, versionsuffix, ec)\n \n- ecver = ''.join([x for x in [versionprefix, ecver, versionsuffix] if x])\n+ ecver = ''.join([x for x in [versionprefix or '', ecver, versionsuffix or ''] if x])\n \n return ecver\n", "issue": "Regression with versionsuffix types\nCommit https://github.com/easybuilders/easybuild-framework/commit/0e5ba5c858\r\nintroduced a check for string-type for `versionsuffix`, while `None` used to be an accepted value for `versionsuffix`. Our hooks replace many version suffixes with `None`. \n", "before_files": [{"content": "##\n# Copyright 2009-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nUtility functions for implementating module naming schemes.\n\nAuthors:\n\n* Stijn De Weirdt (Ghent University)\n* Dries Verdegem (Ghent University)\n* Kenneth Hoste (Ghent University)\n* Pieter De Baets (Ghent University)\n* Jens Timmerman (Ghent University)\n* Fotis Georgatos (Uni.Lu, NTUA)\n\"\"\"\nimport os\nimport string\n\nfrom easybuild.base import fancylogger\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.module_naming_scheme.mns import ModuleNamingScheme\nfrom easybuild.tools.py2vs3 import string_type\nfrom easybuild.tools.toolchain.toolchain import SYSTEM_TOOLCHAIN_NAME, is_system_toolchain\nfrom easybuild.tools.utilities import get_subclasses, import_available_modules\n\n_log = fancylogger.getLogger('module_naming_scheme.utilities', fname=False)\n\n\ndef det_full_ec_version(ec):\n \"\"\"\n Determine exact install version, based on supplied easyconfig.\n e.g. 
1.2.3-goalf-1.1.0-no-OFED or 1.2.3 (for system toolchains)\n \"\"\"\n\n ecver = None\n toolchain = ec.get('toolchain', {'name': SYSTEM_TOOLCHAIN_NAME})\n\n # determine main install version based on toolchain\n if is_system_toolchain(toolchain['name']):\n ecver = ec['version']\n else:\n ecver = \"%s-%s-%s\" % (ec['version'], toolchain['name'], toolchain['version'])\n\n # prepend/append version prefix/suffix\n versionprefix = ec.get('versionprefix', '')\n if not isinstance(versionprefix, string_type):\n raise EasyBuildError(\"versionprefix value should be a string, found '%s': %s (full spec: %s)\",\n type(versionprefix).__name__, versionprefix, ec)\n\n versionsuffix = ec.get('versionsuffix', '')\n if not isinstance(versionsuffix, string_type):\n raise EasyBuildError(\"versionsuffix value should be a string, found '%s': %s (full spec: %s)\",\n type(versionsuffix).__name__, versionsuffix, ec)\n\n ecver = ''.join([x for x in [versionprefix, ecver, versionsuffix] if x])\n\n return ecver\n\n\ndef avail_module_naming_schemes():\n \"\"\"\n Returns a list of available module naming schemes.\n \"\"\"\n # all ModuleNamingScheme subclasses available in easybuild.tools.module_naming_scheme namespace are eligible\n import_available_modules('easybuild.tools.module_naming_scheme')\n\n # construct name-to-class dict of available module naming scheme\n avail_mnss = dict([(x.__name__, x) for x in get_subclasses(ModuleNamingScheme)])\n\n return avail_mnss\n\n\ndef is_valid_module_name(mod_name):\n \"\"\"Check whether the specified value is a valid module name.\"\"\"\n # module name must be a string\n if not isinstance(mod_name, string_type):\n _log.warning(\"Wrong type for module name %s (%s), should be a string\" % (mod_name, type(mod_name)))\n return False\n # module name must be relative path\n elif mod_name.startswith(os.path.sep):\n _log.warning(\"Module name (%s) should be a relative file path\" % mod_name)\n return False\n # module name should not be empty\n elif not len(mod_name) > 0:\n _log.warning(\"Module name (%s) should have length > 0.\" % mod_name)\n return False\n else:\n # check whether module name only contains printable characters, since it's used as a filename\n # (except for carriage-control characters \\r, \\x0b and \\xoc)\n invalid_chars = [x for x in mod_name if x not in string.printable or x in '\\r\\x0b\\x0c']\n if len(invalid_chars) > 0:\n _log.warning(\"Module name %s contains invalid characters: %s\" % (mod_name, invalid_chars))\n return False\n _log.debug(\"Module name %s validated\" % mod_name)\n return True\n\n\ndef det_hidden_modname(modname):\n \"\"\"Determine the hidden equivalent of the specified module name.\"\"\"\n moddir = os.path.dirname(modname)\n modfile = os.path.basename(modname)\n return os.path.join(moddir, '.%s' % modfile).lstrip(os.path.sep)\n", "path": "easybuild/tools/module_naming_scheme/utilities.py"}]}
| 2,107 | 295 |
gh_patches_debug_23096
|
rasdani/github-patches
|
git_diff
|
pypa__pip-4355
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecate/Drop Support for Python 3.3?
Currently Python 3.3 support is not a major headache for support (unlike 3.2) because we're largely being limited by Python 2.6 and 2.7 in terms of what features we can support. However, I think it's important to periodically look at the usage and make sure that we're not supporting Python versions that are not really being used as no matter what, each version of Python we support incurs a cost in terms of overhead for support (more build matrix items, minor incompatibilities, etc).
With that in mind, I took a look at what % of the pip initiated traffic that PyPI received in the last month to see what our usage numbers look like.
Only pip 8 initiated traffic:
```
2.7 83.8%
3.5 6.3%
3.4 5.5%
2.6 3.8%
3.3 0.3%
3.6 0.1%
3.2 0.03%
```
All pip initiated traffic:
```
2.7 86.8%
3.4 5.2%
3.5 3.9%
2.6 3.4%
3.3 0.4%
3.2 0.07%
3.6 0.04%
```
Given that 3.3 support is well under 1%, do we want to deprecate support for Python 3.3 with intent to drop support for it in either pip 9 or pip 10? For myself, I say yes-- either as a pip 9 or a pip 10 deprecation.
@pypa/pip-committers ?
</issue>
<code>
[start of pip/basecommand.py]
1 """Base Command class, and related routines"""
2 from __future__ import absolute_import
3
4 import logging
5 import logging.config
6 import os
7 import sys
8 import optparse
9
10 from pip import cmdoptions
11 from pip.index import PackageFinder
12 from pip.locations import running_under_virtualenv
13 from pip.download import PipSession
14 from pip.exceptions import (BadCommand, InstallationError, UninstallationError,
15 CommandError, PreviousBuildDirError)
16
17 from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
18 from pip.req import InstallRequirement, parse_requirements
19 from pip.status_codes import (
20 SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
21 PREVIOUS_BUILD_DIR_ERROR,
22 )
23 from pip.utils import get_prog, normalize_path
24 from pip.utils.logging import IndentingFormatter
25 from pip.utils.outdated import pip_version_check
26
27
28 __all__ = ['Command']
29
30
31 logger = logging.getLogger(__name__)
32
33
34 class Command(object):
35 name = None
36 usage = None
37 hidden = False
38 log_streams = ("ext://sys.stdout", "ext://sys.stderr")
39
40 def __init__(self, isolated=False):
41 parser_kw = {
42 'usage': self.usage,
43 'prog': '%s %s' % (get_prog(), self.name),
44 'formatter': UpdatingDefaultsHelpFormatter(),
45 'add_help_option': False,
46 'name': self.name,
47 'description': self.__doc__,
48 'isolated': isolated,
49 }
50
51 self.parser = ConfigOptionParser(**parser_kw)
52
53 # Commands should add options to this option group
54 optgroup_name = '%s Options' % self.name.capitalize()
55 self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
56
57 # Add the general options
58 gen_opts = cmdoptions.make_option_group(
59 cmdoptions.general_group,
60 self.parser,
61 )
62 self.parser.add_option_group(gen_opts)
63
64 def _build_session(self, options, retries=None, timeout=None):
65 session = PipSession(
66 cache=(
67 normalize_path(os.path.join(options.cache_dir, "http"))
68 if options.cache_dir else None
69 ),
70 retries=retries if retries is not None else options.retries,
71 insecure_hosts=options.trusted_hosts,
72 )
73
74 # Handle custom ca-bundles from the user
75 if options.cert:
76 session.verify = options.cert
77
78 # Handle SSL client certificate
79 if options.client_cert:
80 session.cert = options.client_cert
81
82 # Handle timeouts
83 if options.timeout or timeout:
84 session.timeout = (
85 timeout if timeout is not None else options.timeout
86 )
87
88 # Handle configured proxies
89 if options.proxy:
90 session.proxies = {
91 "http": options.proxy,
92 "https": options.proxy,
93 }
94
95 # Determine if we can prompt the user for authentication or not
96 session.auth.prompting = not options.no_input
97
98 return session
99
100 def parse_args(self, args):
101 # factored out for testability
102 return self.parser.parse_args(args)
103
104 def main(self, args):
105 options, args = self.parse_args(args)
106
107 if options.quiet:
108 if options.quiet == 1:
109 level = "WARNING"
110 if options.quiet == 2:
111 level = "ERROR"
112 else:
113 level = "CRITICAL"
114 elif options.verbose:
115 level = "DEBUG"
116 else:
117 level = "INFO"
118
119 # The root logger should match the "console" level *unless* we
120 # specified "--log" to send debug logs to a file.
121 root_level = level
122 if options.log:
123 root_level = "DEBUG"
124
125 logging.config.dictConfig({
126 "version": 1,
127 "disable_existing_loggers": False,
128 "filters": {
129 "exclude_warnings": {
130 "()": "pip.utils.logging.MaxLevelFilter",
131 "level": logging.WARNING,
132 },
133 },
134 "formatters": {
135 "indent": {
136 "()": IndentingFormatter,
137 "format": "%(message)s",
138 },
139 },
140 "handlers": {
141 "console": {
142 "level": level,
143 "class": "pip.utils.logging.ColorizedStreamHandler",
144 "stream": self.log_streams[0],
145 "filters": ["exclude_warnings"],
146 "formatter": "indent",
147 },
148 "console_errors": {
149 "level": "WARNING",
150 "class": "pip.utils.logging.ColorizedStreamHandler",
151 "stream": self.log_streams[1],
152 "formatter": "indent",
153 },
154 "user_log": {
155 "level": "DEBUG",
156 "class": "pip.utils.logging.BetterRotatingFileHandler",
157 "filename": options.log or "/dev/null",
158 "delay": True,
159 "formatter": "indent",
160 },
161 },
162 "root": {
163 "level": root_level,
164 "handlers": list(filter(None, [
165 "console",
166 "console_errors",
167 "user_log" if options.log else None,
168 ])),
169 },
170 # Disable any logging besides WARNING unless we have DEBUG level
171 # logging enabled. These use both pip._vendor and the bare names
172 # for the case where someone unbundles our libraries.
173 "loggers": dict(
174 (
175 name,
176 {
177 "level": (
178 "WARNING"
179 if level in ["INFO", "ERROR"]
180 else "DEBUG"
181 ),
182 },
183 )
184 for name in ["pip._vendor", "distlib", "requests", "urllib3"]
185 ),
186 })
187
188 # TODO: try to get these passing down from the command?
189 # without resorting to os.environ to hold these.
190
191 if options.no_input:
192 os.environ['PIP_NO_INPUT'] = '1'
193
194 if options.exists_action:
195 os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)
196
197 if options.require_venv:
198 # If a venv is required check if it can really be found
199 if not running_under_virtualenv():
200 logger.critical(
201 'Could not find an activated virtualenv (required).'
202 )
203 sys.exit(VIRTUALENV_NOT_FOUND)
204
205 try:
206 status = self.run(options, args)
207 # FIXME: all commands should return an exit status
208 # and when it is done, isinstance is not needed anymore
209 if isinstance(status, int):
210 return status
211 except PreviousBuildDirError as exc:
212 logger.critical(str(exc))
213 logger.debug('Exception information:', exc_info=True)
214
215 return PREVIOUS_BUILD_DIR_ERROR
216 except (InstallationError, UninstallationError, BadCommand) as exc:
217 logger.critical(str(exc))
218 logger.debug('Exception information:', exc_info=True)
219
220 return ERROR
221 except CommandError as exc:
222 logger.critical('ERROR: %s', exc)
223 logger.debug('Exception information:', exc_info=True)
224
225 return ERROR
226 except KeyboardInterrupt:
227 logger.critical('Operation cancelled by user')
228 logger.debug('Exception information:', exc_info=True)
229
230 return ERROR
231 except:
232 logger.critical('Exception:', exc_info=True)
233
234 return UNKNOWN_ERROR
235 finally:
236 # Check if we're using the latest version of pip available
237 if (not options.disable_pip_version_check and not
238 getattr(options, "no_index", False)):
239 with self._build_session(
240 options,
241 retries=0,
242 timeout=min(5, options.timeout)) as session:
243 pip_version_check(session)
244
245 return SUCCESS
246
247
248 class RequirementCommand(Command):
249
250 @staticmethod
251 def populate_requirement_set(requirement_set, args, options, finder,
252 session, name, wheel_cache):
253 """
254 Marshal cmd line args into a requirement set.
255 """
256 for filename in options.constraints:
257 for req in parse_requirements(
258 filename,
259 constraint=True, finder=finder, options=options,
260 session=session, wheel_cache=wheel_cache):
261 requirement_set.add_requirement(req)
262
263 for req in args:
264 requirement_set.add_requirement(
265 InstallRequirement.from_line(
266 req, None, isolated=options.isolated_mode,
267 wheel_cache=wheel_cache
268 )
269 )
270
271 for req in options.editables:
272 requirement_set.add_requirement(
273 InstallRequirement.from_editable(
274 req,
275 isolated=options.isolated_mode,
276 wheel_cache=wheel_cache
277 )
278 )
279
280 for filename in options.requirements:
281 for req in parse_requirements(
282 filename,
283 finder=finder, options=options, session=session,
284 wheel_cache=wheel_cache):
285 requirement_set.add_requirement(req)
286 # If --require-hashes was a line in a requirements file, tell
287 # RequirementSet about it:
288 requirement_set.require_hashes = options.require_hashes
289
290 if not (args or options.editables or options.requirements):
291 opts = {'name': name}
292 if options.find_links:
293 raise CommandError(
294 'You must give at least one requirement to %(name)s '
295 '(maybe you meant "pip %(name)s %(links)s"?)' %
296 dict(opts, links=' '.join(options.find_links)))
297 else:
298 raise CommandError(
299 'You must give at least one requirement to %(name)s '
300 '(see "pip help %(name)s")' % opts)
301
302 def _build_package_finder(self, options, session,
303 platform=None, python_versions=None,
304 abi=None, implementation=None):
305 """
306 Create a package finder appropriate to this requirement command.
307 """
308 index_urls = [options.index_url] + options.extra_index_urls
309 if options.no_index:
310 logger.debug('Ignoring indexes: %s', ','.join(index_urls))
311 index_urls = []
312
313 return PackageFinder(
314 find_links=options.find_links,
315 format_control=options.format_control,
316 index_urls=index_urls,
317 trusted_hosts=options.trusted_hosts,
318 allow_all_prereleases=options.pre,
319 process_dependency_links=options.process_dependency_links,
320 session=session,
321 platform=platform,
322 versions=python_versions,
323 abi=abi,
324 implementation=implementation,
325 )
326
[end of pip/basecommand.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pip/basecommand.py b/pip/basecommand.py
--- a/pip/basecommand.py
+++ b/pip/basecommand.py
@@ -6,6 +6,7 @@
import os
import sys
import optparse
+import warnings
from pip import cmdoptions
from pip.index import PackageFinder
@@ -20,7 +21,7 @@
SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,
PREVIOUS_BUILD_DIR_ERROR,
)
-from pip.utils import get_prog, normalize_path
+from pip.utils import deprecation, get_prog, normalize_path
from pip.utils.logging import IndentingFormatter
from pip.utils.outdated import pip_version_check
@@ -185,6 +186,13 @@
),
})
+ if sys.version_info[:2] == (3, 3):
+ warnings.warn(
+ "Python 3.3 supported has been deprecated and support for it "
+ "will be dropped in the future. Please upgrade your Python.",
+ deprecation.RemovedInPip11Warning,
+ )
+
# TODO: try to get these passing down from the command?
# without resorting to os.environ to hold these.
|
{"golden_diff": "diff --git a/pip/basecommand.py b/pip/basecommand.py\n--- a/pip/basecommand.py\n+++ b/pip/basecommand.py\n@@ -6,6 +6,7 @@\n import os\n import sys\n import optparse\n+import warnings\n \n from pip import cmdoptions\n from pip.index import PackageFinder\n@@ -20,7 +21,7 @@\n SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,\n PREVIOUS_BUILD_DIR_ERROR,\n )\n-from pip.utils import get_prog, normalize_path\n+from pip.utils import deprecation, get_prog, normalize_path\n from pip.utils.logging import IndentingFormatter\n from pip.utils.outdated import pip_version_check\n \n@@ -185,6 +186,13 @@\n ),\n })\n \n+ if sys.version_info[:2] == (3, 3):\n+ warnings.warn(\n+ \"Python 3.3 supported has been deprecated and support for it \"\n+ \"will be dropped in the future. Please upgrade your Python.\",\n+ deprecation.RemovedInPip11Warning,\n+ )\n+\n # TODO: try to get these passing down from the command?\n # without resorting to os.environ to hold these.\n", "issue": "Deprecate/Drop Support for Python 3.3?\nCurrently Python 3.3 support is not a major headache for support (unlike 3.2) because we're largely being limited by Python 2.6 and 2.7 in terms of what features we can support. However, I think it's important to periodically look at the usage and make sure that we're not supporting Python versions that are not really being used as no matter what, each version of Python we support incurs a cost in terms of overhead for support (more build matrix items, minor incompatibilities, etc).\n\nWith that in mind, I took a look at what % of the pip initiated traffic that PyPI received in the last month to see what our usage numbers look like.\n\nOnly pip 8 initiated traffic:\n\n```\n2.7 83.8%\n3.5 6.3%\n3.4 5.5%\n2.6 3.8%\n3.3 0.3%\n3.6 0.1%\n3.2 0.03%\n```\n\nAll pip initiated traffic:\n\n```\n2.7 86.8%\n3.4 5.2%\n3.5 3.9%\n2.6 3.4%\n3.3 0.4%\n3.2 0.07%\n3.6 0.04%\n```\n\nGiven that 3.3 support is well under 1%, do we want to deprecate support for Python 3.3 with intent to drop support for it in either pip 9 or pip 10? 
For myself, I say yes-- either as a pip 9 or a pip 10 deprecation.\n\n@pypa/pip-committers ?\n\n", "before_files": [{"content": "\"\"\"Base Command class, and related routines\"\"\"\nfrom __future__ import absolute_import\n\nimport logging\nimport logging.config\nimport os\nimport sys\nimport optparse\n\nfrom pip import cmdoptions\nfrom pip.index import PackageFinder\nfrom pip.locations import running_under_virtualenv\nfrom pip.download import PipSession\nfrom pip.exceptions import (BadCommand, InstallationError, UninstallationError,\n CommandError, PreviousBuildDirError)\n\nfrom pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter\nfrom pip.req import InstallRequirement, parse_requirements\nfrom pip.status_codes import (\n SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND,\n PREVIOUS_BUILD_DIR_ERROR,\n)\nfrom pip.utils import get_prog, normalize_path\nfrom pip.utils.logging import IndentingFormatter\nfrom pip.utils.outdated import pip_version_check\n\n\n__all__ = ['Command']\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Command(object):\n name = None\n usage = None\n hidden = False\n log_streams = (\"ext://sys.stdout\", \"ext://sys.stderr\")\n\n def __init__(self, isolated=False):\n parser_kw = {\n 'usage': self.usage,\n 'prog': '%s %s' % (get_prog(), self.name),\n 'formatter': UpdatingDefaultsHelpFormatter(),\n 'add_help_option': False,\n 'name': self.name,\n 'description': self.__doc__,\n 'isolated': isolated,\n }\n\n self.parser = ConfigOptionParser(**parser_kw)\n\n # Commands should add options to this option group\n optgroup_name = '%s Options' % self.name.capitalize()\n self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)\n\n # Add the general options\n gen_opts = cmdoptions.make_option_group(\n cmdoptions.general_group,\n self.parser,\n )\n self.parser.add_option_group(gen_opts)\n\n def _build_session(self, options, retries=None, timeout=None):\n session = PipSession(\n cache=(\n normalize_path(os.path.join(options.cache_dir, \"http\"))\n if options.cache_dir else None\n ),\n retries=retries if retries is not None else options.retries,\n insecure_hosts=options.trusted_hosts,\n )\n\n # Handle custom ca-bundles from the user\n if options.cert:\n session.verify = options.cert\n\n # Handle SSL client certificate\n if options.client_cert:\n session.cert = options.client_cert\n\n # Handle timeouts\n if options.timeout or timeout:\n session.timeout = (\n timeout if timeout is not None else options.timeout\n )\n\n # Handle configured proxies\n if options.proxy:\n session.proxies = {\n \"http\": options.proxy,\n \"https\": options.proxy,\n }\n\n # Determine if we can prompt the user for authentication or not\n session.auth.prompting = not options.no_input\n\n return session\n\n def parse_args(self, args):\n # factored out for testability\n return self.parser.parse_args(args)\n\n def main(self, args):\n options, args = self.parse_args(args)\n\n if options.quiet:\n if options.quiet == 1:\n level = \"WARNING\"\n if options.quiet == 2:\n level = \"ERROR\"\n else:\n level = \"CRITICAL\"\n elif options.verbose:\n level = \"DEBUG\"\n else:\n level = \"INFO\"\n\n # The root logger should match the \"console\" level *unless* we\n # specified \"--log\" to send debug logs to a file.\n root_level = level\n if options.log:\n root_level = \"DEBUG\"\n\n logging.config.dictConfig({\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n \"exclude_warnings\": {\n \"()\": \"pip.utils.logging.MaxLevelFilter\",\n \"level\": logging.WARNING,\n },\n 
},\n \"formatters\": {\n \"indent\": {\n \"()\": IndentingFormatter,\n \"format\": \"%(message)s\",\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": level,\n \"class\": \"pip.utils.logging.ColorizedStreamHandler\",\n \"stream\": self.log_streams[0],\n \"filters\": [\"exclude_warnings\"],\n \"formatter\": \"indent\",\n },\n \"console_errors\": {\n \"level\": \"WARNING\",\n \"class\": \"pip.utils.logging.ColorizedStreamHandler\",\n \"stream\": self.log_streams[1],\n \"formatter\": \"indent\",\n },\n \"user_log\": {\n \"level\": \"DEBUG\",\n \"class\": \"pip.utils.logging.BetterRotatingFileHandler\",\n \"filename\": options.log or \"/dev/null\",\n \"delay\": True,\n \"formatter\": \"indent\",\n },\n },\n \"root\": {\n \"level\": root_level,\n \"handlers\": list(filter(None, [\n \"console\",\n \"console_errors\",\n \"user_log\" if options.log else None,\n ])),\n },\n # Disable any logging besides WARNING unless we have DEBUG level\n # logging enabled. These use both pip._vendor and the bare names\n # for the case where someone unbundles our libraries.\n \"loggers\": dict(\n (\n name,\n {\n \"level\": (\n \"WARNING\"\n if level in [\"INFO\", \"ERROR\"]\n else \"DEBUG\"\n ),\n },\n )\n for name in [\"pip._vendor\", \"distlib\", \"requests\", \"urllib3\"]\n ),\n })\n\n # TODO: try to get these passing down from the command?\n # without resorting to os.environ to hold these.\n\n if options.no_input:\n os.environ['PIP_NO_INPUT'] = '1'\n\n if options.exists_action:\n os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action)\n\n if options.require_venv:\n # If a venv is required check if it can really be found\n if not running_under_virtualenv():\n logger.critical(\n 'Could not find an activated virtualenv (required).'\n )\n sys.exit(VIRTUALENV_NOT_FOUND)\n\n try:\n status = self.run(options, args)\n # FIXME: all commands should return an exit status\n # and when it is done, isinstance is not needed anymore\n if isinstance(status, int):\n return status\n except PreviousBuildDirError as exc:\n logger.critical(str(exc))\n logger.debug('Exception information:', exc_info=True)\n\n return PREVIOUS_BUILD_DIR_ERROR\n except (InstallationError, UninstallationError, BadCommand) as exc:\n logger.critical(str(exc))\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except CommandError as exc:\n logger.critical('ERROR: %s', exc)\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except KeyboardInterrupt:\n logger.critical('Operation cancelled by user')\n logger.debug('Exception information:', exc_info=True)\n\n return ERROR\n except:\n logger.critical('Exception:', exc_info=True)\n\n return UNKNOWN_ERROR\n finally:\n # Check if we're using the latest version of pip available\n if (not options.disable_pip_version_check and not\n getattr(options, \"no_index\", False)):\n with self._build_session(\n options,\n retries=0,\n timeout=min(5, options.timeout)) as session:\n pip_version_check(session)\n\n return SUCCESS\n\n\nclass RequirementCommand(Command):\n\n @staticmethod\n def populate_requirement_set(requirement_set, args, options, finder,\n session, name, wheel_cache):\n \"\"\"\n Marshal cmd line args into a requirement set.\n \"\"\"\n for filename in options.constraints:\n for req in parse_requirements(\n filename,\n constraint=True, finder=finder, options=options,\n session=session, wheel_cache=wheel_cache):\n requirement_set.add_requirement(req)\n\n for req in args:\n requirement_set.add_requirement(\n InstallRequirement.from_line(\n req, None, 
isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n )\n\n for req in options.editables:\n requirement_set.add_requirement(\n InstallRequirement.from_editable(\n req,\n isolated=options.isolated_mode,\n wheel_cache=wheel_cache\n )\n )\n\n for filename in options.requirements:\n for req in parse_requirements(\n filename,\n finder=finder, options=options, session=session,\n wheel_cache=wheel_cache):\n requirement_set.add_requirement(req)\n # If --require-hashes was a line in a requirements file, tell\n # RequirementSet about it:\n requirement_set.require_hashes = options.require_hashes\n\n if not (args or options.editables or options.requirements):\n opts = {'name': name}\n if options.find_links:\n raise CommandError(\n 'You must give at least one requirement to %(name)s '\n '(maybe you meant \"pip %(name)s %(links)s\"?)' %\n dict(opts, links=' '.join(options.find_links)))\n else:\n raise CommandError(\n 'You must give at least one requirement to %(name)s '\n '(see \"pip help %(name)s\")' % opts)\n\n def _build_package_finder(self, options, session,\n platform=None, python_versions=None,\n abi=None, implementation=None):\n \"\"\"\n Create a package finder appropriate to this requirement command.\n \"\"\"\n index_urls = [options.index_url] + options.extra_index_urls\n if options.no_index:\n logger.debug('Ignoring indexes: %s', ','.join(index_urls))\n index_urls = []\n\n return PackageFinder(\n find_links=options.find_links,\n format_control=options.format_control,\n index_urls=index_urls,\n trusted_hosts=options.trusted_hosts,\n allow_all_prereleases=options.pre,\n process_dependency_links=options.process_dependency_links,\n session=session,\n platform=platform,\n versions=python_versions,\n abi=abi,\n implementation=implementation,\n )\n", "path": "pip/basecommand.py"}]}
| 3,923 | 265 |
gh_patches_debug_18476
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-11433
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow site title to be customised
## Overview
Allow the site title to be customised; it’s currently hardcoded as ‘Kolibri’.
#### Description and outcomes
The site title is used in only a few places: the `<title>` of the base page and the ‘unsupported browser’ page, and the name in the PWA manifest.
Almost all of the time, the title is overridden by the plugin being used, via vuejs, so users will typically see something like ‘Explore’ or ‘Library’ instead of ‘Kolibri’.
The place where the default ‘Kolibri’ title is slightly problematic at the moment is in the PWA plugin: the name of the PWA is set to ‘Kolibri’, and that’s shown much more prominently in the browser’s list of PWA apps, or on the desktop app chooser when trying to run it.
For Endless Key in particular, that’s a bit problematic because users will likely try to find the PWA from their desktop by searching for ‘Endless Key’ rather than ‘Kolibri’.
So it would be good to either be able to:
- Separate the site title from the name of the platform (which will always be Kolibri), and allow the site title to be customised.
- Or, specifically set the site title in the configuration for the PWA plugin.
The second option is much more self-contained, but doesn’t seem semantically correct to me. The PWA manifest should be reflecting the main site’s configuration.
#### Resources
- https://developer.mozilla.org/en-US/docs/Web/Manifest/name
- https://developer.mozilla.org/en-US/docs/Web/Manifest/short_name
#### Accessibility Requirements
Having an installed PWA use the name the users will be most familiar with it seems like an accessibility issue, although I have not been approaching it from that angle and don’t know which specific accessibility spec applies here.
</issue>
<code>
[start of kolibri/core/templatetags/core_tags.py]
1 """
2 Kolibri template tags
3 =====================
4 """
5 from __future__ import absolute_import
6 from __future__ import print_function
7 from __future__ import unicode_literals
8
9 from django import template
10 from django.templatetags.static import static
11 from django.utils.html import format_html
12
13 from kolibri.core.hooks import FrontEndBaseASyncHook
14 from kolibri.core.hooks import FrontEndBaseHeadHook
15 from kolibri.core.hooks import FrontEndBaseSyncHook
16 from kolibri.core.theme_hook import ThemeHook
17
18 register = template.Library()
19
20
21 @register.simple_tag()
22 def frontend_base_assets():
23 """
24 This is a script tag for all ``FrontEndAssetHook`` hooks that implement a
25 render_to_html() method - this is used in ``/base.html`` template to
26 populate any Javascript and CSS that should be loaded at page load.
27
28 :return: HTML of script tags to insert into base.html
29 """
30 return FrontEndBaseSyncHook.html()
31
32
33 @register.simple_tag()
34 def frontend_base_async_assets():
35 """
36 This is a script tag for all ``FrontEndAssetHook`` hooks that implement a
37 render_to_html() method - this is used in ``/base.html`` template to
38 populate any Javascript and CSS that should be loaded at page load.
39
40 :return: HTML of script tags to insert into base.html
41 """
42 return FrontEndBaseASyncHook.html()
43
44
45 @register.simple_tag()
46 def frontend_base_head_markup():
47 """
48 This is a script tag for all ``FrontEndBaseHeadHook`` hooks that implement
49 a render_to_html() method - this is used in the ``/base.html`` template to
50 inject arbitrary markup into the ``<head>`` element.
51
52 :return: HTML to insert into head of base.html
53 """
54 return FrontEndBaseHeadHook.html()
55
56
57 @register.simple_tag()
58 def theme_favicon():
59 """
60 Render a favicon link to put in the <head> tag of base.html, if a favicon is
61 provided by the theme. If not, a default will be returned.
62 """
63 favicon_urls = [
64 logo["src"]
65 for logo in ThemeHook.get_theme().get("logos", [])
66 if logo.get("content_type", "") == "image/vnd.microsoft.icon"
67 ]
68
69 # Choose the first available .ico file. It's unlikely there's more than
70 # one specified in the theme.
71 favicon_url = favicon_urls[0] if favicon_urls else static("assets/logo.ico")
72
73 return format_html('<link rel="shortcut icon" href="{}">', favicon_url)
74
[end of kolibri/core/templatetags/core_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/core/templatetags/core_tags.py b/kolibri/core/templatetags/core_tags.py
--- a/kolibri/core/templatetags/core_tags.py
+++ b/kolibri/core/templatetags/core_tags.py
@@ -14,6 +14,7 @@
from kolibri.core.hooks import FrontEndBaseHeadHook
from kolibri.core.hooks import FrontEndBaseSyncHook
from kolibri.core.theme_hook import ThemeHook
+from kolibri.utils.translation import ugettext as _
register = template.Library()
@@ -71,3 +72,13 @@
favicon_url = favicon_urls[0] if favicon_urls else static("assets/logo.ico")
return format_html('<link rel="shortcut icon" href="{}">', favicon_url)
+
+
[email protected]_tag()
+def site_title():
+ """
+ Return the text of the site title, if provided by the theme. If not, the
+ default will be returned. The site title may be translated, to allow for
+ transliteration into other alphabets where needed.
+ """
+ return ThemeHook.get_theme().get("siteTitle", _("Kolibri"))
|
{"golden_diff": "diff --git a/kolibri/core/templatetags/core_tags.py b/kolibri/core/templatetags/core_tags.py\n--- a/kolibri/core/templatetags/core_tags.py\n+++ b/kolibri/core/templatetags/core_tags.py\n@@ -14,6 +14,7 @@\n from kolibri.core.hooks import FrontEndBaseHeadHook\n from kolibri.core.hooks import FrontEndBaseSyncHook\n from kolibri.core.theme_hook import ThemeHook\n+from kolibri.utils.translation import ugettext as _\n \n register = template.Library()\n \n@@ -71,3 +72,13 @@\n favicon_url = favicon_urls[0] if favicon_urls else static(\"assets/logo.ico\")\n \n return format_html('<link rel=\"shortcut icon\" href=\"{}\">', favicon_url)\n+\n+\[email protected]_tag()\n+def site_title():\n+ \"\"\"\n+ Return the text of the site title, if provided by the theme. If not, the\n+ default will be returned. The site title may be translated, to allow for\n+ transliteration into other alphabets where needed.\n+ \"\"\"\n+ return ThemeHook.get_theme().get(\"siteTitle\", _(\"Kolibri\"))\n", "issue": "Allow site title to be customised\n## Overview\r\n\r\nAllow the site title to be customised; it\u2019s currently hardcoded as \u2018Kolibri\u2019.\r\n\r\n#### Description and outcomes\r\n\r\nThe site title is used in only a few places: the `<title>` of the base page and the \u2018unsupported browser\u2019 page, and the name in the PWA manifest.\r\n\r\nAlmost all of the time, the title is overridden by the plugin being used, via vuejs, so users will typically see something like \u2018Explore\u2019 or \u2018Library\u2019 instead of \u2018Kolibri\u2019.\r\n\r\nThe place where the default \u2018Kolibri\u2019 title is slightly problematic at the moment is in the PWA plugin: the name of the PWA is set to \u2018Kolibri\u2019, and that\u2019s shown much more prominently in the browser\u2019s list of PWA apps, or on the desktop app chooser when trying to run it.\r\n\r\nFor Endless Key in particular, that\u2019s a bit problematic because users will likely try to find the PWA from their desktop by searching for \u2018Endless Key\u2019 rather than \u2018Kolibri\u2019.\r\n\r\nSo it would be good to either be able to:\r\n - Separate the site title from the name of the platform (which will always be Kolibri), and allow the site title to be customised.\r\n - Or, specifically set the site title in the configuration for the PWA plugin.\r\n\r\nThe second option is much more self-contained, but doesn\u2019t seem semantically correct to me. 
The PWA manifest should be reflecting the main site\u2019s configuration.\r\n\r\n#### Resources\r\n\r\n - https://developer.mozilla.org/en-US/docs/Web/Manifest/name\r\n - https://developer.mozilla.org/en-US/docs/Web/Manifest/short_name\r\n\r\n#### Accessibility Requirements\r\n\r\nHaving an installed PWA use the name the users will be most familiar with it seems like an accessibility issue, although I have not been approaching it from that angle and don\u2019t know which specific accessibility spec applies here.\n", "before_files": [{"content": "\"\"\"\nKolibri template tags\n=====================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom django import template\nfrom django.templatetags.static import static\nfrom django.utils.html import format_html\n\nfrom kolibri.core.hooks import FrontEndBaseASyncHook\nfrom kolibri.core.hooks import FrontEndBaseHeadHook\nfrom kolibri.core.hooks import FrontEndBaseSyncHook\nfrom kolibri.core.theme_hook import ThemeHook\n\nregister = template.Library()\n\n\[email protected]_tag()\ndef frontend_base_assets():\n \"\"\"\n This is a script tag for all ``FrontEndAssetHook`` hooks that implement a\n render_to_html() method - this is used in ``/base.html`` template to\n populate any Javascript and CSS that should be loaded at page load.\n\n :return: HTML of script tags to insert into base.html\n \"\"\"\n return FrontEndBaseSyncHook.html()\n\n\[email protected]_tag()\ndef frontend_base_async_assets():\n \"\"\"\n This is a script tag for all ``FrontEndAssetHook`` hooks that implement a\n render_to_html() method - this is used in ``/base.html`` template to\n populate any Javascript and CSS that should be loaded at page load.\n\n :return: HTML of script tags to insert into base.html\n \"\"\"\n return FrontEndBaseASyncHook.html()\n\n\[email protected]_tag()\ndef frontend_base_head_markup():\n \"\"\"\n This is a script tag for all ``FrontEndBaseHeadHook`` hooks that implement\n a render_to_html() method - this is used in the ``/base.html`` template to\n inject arbitrary markup into the ``<head>`` element.\n\n :return: HTML to insert into head of base.html\n \"\"\"\n return FrontEndBaseHeadHook.html()\n\n\[email protected]_tag()\ndef theme_favicon():\n \"\"\"\n Render a favicon link to put in the <head> tag of base.html, if a favicon is\n provided by the theme. If not, a default will be returned.\n \"\"\"\n favicon_urls = [\n logo[\"src\"]\n for logo in ThemeHook.get_theme().get(\"logos\", [])\n if logo.get(\"content_type\", \"\") == \"image/vnd.microsoft.icon\"\n ]\n\n # Choose the first available .ico file. It's unlikely there's more than\n # one specified in the theme.\n favicon_url = favicon_urls[0] if favicon_urls else static(\"assets/logo.ico\")\n\n return format_html('<link rel=\"shortcut icon\" href=\"{}\">', favicon_url)\n", "path": "kolibri/core/templatetags/core_tags.py"}]}
| 1,632 | 265 |
gh_patches_debug_12329
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-2833
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Configuration file] keys are not used
##### Steps to reproduce the problem:
1. Create a configuration file at .mitmproxy/config.yaml
2. Set this configuration:
mode: "transparent"
showhost: true
3. Start mitmproxy with the command "mitmproxy --conf config.yaml" (run from inside the .mitmproxy folder, of course)
The process starts, but the traffic is not handled
4. Start mitmproxy with the command "mitmproxy -T --host"
The process starts and the traffic is handled correctly
##### Any other comments? What have you tried so far?
I tried using both ":" and "=" as the separator in the YAML file, but apparently only ":" is recognized
##### System information
I have the latest release of mitmproxy (v2.0.2)
Well, I've just noticed that there is a v3 release; maybe that could help?
I'd still prefer to file this ticket in case someone else has the same issue :)
</issue>
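For illustration, a minimal sketch of how the reported options parse when the file is read as YAML (PyYAML is assumed here purely for the check; the report does not name the parser):
```
# Hypothetical check of the reported config.yaml contents.
import yaml

text = 'mode: "transparent"\nshowhost: true\n'
opts = yaml.safe_load(text)

# YAML mappings require ':' as the separator; a line like 'mode = transparent'
# parses as a single scalar string rather than a key/value pair, which matches
# the observation that only ':' is recognized.
assert opts == {"mode": "transparent", "showhost": True}
```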
<code>
[start of mitmproxy/addons/script.py]
1 import os
2 import importlib.util
3 import importlib.machinery
4 import time
5 import sys
6 import types
7 import typing
8
9 from mitmproxy import addonmanager
10 from mitmproxy import exceptions
11 from mitmproxy import flow
12 from mitmproxy import command
13 from mitmproxy import eventsequence
14 from mitmproxy import ctx
15
16
17 def load_script(path: str) -> types.ModuleType:
18 fullname = "__mitmproxy_script__.{}".format(
19 os.path.splitext(os.path.basename(path))[0]
20 )
21 # the fullname is not unique among scripts, so if there already is an existing script with said
22 # fullname, remove it.
23 sys.modules.pop(fullname, None)
24 oldpath = sys.path
25 sys.path.insert(0, os.path.dirname(path))
26 try:
27 loader = importlib.machinery.SourceFileLoader(fullname, path)
28 spec = importlib.util.spec_from_loader(fullname, loader=loader)
29 m = importlib.util.module_from_spec(spec)
30 loader.exec_module(m)
31 if not getattr(m, "name", None):
32 m.name = path # type: ignore
33 return m
34 finally:
35 sys.path[:] = oldpath
36
37
38 class Script:
39 """
40 An addon that manages a single script.
41 """
42 ReloadInterval = 2
43
44 def __init__(self, path):
45 self.name = "scriptmanager:" + path
46 self.path = path
47 self.fullpath = os.path.expanduser(path)
48 self.ns = None
49
50 self.last_load = 0
51 self.last_mtime = 0
52 if not os.path.isfile(self.fullpath):
53 raise exceptions.OptionsError("No such script: %s" % path)
54
55 @property
56 def addons(self):
57 return [self.ns] if self.ns else []
58
59 def tick(self):
60 if time.time() - self.last_load > self.ReloadInterval:
61 try:
62 mtime = os.stat(self.fullpath).st_mtime
63 except FileNotFoundError:
64 scripts = list(ctx.options.scripts)
65 scripts.remove(self.path)
66 ctx.options.update(scripts=scripts)
67 return
68
69 if mtime > self.last_mtime:
70 ctx.log.info("Loading script: %s" % self.path)
71 if self.ns:
72 ctx.master.addons.remove(self.ns)
73 self.ns = None
74 with addonmanager.safecall():
75 ns = load_script(self.fullpath)
76 ctx.master.addons.register(ns)
77 self.ns = ns
78 if self.ns:
79 # We're already running, so we have to explicitly register and
80 # configure the addon
81 ctx.master.addons.invoke_addon(self.ns, "running")
82 ctx.master.addons.invoke_addon(
83 self.ns,
84 "configure",
85 ctx.options.keys()
86 )
87 self.last_load = time.time()
88 self.last_mtime = mtime
89
90
91 class ScriptLoader:
92 """
93 An addon that manages loading scripts from options.
94 """
95 def __init__(self):
96 self.is_running = False
97 self.addons = []
98
99 def running(self):
100 self.is_running = True
101
102 @command.command("script.run")
103 def script_run(self, flows: typing.Sequence[flow.Flow], path: str) -> None:
104 """
105 Run a script on the specified flows. The script is loaded with
106 default options, and all lifecycle events for each flow are
107 simulated.
108 """
109 try:
110 s = Script(path)
111 l = addonmanager.Loader(ctx.master)
112 ctx.master.addons.invoke_addon(s, "load", l)
113 ctx.master.addons.invoke_addon(s, "configure", ctx.options.keys())
114 # Script is loaded on the first tick
115 ctx.master.addons.invoke_addon(s, "tick")
116 for f in flows:
117 for evt, arg in eventsequence.iterate(f):
118 ctx.master.addons.invoke_addon(s, evt, arg)
119 except exceptions.OptionsError as e:
120 raise exceptions.CommandError("Error running script: %s" % e) from e
121
122 def configure(self, updated):
123 if "scripts" in updated:
124 for s in ctx.options.scripts:
125 if ctx.options.scripts.count(s) > 1:
126 raise exceptions.OptionsError("Duplicate script: %s" % s)
127
128 for a in self.addons[:]:
129 if a.path not in ctx.options.scripts:
130 ctx.log.info("Un-loading script: %s" % a.name)
131 ctx.master.addons.remove(a)
132 self.addons.remove(a)
133
134 # The machinations below are to ensure that:
135 # - Scripts remain in the same order
136 # - Scripts are not initialized un-necessarily. If only a
137 # script's order in the script list has changed, it is just
138 # moved.
139
140 current = {}
141 for a in self.addons:
142 current[a.path] = a
143
144 ordered = []
145 newscripts = []
146 for s in ctx.options.scripts:
147 if s in current:
148 ordered.append(current[s])
149 else:
150 sc = Script(s)
151 ordered.append(sc)
152 newscripts.append(sc)
153
154 self.addons = ordered
155
156 for s in newscripts:
157 ctx.master.addons.register(s)
158 if self.is_running:
159 # If we're already running, we configure and tell the addon
160 # we're up and running.
161 ctx.master.addons.invoke_addon(s, "running")
162
[end of mitmproxy/addons/script.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py
--- a/mitmproxy/addons/script.py
+++ b/mitmproxy/addons/script.py
@@ -44,13 +44,15 @@
def __init__(self, path):
self.name = "scriptmanager:" + path
self.path = path
- self.fullpath = os.path.expanduser(path)
+ self.fullpath = os.path.expanduser(
+ path.strip("'\" ")
+ )
self.ns = None
self.last_load = 0
self.last_mtime = 0
if not os.path.isfile(self.fullpath):
- raise exceptions.OptionsError("No such script: %s" % path)
+ raise exceptions.OptionsError('No such script: "%s"' % self.fullpath)
@property
def addons(self):
|
{"golden_diff": "diff --git a/mitmproxy/addons/script.py b/mitmproxy/addons/script.py\n--- a/mitmproxy/addons/script.py\n+++ b/mitmproxy/addons/script.py\n@@ -44,13 +44,15 @@\n def __init__(self, path):\n self.name = \"scriptmanager:\" + path\n self.path = path\n- self.fullpath = os.path.expanduser(path)\n+ self.fullpath = os.path.expanduser(\n+ path.strip(\"'\\\" \")\n+ )\n self.ns = None\n \n self.last_load = 0\n self.last_mtime = 0\n if not os.path.isfile(self.fullpath):\n- raise exceptions.OptionsError(\"No such script: %s\" % path)\n+ raise exceptions.OptionsError('No such script: \"%s\"' % self.fullpath)\n \n @property\n def addons(self):\n", "issue": "[Configuration file] keys are not used\n##### Steps to reproduce the problem:\r\n\r\n1. Create a configuration file at .mitmproxy/config.yaml\r\n2. Set this configuration:\r\nmode: \"transparent\"\r\nshowhost: true \r\n3. Start mitmproxy using this command : \"mitmproxy --conf config.yaml\" (I'm on the .mitmproxy folder of course)\r\nThe process is started but the traffic is not working\r\n4. Start mitmproxy using this command : \"mitmproxy -T --host\"\r\nThe process is started and the traffic is working\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nI tried to use the \":\" and \"=\" as separator for the YAML file but only the \":\" is recognized apparently\r\n\r\n\r\n##### System information\r\nI have the last release of mitmproxy (v2.0.2)\r\n\r\nWell I've just notice that there is a v3 release, maybe this could help me ?\r\nI prefer to create this ticket if someone else has the same issue :)\n", "before_files": [{"content": "import os\nimport importlib.util\nimport importlib.machinery\nimport time\nimport sys\nimport types\nimport typing\n\nfrom mitmproxy import addonmanager\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import ctx\n\n\ndef load_script(path: str) -> types.ModuleType:\n fullname = \"__mitmproxy_script__.{}\".format(\n os.path.splitext(os.path.basename(path))[0]\n )\n # the fullname is not unique among scripts, so if there already is an existing script with said\n # fullname, remove it.\n sys.modules.pop(fullname, None)\n oldpath = sys.path\n sys.path.insert(0, os.path.dirname(path))\n try:\n loader = importlib.machinery.SourceFileLoader(fullname, path)\n spec = importlib.util.spec_from_loader(fullname, loader=loader)\n m = importlib.util.module_from_spec(spec)\n loader.exec_module(m)\n if not getattr(m, \"name\", None):\n m.name = path # type: ignore\n return m\n finally:\n sys.path[:] = oldpath\n\n\nclass Script:\n \"\"\"\n An addon that manages a single script.\n \"\"\"\n ReloadInterval = 2\n\n def __init__(self, path):\n self.name = \"scriptmanager:\" + path\n self.path = path\n self.fullpath = os.path.expanduser(path)\n self.ns = None\n\n self.last_load = 0\n self.last_mtime = 0\n if not os.path.isfile(self.fullpath):\n raise exceptions.OptionsError(\"No such script: %s\" % path)\n\n @property\n def addons(self):\n return [self.ns] if self.ns else []\n\n def tick(self):\n if time.time() - self.last_load > self.ReloadInterval:\n try:\n mtime = os.stat(self.fullpath).st_mtime\n except FileNotFoundError:\n scripts = list(ctx.options.scripts)\n scripts.remove(self.path)\n ctx.options.update(scripts=scripts)\n return\n\n if mtime > self.last_mtime:\n ctx.log.info(\"Loading script: %s\" % self.path)\n if self.ns:\n ctx.master.addons.remove(self.ns)\n self.ns = None\n with 
addonmanager.safecall():\n ns = load_script(self.fullpath)\n ctx.master.addons.register(ns)\n self.ns = ns\n if self.ns:\n # We're already running, so we have to explicitly register and\n # configure the addon\n ctx.master.addons.invoke_addon(self.ns, \"running\")\n ctx.master.addons.invoke_addon(\n self.ns,\n \"configure\",\n ctx.options.keys()\n )\n self.last_load = time.time()\n self.last_mtime = mtime\n\n\nclass ScriptLoader:\n \"\"\"\n An addon that manages loading scripts from options.\n \"\"\"\n def __init__(self):\n self.is_running = False\n self.addons = []\n\n def running(self):\n self.is_running = True\n\n @command.command(\"script.run\")\n def script_run(self, flows: typing.Sequence[flow.Flow], path: str) -> None:\n \"\"\"\n Run a script on the specified flows. The script is loaded with\n default options, and all lifecycle events for each flow are\n simulated.\n \"\"\"\n try:\n s = Script(path)\n l = addonmanager.Loader(ctx.master)\n ctx.master.addons.invoke_addon(s, \"load\", l)\n ctx.master.addons.invoke_addon(s, \"configure\", ctx.options.keys())\n # Script is loaded on the first tick\n ctx.master.addons.invoke_addon(s, \"tick\")\n for f in flows:\n for evt, arg in eventsequence.iterate(f):\n ctx.master.addons.invoke_addon(s, evt, arg)\n except exceptions.OptionsError as e:\n raise exceptions.CommandError(\"Error running script: %s\" % e) from e\n\n def configure(self, updated):\n if \"scripts\" in updated:\n for s in ctx.options.scripts:\n if ctx.options.scripts.count(s) > 1:\n raise exceptions.OptionsError(\"Duplicate script: %s\" % s)\n\n for a in self.addons[:]:\n if a.path not in ctx.options.scripts:\n ctx.log.info(\"Un-loading script: %s\" % a.name)\n ctx.master.addons.remove(a)\n self.addons.remove(a)\n\n # The machinations below are to ensure that:\n # - Scripts remain in the same order\n # - Scripts are not initialized un-necessarily. If only a\n # script's order in the script list has changed, it is just\n # moved.\n\n current = {}\n for a in self.addons:\n current[a.path] = a\n\n ordered = []\n newscripts = []\n for s in ctx.options.scripts:\n if s in current:\n ordered.append(current[s])\n else:\n sc = Script(s)\n ordered.append(sc)\n newscripts.append(sc)\n\n self.addons = ordered\n\n for s in newscripts:\n ctx.master.addons.register(s)\n if self.is_running:\n # If we're already running, we configure and tell the addon\n # we're up and running.\n ctx.master.addons.invoke_addon(s, \"running\")\n", "path": "mitmproxy/addons/script.py"}]}
| 2,291 | 191 |
gh_patches_debug_756
|
rasdani/github-patches
|
git_diff
|
vllm-project__vllm-1212
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v0.2.0] Release Tracker
## Major changes
* Up to 60% performance improvement by optimizing de-tokenization and sampler
* Initial support for AWQ (performance not optimized)
* Support for RoPE scaling and LongChat
* Support for Mistral-7B
## PRs to be merged before the release
- [x] Vectorized sampler: #1048, #820
- [x] LongChat: #555
- [x] `TORCH_CUDA_ARCH_LIST` build option: #1074
- [x] Support for Mistral-7B: #1196
- [x] #1198
- ~~[ ] FP32 RoPE kernel: #1061~~ (deferred to the next PR)
</issue>
<code>
[start of vllm/__init__.py]
1 """vLLM: a high-throughput and memory-efficient inference engine for LLMs"""
2
3 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
4 from vllm.engine.async_llm_engine import AsyncLLMEngine
5 from vllm.engine.llm_engine import LLMEngine
6 from vllm.engine.ray_utils import initialize_cluster
7 from vllm.entrypoints.llm import LLM
8 from vllm.outputs import CompletionOutput, RequestOutput
9 from vllm.sampling_params import SamplingParams
10
11 __version__ = "0.1.7"
12
13 __all__ = [
14 "LLM",
15 "SamplingParams",
16 "RequestOutput",
17 "CompletionOutput",
18 "LLMEngine",
19 "EngineArgs",
20 "AsyncLLMEngine",
21 "AsyncEngineArgs",
22 "initialize_cluster",
23 ]
24
[end of vllm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/vllm/__init__.py b/vllm/__init__.py
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
-__version__ = "0.1.7"
+__version__ = "0.2.0"
__all__ = [
"LLM",
|
{"golden_diff": "diff --git a/vllm/__init__.py b/vllm/__init__.py\n--- a/vllm/__init__.py\n+++ b/vllm/__init__.py\n@@ -8,7 +8,7 @@\n from vllm.outputs import CompletionOutput, RequestOutput\n from vllm.sampling_params import SamplingParams\n \n-__version__ = \"0.1.7\"\n+__version__ = \"0.2.0\"\n \n __all__ = [\n \"LLM\",\n", "issue": "[v0.2.0] Release Tracker\n## Major changes\r\n\r\n* Up to 60% performance improvement by optimizing de-tokenization and sampler\r\n* Initial support for AWQ (performance not optimized)\r\n* Support for RoPE scaling and LongChat\r\n* Support for Mistral-7B\r\n\r\n## PRs to be merged before the release\r\n\r\n- [x] Vectorized sampler: #1048, #820 \r\n- [x] LongChat: #555 \r\n- [x] `TORCH_CUDA_ARCH_LIST` build option: #1074 \r\n- [x] Support for Mistral-7B: #1196 \r\n- [x] #1198 \r\n- ~~[ ] FP32 RoPE kernel: #1061~~ (deferred to the next PR)\n", "before_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.1.7\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}]}
| 931 | 108 |
gh_patches_debug_30325
|
rasdani/github-patches
|
git_diff
|
mito-ds__mito-213
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow the installer to go pro after the user has already installed!
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
Please include the relevant dataset if the bug you encountered is dataset specific. Make sure to anonymize the data properly.
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. Windows 11]
- Browser [e.g. Chrome, Firefox]
- Mito Version [e.g. 0.3.331] (you can find this with `pip list`)
**Additional context**
Add any other context about the problem here.
</issue>
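For illustration, a minimal sketch of what "going pro" after an existing install amounts to in terms of the user.json the installer already writes (key names are taken from the module below; the exact CLI entry point and flag handling are assumptions):
```
# Hypothetical update of an existing user.json when --pro is passed on a re-run.
import json

def mark_existing_install_as_pro(user_json_path):
    # Flip the two fields the installer writes: telemetry off, pro on.
    with open(user_json_path) as f:
        user_json = json.load(f)
    user_json['mitosheet_telemetry'] = False
    user_json['mitosheet_pro'] = True
    with open(user_json_path, 'w') as f:
        json.dump(user_json, f)
```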
<code>
[start of mitoinstaller/mitoinstaller/user_install.py]
1 import json
2 import os
3 from typing import Optional
4 import uuid
5 from copy import deepcopy
6
7 from mitoinstaller import __version__
8
9 # Where all global .mito files are stored
10 MITO_FOLDER = os.path.join(os.path.expanduser("~"), '.mito')
11
12 # The path of the user.json file, which notably is the same
13 # path as the USER_JSON_PATH in mitosheet
14 USER_JSON_PATH = os.path.join(MITO_FOLDER, 'user.json')
15
16 def get_random_id() -> str:
17 """
18 Creates a new random ID for the user, which for any given user,
19 should only happen once.
20 """
21 return str(uuid.uuid1())
22
23 def is_running_test() -> bool:
24 """
25 A helper function that quickly returns if the current code is running inside
26 of a test, which is useful for making sure we don't generate tons of logs
27 """
28 running_pytests = "PYTEST_CURRENT_TEST" in os.environ
29 running_ci = 'CI' in os.environ and os.environ['CI'] is not None
30
31 return running_pytests or running_ci
32
33
34 # NOTE: the installer only creates the static id for the user, and
35 # otherwise does nothing with the user_json file. This makes sure
36 # we keep the dependencies as simple as possible with this file.
37 # We also add the telemetry, which we turn off if the user has a
38 # pro subscription.
39 # NOTE: if you delete a field from this, you need to update the
40 # user_json_is_installer_default to handle this properly
41 USER_JSON_DEFAULT = {
42 'static_user_id': get_random_id() if not is_running_test() else 'github_action',
43 'mitosheet_telemetry': True,
44 'mitosheet_pro': False,
45 }
46
47 def try_create_user_json_file(is_pro: bool=False) -> None:
48 # Create the mito folder if it does not exist
49 if not os.path.exists(MITO_FOLDER):
50 os.mkdir(MITO_FOLDER)
51
52 # We only create a user.json file if it does not exist
53 if not os.path.exists(USER_JSON_PATH):
54 with open(USER_JSON_PATH, 'w+') as f:
55 # And write the default object
56 default_user_json = deepcopy(USER_JSON_DEFAULT)
57 default_user_json['mitosheet_telemetry'] = not is_pro
58 default_user_json['mitosheet_pro'] = is_pro
59
60 f.write(json.dumps(default_user_json))
61 else:
62 # Otherwise, we make sure to update the mitosheet_telemetry variable
63 with open(USER_JSON_PATH, 'r') as f:
64 updated_user_json = json.loads(f.read())
65 updated_user_json['mitosheet_telemetry'] = not is_pro
66 updated_user_json['mitosheet_pro'] = is_pro
67 with open(USER_JSON_PATH, 'w') as f:
68 f.write(json.dumps(updated_user_json))
69
70
71 def get_static_user_id() -> Optional[str]:
72 try:
73 with open(USER_JSON_PATH) as f:
74 return json.load(f)['static_user_id']
75 except:
76 return None
77
78 def get_mitosheet_telemetry() -> bool:
79 try:
80 with open(USER_JSON_PATH) as f:
81 return json.load(f)['mitosheet_telemetry']
82 except:
83 return True
84
85 def user_json_is_installer_default() -> bool:
86 """
87 Returns True if the user.json file is the installer default,
88 and otherwise returns False.
89
90 This allows us to not call identify if we have already done
91 so in the mitosheet package (which would overwrite things
92 we don't want to).
93 """
94 try:
95 with open(USER_JSON_PATH) as f:
96 user_json_object = json.load(f)
97 return len(user_json_object) <= len(USER_JSON_DEFAULT)
98 except:
99 return False
100
[end of mitoinstaller/mitoinstaller/user_install.py]
[start of mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py]
1 import os
2 import sys
3
4 from mitoinstaller import __version__
5 from mitoinstaller.commands import upgrade_mito_installer
6 from mitoinstaller.installer_steps.installer_step import InstallerStep
7 from mitoinstaller.log_utils import identify, log
8 from mitoinstaller.user_install import (USER_JSON_PATH,
9 try_create_user_json_file)
10
11
12 def initial_install_step_create_user():
13
14 if not os.path.exists(USER_JSON_PATH):
15 try_create_user_json_file(is_pro=('--pro' in sys.argv))
16
17 # Only try and log if we're not pro
18 if not ('--pro' in sys.argv):
19 identify()
20 log('install_started', {
21 'mitoinstaller_version': __version__
22 })
23
24
25 INITIAL_INSTALLER_STEPS = [
26 InstallerStep(
27 'Create mito user',
28 initial_install_step_create_user
29 ),
30 InstallerStep(
31 'Upgrade mitoinstaller',
32 upgrade_mito_installer,
33 optional=True
34 ),
35 ]
36
[end of mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
--- a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
+++ b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py
@@ -5,7 +5,7 @@
from mitoinstaller.commands import upgrade_mito_installer
from mitoinstaller.installer_steps.installer_step import InstallerStep
from mitoinstaller.log_utils import identify, log
-from mitoinstaller.user_install import (USER_JSON_PATH,
+from mitoinstaller.user_install import (USER_JSON_PATH, go_pro,
try_create_user_json_file)
@@ -14,13 +14,15 @@
if not os.path.exists(USER_JSON_PATH):
try_create_user_json_file(is_pro=('--pro' in sys.argv))
- # Only try and log if we're not pro
if not ('--pro' in sys.argv):
+ # Only try and log if we're not pro
identify()
log('install_started', {
'mitoinstaller_version': __version__
})
-
+ else:
+ # If the user is going pro, make sure they are set to pro
+ go_pro()
INITIAL_INSTALLER_STEPS = [
InstallerStep(
diff --git a/mitoinstaller/mitoinstaller/user_install.py b/mitoinstaller/mitoinstaller/user_install.py
--- a/mitoinstaller/mitoinstaller/user_install.py
+++ b/mitoinstaller/mitoinstaller/user_install.py
@@ -97,3 +97,12 @@
return len(user_json_object) <= len(USER_JSON_DEFAULT)
except:
return False
+
+def go_pro() -> None:
+ with open(USER_JSON_PATH, 'r') as f:
+ updated_user_json = json.loads(f.read())
+ updated_user_json['mitosheet_telemetry'] = False
+ updated_user_json['mitosheet_pro'] = True
+
+ with open(USER_JSON_PATH, 'w') as f:
+ f.write(json.dumps(updated_user_json))
\ No newline at end of file
|
{"golden_diff": "diff --git a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n--- a/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n+++ b/mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py\n@@ -5,7 +5,7 @@\n from mitoinstaller.commands import upgrade_mito_installer\n from mitoinstaller.installer_steps.installer_step import InstallerStep\n from mitoinstaller.log_utils import identify, log\n-from mitoinstaller.user_install import (USER_JSON_PATH,\n+from mitoinstaller.user_install import (USER_JSON_PATH, go_pro,\n try_create_user_json_file)\n \n \n@@ -14,13 +14,15 @@\n if not os.path.exists(USER_JSON_PATH):\n try_create_user_json_file(is_pro=('--pro' in sys.argv))\n \n- # Only try and log if we're not pro\n if not ('--pro' in sys.argv):\n+ # Only try and log if we're not pro\n identify()\n log('install_started', {\n 'mitoinstaller_version': __version__\n })\n-\n+ else:\n+ # If the user is going pro, make sure they are set to pro\n+ go_pro()\n \n INITIAL_INSTALLER_STEPS = [\n InstallerStep(\ndiff --git a/mitoinstaller/mitoinstaller/user_install.py b/mitoinstaller/mitoinstaller/user_install.py\n--- a/mitoinstaller/mitoinstaller/user_install.py\n+++ b/mitoinstaller/mitoinstaller/user_install.py\n@@ -97,3 +97,12 @@\n return len(user_json_object) <= len(USER_JSON_DEFAULT)\n except:\n return False\n+\n+def go_pro() -> None:\n+ with open(USER_JSON_PATH, 'r') as f:\n+ updated_user_json = json.loads(f.read())\n+ updated_user_json['mitosheet_telemetry'] = False\n+ updated_user_json['mitosheet_pro'] = True\n+ \n+ with open(USER_JSON_PATH, 'w') as f:\n+ f.write(json.dumps(updated_user_json))\n\\ No newline at end of file\n", "issue": "Allow the installer to go pro after the user has already installed!\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to '...'\r\n2. Click on '....'\r\n3. Scroll down to '....'\r\n4. See error\r\n\r\nPlease include the relevant dataset if the bug you encountered is dataset specific. Make sure to anonymize the data properly.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [e.g. Windows 11]\r\n - Browser [e.g. Chrome, Firefox]\r\n - Mito Version [e.g. 
0.3.331] (you can find this with `pip list`)\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "import json\nimport os\nfrom typing import Optional\nimport uuid\nfrom copy import deepcopy\n\nfrom mitoinstaller import __version__\n\n# Where all global .mito files are stored\nMITO_FOLDER = os.path.join(os.path.expanduser(\"~\"), '.mito')\n\n# The path of the user.json file, which notably is the same\n# path as the USER_JSON_PATH in mitosheet\nUSER_JSON_PATH = os.path.join(MITO_FOLDER, 'user.json')\n\ndef get_random_id() -> str:\n \"\"\"\n Creates a new random ID for the user, which for any given user,\n should only happen once.\n \"\"\"\n return str(uuid.uuid1())\n\ndef is_running_test() -> bool:\n \"\"\"\n A helper function that quickly returns if the current code is running inside \n of a test, which is useful for making sure we don't generate tons of logs \n \"\"\"\n running_pytests = \"PYTEST_CURRENT_TEST\" in os.environ\n running_ci = 'CI' in os.environ and os.environ['CI'] is not None\n\n return running_pytests or running_ci\n\n\n# NOTE: the installer only creates the static id for the user, and\n# otherwise does nothing with the user_json file. This makes sure\n# we keep the dependencies as simple as possible with this file. \n# We also add the telemetry, which we turn off if the user has a \n# pro subscription.\n# NOTE: if you delete a field from this, you need to update the \n# user_json_is_installer_default to handle this properly\nUSER_JSON_DEFAULT = {\n 'static_user_id': get_random_id() if not is_running_test() else 'github_action',\n 'mitosheet_telemetry': True,\n 'mitosheet_pro': False,\n}\n\ndef try_create_user_json_file(is_pro: bool=False) -> None:\n # Create the mito folder if it does not exist\n if not os.path.exists(MITO_FOLDER):\n os.mkdir(MITO_FOLDER)\n \n # We only create a user.json file if it does not exist\n if not os.path.exists(USER_JSON_PATH):\n with open(USER_JSON_PATH, 'w+') as f:\n # And write the default object\n default_user_json = deepcopy(USER_JSON_DEFAULT)\n default_user_json['mitosheet_telemetry'] = not is_pro\n default_user_json['mitosheet_pro'] = is_pro\n\n f.write(json.dumps(default_user_json))\n else:\n # Otherwise, we make sure to update the mitosheet_telemetry variable \n with open(USER_JSON_PATH, 'r') as f:\n updated_user_json = json.loads(f.read())\n updated_user_json['mitosheet_telemetry'] = not is_pro\n updated_user_json['mitosheet_pro'] = is_pro \n with open(USER_JSON_PATH, 'w') as f:\n f.write(json.dumps(updated_user_json))\n\n\ndef get_static_user_id() -> Optional[str]:\n try:\n with open(USER_JSON_PATH) as f:\n return json.load(f)['static_user_id']\n except: \n return None\n\ndef get_mitosheet_telemetry() -> bool:\n try:\n with open(USER_JSON_PATH) as f:\n return json.load(f)['mitosheet_telemetry']\n except: \n return True\n\ndef user_json_is_installer_default() -> bool:\n \"\"\"\n Returns True if the user.json file is the installer default, \n and otherwise returns False. 
\n\n This allows us to not call identify if we have already done\n so in the mitosheet package (which would overwrite things\n we don't want to).\n \"\"\"\n try:\n with open(USER_JSON_PATH) as f:\n user_json_object = json.load(f)\n return len(user_json_object) <= len(USER_JSON_DEFAULT)\n except:\n return False\n", "path": "mitoinstaller/mitoinstaller/user_install.py"}, {"content": "import os\nimport sys\n\nfrom mitoinstaller import __version__\nfrom mitoinstaller.commands import upgrade_mito_installer\nfrom mitoinstaller.installer_steps.installer_step import InstallerStep\nfrom mitoinstaller.log_utils import identify, log\nfrom mitoinstaller.user_install import (USER_JSON_PATH,\n try_create_user_json_file)\n\n\ndef initial_install_step_create_user():\n\n if not os.path.exists(USER_JSON_PATH):\n try_create_user_json_file(is_pro=('--pro' in sys.argv))\n\n # Only try and log if we're not pro\n if not ('--pro' in sys.argv):\n identify()\n log('install_started', {\n 'mitoinstaller_version': __version__\n })\n\n\nINITIAL_INSTALLER_STEPS = [\n InstallerStep(\n 'Create mito user',\n initial_install_step_create_user\n ),\n InstallerStep(\n 'Upgrade mitoinstaller',\n upgrade_mito_installer,\n optional=True\n ),\n]\n", "path": "mitoinstaller/mitoinstaller/installer_steps/initial_installer_steps.py"}]}
| 2,077 | 496 |
gh_patches_debug_4574
|
rasdani/github-patches
|
git_diff
|
qtile__qtile-2716
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
stack trace from Clipboard widget
```
2021-08-13 06:48:23,421 ERROR libqtile hook.py:fire():L381 Error in hook selection_change
Traceback (most recent call last):
File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/hook.py", line 379, in fire
i(*args, **kwargs)
File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py", line 82, in hook_change
if self.is_blacklisted(selection["owner"]):
File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py", line 69, in is_blacklisted
owner = xcbq.Window(self.qtile.core.conn, owner_id)
AttributeError: module 'libqtile.backend.x11.xcbq' has no attribute 'Window'
```
</issue>
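A minimal sketch of the lookup the widget is attempting, assuming the X window wrapper is exposed as `xcbq.window.XWindow` rather than as a module-level `Window` class (which is what the traceback shows is missing); `qtile` and `owner_id` are placeholders for the running instance and the selection owner id:
```
# Hypothetical reproduction of the failing fallback path in is_blacklisted().
from libqtile.backend.x11 import xcbq

def lookup_owner(qtile, owner_id):
    # xcbq.Window does not exist at module level, hence the AttributeError above;
    # the wrapper class is assumed to live at xcbq.window.XWindow instead.
    if owner_id in qtile.windows_map:
        return qtile.windows_map[owner_id].window
    return xcbq.window.XWindow(qtile.core.conn, owner_id)
```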
<code>
[start of libqtile/widget/clipboard.py]
1 # Copyright (c) 2014 Sean Vig
2 # Copyright (c) 2014 roger
3 # Copyright (c) 2014 Adi Sieker
4 # Copyright (c) 2014 Tycho Andersen
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # SOFTWARE.
23
24 from libqtile import bar, hook
25 from libqtile.backend.x11 import xcbq
26 from libqtile.widget import base
27
28
29 class Clipboard(base._TextBox):
30 """Display current clipboard contents"""
31 orientations = base.ORIENTATION_HORIZONTAL
32 defaults = [
33 ("selection", "CLIPBOARD",
34 "the selection to display(CLIPBOARD or PRIMARY)"),
35 ("max_width", 10, "maximum number of characters to display "
36 "(None for all, useful when width is bar.STRETCH)"),
37 ("timeout", 10,
38 "Default timeout (seconds) for display text, None to keep forever"),
39 ("blacklist", ["keepassx"],
40 "list with blacklisted wm_class, sadly not every "
41 "clipboard window sets them, keepassx does."
42 "Clipboard contents from blacklisted wm_classes "
43 "will be replaced by the value of ``blacklist_text``."),
44 ("blacklist_text", "***********",
45 "text to display when the wm_class is blacklisted")
46 ]
47
48 def __init__(self, width=bar.CALCULATED, **config):
49 base._TextBox.__init__(self, "", width, **config)
50 self.add_defaults(Clipboard.defaults)
51 self.timeout_id = None
52
53 def _configure(self, qtile, bar):
54 base._TextBox._configure(self, qtile, bar)
55 self.text = ""
56 self.setup_hooks()
57
58 def clear(self, *args):
59 self.text = ""
60 self.bar.draw()
61
62 def is_blacklisted(self, owner_id):
63 if not self.blacklist:
64 return False
65
66 if owner_id in self.qtile.windows_map:
67 owner = self.qtile.windows_map[owner_id].window
68 else:
69 owner = xcbq.Window(self.qtile.core.conn, owner_id)
70
71 owner_class = owner.get_wm_class()
72 if owner_class:
73 for wm_class in self.blacklist:
74 if wm_class in owner_class:
75 return True
76
77 def setup_hooks(self):
78 def hook_change(name, selection):
79 if name != self.selection:
80 return
81
82 if self.is_blacklisted(selection["owner"]):
83 text = self.blacklist_text
84 else:
85 text = selection["selection"].replace("\n", " ")
86
87 text = text.strip()
88 if self.max_width is not None and len(text) > self.max_width:
89 text = text[:self.max_width] + "..."
90
91 self.text = text
92
93 if self.timeout_id:
94 self.timeout_id.cancel()
95 self.timeout_id = None
96
97 if self.timeout:
98 self.timeout_id = self.timeout_add(self.timeout, self.clear)
99 self.bar.draw()
100
101 def hook_notify(name, selection):
102 if name != self.selection:
103 return
104
105 if self.timeout_id:
106 self.timeout_id.cancel()
107 self.timeout_id = None
108
109 # only clear if don't change don't apply in .5 seconds
110 if self.timeout:
111 self.timeout_id = self.timeout_add(self.timeout, self.clear)
112 self.bar.draw()
113
114 hook.subscribe.selection_notify(hook_notify)
115 hook.subscribe.selection_change(hook_change)
116
[end of libqtile/widget/clipboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libqtile/widget/clipboard.py b/libqtile/widget/clipboard.py
--- a/libqtile/widget/clipboard.py
+++ b/libqtile/widget/clipboard.py
@@ -66,7 +66,7 @@
if owner_id in self.qtile.windows_map:
owner = self.qtile.windows_map[owner_id].window
else:
- owner = xcbq.Window(self.qtile.core.conn, owner_id)
+ owner = xcbq.window.XWindow(self.qtile.core.conn, owner_id)
owner_class = owner.get_wm_class()
if owner_class:
|
{"golden_diff": "diff --git a/libqtile/widget/clipboard.py b/libqtile/widget/clipboard.py\n--- a/libqtile/widget/clipboard.py\n+++ b/libqtile/widget/clipboard.py\n@@ -66,7 +66,7 @@\n if owner_id in self.qtile.windows_map:\n owner = self.qtile.windows_map[owner_id].window\n else:\n- owner = xcbq.Window(self.qtile.core.conn, owner_id)\n+ owner = xcbq.window.XWindow(self.qtile.core.conn, owner_id)\n \n owner_class = owner.get_wm_class()\n if owner_class:\n", "issue": "stack trace from Clipboard widget\n```\r\n2021-08-13 06:48:23,421 ERROR libqtile hook.py:fire():L381 Error in hook selection_change\r\nTraceback (most recent call last):\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/hook.py\", line 379, in fire\r\n i(*args, **kwargs)\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py\", line 82, in hook_change\r\n if self.is_blacklisted(selection[\"owner\"]):\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py\", line 69, in is_blacklisted\r\n owner = xcbq.Window(self.qtile.core.conn, owner_id)\r\nAttributeError: module 'libqtile.backend.x11.xcbq' has no attribute 'Window'\r\n```\n", "before_files": [{"content": "# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Adi Sieker\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook\nfrom libqtile.backend.x11 import xcbq\nfrom libqtile.widget import base\n\n\nclass Clipboard(base._TextBox):\n \"\"\"Display current clipboard contents\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"selection\", \"CLIPBOARD\",\n \"the selection to display(CLIPBOARD or PRIMARY)\"),\n (\"max_width\", 10, \"maximum number of characters to display \"\n \"(None for all, useful when width is bar.STRETCH)\"),\n (\"timeout\", 10,\n \"Default timeout (seconds) for display text, None to keep forever\"),\n (\"blacklist\", [\"keepassx\"],\n \"list with blacklisted wm_class, sadly not every \"\n \"clipboard window sets them, keepassx does.\"\n \"Clipboard contents from blacklisted wm_classes \"\n \"will be replaced by the value of ``blacklist_text``.\"),\n (\"blacklist_text\", \"***********\",\n \"text to display when the wm_class is blacklisted\")\n ]\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n self.add_defaults(Clipboard.defaults)\n self.timeout_id = None\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = \"\"\n self.setup_hooks()\n\n def clear(self, *args):\n self.text = \"\"\n self.bar.draw()\n\n def is_blacklisted(self, owner_id):\n if not self.blacklist:\n return False\n\n if owner_id in self.qtile.windows_map:\n owner = self.qtile.windows_map[owner_id].window\n else:\n owner = xcbq.Window(self.qtile.core.conn, owner_id)\n\n owner_class = owner.get_wm_class()\n if owner_class:\n for wm_class in self.blacklist:\n if wm_class in owner_class:\n return True\n\n def setup_hooks(self):\n def hook_change(name, selection):\n if name != self.selection:\n return\n\n if self.is_blacklisted(selection[\"owner\"]):\n text = self.blacklist_text\n else:\n text = selection[\"selection\"].replace(\"\\n\", \" \")\n\n text = text.strip()\n if self.max_width is not None and len(text) > self.max_width:\n text = text[:self.max_width] + \"...\"\n\n self.text = text\n\n if self.timeout_id:\n self.timeout_id.cancel()\n self.timeout_id = None\n\n if self.timeout:\n self.timeout_id = self.timeout_add(self.timeout, self.clear)\n self.bar.draw()\n\n def hook_notify(name, selection):\n if name != self.selection:\n return\n\n if self.timeout_id:\n self.timeout_id.cancel()\n self.timeout_id = None\n\n # only clear if don't change don't apply in .5 seconds\n if self.timeout:\n self.timeout_id = self.timeout_add(self.timeout, self.clear)\n self.bar.draw()\n\n hook.subscribe.selection_notify(hook_notify)\n hook.subscribe.selection_change(hook_change)\n", "path": "libqtile/widget/clipboard.py"}]}
| 1,946 | 130 |
gh_patches_debug_3982
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-48536
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix logic bug where Group.Status is archived_until_escalating but the GroupInbox.Status is escalating

## Objective:
We are transitioning the GroupInbox status to escalating, but we are not transitioning the Group's status, so the two records disagree.
</issue>
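A minimal sketch of the mismatch, assuming standard Django ORM semantics (attribute assignment only changes the in-memory instance until `save()` is called); `group_id` is a placeholder:
```
# Hypothetical illustration of the inconsistency described above.
from sentry.models import Group
from sentry.models.group import GroupStatus
from sentry.models.groupinbox import GroupInboxReason, add_group_to_inbox
from sentry.types.group import GroupSubStatus

group = Group.objects.get(id=group_id)       # stored with the archived-until-escalating status
group.substatus = GroupSubStatus.ESCALATING
group.status = GroupStatus.UNRESOLVED        # mutates the Python object only
add_group_to_inbox(group, GroupInboxReason.ESCALATING)  # persists a GroupInbox row

# Without a group.save() call, a fresh Group.objects.get(id=group_id) still reports
# the archived status while the inbox row already marks the group as escalating.
```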
<code>
[start of src/sentry/issues/escalating.py]
1 """This module has the logic for querying Snuba for the hourly event count for a list of groups.
2 This is later used for generating group forecasts for determining when a group may be escalating.
3 """
4
5 import logging
6 from collections import defaultdict
7 from datetime import datetime, timedelta
8 from typing import Dict, List, Optional, Sequence, Tuple, TypedDict
9
10 from snuba_sdk import (
11 Column,
12 Condition,
13 Direction,
14 Entity,
15 Function,
16 Limit,
17 Offset,
18 Op,
19 OrderBy,
20 Query,
21 Request,
22 )
23
24 from sentry import analytics
25 from sentry.issues.escalating_group_forecast import EscalatingGroupForecast
26 from sentry.issues.escalating_issues_alg import GroupCount
27 from sentry.issues.grouptype import GroupCategory
28 from sentry.models import Group
29 from sentry.models.group import GroupStatus
30 from sentry.models.groupinbox import GroupInboxReason, add_group_to_inbox
31 from sentry.snuba.dataset import Dataset, EntityKey
32 from sentry.types.group import GroupSubStatus
33 from sentry.utils.cache import cache
34 from sentry.utils.snuba import raw_snql_query
35
36 logger = logging.getLogger(__name__)
37
38 __all__ = ["query_groups_past_counts", "parse_groups_past_counts"]
39
40 REFERRER = "sentry.issues.escalating"
41 ELEMENTS_PER_SNUBA_PAGE = 10000 # This is the maximum value for Snuba
42 # The amount of data needed to generate a group forecast
43 BUCKETS_PER_GROUP = 7 * 24
44 ONE_WEEK_DURATION = 7
45 IS_ESCALATING_REFERRER = "sentry.issues.escalating.is_escalating"
46 GROUP_HOURLY_COUNT_TTL = 60
47
48 GroupsCountResponse = TypedDict(
49 "GroupsCountResponse",
50 {"group_id": int, "hourBucket": str, "count()": int, "project_id": int},
51 )
52
53 ParsedGroupsCount = Dict[int, GroupCount]
54
55
56 def query_groups_past_counts(groups: Sequence[Group]) -> List[GroupsCountResponse]:
57 """Query Snuba for the counts for every group bucketed into hours.
58
59 It optimizes the query by guaranteeing that we look at group_ids that are from the same project id.
60 This is important for Snuba as the data is stored in blocks related to the project id.
61
62 We maximize the number of projects and groups to reduce the total number of Snuba queries.
63 Each project may not have enough groups in order to reach the max number of returned
64 elements (ELEMENTS_PER_SNUBA_PAGE), thus, projects with few groups should be grouped together until
65 we get at least a certain number of groups.
66
67 NOTE: Groups with less than the maximum number of buckets (think of groups with just 1 event or less
68 than 7 days old) will skew the optimization since we may only get one page and less elements than the max
69 ELEMENTS_PER_SNUBA_PAGE.
70 """
71 all_results = [] # type: ignore[var-annotated]
72 if not groups:
73 return all_results
74
75 start_date, end_date = _start_and_end_dates()
76
77 # Error groups use the events dataset while profile and perf groups use the issue platform dataset
78 error_groups: List[Group] = []
79 other_groups: List[Group] = []
80 for g in groups:
81 if g.issue_category == GroupCategory.ERROR:
82 error_groups.append(g)
83 else:
84 other_groups.append(g)
85
86 all_results += _process_groups(error_groups, start_date, end_date, GroupCategory.ERROR)
87 all_results += _process_groups(other_groups, start_date, end_date)
88
89 return all_results
90
91
92 def _process_groups(
93 groups: Sequence[Group],
94 start_date: datetime,
95 end_date: datetime,
96 category: Optional[GroupCategory] = None,
97 ) -> List[GroupsCountResponse]:
98 """Given a list of groups, query Snuba for their hourly bucket count.
99 The category defines which Snuba dataset and entity we query."""
100 all_results = [] # type: ignore[var-annotated]
101 if not groups:
102 return all_results
103
104 group_ids_by_project = _extract_project_and_group_ids(groups)
105 proj_ids, group_ids = [], []
106 processed_projects = 0
107 total_projects_count = len(group_ids_by_project)
108 organization_id = groups[0].project.organization.id
109
110 # This iteration guarantees that all groups for a project will be queried in the same call
111 # and only one page where the groups could be mixed with groups from another project
112 # Iterating over the sorted keys guarantees results for tests
113 for proj_id in sorted(group_ids_by_project.keys()):
114 _group_ids = group_ids_by_project[proj_id]
115 # Add them to the list of projects and groups to query
116 proj_ids.append(proj_id)
117 group_ids += _group_ids
118 processed_projects += 1
119 potential_num_elements = len(_group_ids) * BUCKETS_PER_GROUP
120 # This is trying to maximize the number of groups on the first page
121 if (
122 processed_projects < total_projects_count
123 and potential_num_elements < ELEMENTS_PER_SNUBA_PAGE
124 ):
125 continue
126
127 # TODO: Write this as a dispatcher type task and fire off a separate task per proj_ids
128 all_results += _query_with_pagination(
129 organization_id, proj_ids, group_ids, start_date, end_date, category
130 )
131 # We're ready for a new set of projects and ids
132 proj_ids, group_ids = [], []
133
134 return all_results
135
136
137 def _query_with_pagination(
138 organization_id: int,
139 project_ids: Sequence[int],
140 group_ids: Sequence[int],
141 start_date: datetime,
142 end_date: datetime,
143 category: Optional[GroupCategory],
144 ) -> List[GroupsCountResponse]:
145 """Query Snuba for event counts for the given list of project ids and groups ids in
146 a time range."""
147 all_results = []
148 offset = 0
149 while True:
150 query = _generate_query(project_ids, group_ids, offset, start_date, end_date, category)
151 request = Request(
152 dataset=_issue_category_dataset(category),
153 app_id=REFERRER,
154 query=query,
155 tenant_ids={"referrer": REFERRER, "organization_id": organization_id},
156 )
157 results = raw_snql_query(request, referrer=REFERRER)["data"]
158 all_results += results
159 offset += ELEMENTS_PER_SNUBA_PAGE
160 if not results or len(results) < ELEMENTS_PER_SNUBA_PAGE:
161 break
162
163 return all_results
164
165
166 def _generate_query(
167 project_ids: Sequence[int],
168 group_ids: Sequence[int],
169 offset: int,
170 start_date: datetime,
171 end_date: datetime,
172 category: Optional[GroupCategory],
173 ) -> Query:
174 """This simply generates a query based on the passed parameters"""
175 group_id_col = Column("group_id")
176 proj_id_col = Column("project_id")
177 return Query(
178 match=Entity(_issue_category_entity(category)),
179 select=[
180 proj_id_col,
181 group_id_col,
182 Function("toStartOfHour", [Column("timestamp")], "hourBucket"),
183 Function("count", []),
184 ],
185 groupby=[proj_id_col, group_id_col, Column("hourBucket")],
186 where=[
187 Condition(proj_id_col, Op.IN, Function("tuple", project_ids)),
188 Condition(Column("group_id"), Op.IN, Function("tuple", group_ids)),
189 Condition(Column("timestamp"), Op.GTE, start_date),
190 Condition(Column("timestamp"), Op.LT, end_date),
191 ],
192 limit=Limit(ELEMENTS_PER_SNUBA_PAGE),
193 offset=Offset(offset),
194 orderby=[
195 OrderBy(proj_id_col, Direction.ASC),
196 OrderBy(group_id_col, Direction.ASC),
197 OrderBy(Column("hourBucket"), Direction.ASC),
198 ],
199 )
200
201
202 def _start_and_end_dates(hours: int = BUCKETS_PER_GROUP) -> Tuple[datetime, datetime]:
203 """Return the start and end date of N hours time range."""
204 end_datetime = datetime.now()
205 return end_datetime - timedelta(hours=hours), end_datetime
206
207
208 def _extract_project_and_group_ids(groups: Sequence[Group]) -> Dict[int, List[int]]:
209 """Return all project and group IDs from a list of Group"""
210 group_ids_by_project: Dict[int, List[int]] = defaultdict(list)
211 for group in groups:
212 group_ids_by_project[group.project_id].append(group.id)
213
214 return group_ids_by_project
215
216
217 def get_group_hourly_count(group: Group) -> int:
218 """Return the number of events a group has had today in the last hour"""
219 key = f"hourly-group-count:{group.project.id}:{group.id}"
220 hourly_count = cache.get(key)
221
222 if hourly_count is None:
223 now = datetime.now()
224 current_hour = now.replace(minute=0, second=0, microsecond=0)
225 query = Query(
226 match=Entity(_issue_category_entity(group.issue_category)),
227 select=[
228 Function("count", []),
229 ],
230 where=[
231 Condition(Column("project_id"), Op.EQ, group.project.id),
232 Condition(Column("group_id"), Op.EQ, group.id),
233 Condition(Column("timestamp"), Op.GTE, current_hour),
234 Condition(Column("timestamp"), Op.LT, now),
235 ],
236 )
237 request = Request(
238 dataset=_issue_category_dataset(group.issue_category),
239 app_id=IS_ESCALATING_REFERRER,
240 query=query,
241 tenant_ids={
242 "referrer": IS_ESCALATING_REFERRER,
243 "organization_id": group.project.organization.id,
244 },
245 )
246 hourly_count = int(
247 raw_snql_query(request, referrer=IS_ESCALATING_REFERRER)["data"][0]["count()"]
248 )
249 cache.set(key, hourly_count, GROUP_HOURLY_COUNT_TTL)
250 return int(hourly_count)
251
252
253 def is_escalating(group: Group) -> bool:
254 """Return boolean depending on if the group is escalating or not"""
255 group_hourly_count = get_group_hourly_count(group)
256 forecast_today = EscalatingGroupForecast.fetch_todays_forecast(group.project.id, group.id)
257     # Check if current event occurrence is greater than forecast for today's date
258 if group_hourly_count > forecast_today:
259 group.substatus = GroupSubStatus.ESCALATING
260 group.status = GroupStatus.UNRESOLVED
261 add_group_to_inbox(group, GroupInboxReason.ESCALATING)
262
263 analytics.record(
264 "issue.escalating",
265 organization_id=group.project.organization.id,
266 project_id=group.project.id,
267 group_id=group.id,
268 )
269 return True
270 return False
271
272
273 def parse_groups_past_counts(response: Sequence[GroupsCountResponse]) -> ParsedGroupsCount:
274 """
275 Return the parsed snuba response for groups past counts to be used in generate_issue_forecast.
276 ParsedGroupCount is of the form {<group_id>: {"intervals": [str], "data": [int]}}.
277
278 `response`: Snuba response for group event counts
279 """
280 group_counts: ParsedGroupsCount = {}
281 group_ids_list = group_counts.keys()
282 for data in response:
283 group_id = data["group_id"]
284 if group_id not in group_ids_list:
285 group_counts[group_id] = {
286 "intervals": [data["hourBucket"]],
287 "data": [data["count()"]],
288 }
289 else:
290 group_counts[group_id]["intervals"].append(data["hourBucket"])
291 group_counts[group_id]["data"].append(data["count()"])
292 return group_counts
293
294
295 def _issue_category_dataset(category: Optional[GroupCategory]) -> Dataset:
296 return Dataset.Events.value if category == GroupCategory.ERROR else Dataset.IssuePlatform.value
297
298
299 def _issue_category_entity(category: Optional[GroupCategory]) -> EntityKey:
300 return (
301 EntityKey.Events.value if category == GroupCategory.ERROR else EntityKey.IssuePlatform.value
302 )
303
[end of src/sentry/issues/escalating.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/sentry/issues/escalating.py b/src/sentry/issues/escalating.py
--- a/src/sentry/issues/escalating.py
+++ b/src/sentry/issues/escalating.py
@@ -258,6 +258,7 @@
if group_hourly_count > forecast_today:
group.substatus = GroupSubStatus.ESCALATING
group.status = GroupStatus.UNRESOLVED
+ group.save()
add_group_to_inbox(group, GroupInboxReason.ESCALATING)
analytics.record(
|
{"golden_diff": "diff --git a/src/sentry/issues/escalating.py b/src/sentry/issues/escalating.py\n--- a/src/sentry/issues/escalating.py\n+++ b/src/sentry/issues/escalating.py\n@@ -258,6 +258,7 @@\n if group_hourly_count > forecast_today:\n group.substatus = GroupSubStatus.ESCALATING\n group.status = GroupStatus.UNRESOLVED\n+ group.save()\n add_group_to_inbox(group, GroupInboxReason.ESCALATING)\n \n analytics.record(\n", "issue": "Fix logic bug where Group.Status is archived_until_escalating but the GroupInbox.Status is escalating\n\n\n\n\n## Objective:\nWe are not transitioning the GroupStatus to escalating but transitioned GroupInboxStatus.\n", "before_files": [{"content": "\"\"\"This module has the logic for querying Snuba for the hourly event count for a list of groups.\nThis is later used for generating group forecasts for determining when a group may be escalating.\n\"\"\"\n\nimport logging\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom typing import Dict, List, Optional, Sequence, Tuple, TypedDict\n\nfrom snuba_sdk import (\n Column,\n Condition,\n Direction,\n Entity,\n Function,\n Limit,\n Offset,\n Op,\n OrderBy,\n Query,\n Request,\n)\n\nfrom sentry import analytics\nfrom sentry.issues.escalating_group_forecast import EscalatingGroupForecast\nfrom sentry.issues.escalating_issues_alg import GroupCount\nfrom sentry.issues.grouptype import GroupCategory\nfrom sentry.models import Group\nfrom sentry.models.group import GroupStatus\nfrom sentry.models.groupinbox import GroupInboxReason, add_group_to_inbox\nfrom sentry.snuba.dataset import Dataset, EntityKey\nfrom sentry.types.group import GroupSubStatus\nfrom sentry.utils.cache import cache\nfrom sentry.utils.snuba import raw_snql_query\n\nlogger = logging.getLogger(__name__)\n\n__all__ = [\"query_groups_past_counts\", \"parse_groups_past_counts\"]\n\nREFERRER = \"sentry.issues.escalating\"\nELEMENTS_PER_SNUBA_PAGE = 10000 # This is the maximum value for Snuba\n# The amount of data needed to generate a group forecast\nBUCKETS_PER_GROUP = 7 * 24\nONE_WEEK_DURATION = 7\nIS_ESCALATING_REFERRER = \"sentry.issues.escalating.is_escalating\"\nGROUP_HOURLY_COUNT_TTL = 60\n\nGroupsCountResponse = TypedDict(\n \"GroupsCountResponse\",\n {\"group_id\": int, \"hourBucket\": str, \"count()\": int, \"project_id\": int},\n)\n\nParsedGroupsCount = Dict[int, GroupCount]\n\n\ndef query_groups_past_counts(groups: Sequence[Group]) -> List[GroupsCountResponse]:\n \"\"\"Query Snuba for the counts for every group bucketed into hours.\n\n It optimizes the query by guaranteeing that we look at group_ids that are from the same project id.\n This is important for Snuba as the data is stored in blocks related to the project id.\n\n We maximize the number of projects and groups to reduce the total number of Snuba queries.\n Each project may not have enough groups in order to reach the max number of returned\n elements (ELEMENTS_PER_SNUBA_PAGE), thus, projects with few groups should be grouped together until\n we get at least a certain number of groups.\n\n NOTE: Groups with less than the maximum number of buckets (think of groups with just 1 event or less\n than 7 days old) will skew the optimization since we may only get one page and less elements than the max\n ELEMENTS_PER_SNUBA_PAGE.\n \"\"\"\n all_results = [] # type: ignore[var-annotated]\n if not groups:\n return all_results\n\n start_date, end_date = _start_and_end_dates()\n\n # Error groups use the events dataset while profile and perf groups use the issue platform 
dataset\n error_groups: List[Group] = []\n other_groups: List[Group] = []\n for g in groups:\n if g.issue_category == GroupCategory.ERROR:\n error_groups.append(g)\n else:\n other_groups.append(g)\n\n all_results += _process_groups(error_groups, start_date, end_date, GroupCategory.ERROR)\n all_results += _process_groups(other_groups, start_date, end_date)\n\n return all_results\n\n\ndef _process_groups(\n groups: Sequence[Group],\n start_date: datetime,\n end_date: datetime,\n category: Optional[GroupCategory] = None,\n) -> List[GroupsCountResponse]:\n \"\"\"Given a list of groups, query Snuba for their hourly bucket count.\n The category defines which Snuba dataset and entity we query.\"\"\"\n all_results = [] # type: ignore[var-annotated]\n if not groups:\n return all_results\n\n group_ids_by_project = _extract_project_and_group_ids(groups)\n proj_ids, group_ids = [], []\n processed_projects = 0\n total_projects_count = len(group_ids_by_project)\n organization_id = groups[0].project.organization.id\n\n # This iteration guarantees that all groups for a project will be queried in the same call\n # and only one page where the groups could be mixed with groups from another project\n # Iterating over the sorted keys guarantees results for tests\n for proj_id in sorted(group_ids_by_project.keys()):\n _group_ids = group_ids_by_project[proj_id]\n # Add them to the list of projects and groups to query\n proj_ids.append(proj_id)\n group_ids += _group_ids\n processed_projects += 1\n potential_num_elements = len(_group_ids) * BUCKETS_PER_GROUP\n # This is trying to maximize the number of groups on the first page\n if (\n processed_projects < total_projects_count\n and potential_num_elements < ELEMENTS_PER_SNUBA_PAGE\n ):\n continue\n\n # TODO: Write this as a dispatcher type task and fire off a separate task per proj_ids\n all_results += _query_with_pagination(\n organization_id, proj_ids, group_ids, start_date, end_date, category\n )\n # We're ready for a new set of projects and ids\n proj_ids, group_ids = [], []\n\n return all_results\n\n\ndef _query_with_pagination(\n organization_id: int,\n project_ids: Sequence[int],\n group_ids: Sequence[int],\n start_date: datetime,\n end_date: datetime,\n category: Optional[GroupCategory],\n) -> List[GroupsCountResponse]:\n \"\"\"Query Snuba for event counts for the given list of project ids and groups ids in\n a time range.\"\"\"\n all_results = []\n offset = 0\n while True:\n query = _generate_query(project_ids, group_ids, offset, start_date, end_date, category)\n request = Request(\n dataset=_issue_category_dataset(category),\n app_id=REFERRER,\n query=query,\n tenant_ids={\"referrer\": REFERRER, \"organization_id\": organization_id},\n )\n results = raw_snql_query(request, referrer=REFERRER)[\"data\"]\n all_results += results\n offset += ELEMENTS_PER_SNUBA_PAGE\n if not results or len(results) < ELEMENTS_PER_SNUBA_PAGE:\n break\n\n return all_results\n\n\ndef _generate_query(\n project_ids: Sequence[int],\n group_ids: Sequence[int],\n offset: int,\n start_date: datetime,\n end_date: datetime,\n category: Optional[GroupCategory],\n) -> Query:\n \"\"\"This simply generates a query based on the passed parameters\"\"\"\n group_id_col = Column(\"group_id\")\n proj_id_col = Column(\"project_id\")\n return Query(\n match=Entity(_issue_category_entity(category)),\n select=[\n proj_id_col,\n group_id_col,\n Function(\"toStartOfHour\", [Column(\"timestamp\")], \"hourBucket\"),\n Function(\"count\", []),\n ],\n groupby=[proj_id_col, group_id_col, 
Column(\"hourBucket\")],\n where=[\n Condition(proj_id_col, Op.IN, Function(\"tuple\", project_ids)),\n Condition(Column(\"group_id\"), Op.IN, Function(\"tuple\", group_ids)),\n Condition(Column(\"timestamp\"), Op.GTE, start_date),\n Condition(Column(\"timestamp\"), Op.LT, end_date),\n ],\n limit=Limit(ELEMENTS_PER_SNUBA_PAGE),\n offset=Offset(offset),\n orderby=[\n OrderBy(proj_id_col, Direction.ASC),\n OrderBy(group_id_col, Direction.ASC),\n OrderBy(Column(\"hourBucket\"), Direction.ASC),\n ],\n )\n\n\ndef _start_and_end_dates(hours: int = BUCKETS_PER_GROUP) -> Tuple[datetime, datetime]:\n \"\"\"Return the start and end date of N hours time range.\"\"\"\n end_datetime = datetime.now()\n return end_datetime - timedelta(hours=hours), end_datetime\n\n\ndef _extract_project_and_group_ids(groups: Sequence[Group]) -> Dict[int, List[int]]:\n \"\"\"Return all project and group IDs from a list of Group\"\"\"\n group_ids_by_project: Dict[int, List[int]] = defaultdict(list)\n for group in groups:\n group_ids_by_project[group.project_id].append(group.id)\n\n return group_ids_by_project\n\n\ndef get_group_hourly_count(group: Group) -> int:\n \"\"\"Return the number of events a group has had today in the last hour\"\"\"\n key = f\"hourly-group-count:{group.project.id}:{group.id}\"\n hourly_count = cache.get(key)\n\n if hourly_count is None:\n now = datetime.now()\n current_hour = now.replace(minute=0, second=0, microsecond=0)\n query = Query(\n match=Entity(_issue_category_entity(group.issue_category)),\n select=[\n Function(\"count\", []),\n ],\n where=[\n Condition(Column(\"project_id\"), Op.EQ, group.project.id),\n Condition(Column(\"group_id\"), Op.EQ, group.id),\n Condition(Column(\"timestamp\"), Op.GTE, current_hour),\n Condition(Column(\"timestamp\"), Op.LT, now),\n ],\n )\n request = Request(\n dataset=_issue_category_dataset(group.issue_category),\n app_id=IS_ESCALATING_REFERRER,\n query=query,\n tenant_ids={\n \"referrer\": IS_ESCALATING_REFERRER,\n \"organization_id\": group.project.organization.id,\n },\n )\n hourly_count = int(\n raw_snql_query(request, referrer=IS_ESCALATING_REFERRER)[\"data\"][0][\"count()\"]\n )\n cache.set(key, hourly_count, GROUP_HOURLY_COUNT_TTL)\n return int(hourly_count)\n\n\ndef is_escalating(group: Group) -> bool:\n \"\"\"Return boolean depending on if the group is escalating or not\"\"\"\n group_hourly_count = get_group_hourly_count(group)\n forecast_today = EscalatingGroupForecast.fetch_todays_forecast(group.project.id, group.id)\n # Check if current event occurance is greater than forecast for today's date\n if group_hourly_count > forecast_today:\n group.substatus = GroupSubStatus.ESCALATING\n group.status = GroupStatus.UNRESOLVED\n add_group_to_inbox(group, GroupInboxReason.ESCALATING)\n\n analytics.record(\n \"issue.escalating\",\n organization_id=group.project.organization.id,\n project_id=group.project.id,\n group_id=group.id,\n )\n return True\n return False\n\n\ndef parse_groups_past_counts(response: Sequence[GroupsCountResponse]) -> ParsedGroupsCount:\n \"\"\"\n Return the parsed snuba response for groups past counts to be used in generate_issue_forecast.\n ParsedGroupCount is of the form {<group_id>: {\"intervals\": [str], \"data\": [int]}}.\n\n `response`: Snuba response for group event counts\n \"\"\"\n group_counts: ParsedGroupsCount = {}\n group_ids_list = group_counts.keys()\n for data in response:\n group_id = data[\"group_id\"]\n if group_id not in group_ids_list:\n group_counts[group_id] = {\n \"intervals\": [data[\"hourBucket\"]],\n 
\"data\": [data[\"count()\"]],\n }\n else:\n group_counts[group_id][\"intervals\"].append(data[\"hourBucket\"])\n group_counts[group_id][\"data\"].append(data[\"count()\"])\n return group_counts\n\n\ndef _issue_category_dataset(category: Optional[GroupCategory]) -> Dataset:\n return Dataset.Events.value if category == GroupCategory.ERROR else Dataset.IssuePlatform.value\n\n\ndef _issue_category_entity(category: Optional[GroupCategory]) -> EntityKey:\n return (\n EntityKey.Events.value if category == GroupCategory.ERROR else EntityKey.IssuePlatform.value\n )\n", "path": "src/sentry/issues/escalating.py"}]}
| 4,034 | 119 |
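The one-line sentry fix above works because ORM-style attribute writes only live in memory until `save()` issues an UPDATE; without it, the escalating transition set on `group` never reaches the database even though the inbox entry is written, which matches the issue's complaint that the Group status and the GroupInbox status disagree. A minimal self-contained sketch of that pattern (the `FakeGroup`/`store` names are stand-ins, not Sentry code):

```python
# Stand-in for an ORM model: attribute writes stay in memory until save().
class FakeGroup:
    def __init__(self, store):
        self._store = store
        self.status = store["status"]

    def save(self):
        # Persist the in-memory attribute, like a Django UPDATE.
        self._store["status"] = self.status


store = {"status": "archived_until_escalating"}
group = FakeGroup(store)

group.status = "unresolved"  # the escalating transition, in memory only
assert store["status"] == "archived_until_escalating"  # still stale: nothing was saved

group.save()  # the call the golden diff adds
assert store["status"] == "unresolved"
```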
gh_patches_debug_586
|
rasdani/github-patches
|
git_diff
|
pex-tool__pex-1275
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.34
On the docket:
+ [x] Allow command-line arguments to be read from a file #1271
+ [x] Issue when running a module inside pex file #1018
+ [x] Guard against concurrent re-imports. #1270
+ [x] Ensure Pip logs to stderr. #1268
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.33"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.33"
+__version__ = "2.1.34"
|
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.33\"\n+__version__ = \"2.1.34\"\n", "issue": "Release 2.1.34\nOn the docket:\r\n+ [x] Allow command-line arguments to be read from a file #1271\r\n+ [x] Issue when running a module inside pex file #1018\r\n+ [x] Guard against concurrent re-imports. #1270\r\n+ [x] Ensure Pip logs to stderr. #1268\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.33\"\n", "path": "pex/version.py"}]}
| 669 | 96 |
gh_patches_debug_1723
|
rasdani/github-patches
|
git_diff
|
ansible__ansible-40863
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
homebrew_tap fails to tap caskroom/cask now.
<!---
Verify first that your issue/request is not already reported on GitHub.
THIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED.
Also test if the latest release, and devel branch are affected too.
ALWAYS add information AFTER (OUTSIDE) these html comments.
Otherwise it may end up being automatically closed by our bot. -->
##### SUMMARY
<!--- Explain the problem briefly -->
Running the task `homebrew_tap: name=caskroom/cask` fails because caskroom/cask has migrated to homebrew/cask. See https://github.com/Homebrew/brew/pull/4210
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
<!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature.
Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path-->
homebrew_tap
##### ANSIBLE VERSION
<!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below -->
```
```
##### CONFIGURATION
<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).-->
```
(14:49:33) C02W513SHTD8:tmp aso$ ansible-config dump --only-changed
(14:49:35) C02W513SHTD8:tmp aso$
```
##### OS / ENVIRONMENT
<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.-->
From macOS 10.13.4
To macOS 10.13.4
##### STEPS TO REPRODUCE
<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used. -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: install homebrew cask
homebrew_tap: name=caskroom/cask
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
The task should have succeeded and running `brew tap` should have resulted in caskroom/cask being listed.
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
The task failed even though the tap was actually added. Running `brew tap` results in homebrew/cask being listed.
<!--- Paste verbatim command output between quotes below -->
```
Alberts-Mac:bin bambooagent$ brew tap
homebrew/cask
homebrew/core
```
</issue>
<code>
[start of lib/ansible/modules/packaging/os/homebrew_tap.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # (c) 2013, Daniel Jaouen <[email protected]>
5 # (c) 2016, Indrajit Raychaudhuri <[email protected]>
6 #
7 # Based on homebrew (Andrew Dunham <[email protected]>)
8 #
9 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
10
11 from __future__ import absolute_import, division, print_function
12 __metaclass__ = type
13
14
15 ANSIBLE_METADATA = {'metadata_version': '1.1',
16 'status': ['preview'],
17 'supported_by': 'community'}
18
19
20 DOCUMENTATION = '''
21 ---
22 module: homebrew_tap
23 author:
24 - "Indrajit Raychaudhuri (@indrajitr)"
25 - "Daniel Jaouen (@danieljaouen)"
26 short_description: Tap a Homebrew repository.
27 description:
28 - Tap external Homebrew repositories.
29 version_added: "1.6"
30 options:
31 name:
32 description:
33 - The GitHub user/organization repository to tap.
34 required: true
35 aliases: ['tap']
36 url:
37 description:
38 - The optional git URL of the repository to tap. The URL is not
39 assumed to be on GitHub, and the protocol doesn't have to be HTTP.
40 Any location and protocol that git can handle is fine.
41 - I(name) option may not be a list of multiple taps (but a single
42 tap instead) when this option is provided.
43 required: false
44 version_added: "2.2"
45 state:
46 description:
47 - state of the repository.
48 choices: [ 'present', 'absent' ]
49 required: false
50 default: 'present'
51 requirements: [ homebrew ]
52 '''
53
54 EXAMPLES = '''
55 - homebrew_tap:
56 name: homebrew/dupes
57
58 - homebrew_tap:
59 name: homebrew/dupes
60 state: absent
61
62 - homebrew_tap:
63 name: homebrew/dupes,homebrew/science
64 state: present
65
66 - homebrew_tap:
67 name: telemachus/brew
68 url: 'https://bitbucket.org/telemachus/brew'
69 '''
70
71 import re
72
73 from ansible.module_utils.basic import AnsibleModule
74
75
76 def a_valid_tap(tap):
77 '''Returns True if the tap is valid.'''
78 regex = re.compile(r'^([\w-]+)/(homebrew-)?([\w-]+)$')
79 return regex.match(tap)
80
81
82 def already_tapped(module, brew_path, tap):
83 '''Returns True if already tapped.'''
84
85 rc, out, err = module.run_command([
86 brew_path,
87 'tap',
88 ])
89
90 taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
91 tap_name = re.sub('homebrew-', '', tap.lower())
92
93 return tap_name in taps
94
95
96 def add_tap(module, brew_path, tap, url=None):
97 '''Adds a single tap.'''
98 failed, changed, msg = False, False, ''
99
100 if not a_valid_tap(tap):
101 failed = True
102 msg = 'not a valid tap: %s' % tap
103
104 elif not already_tapped(module, brew_path, tap):
105 if module.check_mode:
106 module.exit_json(changed=True)
107
108 rc, out, err = module.run_command([
109 brew_path,
110 'tap',
111 tap,
112 url,
113 ])
114 if already_tapped(module, brew_path, tap):
115 changed = True
116 msg = 'successfully tapped: %s' % tap
117 else:
118 failed = True
119 msg = 'failed to tap: %s' % tap
120
121 else:
122 msg = 'already tapped: %s' % tap
123
124 return (failed, changed, msg)
125
126
127 def add_taps(module, brew_path, taps):
128 '''Adds one or more taps.'''
129 failed, unchanged, added, msg = False, 0, 0, ''
130
131 for tap in taps:
132 (failed, changed, msg) = add_tap(module, brew_path, tap)
133 if failed:
134 break
135 if changed:
136 added += 1
137 else:
138 unchanged += 1
139
140 if failed:
141 msg = 'added: %d, unchanged: %d, error: ' + msg
142 msg = msg % (added, unchanged)
143 elif added:
144 changed = True
145 msg = 'added: %d, unchanged: %d' % (added, unchanged)
146 else:
147 msg = 'added: %d, unchanged: %d' % (added, unchanged)
148
149 return (failed, changed, msg)
150
151
152 def remove_tap(module, brew_path, tap):
153 '''Removes a single tap.'''
154 failed, changed, msg = False, False, ''
155
156 if not a_valid_tap(tap):
157 failed = True
158 msg = 'not a valid tap: %s' % tap
159
160 elif already_tapped(module, brew_path, tap):
161 if module.check_mode:
162 module.exit_json(changed=True)
163
164 rc, out, err = module.run_command([
165 brew_path,
166 'untap',
167 tap,
168 ])
169 if not already_tapped(module, brew_path, tap):
170 changed = True
171 msg = 'successfully untapped: %s' % tap
172 else:
173 failed = True
174 msg = 'failed to untap: %s' % tap
175
176 else:
177 msg = 'already untapped: %s' % tap
178
179 return (failed, changed, msg)
180
181
182 def remove_taps(module, brew_path, taps):
183 '''Removes one or more taps.'''
184 failed, unchanged, removed, msg = False, 0, 0, ''
185
186 for tap in taps:
187 (failed, changed, msg) = remove_tap(module, brew_path, tap)
188 if failed:
189 break
190 if changed:
191 removed += 1
192 else:
193 unchanged += 1
194
195 if failed:
196 msg = 'removed: %d, unchanged: %d, error: ' + msg
197 msg = msg % (removed, unchanged)
198 elif removed:
199 changed = True
200 msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
201 else:
202 msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
203
204 return (failed, changed, msg)
205
206
207 def main():
208 module = AnsibleModule(
209 argument_spec=dict(
210 name=dict(aliases=['tap'], type='list', required=True),
211 url=dict(default=None, required=False),
212 state=dict(default='present', choices=['present', 'absent']),
213 ),
214 supports_check_mode=True,
215 )
216
217 brew_path = module.get_bin_path(
218 'brew',
219 required=True,
220 opt_dirs=['/usr/local/bin']
221 )
222
223 taps = module.params['name']
224 url = module.params['url']
225
226 if module.params['state'] == 'present':
227 if url is None:
228 # No tap URL provided explicitly, continue with bulk addition
229 # of all the taps.
230 failed, changed, msg = add_taps(module, brew_path, taps)
231 else:
232 # When an tap URL is provided explicitly, we allow adding
233 # *single* tap only. Validate and proceed to add single tap.
234 if len(taps) > 1:
235 msg = "List of multiple taps may not be provided with 'url' option."
236 module.fail_json(msg=msg)
237 else:
238 failed, changed, msg = add_tap(module, brew_path, taps[0], url)
239
240 if failed:
241 module.fail_json(msg=msg)
242 else:
243 module.exit_json(changed=changed, msg=msg)
244
245 elif module.params['state'] == 'absent':
246 failed, changed, msg = remove_taps(module, brew_path, taps)
247
248 if failed:
249 module.fail_json(msg=msg)
250 else:
251 module.exit_json(changed=changed, msg=msg)
252
253
254 if __name__ == '__main__':
255 main()
256
[end of lib/ansible/modules/packaging/os/homebrew_tap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/ansible/modules/packaging/os/homebrew_tap.py b/lib/ansible/modules/packaging/os/homebrew_tap.py
--- a/lib/ansible/modules/packaging/os/homebrew_tap.py
+++ b/lib/ansible/modules/packaging/os/homebrew_tap.py
@@ -111,7 +111,7 @@
tap,
url,
])
- if already_tapped(module, brew_path, tap):
+ if rc == 0:
changed = True
msg = 'successfully tapped: %s' % tap
else:
|
{"golden_diff": "diff --git a/lib/ansible/modules/packaging/os/homebrew_tap.py b/lib/ansible/modules/packaging/os/homebrew_tap.py\n--- a/lib/ansible/modules/packaging/os/homebrew_tap.py\n+++ b/lib/ansible/modules/packaging/os/homebrew_tap.py\n@@ -111,7 +111,7 @@\n tap,\n url,\n ])\n- if already_tapped(module, brew_path, tap):\n+ if rc == 0:\n changed = True\n msg = 'successfully tapped: %s' % tap\n else:\n", "issue": "homebrew_tap fails to tap caskroom/cask now.\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nTHIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED.\r\nAlso test if the latest release, and devel branch are affected too.\r\nALWAYS add information AFTER (OUTSIDE) these html comments.\r\nOtherwise it may end up being automatically closed by our bot. -->\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\nRunning the task `homebrew_tap: name=caskroom/cask` fails due to the fact that caskroom/cask has migrated to homebrew/cask. See https://github.com/Homebrew/brew/pull/4210\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\n<!--- Insert, BELOW THIS COMMENT, the name of the module, plugin, task or feature.\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path-->\r\nhomebrew_tap\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\n\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).-->\r\n```\r\n(14:49:33) C02W513SHTD8:tmp aso$ ansible-config dump --only-changed\r\n(14:49:35) C02W513SHTD8:tmp aso$\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. if this is a network bug the version of firmware on the network device.-->\r\nFrom macOS 10.13.4\r\nTo macOS 10.13.4\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: install homebrew cask\r\n homebrew_tap: name=caskroom/cask\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nThe task should have succeeded and running `brew tap` should have resulted in caskroom/cask being listed.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\nThe task failed even though it successfully tapped a homebrew cask. 
Running `brew tap` results in homebrew/cask being listed.\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nAlberts-Mac:bin bambooagent$ brew tap\r\nhomebrew/cask\r\nhomebrew/core\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2013, Daniel Jaouen <[email protected]>\n# (c) 2016, Indrajit Raychaudhuri <[email protected]>\n#\n# Based on homebrew (Andrew Dunham <[email protected]>)\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: homebrew_tap\nauthor:\n - \"Indrajit Raychaudhuri (@indrajitr)\"\n - \"Daniel Jaouen (@danieljaouen)\"\nshort_description: Tap a Homebrew repository.\ndescription:\n - Tap external Homebrew repositories.\nversion_added: \"1.6\"\noptions:\n name:\n description:\n - The GitHub user/organization repository to tap.\n required: true\n aliases: ['tap']\n url:\n description:\n - The optional git URL of the repository to tap. The URL is not\n assumed to be on GitHub, and the protocol doesn't have to be HTTP.\n Any location and protocol that git can handle is fine.\n - I(name) option may not be a list of multiple taps (but a single\n tap instead) when this option is provided.\n required: false\n version_added: \"2.2\"\n state:\n description:\n - state of the repository.\n choices: [ 'present', 'absent' ]\n required: false\n default: 'present'\nrequirements: [ homebrew ]\n'''\n\nEXAMPLES = '''\n- homebrew_tap:\n name: homebrew/dupes\n\n- homebrew_tap:\n name: homebrew/dupes\n state: absent\n\n- homebrew_tap:\n name: homebrew/dupes,homebrew/science\n state: present\n\n- homebrew_tap:\n name: telemachus/brew\n url: 'https://bitbucket.org/telemachus/brew'\n'''\n\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef a_valid_tap(tap):\n '''Returns True if the tap is valid.'''\n regex = re.compile(r'^([\\w-]+)/(homebrew-)?([\\w-]+)$')\n return regex.match(tap)\n\n\ndef already_tapped(module, brew_path, tap):\n '''Returns True if already tapped.'''\n\n rc, out, err = module.run_command([\n brew_path,\n 'tap',\n ])\n\n taps = [tap_.strip().lower() for tap_ in out.split('\\n') if tap_]\n tap_name = re.sub('homebrew-', '', tap.lower())\n\n return tap_name in taps\n\n\ndef add_tap(module, brew_path, tap, url=None):\n '''Adds a single tap.'''\n failed, changed, msg = False, False, ''\n\n if not a_valid_tap(tap):\n failed = True\n msg = 'not a valid tap: %s' % tap\n\n elif not already_tapped(module, brew_path, tap):\n if module.check_mode:\n module.exit_json(changed=True)\n\n rc, out, err = module.run_command([\n brew_path,\n 'tap',\n tap,\n url,\n ])\n if already_tapped(module, brew_path, tap):\n changed = True\n msg = 'successfully tapped: %s' % tap\n else:\n failed = True\n msg = 'failed to tap: %s' % tap\n\n else:\n msg = 'already tapped: %s' % tap\n\n return (failed, changed, msg)\n\n\ndef add_taps(module, brew_path, taps):\n '''Adds one or more taps.'''\n failed, unchanged, added, msg = False, 0, 0, ''\n\n for tap in taps:\n (failed, changed, msg) = add_tap(module, brew_path, tap)\n if failed:\n break\n if changed:\n added += 1\n else:\n unchanged += 1\n\n if failed:\n msg = 'added: %d, unchanged: %d, error: ' + msg\n msg = msg % (added, unchanged)\n elif added:\n changed = True\n msg = 'added: %d, 
unchanged: %d' % (added, unchanged)\n else:\n msg = 'added: %d, unchanged: %d' % (added, unchanged)\n\n return (failed, changed, msg)\n\n\ndef remove_tap(module, brew_path, tap):\n '''Removes a single tap.'''\n failed, changed, msg = False, False, ''\n\n if not a_valid_tap(tap):\n failed = True\n msg = 'not a valid tap: %s' % tap\n\n elif already_tapped(module, brew_path, tap):\n if module.check_mode:\n module.exit_json(changed=True)\n\n rc, out, err = module.run_command([\n brew_path,\n 'untap',\n tap,\n ])\n if not already_tapped(module, brew_path, tap):\n changed = True\n msg = 'successfully untapped: %s' % tap\n else:\n failed = True\n msg = 'failed to untap: %s' % tap\n\n else:\n msg = 'already untapped: %s' % tap\n\n return (failed, changed, msg)\n\n\ndef remove_taps(module, brew_path, taps):\n '''Removes one or more taps.'''\n failed, unchanged, removed, msg = False, 0, 0, ''\n\n for tap in taps:\n (failed, changed, msg) = remove_tap(module, brew_path, tap)\n if failed:\n break\n if changed:\n removed += 1\n else:\n unchanged += 1\n\n if failed:\n msg = 'removed: %d, unchanged: %d, error: ' + msg\n msg = msg % (removed, unchanged)\n elif removed:\n changed = True\n msg = 'removed: %d, unchanged: %d' % (removed, unchanged)\n else:\n msg = 'removed: %d, unchanged: %d' % (removed, unchanged)\n\n return (failed, changed, msg)\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(aliases=['tap'], type='list', required=True),\n url=dict(default=None, required=False),\n state=dict(default='present', choices=['present', 'absent']),\n ),\n supports_check_mode=True,\n )\n\n brew_path = module.get_bin_path(\n 'brew',\n required=True,\n opt_dirs=['/usr/local/bin']\n )\n\n taps = module.params['name']\n url = module.params['url']\n\n if module.params['state'] == 'present':\n if url is None:\n # No tap URL provided explicitly, continue with bulk addition\n # of all the taps.\n failed, changed, msg = add_taps(module, brew_path, taps)\n else:\n # When an tap URL is provided explicitly, we allow adding\n # *single* tap only. Validate and proceed to add single tap.\n if len(taps) > 1:\n msg = \"List of multiple taps may not be provided with 'url' option.\"\n module.fail_json(msg=msg)\n else:\n failed, changed, msg = add_tap(module, brew_path, taps[0], url)\n\n if failed:\n module.fail_json(msg=msg)\n else:\n module.exit_json(changed=changed, msg=msg)\n\n elif module.params['state'] == 'absent':\n failed, changed, msg = remove_taps(module, brew_path, taps)\n\n if failed:\n module.fail_json(msg=msg)\n else:\n module.exit_json(changed=changed, msg=msg)\n\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/packaging/os/homebrew_tap.py"}]}
| 3,705 | 128 |
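The homebrew_tap patch above swaps the post-hoc `already_tapped()` re-check for the command's exit code. That matters because, as the issue reports, `brew tap caskroom/cask` now succeeds but the tap then shows up in `brew tap` output under its new homebrew/cask name, so re-checking for the requested name reports a false failure. A rough, self-contained illustration using stand-in values rather than a real brew run:

```python
requested_tap = "caskroom/cask"
rc = 0  # stand-in exit code: `brew tap caskroom/cask` itself succeeded
brew_tap_output = "homebrew/cask\nhomebrew/core\n"  # stand-in `brew tap` listing afterwards

taps = [t.strip().lower() for t in brew_tap_output.split("\n") if t]

old_check = requested_tap in taps  # False -> the module wrongly reported a failure
new_check = rc == 0                # True  -> matches what actually happened

assert old_check is False and new_check is True
```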
gh_patches_debug_344
|
rasdani/github-patches
|
git_diff
|
ManimCommunity__manim-3166
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Not all arrow tips are accessible
## Description of bug / unexpected behavior
<!-- Add a clear and concise description of the problem you encountered. -->
The [manim.mobject.geometry.tips](https://docs.manim.community/en/stable/_modules/manim/mobject/geometry/tips.html#ArrowTriangleFilledTip) file provides a set of arrow tips to use. The list `__all__` contains:
```py
__all__ = [
"ArrowTip",
"ArrowCircleFilledTip",
"ArrowCircleTip",
"ArrowSquareTip",
"ArrowSquareFilledTip",
]
```
## Expected behavior
<!-- Add a clear and concise description of what you expected to happen. -->
Instead, it should have:
```py
__all__ = [
"ArrowTip",
"ArrowCircleFilledTip",
"ArrowCircleTip",
"ArrowSquareTip",
"ArrowSquareFilledTip"
"ArrowTriangleTip", # added
"ArrowTriangleFilledTip", # added
]
```
## How to reproduce the issue
<!-- Provide a piece of code illustrating the undesired behavior. -->
<details><summary>Code for reproducing the problem</summary>
```py
class Test(Scene):
def construct(self):
my_line = Line()
my_line.add_tip(ArrowTriangleFilledTip(fill_color=WHITE))
self.add(my_line)
```
</details>
## Additional media files
<!-- Paste in the files manim produced on rendering the code above. -->
None
<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->
</details>
## System specifications
<details><summary>System Details</summary>
- OS: macOS 13.0.1 (Ventura)
- RAM: 8GB
- Python version: Python 3.10.9
- Installed modules: manim 0.17.2
</issue>
<code>
[start of manim/mobject/geometry/tips.py]
1 r"""A collection of tip mobjects for use with :class:`~.TipableVMobject`."""
2
3 from __future__ import annotations
4
5 __all__ = [
6 "ArrowTip",
7 "ArrowCircleFilledTip",
8 "ArrowCircleTip",
9 "ArrowSquareTip",
10 "ArrowSquareFilledTip",
11 ]
12
13 import numpy as np
14
15 from manim.constants import *
16 from manim.mobject.geometry.arc import Circle
17 from manim.mobject.geometry.polygram import Square, Triangle
18 from manim.mobject.opengl.opengl_compatibility import ConvertToOpenGL
19 from manim.mobject.types.vectorized_mobject import VMobject
20 from manim.utils.space_ops import angle_of_vector
21
22
23 class ArrowTip(VMobject, metaclass=ConvertToOpenGL):
24 r"""Base class for arrow tips.
25
26 .. seealso::
27 :class:`ArrowTriangleTip`
28 :class:`ArrowTriangleFilledTip`
29 :class:`ArrowCircleTip`
30 :class:`ArrowCircleFilledTip`
31 :class:`ArrowSquareTip`
32 :class:`ArrowSquareFilledTip`
33
34 Examples
35 --------
36 Cannot be used directly, only intended for inheritance::
37
38 >>> tip = ArrowTip()
39 Traceback (most recent call last):
40 ...
41 NotImplementedError: Has to be implemented in inheriting subclasses.
42
43 Instead, use one of the pre-defined ones, or make
44 a custom one like this:
45
46 .. manim:: CustomTipExample
47
48 >>> from manim import RegularPolygon, Arrow
49 >>> class MyCustomArrowTip(ArrowTip, RegularPolygon):
50 ... def __init__(self, length=0.35, **kwargs):
51 ... RegularPolygon.__init__(self, n=5, **kwargs)
52 ... self.width = length
53 ... self.stretch_to_fit_height(length)
54 >>> arr = Arrow(np.array([-2, -2, 0]), np.array([2, 2, 0]),
55 ... tip_shape=MyCustomArrowTip)
56 >>> isinstance(arr.tip, RegularPolygon)
57 True
58 >>> from manim import Scene, Create
59 >>> class CustomTipExample(Scene):
60 ... def construct(self):
61 ... self.play(Create(arr))
62
63 Using a class inherited from :class:`ArrowTip` to get a non-filled
64 tip is a shorthand to manually specifying the arrow tip style as follows::
65
66 >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]),
67 ... tip_style={'fill_opacity': 0, 'stroke_width': 3})
68
69 The following example illustrates the usage of all of the predefined
70 arrow tips.
71
72 .. manim:: ArrowTipsShowcase
73 :save_last_frame:
74
75 from manim.mobject.geometry.tips import ArrowTriangleTip,\
76 ArrowSquareTip, ArrowSquareFilledTip,\
77 ArrowCircleTip, ArrowCircleFilledTip
78 class ArrowTipsShowcase(Scene):
79 def construct(self):
80 a00 = Arrow(start=[-2, 3, 0], end=[2, 3, 0], color=YELLOW)
81 a11 = Arrow(start=[-2, 2, 0], end=[2, 2, 0], tip_shape=ArrowTriangleTip)
82 a12 = Arrow(start=[-2, 1, 0], end=[2, 1, 0])
83 a21 = Arrow(start=[-2, 0, 0], end=[2, 0, 0], tip_shape=ArrowSquareTip)
84 a22 = Arrow([-2, -1, 0], [2, -1, 0], tip_shape=ArrowSquareFilledTip)
85 a31 = Arrow([-2, -2, 0], [2, -2, 0], tip_shape=ArrowCircleTip)
86 a32 = Arrow([-2, -3, 0], [2, -3, 0], tip_shape=ArrowCircleFilledTip)
87 b11 = a11.copy().scale(0.5, scale_tips=True).next_to(a11, RIGHT)
88 b12 = a12.copy().scale(0.5, scale_tips=True).next_to(a12, RIGHT)
89 b21 = a21.copy().scale(0.5, scale_tips=True).next_to(a21, RIGHT)
90 self.add(a00, a11, a12, a21, a22, a31, a32, b11, b12, b21)
91
92 """
93
94 def __init__(self, *args, **kwargs):
95 raise NotImplementedError("Has to be implemented in inheriting subclasses.")
96
97 @property
98 def base(self):
99 r"""The base point of the arrow tip.
100
101 This is the point connecting to the arrow line.
102
103 Examples
104 --------
105 ::
106
107 >>> from manim import Arrow
108 >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)
109 >>> arrow.tip.base.round(2) + 0. # add 0. to avoid negative 0 in output
110 array([1.65, 0. , 0. ])
111
112 """
113 return self.point_from_proportion(0.5)
114
115 @property
116 def tip_point(self):
117 r"""The tip point of the arrow tip.
118
119 Examples
120 --------
121 ::
122
123 >>> from manim import Arrow
124 >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)
125 >>> arrow.tip.tip_point.round(2) + 0.
126 array([2., 0., 0.])
127
128 """
129 return self.points[0]
130
131 @property
132 def vector(self):
133 r"""The vector pointing from the base point to the tip point.
134
135 Examples
136 --------
137 ::
138
139 >>> from manim import Arrow
140 >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 2, 0]), buff=0)
141 >>> arrow.tip.vector.round(2) + 0.
142 array([0.25, 0.25, 0. ])
143
144 """
145 return self.tip_point - self.base
146
147 @property
148 def tip_angle(self):
149 r"""The angle of the arrow tip.
150
151 Examples
152 --------
153 ::
154
155 >>> from manim import Arrow
156 >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]), buff=0)
157 >>> round(arrow.tip.tip_angle, 5) == round(PI/4, 5)
158 True
159
160 """
161 return angle_of_vector(self.vector)
162
163 @property
164 def length(self):
165 r"""The length of the arrow tip.
166
167 Examples
168 --------
169 ::
170
171 >>> from manim import Arrow
172 >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 2, 0]))
173 >>> round(arrow.tip.length, 3)
174 0.35
175
176 """
177 return np.linalg.norm(self.vector)
178
179
180 class ArrowTriangleTip(ArrowTip, Triangle):
181 r"""Triangular arrow tip."""
182
183 def __init__(
184 self,
185 fill_opacity=0,
186 stroke_width=3,
187 length=DEFAULT_ARROW_TIP_LENGTH,
188 width=DEFAULT_ARROW_TIP_LENGTH,
189 start_angle=PI,
190 **kwargs,
191 ):
192 Triangle.__init__(
193 self,
194 fill_opacity=fill_opacity,
195 stroke_width=stroke_width,
196 start_angle=start_angle,
197 **kwargs,
198 )
199 self.width = width
200
201 self.stretch_to_fit_width(length)
202 self.stretch_to_fit_height(width)
203
204
205 class ArrowTriangleFilledTip(ArrowTriangleTip):
206 r"""Triangular arrow tip with filled tip.
207
208 This is the default arrow tip shape.
209 """
210
211 def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):
212 super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)
213
214
215 class ArrowCircleTip(ArrowTip, Circle):
216 r"""Circular arrow tip."""
217
218 def __init__(
219 self,
220 fill_opacity=0,
221 stroke_width=3,
222 length=DEFAULT_ARROW_TIP_LENGTH,
223 start_angle=PI,
224 **kwargs,
225 ):
226 self.start_angle = start_angle
227 Circle.__init__(
228 self, fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs
229 )
230 self.width = length
231 self.stretch_to_fit_height(length)
232
233
234 class ArrowCircleFilledTip(ArrowCircleTip):
235 r"""Circular arrow tip with filled tip."""
236
237 def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):
238 super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)
239
240
241 class ArrowSquareTip(ArrowTip, Square):
242 r"""Square arrow tip."""
243
244 def __init__(
245 self,
246 fill_opacity=0,
247 stroke_width=3,
248 length=DEFAULT_ARROW_TIP_LENGTH,
249 start_angle=PI,
250 **kwargs,
251 ):
252 self.start_angle = start_angle
253 Square.__init__(
254 self,
255 fill_opacity=fill_opacity,
256 stroke_width=stroke_width,
257 side_length=length,
258 **kwargs,
259 )
260 self.width = length
261 self.stretch_to_fit_height(length)
262
263
264 class ArrowSquareFilledTip(ArrowSquareTip):
265 r"""Square arrow tip with filled tip."""
266
267 def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):
268 super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)
269
[end of manim/mobject/geometry/tips.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/manim/mobject/geometry/tips.py b/manim/mobject/geometry/tips.py
--- a/manim/mobject/geometry/tips.py
+++ b/manim/mobject/geometry/tips.py
@@ -8,6 +8,8 @@
"ArrowCircleTip",
"ArrowSquareTip",
"ArrowSquareFilledTip",
+ "ArrowTriangleTip",
+ "ArrowTriangleFilledTip",
]
import numpy as np
|
{"golden_diff": "diff --git a/manim/mobject/geometry/tips.py b/manim/mobject/geometry/tips.py\n--- a/manim/mobject/geometry/tips.py\n+++ b/manim/mobject/geometry/tips.py\n@@ -8,6 +8,8 @@\n \"ArrowCircleTip\",\n \"ArrowSquareTip\",\n \"ArrowSquareFilledTip\",\n+ \"ArrowTriangleTip\",\n+ \"ArrowTriangleFilledTip\",\n ]\n \n import numpy as np\n", "issue": "Not all arrow tips are accessible\n## Description of bug / unexpected behavior\r\n<!-- Add a clear and concise description of the problem you encountered. -->\r\nThe [manim.mobject.geometry.tips](https://docs.manim.community/en/stable/_modules/manim/mobject/geometry/tips.html#ArrowTriangleFilledTip) file has presents of some arrow tips to use. The list `__all__` contains:\r\n```py\r\n__all__ = [\r\n \"ArrowTip\",\r\n \"ArrowCircleFilledTip\",\r\n \"ArrowCircleTip\",\r\n \"ArrowSquareTip\",\r\n \"ArrowSquareFilledTip\",\r\n]\r\n```\r\n\r\n## Expected behavior\r\n<!-- Add a clear and concise description of what you expected to happen. -->\r\nInstead, it should have:\r\n\r\n```py\r\n__all__ = [\r\n \"ArrowTip\",\r\n \"ArrowCircleFilledTip\",\r\n \"ArrowCircleTip\",\r\n \"ArrowSquareTip\",\r\n \"ArrowSquareFilledTip\"\r\n \"ArrowTriangleTip\", # added\r\n \"ArrowTriangleFilledTip\", # added\r\n]\r\n```\r\n\r\n## How to reproduce the issue\r\n<!-- Provide a piece of code illustrating the undesired behavior. -->\r\n\r\n<details><summary>Code for reproducing the problem</summary>\r\n\r\n```py\r\nclass Test(Scene):\r\n def construct(self):\r\n my_line = Line()\r\n my_line.add_tip(ArrowTriangleFilledTip(fill_color=WHITE))\r\n self.add(my_line)\r\n```\r\n\r\n</details>\r\n\r\n\r\n## Additional media files\r\n<!-- Paste in the files manim produced on rendering the code above. -->\r\nNone\r\n\r\n\r\n\r\n<!-- Insert screenshots here (only when absolutely necessary, we prefer copy/pasted output!) -->\r\n\r\n</details>\r\n\r\n\r\n## System specifications\r\n\r\n<details><summary>System Details</summary>\r\n\r\n- OS: macOS 13.0.1 (Ventura)\r\n- RAM: 8GB\r\n- Python version: Python 3.10.9\r\n- Installed modules: manim 0.17.2\r\n\r\n\n", "before_files": [{"content": "r\"\"\"A collection of tip mobjects for use with :class:`~.TipableVMobject`.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"ArrowTip\",\n \"ArrowCircleFilledTip\",\n \"ArrowCircleTip\",\n \"ArrowSquareTip\",\n \"ArrowSquareFilledTip\",\n]\n\nimport numpy as np\n\nfrom manim.constants import *\nfrom manim.mobject.geometry.arc import Circle\nfrom manim.mobject.geometry.polygram import Square, Triangle\nfrom manim.mobject.opengl.opengl_compatibility import ConvertToOpenGL\nfrom manim.mobject.types.vectorized_mobject import VMobject\nfrom manim.utils.space_ops import angle_of_vector\n\n\nclass ArrowTip(VMobject, metaclass=ConvertToOpenGL):\n r\"\"\"Base class for arrow tips.\n\n .. seealso::\n :class:`ArrowTriangleTip`\n :class:`ArrowTriangleFilledTip`\n :class:`ArrowCircleTip`\n :class:`ArrowCircleFilledTip`\n :class:`ArrowSquareTip`\n :class:`ArrowSquareFilledTip`\n\n Examples\n --------\n Cannot be used directly, only intended for inheritance::\n\n >>> tip = ArrowTip()\n Traceback (most recent call last):\n ...\n NotImplementedError: Has to be implemented in inheriting subclasses.\n\n Instead, use one of the pre-defined ones, or make\n a custom one like this:\n\n .. manim:: CustomTipExample\n\n >>> from manim import RegularPolygon, Arrow\n >>> class MyCustomArrowTip(ArrowTip, RegularPolygon):\n ... def __init__(self, length=0.35, **kwargs):\n ... 
RegularPolygon.__init__(self, n=5, **kwargs)\n ... self.width = length\n ... self.stretch_to_fit_height(length)\n >>> arr = Arrow(np.array([-2, -2, 0]), np.array([2, 2, 0]),\n ... tip_shape=MyCustomArrowTip)\n >>> isinstance(arr.tip, RegularPolygon)\n True\n >>> from manim import Scene, Create\n >>> class CustomTipExample(Scene):\n ... def construct(self):\n ... self.play(Create(arr))\n\n Using a class inherited from :class:`ArrowTip` to get a non-filled\n tip is a shorthand to manually specifying the arrow tip style as follows::\n\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]),\n ... tip_style={'fill_opacity': 0, 'stroke_width': 3})\n\n The following example illustrates the usage of all of the predefined\n arrow tips.\n\n .. manim:: ArrowTipsShowcase\n :save_last_frame:\n\n from manim.mobject.geometry.tips import ArrowTriangleTip,\\\n ArrowSquareTip, ArrowSquareFilledTip,\\\n ArrowCircleTip, ArrowCircleFilledTip\n class ArrowTipsShowcase(Scene):\n def construct(self):\n a00 = Arrow(start=[-2, 3, 0], end=[2, 3, 0], color=YELLOW)\n a11 = Arrow(start=[-2, 2, 0], end=[2, 2, 0], tip_shape=ArrowTriangleTip)\n a12 = Arrow(start=[-2, 1, 0], end=[2, 1, 0])\n a21 = Arrow(start=[-2, 0, 0], end=[2, 0, 0], tip_shape=ArrowSquareTip)\n a22 = Arrow([-2, -1, 0], [2, -1, 0], tip_shape=ArrowSquareFilledTip)\n a31 = Arrow([-2, -2, 0], [2, -2, 0], tip_shape=ArrowCircleTip)\n a32 = Arrow([-2, -3, 0], [2, -3, 0], tip_shape=ArrowCircleFilledTip)\n b11 = a11.copy().scale(0.5, scale_tips=True).next_to(a11, RIGHT)\n b12 = a12.copy().scale(0.5, scale_tips=True).next_to(a12, RIGHT)\n b21 = a21.copy().scale(0.5, scale_tips=True).next_to(a21, RIGHT)\n self.add(a00, a11, a12, a21, a22, a31, a32, b11, b12, b21)\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n raise NotImplementedError(\"Has to be implemented in inheriting subclasses.\")\n\n @property\n def base(self):\n r\"\"\"The base point of the arrow tip.\n\n This is the point connecting to the arrow line.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)\n >>> arrow.tip.base.round(2) + 0. # add 0. to avoid negative 0 in output\n array([1.65, 0. , 0. ])\n\n \"\"\"\n return self.point_from_proportion(0.5)\n\n @property\n def tip_point(self):\n r\"\"\"The tip point of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 0, 0]), buff=0)\n >>> arrow.tip.tip_point.round(2) + 0.\n array([2., 0., 0.])\n\n \"\"\"\n return self.points[0]\n\n @property\n def vector(self):\n r\"\"\"The vector pointing from the base point to the tip point.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([2, 2, 0]), buff=0)\n >>> arrow.tip.vector.round(2) + 0.\n array([0.25, 0.25, 0. 
])\n\n \"\"\"\n return self.tip_point - self.base\n\n @property\n def tip_angle(self):\n r\"\"\"The angle of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 1, 0]), buff=0)\n >>> round(arrow.tip.tip_angle, 5) == round(PI/4, 5)\n True\n\n \"\"\"\n return angle_of_vector(self.vector)\n\n @property\n def length(self):\n r\"\"\"The length of the arrow tip.\n\n Examples\n --------\n ::\n\n >>> from manim import Arrow\n >>> arrow = Arrow(np.array([0, 0, 0]), np.array([1, 2, 0]))\n >>> round(arrow.tip.length, 3)\n 0.35\n\n \"\"\"\n return np.linalg.norm(self.vector)\n\n\nclass ArrowTriangleTip(ArrowTip, Triangle):\n r\"\"\"Triangular arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n width=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n Triangle.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n start_angle=start_angle,\n **kwargs,\n )\n self.width = width\n\n self.stretch_to_fit_width(length)\n self.stretch_to_fit_height(width)\n\n\nclass ArrowTriangleFilledTip(ArrowTriangleTip):\n r\"\"\"Triangular arrow tip with filled tip.\n\n This is the default arrow tip shape.\n \"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n\n\nclass ArrowCircleTip(ArrowTip, Circle):\n r\"\"\"Circular arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n self.start_angle = start_angle\n Circle.__init__(\n self, fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs\n )\n self.width = length\n self.stretch_to_fit_height(length)\n\n\nclass ArrowCircleFilledTip(ArrowCircleTip):\n r\"\"\"Circular arrow tip with filled tip.\"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n\n\nclass ArrowSquareTip(ArrowTip, Square):\n r\"\"\"Square arrow tip.\"\"\"\n\n def __init__(\n self,\n fill_opacity=0,\n stroke_width=3,\n length=DEFAULT_ARROW_TIP_LENGTH,\n start_angle=PI,\n **kwargs,\n ):\n self.start_angle = start_angle\n Square.__init__(\n self,\n fill_opacity=fill_opacity,\n stroke_width=stroke_width,\n side_length=length,\n **kwargs,\n )\n self.width = length\n self.stretch_to_fit_height(length)\n\n\nclass ArrowSquareFilledTip(ArrowSquareTip):\n r\"\"\"Square arrow tip with filled tip.\"\"\"\n\n def __init__(self, fill_opacity=1, stroke_width=0, **kwargs):\n super().__init__(fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs)\n", "path": "manim/mobject/geometry/tips.py"}]}
| 3,782 | 99 |
gh_patches_debug_33607
|
rasdani/github-patches
|
git_diff
|
vas3k__vas3k.club-405
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
robots.txt is missing
I'm sure nobody in the club gives a damn, but it hurts me every time I see that there is no robots.txt.
We could at least add a standard one, something like:
User-agent: *
Sitemap: https://vas3k.club/sitemap.xml
Host: https://vas3k.club
</issue>
<code>
[start of misc/views.py]
1 from django.shortcuts import render
2
3 from auth.helpers import auth_required
4 from landing.models import GodSettings
5 from users.models.achievements import Achievement
6
7
8 @auth_required
9 def achievements(request):
10 achievements = Achievement.objects.filter(is_visible=True)
11 return render(request, "pages/achievements.html", {
12 "achievements": achievements
13 })
14
15
16 @auth_required
17 def network(request):
18 secret_page_html = GodSettings.objects.first().network_page
19 return render(request, "pages/network.html", {
20 "page_html": secret_page_html,
21 })
22
[end of misc/views.py]
[start of club/urls.py]
1 from django.conf import settings
2 from django.contrib.sitemaps.views import sitemap
3 from django.urls import path, include, re_path
4
5 from auth.helpers import auth_switch
6 from auth.views.auth import login, logout, debug_dev_login, debug_random_login, join
7 from auth.views.email import email_login, email_login_code
8 from auth.views.external import external_login
9 from auth.views.patreon import patreon_login, patreon_oauth_callback
10 from bot.views import webhook_telegram, link_telegram
11 from comments.views import create_comment, edit_comment, delete_comment, show_comment, upvote_comment, \
12 retract_comment_vote, pin_comment
13 from landing.views import landing, docs, god_settings
14 from misc.views import achievements, network
15 from notifications.views import weekly_digest, email_unsubscribe, email_confirm, daily_digest, email_digest_switch
16 from payments.views import membership_expired, pay, done, stripe_webhook, stop_subscription
17 from posts.api import md_show_post, api_show_post
18 from posts.models.post import Post
19 from posts.rss import NewPostsRss
20 from posts.sitemaps import sitemaps
21 from posts.views.admin import admin_post, announce_post
22 from posts.views.api import toggle_post_bookmark
23 from posts.views.feed import feed
24 from posts.views.posts import show_post, edit_post, upvote_post, retract_post_vote, compose, compose_type, \
25 toggle_post_subscription
26 from bookmarks.views import bookmarks
27 from search.views import search
28 from users.api import api_profile
29 from users.views.delete_account import request_delete_account, confirm_delete_account
30 from users.views.messages import on_review, rejected, banned
31 from users.views.profile import profile, toggle_tag, add_expertise, delete_expertise
32 from users.views.settings import profile_settings, edit_profile, edit_account, edit_notifications, edit_payments, \
33 edit_bot, edit_data, request_data
34 from users.views.intro import intro
35 from users.views.admin import admin_profile
36 from users.views.people import people
37
38 POST_TYPE_RE = r"(?P<post_type>(all|{}))".format("|".join(dict(Post.TYPES).keys()))
39 ORDERING_RE = r"(?P<ordering>(activity|new|top|top_week|top_month))"
40
41 urlpatterns = [
42 path("", auth_switch(landing, feed), name="index"),
43
44 path("join/", join, name="join"),
45 path("auth/login/", login, name="login"),
46 path("auth/logout/", logout, name="logout"),
47 path("auth/patreon/", patreon_login, name="patreon_login"),
48 path("auth/patreon_callback/", patreon_oauth_callback, name="patreon_oauth_callback"),
49 path("auth/email/", email_login, name="email_login"),
50 path("auth/email/code/", email_login_code, name="email_login_code"),
51 path("auth/external/", external_login, name="external_login"),
52
53 path("monies/", pay, name="pay"),
54 path("monies/done/", done, name="done"),
55 path("monies/membership_expired/", membership_expired, name="membership_expired"),
56 path("monies/stripe/webhook/", stripe_webhook, name="stripe_webhook"),
57 path("monies/subscription/<str:subscription_id>/stop/", stop_subscription, name="stop_subscription"),
58
59 path("user/<slug:user_slug>/", profile, name="profile"),
60 path("user/<slug:user_slug>.json", api_profile, name="api_profile"),
61 path("user/<slug:user_slug>/edit/", profile_settings, name="profile_settings"),
62 path("user/<slug:user_slug>/edit/profile/", edit_profile, name="edit_profile"),
63 path("user/<slug:user_slug>/edit/account/", edit_account, name="edit_account"),
64 path("user/<slug:user_slug>/edit/bot/", edit_bot, name="edit_bot"),
65 path("user/<slug:user_slug>/edit/notifications/", edit_notifications, name="edit_notifications"),
66 path("user/<slug:user_slug>/edit/monies/", edit_payments, name="edit_payments"),
67 path("user/<slug:user_slug>/edit/data/", edit_data, name="edit_data"),
68 path("user/<slug:user_slug>/edit/data/request/", request_data, name="request_user_data"),
69 path("user/<slug:user_slug>/admin/", admin_profile, name="admin_profile"),
70 path("user/<slug:user_slug>/delete/", request_delete_account, name="request_delete_account"),
71 path("user/<slug:user_slug>/delete/confirm/", confirm_delete_account, name="confirm_delete_account"),
72
73 path("intro/", intro, name="intro"),
74 path("people/", people, name="people"),
75 path("achievements/", achievements, name="achievements"),
76 path("profile/tag/<slug:tag_code>/toggle/", toggle_tag, name="toggle_tag"),
77 path("profile/expertise/add/", add_expertise, name="add_expertise"),
78 path("profile/expertise/<slug:expertise>/delete/", delete_expertise, name="delete_expertise"),
79 path("profile/on_review/", on_review, name="on_review"),
80 path("profile/rejected/", rejected, name="rejected"),
81 path("profile/banned/", banned, name="banned"),
82
83 path("create/", compose, name="compose"),
84 path("create/<slug:post_type>/", compose_type, name="compose_type"),
85 path("post/<slug:post_slug>/edit/", edit_post, name="edit_post"),
86 path("post/<slug:post_slug>/bookmark/", toggle_post_bookmark, name="toggle_post_bookmark"),
87 path("post/<slug:post_slug>/upvote/", upvote_post, name="upvote_post"),
88 path("post/<slug:post_slug>/retract_vote/", retract_post_vote, name="retract_post_vote"),
89 path("post/<slug:post_slug>/subscription/", toggle_post_subscription, name="toggle_post_subscription"),
90 path("post/<slug:post_slug>/admin/", admin_post, name="admin_post"),
91 path("post/<slug:post_slug>/announce/", announce_post, name="announce_post"),
92 path("post/<slug:post_slug>/comment/create/", create_comment, name="create_comment"),
93 path("post/<slug:post_slug>/comment/<uuid:comment_id>/", show_comment, name="show_comment", ),
94
95 path("bookmarks/", bookmarks, name="bookmarks"),
96
97 path("search/", search, name="search"),
98 path("room/<slug:topic_slug>/", feed, name="feed_topic"),
99 path("room/<slug:topic_slug>/<slug:ordering>/", feed, name="feed_topic_ordering"),
100
101 path("comment/<uuid:comment_id>/upvote/", upvote_comment, name="upvote_comment"),
102 path("comment/<uuid:comment_id>/retract_vote/", retract_comment_vote, name="retract_comment_vote"),
103 path("comment/<uuid:comment_id>/edit/", edit_comment, name="edit_comment"),
104 path("comment/<uuid:comment_id>/pin/", pin_comment, name="pin_comment"),
105 path("comment/<uuid:comment_id>/delete/", delete_comment, name="delete_comment"),
106
107 path("telegram/link/", link_telegram, name="link_telegram"),
108 path("telegram/webhook/<str:token>/", webhook_telegram, name="webhook_telegram"),
109
110 path("notifications/confirm/<str:secret>/", email_confirm, name="email_confirm"),
111 path("notifications/confirm/<str:secret>/<str:legacy_code>/", email_confirm, name="email_confirm_legacy"),
112 path("notifications/unsubscribe/<str:user_id>/<str:secret>/", email_unsubscribe, name="email_unsubscribe"),
113 path("notifications/switch/<str:digest_type>/<str:user_id>/<str:secret>/", email_digest_switch,
114 name="email_digest_switch"),
115 path("notifications/renderer/digest/weekly/", weekly_digest, name="render_weekly_digest"),
116 path("notifications/renderer/digest/daily/<slug:user_slug>/", daily_digest, name="render_daily_digest"),
117
118 path("docs/<slug:doc_slug>/", docs, name="docs"),
119
120 path("network/", network, name="network"),
121
122 path("godmode/", god_settings, name="god_settings"),
123 path("godmode/dev_login/", debug_dev_login, name="debug_dev_login"),
124 path("godmode/random_login/", debug_random_login, name="debug_random_login"),
125
126 # feeds
127 path("sitemap.xml", sitemap, {"sitemaps": sitemaps}, name="sitemap"),
128 path("posts.rss", NewPostsRss(), name="rss"),
129
130 # keep these guys at the bottom
131 re_path(r"^{}/$".format(POST_TYPE_RE), feed, name="feed_type"),
132 re_path(r"^{}/{}/$".format(POST_TYPE_RE, ORDERING_RE), feed, name="feed_ordering"),
133 path("<slug:post_type>/<slug:post_slug>/", show_post, name="show_post"),
134 path("<slug:post_type>/<slug:post_slug>.md", md_show_post, name="md_show_post"),
135 path("<slug:post_type>/<slug:post_slug>.json", api_show_post, name="api_show_post"),
136 ]
137
138 if settings.DEBUG:
139 import debug_toolbar
140
141 urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
142
143 # According to django doc: https://docs.djangoproject.com/en/3.1/topics/testing/overview/#other-test-conditions
144 # Regardless of the value of the DEBUG setting in your configuration file, all Django tests run with DEBUG=False
145 # so we use separate special var instead of settings.DEBUG
146 if settings.TESTS_RUN:
147 from debug.api import api_me
148
149 urlpatterns.append(path("debug/me", api_me, name="debug_api_me"))
150
[end of club/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/club/urls.py b/club/urls.py
--- a/club/urls.py
+++ b/club/urls.py
@@ -11,7 +11,7 @@
from comments.views import create_comment, edit_comment, delete_comment, show_comment, upvote_comment, \
retract_comment_vote, pin_comment
from landing.views import landing, docs, god_settings
-from misc.views import achievements, network
+from misc.views import achievements, network, robots
from notifications.views import weekly_digest, email_unsubscribe, email_confirm, daily_digest, email_digest_switch
from payments.views import membership_expired, pay, done, stripe_webhook, stop_subscription
from posts.api import md_show_post, api_show_post
@@ -127,6 +127,8 @@
path("sitemap.xml", sitemap, {"sitemaps": sitemaps}, name="sitemap"),
path("posts.rss", NewPostsRss(), name="rss"),
+ path("robots.txt", robots, name="robots"),
+
# keep these guys at the bottom
re_path(r"^{}/$".format(POST_TYPE_RE), feed, name="feed_type"),
re_path(r"^{}/{}/$".format(POST_TYPE_RE, ORDERING_RE), feed, name="feed_ordering"),
diff --git a/misc/views.py b/misc/views.py
--- a/misc/views.py
+++ b/misc/views.py
@@ -1,4 +1,6 @@
+from django.http import HttpResponse
from django.shortcuts import render
+from django.views.decorators.http import require_GET
from auth.helpers import auth_required
from landing.models import GodSettings
@@ -19,3 +21,12 @@
return render(request, "pages/network.html", {
"page_html": secret_page_html,
})
+
+@require_GET
+def robots(request):
+ lines = [
+ "User-agent: *",
+ "Sitemap: https://vas3k.club/sitemap.xml",
+ "Host: https://vas3k.club",
+ ]
+ return HttpResponse("\n".join(lines), content_type="text/plain")
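
A quick way to sanity-check the new endpoint (illustrative sketch only; the test name and client usage are assumptions, not code from the repository):

    from django.test import Client

    def test_robots_txt_is_served():
        # Path and expected lines come from the patch above.
        response = Client().get("/robots.txt")
        assert response.status_code == 200
        assert b"User-agent: *" in response.content
        assert b"Sitemap: https://vas3k.club/sitemap.xml" in response.content
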
|
{"golden_diff": "diff --git a/club/urls.py b/club/urls.py\n--- a/club/urls.py\n+++ b/club/urls.py\n@@ -11,7 +11,7 @@\n from comments.views import create_comment, edit_comment, delete_comment, show_comment, upvote_comment, \\\n retract_comment_vote, pin_comment\n from landing.views import landing, docs, god_settings\n-from misc.views import achievements, network\n+from misc.views import achievements, network, robots\n from notifications.views import weekly_digest, email_unsubscribe, email_confirm, daily_digest, email_digest_switch\n from payments.views import membership_expired, pay, done, stripe_webhook, stop_subscription\n from posts.api import md_show_post, api_show_post\n@@ -127,6 +127,8 @@\n path(\"sitemap.xml\", sitemap, {\"sitemaps\": sitemaps}, name=\"sitemap\"),\n path(\"posts.rss\", NewPostsRss(), name=\"rss\"),\n \n+ path(\"robots.txt\", robots, name=\"robots\"),\n+\n # keep these guys at the bottom\n re_path(r\"^{}/$\".format(POST_TYPE_RE), feed, name=\"feed_type\"),\n re_path(r\"^{}/{}/$\".format(POST_TYPE_RE, ORDERING_RE), feed, name=\"feed_ordering\"),\ndiff --git a/misc/views.py b/misc/views.py\n--- a/misc/views.py\n+++ b/misc/views.py\n@@ -1,4 +1,6 @@\n+from django.http import HttpResponse\n from django.shortcuts import render\n+from django.views.decorators.http import require_GET\n \n from auth.helpers import auth_required\n from landing.models import GodSettings\n@@ -19,3 +21,12 @@\n return render(request, \"pages/network.html\", {\n \"page_html\": secret_page_html,\n })\n+\n+@require_GET\n+def robots(request):\n+ lines = [\n+ \"User-agent: *\",\n+ \"Sitemap: https://vas3k.club/sitemap.xml\",\n+ \"Host: https://vas3k.club\",\n+ ]\n+ return HttpResponse(\"\\n\".join(lines), content_type=\"text/plain\")\n", "issue": "\u041e\u0442\u0441\u0443\u0442\u0441\u0442\u0432\u0443\u0435\u0442 robots.txt\n\u0423\u0432\u0435\u0440\u0435\u043d, \u0447\u0442\u043e \u0432 \u043a\u043b\u0443\u0431\u0435 \u0432\u0441\u0435\u043c \u043f\u043e\u0445\u0443\u0439, \u043d\u043e \u043c\u043d\u0435 \u043a\u0430\u0436\u0434\u044b\u0439 \u0440\u0430\u0437 \u0431\u043e\u043b\u044c\u043d\u043e, \u043a\u043e\u0433\u0434\u0430 \u0432\u0438\u0436\u0443, \u0447\u0442\u043e \u043d\u0435\u0442 robots.txt.\r\n\r\n\u041c\u043e\u0436\u043d\u043e \u0445\u043e\u0442\u044f\u0431 \u0441\u0442\u0430\u043d\u0434\u0430\u0440\u0442\u043d\u044b\u0439 \u0434\u043e\u0431\u0430\u0432\u0438\u0442\u044c, \u0432\u0440\u043e\u0434\u0435:\r\n\r\nUser-agent: *\r\nSitemap: https://vas3k.club/sitemap.xml\r\nHost: https://vas3k.club\n", "before_files": [{"content": "from django.shortcuts import render\n\nfrom auth.helpers import auth_required\nfrom landing.models import GodSettings\nfrom users.models.achievements import Achievement\n\n\n@auth_required\ndef achievements(request):\n achievements = Achievement.objects.filter(is_visible=True)\n return render(request, \"pages/achievements.html\", {\n \"achievements\": achievements\n })\n\n\n@auth_required\ndef network(request):\n secret_page_html = GodSettings.objects.first().network_page\n return render(request, \"pages/network.html\", {\n \"page_html\": secret_page_html,\n })\n", "path": "misc/views.py"}, {"content": "from django.conf import settings\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.urls import path, include, re_path\n\nfrom auth.helpers import auth_switch\nfrom auth.views.auth import login, logout, debug_dev_login, debug_random_login, join\nfrom auth.views.email import email_login, email_login_code\nfrom auth.views.external import external_login\nfrom 
auth.views.patreon import patreon_login, patreon_oauth_callback\nfrom bot.views import webhook_telegram, link_telegram\nfrom comments.views import create_comment, edit_comment, delete_comment, show_comment, upvote_comment, \\\n retract_comment_vote, pin_comment\nfrom landing.views import landing, docs, god_settings\nfrom misc.views import achievements, network\nfrom notifications.views import weekly_digest, email_unsubscribe, email_confirm, daily_digest, email_digest_switch\nfrom payments.views import membership_expired, pay, done, stripe_webhook, stop_subscription\nfrom posts.api import md_show_post, api_show_post\nfrom posts.models.post import Post\nfrom posts.rss import NewPostsRss\nfrom posts.sitemaps import sitemaps\nfrom posts.views.admin import admin_post, announce_post\nfrom posts.views.api import toggle_post_bookmark\nfrom posts.views.feed import feed\nfrom posts.views.posts import show_post, edit_post, upvote_post, retract_post_vote, compose, compose_type, \\\n toggle_post_subscription\nfrom bookmarks.views import bookmarks\nfrom search.views import search\nfrom users.api import api_profile\nfrom users.views.delete_account import request_delete_account, confirm_delete_account\nfrom users.views.messages import on_review, rejected, banned\nfrom users.views.profile import profile, toggle_tag, add_expertise, delete_expertise\nfrom users.views.settings import profile_settings, edit_profile, edit_account, edit_notifications, edit_payments, \\\n edit_bot, edit_data, request_data\nfrom users.views.intro import intro\nfrom users.views.admin import admin_profile\nfrom users.views.people import people\n\nPOST_TYPE_RE = r\"(?P<post_type>(all|{}))\".format(\"|\".join(dict(Post.TYPES).keys()))\nORDERING_RE = r\"(?P<ordering>(activity|new|top|top_week|top_month))\"\n\nurlpatterns = [\n path(\"\", auth_switch(landing, feed), name=\"index\"),\n\n path(\"join/\", join, name=\"join\"),\n path(\"auth/login/\", login, name=\"login\"),\n path(\"auth/logout/\", logout, name=\"logout\"),\n path(\"auth/patreon/\", patreon_login, name=\"patreon_login\"),\n path(\"auth/patreon_callback/\", patreon_oauth_callback, name=\"patreon_oauth_callback\"),\n path(\"auth/email/\", email_login, name=\"email_login\"),\n path(\"auth/email/code/\", email_login_code, name=\"email_login_code\"),\n path(\"auth/external/\", external_login, name=\"external_login\"),\n\n path(\"monies/\", pay, name=\"pay\"),\n path(\"monies/done/\", done, name=\"done\"),\n path(\"monies/membership_expired/\", membership_expired, name=\"membership_expired\"),\n path(\"monies/stripe/webhook/\", stripe_webhook, name=\"stripe_webhook\"),\n path(\"monies/subscription/<str:subscription_id>/stop/\", stop_subscription, name=\"stop_subscription\"),\n\n path(\"user/<slug:user_slug>/\", profile, name=\"profile\"),\n path(\"user/<slug:user_slug>.json\", api_profile, name=\"api_profile\"),\n path(\"user/<slug:user_slug>/edit/\", profile_settings, name=\"profile_settings\"),\n path(\"user/<slug:user_slug>/edit/profile/\", edit_profile, name=\"edit_profile\"),\n path(\"user/<slug:user_slug>/edit/account/\", edit_account, name=\"edit_account\"),\n path(\"user/<slug:user_slug>/edit/bot/\", edit_bot, name=\"edit_bot\"),\n path(\"user/<slug:user_slug>/edit/notifications/\", edit_notifications, name=\"edit_notifications\"),\n path(\"user/<slug:user_slug>/edit/monies/\", edit_payments, name=\"edit_payments\"),\n path(\"user/<slug:user_slug>/edit/data/\", edit_data, name=\"edit_data\"),\n path(\"user/<slug:user_slug>/edit/data/request/\", request_data, 
name=\"request_user_data\"),\n path(\"user/<slug:user_slug>/admin/\", admin_profile, name=\"admin_profile\"),\n path(\"user/<slug:user_slug>/delete/\", request_delete_account, name=\"request_delete_account\"),\n path(\"user/<slug:user_slug>/delete/confirm/\", confirm_delete_account, name=\"confirm_delete_account\"),\n\n path(\"intro/\", intro, name=\"intro\"),\n path(\"people/\", people, name=\"people\"),\n path(\"achievements/\", achievements, name=\"achievements\"),\n path(\"profile/tag/<slug:tag_code>/toggle/\", toggle_tag, name=\"toggle_tag\"),\n path(\"profile/expertise/add/\", add_expertise, name=\"add_expertise\"),\n path(\"profile/expertise/<slug:expertise>/delete/\", delete_expertise, name=\"delete_expertise\"),\n path(\"profile/on_review/\", on_review, name=\"on_review\"),\n path(\"profile/rejected/\", rejected, name=\"rejected\"),\n path(\"profile/banned/\", banned, name=\"banned\"),\n\n path(\"create/\", compose, name=\"compose\"),\n path(\"create/<slug:post_type>/\", compose_type, name=\"compose_type\"),\n path(\"post/<slug:post_slug>/edit/\", edit_post, name=\"edit_post\"),\n path(\"post/<slug:post_slug>/bookmark/\", toggle_post_bookmark, name=\"toggle_post_bookmark\"),\n path(\"post/<slug:post_slug>/upvote/\", upvote_post, name=\"upvote_post\"),\n path(\"post/<slug:post_slug>/retract_vote/\", retract_post_vote, name=\"retract_post_vote\"),\n path(\"post/<slug:post_slug>/subscription/\", toggle_post_subscription, name=\"toggle_post_subscription\"),\n path(\"post/<slug:post_slug>/admin/\", admin_post, name=\"admin_post\"),\n path(\"post/<slug:post_slug>/announce/\", announce_post, name=\"announce_post\"),\n path(\"post/<slug:post_slug>/comment/create/\", create_comment, name=\"create_comment\"),\n path(\"post/<slug:post_slug>/comment/<uuid:comment_id>/\", show_comment, name=\"show_comment\", ),\n\n path(\"bookmarks/\", bookmarks, name=\"bookmarks\"),\n\n path(\"search/\", search, name=\"search\"),\n path(\"room/<slug:topic_slug>/\", feed, name=\"feed_topic\"),\n path(\"room/<slug:topic_slug>/<slug:ordering>/\", feed, name=\"feed_topic_ordering\"),\n\n path(\"comment/<uuid:comment_id>/upvote/\", upvote_comment, name=\"upvote_comment\"),\n path(\"comment/<uuid:comment_id>/retract_vote/\", retract_comment_vote, name=\"retract_comment_vote\"),\n path(\"comment/<uuid:comment_id>/edit/\", edit_comment, name=\"edit_comment\"),\n path(\"comment/<uuid:comment_id>/pin/\", pin_comment, name=\"pin_comment\"),\n path(\"comment/<uuid:comment_id>/delete/\", delete_comment, name=\"delete_comment\"),\n\n path(\"telegram/link/\", link_telegram, name=\"link_telegram\"),\n path(\"telegram/webhook/<str:token>/\", webhook_telegram, name=\"webhook_telegram\"),\n\n path(\"notifications/confirm/<str:secret>/\", email_confirm, name=\"email_confirm\"),\n path(\"notifications/confirm/<str:secret>/<str:legacy_code>/\", email_confirm, name=\"email_confirm_legacy\"),\n path(\"notifications/unsubscribe/<str:user_id>/<str:secret>/\", email_unsubscribe, name=\"email_unsubscribe\"),\n path(\"notifications/switch/<str:digest_type>/<str:user_id>/<str:secret>/\", email_digest_switch,\n name=\"email_digest_switch\"),\n path(\"notifications/renderer/digest/weekly/\", weekly_digest, name=\"render_weekly_digest\"),\n path(\"notifications/renderer/digest/daily/<slug:user_slug>/\", daily_digest, name=\"render_daily_digest\"),\n\n path(\"docs/<slug:doc_slug>/\", docs, name=\"docs\"),\n\n path(\"network/\", network, name=\"network\"),\n\n path(\"godmode/\", god_settings, name=\"god_settings\"),\n 
path(\"godmode/dev_login/\", debug_dev_login, name=\"debug_dev_login\"),\n path(\"godmode/random_login/\", debug_random_login, name=\"debug_random_login\"),\n\n # feeds\n path(\"sitemap.xml\", sitemap, {\"sitemaps\": sitemaps}, name=\"sitemap\"),\n path(\"posts.rss\", NewPostsRss(), name=\"rss\"),\n\n # keep these guys at the bottom\n re_path(r\"^{}/$\".format(POST_TYPE_RE), feed, name=\"feed_type\"),\n re_path(r\"^{}/{}/$\".format(POST_TYPE_RE, ORDERING_RE), feed, name=\"feed_ordering\"),\n path(\"<slug:post_type>/<slug:post_slug>/\", show_post, name=\"show_post\"),\n path(\"<slug:post_type>/<slug:post_slug>.md\", md_show_post, name=\"md_show_post\"),\n path(\"<slug:post_type>/<slug:post_slug>.json\", api_show_post, name=\"api_show_post\"),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n# According to django doc: https://docs.djangoproject.com/en/3.1/topics/testing/overview/#other-test-conditions\n# Regardless of the value of the DEBUG setting in your configuration file, all Django tests run with DEBUG=False\n# so we use separate special var instead of settings.DEBUG\nif settings.TESTS_RUN:\n from debug.api import api_me\n\n urlpatterns.append(path(\"debug/me\", api_me, name=\"debug_api_me\"))\n", "path": "club/urls.py"}]}
| 3,188 | 457 |
gh_patches_debug_19678
|
rasdani/github-patches
|
git_diff
|
fedora-infra__bodhi-2887
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
bodhi.server.consumers.updates.UpdatesHandler.consume uses an assert statement
```assert``` statements get removed from optimized code, which is what gets built in Koji (which is where production Bodhi builds come from). Thus, the assertion will not be present in production.
If the assertion is important, we should either reconcile the database discrepancy, or raise an Exception. It might make the most sense to reconcile the database discrepancy.
</issue>
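
To see concretely why the check vanishes in production, here is a minimal standalone sketch (hypothetical file, not part of the Bodhi code base) of the same pattern:

    # demo_assert.py
    def sanity_check(bug, update_bugs):
        assert bug in update_bugs  # compiled away when Python runs with -O
        return True

    if __name__ == "__main__":
        # python demo_assert.py     -> raises AssertionError
        # python -O demo_assert.py  -> prints True; the check never executes
        print(sanity_check("bz#1234", []))
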
<code>
[start of bodhi/server/consumers/updates.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2015-2018 Red Hat Inc., and others.
3 #
4 # This file is part of Bodhi.
5 #
6 # This program is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU General Public License
8 # as published by the Free Software Foundation; either version 2
9 # of the License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License along with
17 # this program; if not, write to the Free Software Foundation, Inc., 51
18 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 """
20 The "updates handler".
21
22 This module is responsible for doing value-added work "offline" that used to be
23 done when updates were submitted. Specifically, when someone submits an update
24 we used to:
25
26 - Update any bugs in bugzilla associated with the update.
27 - Check for test cases in the wiki.
28
29 Those things could sometimes take a *very* long time, especially if there were
30 lots of builds and lots of bugs in the update.
31
32 Now, update-submission breezes by those steps and simply tells the user "OK".
33 A fedmsg message gets published when their update goes through, and *that*
34 message gets received here and triggers us to do all that network-laden heavy
35 lifting.
36 """
37
38 import logging
39 import pprint
40 import time
41
42 import fedmsg.consumers
43
44 from bodhi.server import initialize_db, util, bugs as bug_module
45 from bodhi.server.config import config
46 from bodhi.server.exceptions import BodhiException
47 from bodhi.server.models import Bug, Update, UpdateType
48
49
50 log = logging.getLogger('bodhi')
51
52
53 class UpdatesHandler(fedmsg.consumers.FedmsgConsumer):
54 """
55 Perform background tasks when updates are created or edited.
56
57 This fedmsg listener waits for messages from the frontend about new or edited updates, and
58 performs background tasks such as modifying Bugzilla issues (and loading information from
59 Bugzilla so we can display it to the user) and looking up wiki test cases.
60
61 Attributes:
62 db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a
63 database session.
64 handle_bugs (bool): If True, interact with Bugzilla. Else do not.
65 topic (list): A list of strings that indicate which fedmsg topics this consumer listens to.
66 """
67
68 config_key = 'updates_handler'
69
70 def __init__(self, hub, *args, **kwargs):
71 """
72 Initialize the UpdatesHandler, subscribing it to the appropriate topics.
73
74 Args:
75 hub (moksha.hub.hub.CentralMokshaHub): The hub this handler is consuming messages from.
76 It is used to look up the hub config.
77 """
78 initialize_db(config)
79 self.db_factory = util.transactional_session_maker()
80
81 prefix = hub.config.get('topic_prefix')
82 env = hub.config.get('environment')
83 self.topic = [
84 prefix + '.' + env + '.bodhi.update.request.testing',
85 prefix + '.' + env + '.bodhi.update.edit',
86 ]
87
88 self.handle_bugs = bool(config.get('bodhi_email'))
89 if not self.handle_bugs:
90 log.warning("No bodhi_email defined; not fetching bug details")
91 else:
92 bug_module.set_bugtracker()
93
94 super(UpdatesHandler, self).__init__(hub, *args, **kwargs)
95 log.info('Bodhi updates handler listening on:\n'
96 '%s' % pprint.pformat(self.topic))
97
98 def consume(self, message):
99 """
100 Process the given message, updating relevant bugs and test cases.
101
102 Args:
103 message (munch.Munch): A fedmsg about a new or edited update.
104 """
105 msg = message['body']['msg']
106 topic = message['topic']
107 alias = msg['update'].get('alias')
108
109 log.info("Updates Handler handling %s, %s" % (alias, topic))
110
111 # Go to sleep for a second to try and avoid a race condition
112 # https://github.com/fedora-infra/bodhi/issues/458
113 time.sleep(1)
114
115 if not alias:
116 log.error("Update Handler got update with no "
117 "alias %s." % pprint.pformat(msg))
118 return
119
120 with self.db_factory() as session:
121 update = Update.get(alias)
122 if not update:
123 raise BodhiException("Couldn't find alias '%s' in DB" % alias)
124
125 if topic.endswith('update.edit'):
126 bugs = [Bug.get(idx) for idx in msg['new_bugs']]
127 # Sanity check
128 for bug in bugs:
129 assert bug in update.bugs
130 elif topic.endswith('update.request.testing'):
131 bugs = update.bugs
132 else:
133 raise NotImplementedError("Should never get here.")
134
135 self.work_on_bugs(session, update, bugs)
136 self.fetch_test_cases(session, update)
137
138 if config['test_gating.required']:
139 with self.db_factory() as session:
140 update = Update.get(alias)
141 update.update_test_gating_status()
142
143 log.info("Updates Handler done with %s, %s" % (alias, topic))
144
145 def fetch_test_cases(self, session, update):
146 """
147 Query the wiki for test cases for each package on the given update.
148
149 Args:
150 session (sqlalchemy.orm.session.Session): A database session.
151 update (bodhi.server.models.Update): The update's builds are iterated upon to find test
152 cases for their associated Packages..
153 """
154 for build in update.builds:
155 build.package.fetch_test_cases(session)
156
157 def work_on_bugs(self, session, update, bugs):
158 """
159 Iterate the list of bugs, retrieving information from Bugzilla and modifying them.
160
161 Iterate the given list of bugs associated with the given update. For each bug, retrieve
162 details from Bugzilla, comment on the bug to let watchers know about the update, and mark
163 the bug as MODIFIED. If the bug is a security issue, mark the update as a security update.
164
165 If the bug is private, Bodhi can't retrieve any information, comment on it, or modify
166 it, so we just associate the bug id with the update and mark it to be private.
167
168 If handle_bugs is not True, return and do nothing.
169
170 Args:
171 session (sqlalchemy.orm.session.Session): A database session.
172 update (bodhi.server.models.Update): The update that the bugs are associated with.
173 bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.
174 """
175 if not self.handle_bugs:
176 log.warning("Not configured to handle bugs")
177 return
178
179 log.info("Got %i bugs to sync for %r" % (len(bugs), update.alias))
180 for bug in bugs:
181 log.info("Getting RHBZ bug %r" % bug.bug_id)
182 try:
183 rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)
184
185 log.info("Updating our details for %r" % bug.bug_id)
186 bug.update_details(rhbz_bug)
187 if bug.private:
188 # Bodhi can't retrieve any information so just continue with the next bug
189 log.info(" Skipping bug %r because it is private" % (bug.bug_id))
190 continue
191 log.info(" Got title %r for %r" % (bug.title, bug.bug_id))
192
193 # If you set the type of your update to 'enhancement' but you
194 # attach a security bug, we automatically change the type of your
195 # update to 'security'. We need to do this first, so we don't
196 # accidentally comment on stuff that we shouldn't.
197 if bug.security:
198 log.info("Setting our UpdateType to security.")
199 update.type = UpdateType.security
200
201 log.info("Commenting on %r" % bug.bug_id)
202 comment = config['initial_bug_msg'] % (
203 update.title, update.release.long_name, update.abs_url())
204
205 log.info("Modifying %r" % bug.bug_id)
206 bug.modified(update, comment)
207 except Exception:
208 log.warning('Error occurred during updating single bug', exc_info=True)
209
[end of bodhi/server/consumers/updates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bodhi/server/consumers/updates.py b/bodhi/server/consumers/updates.py
--- a/bodhi/server/consumers/updates.py
+++ b/bodhi/server/consumers/updates.py
@@ -122,11 +122,21 @@
if not update:
raise BodhiException("Couldn't find alias '%s' in DB" % alias)
+ bugs = []
if topic.endswith('update.edit'):
- bugs = [Bug.get(idx) for idx in msg['new_bugs']]
- # Sanity check
- for bug in bugs:
- assert bug in update.bugs
+ for idx in msg['new_bugs']:
+ bug = Bug.get(idx)
+
+ # Sanity check
+ if bug is None or bug not in update.bugs:
+ update_bugs_ids = [b.bug_id for b in update.bugs]
+ update.update_bugs(update_bugs_ids + [idx], session)
+
+ # Now, after update.update_bugs, bug with idx should exists in DB
+ bug = Bug.get(idx)
+
+ bugs.append(bug)
+
elif topic.endswith('update.request.testing'):
bugs = update.bugs
else:
|
{"golden_diff": "diff --git a/bodhi/server/consumers/updates.py b/bodhi/server/consumers/updates.py\n--- a/bodhi/server/consumers/updates.py\n+++ b/bodhi/server/consumers/updates.py\n@@ -122,11 +122,21 @@\n if not update:\n raise BodhiException(\"Couldn't find alias '%s' in DB\" % alias)\n \n+ bugs = []\n if topic.endswith('update.edit'):\n- bugs = [Bug.get(idx) for idx in msg['new_bugs']]\n- # Sanity check\n- for bug in bugs:\n- assert bug in update.bugs\n+ for idx in msg['new_bugs']:\n+ bug = Bug.get(idx)\n+\n+ # Sanity check\n+ if bug is None or bug not in update.bugs:\n+ update_bugs_ids = [b.bug_id for b in update.bugs]\n+ update.update_bugs(update_bugs_ids + [idx], session)\n+\n+ # Now, after update.update_bugs, bug with idx should exists in DB\n+ bug = Bug.get(idx)\n+\n+ bugs.append(bug)\n+\n elif topic.endswith('update.request.testing'):\n bugs = update.bugs\n else:\n", "issue": "bodhi.server.consumers.updates.UpdatesHandler.consumer uses an assert statement\n```assert``` statements get removed from optimized code, which is what gets built in Koji (which is where production Bodhi builds come from). Thus, the assertion will not be present in production.\r\n\r\nIf the assertion is important, we should either reconcile the database discrepancy, or raise an Exception. It might make the most sense to reconcile the database discrepancy.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015-2018 Red Hat Inc., and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"updates handler\".\n\nThis module is responsible for doing value-added work \"offline\" that used to be\ndone when updates were submitted. 
Specifically, when someone submits an update\nwe used to:\n\n- Update any bugs in bugzilla associated with the update.\n- Check for test cases in the wiki.\n\nThose things could sometimes take a *very* long time, especially if there were\nlots of builds and lots of bugs in the update.\n\nNow, update-submission breezes by those steps and simply tells the user \"OK\".\nA fedmsg message gets published when their update goes through, and *that*\nmessage gets received here and triggers us to do all that network-laden heavy\nlifting.\n\"\"\"\n\nimport logging\nimport pprint\nimport time\n\nimport fedmsg.consumers\n\nfrom bodhi.server import initialize_db, util, bugs as bug_module\nfrom bodhi.server.config import config\nfrom bodhi.server.exceptions import BodhiException\nfrom bodhi.server.models import Bug, Update, UpdateType\n\n\nlog = logging.getLogger('bodhi')\n\n\nclass UpdatesHandler(fedmsg.consumers.FedmsgConsumer):\n \"\"\"\n Perform background tasks when updates are created or edited.\n\n This fedmsg listener waits for messages from the frontend about new or edited updates, and\n performs background tasks such as modifying Bugzilla issues (and loading information from\n Bugzilla so we can display it to the user) and looking up wiki test cases.\n\n Attributes:\n db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a\n database session.\n handle_bugs (bool): If True, interact with Bugzilla. Else do not.\n topic (list): A list of strings that indicate which fedmsg topics this consumer listens to.\n \"\"\"\n\n config_key = 'updates_handler'\n\n def __init__(self, hub, *args, **kwargs):\n \"\"\"\n Initialize the UpdatesHandler, subscribing it to the appropriate topics.\n\n Args:\n hub (moksha.hub.hub.CentralMokshaHub): The hub this handler is consuming messages from.\n It is used to look up the hub config.\n \"\"\"\n initialize_db(config)\n self.db_factory = util.transactional_session_maker()\n\n prefix = hub.config.get('topic_prefix')\n env = hub.config.get('environment')\n self.topic = [\n prefix + '.' + env + '.bodhi.update.request.testing',\n prefix + '.' 
+ env + '.bodhi.update.edit',\n ]\n\n self.handle_bugs = bool(config.get('bodhi_email'))\n if not self.handle_bugs:\n log.warning(\"No bodhi_email defined; not fetching bug details\")\n else:\n bug_module.set_bugtracker()\n\n super(UpdatesHandler, self).__init__(hub, *args, **kwargs)\n log.info('Bodhi updates handler listening on:\\n'\n '%s' % pprint.pformat(self.topic))\n\n def consume(self, message):\n \"\"\"\n Process the given message, updating relevant bugs and test cases.\n\n Args:\n message (munch.Munch): A fedmsg about a new or edited update.\n \"\"\"\n msg = message['body']['msg']\n topic = message['topic']\n alias = msg['update'].get('alias')\n\n log.info(\"Updates Handler handling %s, %s\" % (alias, topic))\n\n # Go to sleep for a second to try and avoid a race condition\n # https://github.com/fedora-infra/bodhi/issues/458\n time.sleep(1)\n\n if not alias:\n log.error(\"Update Handler got update with no \"\n \"alias %s.\" % pprint.pformat(msg))\n return\n\n with self.db_factory() as session:\n update = Update.get(alias)\n if not update:\n raise BodhiException(\"Couldn't find alias '%s' in DB\" % alias)\n\n if topic.endswith('update.edit'):\n bugs = [Bug.get(idx) for idx in msg['new_bugs']]\n # Sanity check\n for bug in bugs:\n assert bug in update.bugs\n elif topic.endswith('update.request.testing'):\n bugs = update.bugs\n else:\n raise NotImplementedError(\"Should never get here.\")\n\n self.work_on_bugs(session, update, bugs)\n self.fetch_test_cases(session, update)\n\n if config['test_gating.required']:\n with self.db_factory() as session:\n update = Update.get(alias)\n update.update_test_gating_status()\n\n log.info(\"Updates Handler done with %s, %s\" % (alias, topic))\n\n def fetch_test_cases(self, session, update):\n \"\"\"\n Query the wiki for test cases for each package on the given update.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update's builds are iterated upon to find test\n cases for their associated Packages..\n \"\"\"\n for build in update.builds:\n build.package.fetch_test_cases(session)\n\n def work_on_bugs(self, session, update, bugs):\n \"\"\"\n Iterate the list of bugs, retrieving information from Bugzilla and modifying them.\n\n Iterate the given list of bugs associated with the given update. For each bug, retrieve\n details from Bugzilla, comment on the bug to let watchers know about the update, and mark\n the bug as MODIFIED. 
If the bug is a security issue, mark the update as a security update.\n\n If the bug is private, Bodhi can't retrieve any information, comment on it, or modify\n it, so we just associate the bug id with the update and mark it to be private.\n\n If handle_bugs is not True, return and do nothing.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update that the bugs are associated with.\n bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.\n \"\"\"\n if not self.handle_bugs:\n log.warning(\"Not configured to handle bugs\")\n return\n\n log.info(\"Got %i bugs to sync for %r\" % (len(bugs), update.alias))\n for bug in bugs:\n log.info(\"Getting RHBZ bug %r\" % bug.bug_id)\n try:\n rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)\n\n log.info(\"Updating our details for %r\" % bug.bug_id)\n bug.update_details(rhbz_bug)\n if bug.private:\n # Bodhi can't retrieve any information so just continue with the next bug\n log.info(\" Skipping bug %r because it is private\" % (bug.bug_id))\n continue\n log.info(\" Got title %r for %r\" % (bug.title, bug.bug_id))\n\n # If you set the type of your update to 'enhancement' but you\n # attach a security bug, we automatically change the type of your\n # update to 'security'. We need to do this first, so we don't\n # accidentally comment on stuff that we shouldn't.\n if bug.security:\n log.info(\"Setting our UpdateType to security.\")\n update.type = UpdateType.security\n\n log.info(\"Commenting on %r\" % bug.bug_id)\n comment = config['initial_bug_msg'] % (\n update.title, update.release.long_name, update.abs_url())\n\n log.info(\"Modifying %r\" % bug.bug_id)\n bug.modified(update, comment)\n except Exception:\n log.warning('Error occurred during updating single bug', exc_info=True)\n", "path": "bodhi/server/consumers/updates.py"}]}
| 3,008 | 277 |
gh_patches_debug_21500
|
rasdani/github-patches
|
git_diff
|
saleor__saleor-11924
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Giftcard created event is not triggered when gift card is bought
### What are you trying to achieve?
I'm trying to inform the external system about the creation of the gift card.
### Steps to reproduce the problem
1. Create a Product type of Gift card
2. Create a gift card Product of that type
3. Create a webhook which triggers on gift card creation
4. Create a draft order and add the newly created gift card
5. Finalize the draft order
### What did you expect to happen?
Saleor should properly send a webhook after the gift card is created.
### Logs
_No response_
### Environment
Saleor version: 3.10
</issue>
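
For orientation, a minimal sketch of the kind of change that closes this gap, assuming the plugin manager exposes a gift_card_created hook; the placement inside gift_cards_create and the on_commit wrapping are assumptions that mirror the pattern already used for customer notifications in the flow shown below:

    # Sketch only -- inside gift_cards_create(), right after the cards are created:
    gift_cards = GiftCard.objects.bulk_create(gift_cards)
    events.gift_cards_bought_event(gift_cards, order, requestor_user, app)

    for gift_card in gift_cards:
        # Notify plugins/webhooks once the surrounding transaction commits, so
        # external systems also learn about cards created through a purchase.
        transaction.on_commit(lambda gc=gift_card: manager.gift_card_created(gc))
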
<code>
[start of saleor/giftcard/utils.py]
1 from collections import defaultdict
2 from datetime import date
3 from typing import TYPE_CHECKING, DefaultDict, Iterable, List, Optional
4 from uuid import UUID
5
6 from dateutil.relativedelta import relativedelta
7 from django.db import transaction
8 from django.db.models.expressions import Exists, OuterRef
9 from django.utils import timezone
10
11 from ..checkout.models import Checkout
12 from ..core.exceptions import GiftCardNotApplicable
13 from ..core.tracing import traced_atomic_transaction
14 from ..core.utils.promo_code import InvalidPromoCode, generate_promo_code
15 from ..order.actions import OrderFulfillmentLineInfo, create_fulfillments
16 from ..order.models import OrderLine
17 from ..site import GiftCardSettingsExpiryType
18 from . import GiftCardEvents, GiftCardLineData, events
19 from .models import GiftCard, GiftCardEvent
20 from .notifications import send_gift_card_notification
21
22 if TYPE_CHECKING:
23 from django.db.models import QuerySet
24
25 from ..account.models import User
26 from ..app.models import App
27 from ..order.models import Order
28 from ..plugins.manager import PluginsManager
29 from ..site.models import SiteSettings
30
31
32 def add_gift_card_code_to_checkout(
33 checkout: Checkout, email: str, promo_code: str, currency: str
34 ):
35 """Add gift card data to checkout by code.
36
37 Raise ValidationError if email is not provided.
38 Raise InvalidPromoCode if gift card cannot be applied.
39 """
40 from ..checkout.checkout_cleaner import validate_checkout_email
41
42 validate_checkout_email(checkout)
43
44 try:
45 # only active gift card with currency the same as channel currency can be used
46 gift_card = (
47 GiftCard.objects.active(date=date.today())
48 .filter(currency=currency)
49 .get(code=promo_code)
50 )
51 except GiftCard.DoesNotExist:
52 raise InvalidPromoCode()
53
54 used_by_email = gift_card.used_by_email
55 # gift card can be used only by one user
56 if used_by_email and used_by_email != email:
57 raise InvalidPromoCode()
58
59 checkout.gift_cards.add(gift_card)
60 checkout.save(update_fields=["last_change"])
61
62
63 def remove_gift_card_code_from_checkout(checkout: Checkout, gift_card_code: str):
64 """Remove gift card data from checkout by code.
65
66 Return information whether promo code was removed.
67 """
68 gift_card = checkout.gift_cards.filter(code=gift_card_code).first()
69 if gift_card:
70 checkout.gift_cards.remove(gift_card)
71 checkout.save(update_fields=["last_change"])
72 return True
73 return False
74
75
76 def deactivate_gift_card(gift_card: GiftCard):
77 """Set gift card status as inactive."""
78 if gift_card.is_active:
79 gift_card.is_active = False
80 gift_card.save(update_fields=["is_active"])
81
82
83 def activate_gift_card(gift_card: GiftCard):
84 """Set gift card status as active."""
85 if not gift_card.is_active:
86 gift_card.is_active = True
87 gift_card.save(update_fields=["is_active"])
88
89
90 def fulfill_non_shippable_gift_cards(
91 order: "Order",
92 order_lines: Iterable[OrderLine],
93 settings: "SiteSettings",
94 requestor_user: Optional["User"],
95 app: Optional["App"],
96 manager: "PluginsManager",
97 ):
98 gift_card_lines = get_non_shippable_gift_card_lines(order_lines)
99 if not gift_card_lines:
100 return
101 fulfill_gift_card_lines(
102 gift_card_lines, requestor_user, app, order, settings, manager
103 )
104
105
106 def get_non_shippable_gift_card_lines(lines: Iterable[OrderLine]) -> "QuerySet":
107 gift_card_lines = get_gift_card_lines(lines)
108 non_shippable_lines = OrderLine.objects.filter(
109 id__in=[line.pk for line in gift_card_lines], is_shipping_required=False
110 )
111 return non_shippable_lines
112
113
114 def get_gift_card_lines(lines: Iterable[OrderLine]):
115 gift_card_lines = [line for line in lines if line.is_gift_card]
116 return gift_card_lines
117
118
119 def fulfill_gift_card_lines(
120 gift_card_lines: "QuerySet",
121 requestor_user: Optional["User"],
122 app: Optional["App"],
123 order: "Order",
124 settings: "SiteSettings",
125 manager: "PluginsManager",
126 ):
127 lines_for_warehouses: DefaultDict[
128 UUID, List[OrderFulfillmentLineInfo]
129 ] = defaultdict(list)
130 channel_slug = order.channel.slug
131 for line in gift_card_lines.prefetch_related(
132 "allocations__stock", "variant__stocks"
133 ):
134 if allocations := line.allocations.all():
135 for allocation in allocations:
136 quantity = allocation.quantity_allocated
137 if quantity > 0:
138 warehouse_pk = allocation.stock.warehouse_id
139 lines_for_warehouses[warehouse_pk].append(
140 {"order_line": line, "quantity": quantity}
141 )
142 else:
143 stock = line.variant.stocks.for_channel_and_country(channel_slug).first()
144 if not stock:
145 raise GiftCardNotApplicable(
146 message="Lack of gift card stock for checkout channel.",
147 )
148 warehouse_pk = stock.warehouse_id
149 lines_for_warehouses[warehouse_pk].append(
150 {"order_line": line, "quantity": line.quantity}
151 )
152
153 return create_fulfillments(
154 requestor_user,
155 app,
156 order,
157 dict(lines_for_warehouses),
158 manager,
159 settings,
160 notify_customer=True,
161 )
162
163
164 @traced_atomic_transaction()
165 def gift_cards_create(
166 order: "Order",
167 gift_card_lines_info: Iterable["GiftCardLineData"],
168 settings: "SiteSettings",
169 requestor_user: Optional["User"],
170 app: Optional["App"],
171 manager: "PluginsManager",
172 ):
173 """Create purchased gift cards."""
174 customer_user = order.user
175 user_email = order.user_email
176 gift_cards = []
177 non_shippable_gift_cards = []
178 expiry_date = calculate_expiry_date(settings)
179 for line_data in gift_card_lines_info:
180 order_line = line_data.order_line
181 price = order_line.unit_price_gross
182 line_gift_cards = [
183 GiftCard( # type: ignore[misc] # see below:
184 code=generate_promo_code(),
185 initial_balance=price, # money field not supported by mypy_django_plugin # noqa: E501
186 current_balance=price, # money field not supported by mypy_django_plugin # noqa: E501
187 created_by=customer_user,
188 created_by_email=user_email,
189 product=line_data.variant.product if line_data.variant else None,
190 fulfillment_line=line_data.fulfillment_line,
191 expiry_date=expiry_date,
192 )
193 for _ in range(line_data.quantity)
194 ]
195 gift_cards.extend(line_gift_cards)
196 if not order_line.is_shipping_required:
197 non_shippable_gift_cards.extend(line_gift_cards)
198
199 gift_cards = GiftCard.objects.bulk_create(gift_cards)
200 events.gift_cards_bought_event(gift_cards, order, requestor_user, app)
201
202 channel_slug = order.channel.slug
203 # send to customer all non-shippable gift cards
204 transaction.on_commit(
205 lambda: send_gift_cards_to_customer(
206 non_shippable_gift_cards,
207 user_email,
208 requestor_user,
209 app,
210 customer_user,
211 manager,
212 channel_slug,
213 )
214 )
215 return gift_cards
216
217
218 def calculate_expiry_date(settings):
219 """Calculate expiry date based on gift card settings."""
220 today = timezone.now().date()
221 expiry_date = None
222 if settings.gift_card_expiry_type == GiftCardSettingsExpiryType.EXPIRY_PERIOD:
223 expiry_period_type = settings.gift_card_expiry_period_type
224 time_delta = {f"{expiry_period_type}s": settings.gift_card_expiry_period}
225 expiry_date = today + relativedelta(**time_delta)
226 return expiry_date
227
228
229 def send_gift_cards_to_customer(
230 gift_cards: Iterable[GiftCard],
231 user_email: str,
232 requestor_user: Optional["User"],
233 app: Optional["App"],
234 customer_user: Optional["User"],
235 manager: "PluginsManager",
236 channel_slug: str,
237 ):
238 for gift_card in gift_cards:
239 send_gift_card_notification(
240 requestor_user,
241 app,
242 customer_user,
243 user_email,
244 gift_card,
245 manager,
246 channel_slug,
247 resending=False,
248 )
249
250
251 def deactivate_order_gift_cards(
252 order_id: UUID, user: Optional["User"], app: Optional["App"]
253 ):
254 gift_card_events = GiftCardEvent.objects.filter(
255 type=GiftCardEvents.BOUGHT, order_id=order_id
256 )
257 gift_cards = GiftCard.objects.filter(
258 Exists(gift_card_events.filter(gift_card_id=OuterRef("id")))
259 )
260 gift_cards.update(is_active=False)
261 events.gift_cards_deactivated_event(
262 gift_cards.values_list("id", flat=True), user, app
263 )
264
265
266 def order_has_gift_card_lines(order):
267 return any(order.lines.filter(is_gift_card=True))
268
269
270 def assign_user_gift_cards(user):
271 GiftCard.objects.filter(used_by_email=user.email).update(used_by=user)
272 GiftCard.objects.filter(created_by_email=user.email).update(created_by=user)
273
274
275 def is_gift_card_expired(gift_card: GiftCard):
276 """Return True when gift card expiry date pass."""
277 today = timezone.now().date()
278 return bool(gift_card.expiry_date) and gift_card.expiry_date < today # type: ignore
279
[end of saleor/giftcard/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/saleor/giftcard/utils.py b/saleor/giftcard/utils.py
--- a/saleor/giftcard/utils.py
+++ b/saleor/giftcard/utils.py
@@ -11,6 +11,7 @@
from ..checkout.models import Checkout
from ..core.exceptions import GiftCardNotApplicable
from ..core.tracing import traced_atomic_transaction
+from ..core.utils.events import call_event
from ..core.utils.promo_code import InvalidPromoCode, generate_promo_code
from ..order.actions import OrderFulfillmentLineInfo, create_fulfillments
from ..order.models import OrderLine
@@ -199,6 +200,9 @@
gift_cards = GiftCard.objects.bulk_create(gift_cards)
events.gift_cards_bought_event(gift_cards, order, requestor_user, app)
+ for gift_card in gift_cards:
+ call_event(manager.gift_card_created, gift_card)
+
channel_slug = order.channel.slug
# send to customer all non-shippable gift cards
transaction.on_commit(
|
{"golden_diff": "diff --git a/saleor/giftcard/utils.py b/saleor/giftcard/utils.py\n--- a/saleor/giftcard/utils.py\n+++ b/saleor/giftcard/utils.py\n@@ -11,6 +11,7 @@\n from ..checkout.models import Checkout\n from ..core.exceptions import GiftCardNotApplicable\n from ..core.tracing import traced_atomic_transaction\n+from ..core.utils.events import call_event\n from ..core.utils.promo_code import InvalidPromoCode, generate_promo_code\n from ..order.actions import OrderFulfillmentLineInfo, create_fulfillments\n from ..order.models import OrderLine\n@@ -199,6 +200,9 @@\n gift_cards = GiftCard.objects.bulk_create(gift_cards)\n events.gift_cards_bought_event(gift_cards, order, requestor_user, app)\n \n+ for gift_card in gift_cards:\n+ call_event(manager.gift_card_created, gift_card)\n+\n channel_slug = order.channel.slug\n # send to customer all non-shippable gift cards\n transaction.on_commit(\n", "issue": "Bug: Giftcard created event is not triggered when gift card is bought \n### What are you trying to achieve?\n\nI'm trying to inform the external system about the creation of the gift card. \n\n### Steps to reproduce the problem\n\n1. Create a Product type of Gift card\r\n2. Create gift card -> Product\r\n3. Create webhook which triggers on gift card creation \r\n4. Create draft order and add newly gift card \r\n5. Finalize draft order \n\n### What did you expect to happen?\n\nSaleor should properly send a webhook after the gift card is created.\n\n### Logs\n\n_No response_\n\n### Environment\n\nSaleor version: 3.10\r\n\n", "before_files": [{"content": "from collections import defaultdict\nfrom datetime import date\nfrom typing import TYPE_CHECKING, DefaultDict, Iterable, List, Optional\nfrom uuid import UUID\n\nfrom dateutil.relativedelta import relativedelta\nfrom django.db import transaction\nfrom django.db.models.expressions import Exists, OuterRef\nfrom django.utils import timezone\n\nfrom ..checkout.models import Checkout\nfrom ..core.exceptions import GiftCardNotApplicable\nfrom ..core.tracing import traced_atomic_transaction\nfrom ..core.utils.promo_code import InvalidPromoCode, generate_promo_code\nfrom ..order.actions import OrderFulfillmentLineInfo, create_fulfillments\nfrom ..order.models import OrderLine\nfrom ..site import GiftCardSettingsExpiryType\nfrom . 
import GiftCardEvents, GiftCardLineData, events\nfrom .models import GiftCard, GiftCardEvent\nfrom .notifications import send_gift_card_notification\n\nif TYPE_CHECKING:\n from django.db.models import QuerySet\n\n from ..account.models import User\n from ..app.models import App\n from ..order.models import Order\n from ..plugins.manager import PluginsManager\n from ..site.models import SiteSettings\n\n\ndef add_gift_card_code_to_checkout(\n checkout: Checkout, email: str, promo_code: str, currency: str\n):\n \"\"\"Add gift card data to checkout by code.\n\n Raise ValidationError if email is not provided.\n Raise InvalidPromoCode if gift card cannot be applied.\n \"\"\"\n from ..checkout.checkout_cleaner import validate_checkout_email\n\n validate_checkout_email(checkout)\n\n try:\n # only active gift card with currency the same as channel currency can be used\n gift_card = (\n GiftCard.objects.active(date=date.today())\n .filter(currency=currency)\n .get(code=promo_code)\n )\n except GiftCard.DoesNotExist:\n raise InvalidPromoCode()\n\n used_by_email = gift_card.used_by_email\n # gift card can be used only by one user\n if used_by_email and used_by_email != email:\n raise InvalidPromoCode()\n\n checkout.gift_cards.add(gift_card)\n checkout.save(update_fields=[\"last_change\"])\n\n\ndef remove_gift_card_code_from_checkout(checkout: Checkout, gift_card_code: str):\n \"\"\"Remove gift card data from checkout by code.\n\n Return information whether promo code was removed.\n \"\"\"\n gift_card = checkout.gift_cards.filter(code=gift_card_code).first()\n if gift_card:\n checkout.gift_cards.remove(gift_card)\n checkout.save(update_fields=[\"last_change\"])\n return True\n return False\n\n\ndef deactivate_gift_card(gift_card: GiftCard):\n \"\"\"Set gift card status as inactive.\"\"\"\n if gift_card.is_active:\n gift_card.is_active = False\n gift_card.save(update_fields=[\"is_active\"])\n\n\ndef activate_gift_card(gift_card: GiftCard):\n \"\"\"Set gift card status as active.\"\"\"\n if not gift_card.is_active:\n gift_card.is_active = True\n gift_card.save(update_fields=[\"is_active\"])\n\n\ndef fulfill_non_shippable_gift_cards(\n order: \"Order\",\n order_lines: Iterable[OrderLine],\n settings: \"SiteSettings\",\n requestor_user: Optional[\"User\"],\n app: Optional[\"App\"],\n manager: \"PluginsManager\",\n):\n gift_card_lines = get_non_shippable_gift_card_lines(order_lines)\n if not gift_card_lines:\n return\n fulfill_gift_card_lines(\n gift_card_lines, requestor_user, app, order, settings, manager\n )\n\n\ndef get_non_shippable_gift_card_lines(lines: Iterable[OrderLine]) -> \"QuerySet\":\n gift_card_lines = get_gift_card_lines(lines)\n non_shippable_lines = OrderLine.objects.filter(\n id__in=[line.pk for line in gift_card_lines], is_shipping_required=False\n )\n return non_shippable_lines\n\n\ndef get_gift_card_lines(lines: Iterable[OrderLine]):\n gift_card_lines = [line for line in lines if line.is_gift_card]\n return gift_card_lines\n\n\ndef fulfill_gift_card_lines(\n gift_card_lines: \"QuerySet\",\n requestor_user: Optional[\"User\"],\n app: Optional[\"App\"],\n order: \"Order\",\n settings: \"SiteSettings\",\n manager: \"PluginsManager\",\n):\n lines_for_warehouses: DefaultDict[\n UUID, List[OrderFulfillmentLineInfo]\n ] = defaultdict(list)\n channel_slug = order.channel.slug\n for line in gift_card_lines.prefetch_related(\n \"allocations__stock\", \"variant__stocks\"\n ):\n if allocations := line.allocations.all():\n for allocation in allocations:\n quantity = allocation.quantity_allocated\n 
if quantity > 0:\n warehouse_pk = allocation.stock.warehouse_id\n lines_for_warehouses[warehouse_pk].append(\n {\"order_line\": line, \"quantity\": quantity}\n )\n else:\n stock = line.variant.stocks.for_channel_and_country(channel_slug).first()\n if not stock:\n raise GiftCardNotApplicable(\n message=\"Lack of gift card stock for checkout channel.\",\n )\n warehouse_pk = stock.warehouse_id\n lines_for_warehouses[warehouse_pk].append(\n {\"order_line\": line, \"quantity\": line.quantity}\n )\n\n return create_fulfillments(\n requestor_user,\n app,\n order,\n dict(lines_for_warehouses),\n manager,\n settings,\n notify_customer=True,\n )\n\n\n@traced_atomic_transaction()\ndef gift_cards_create(\n order: \"Order\",\n gift_card_lines_info: Iterable[\"GiftCardLineData\"],\n settings: \"SiteSettings\",\n requestor_user: Optional[\"User\"],\n app: Optional[\"App\"],\n manager: \"PluginsManager\",\n):\n \"\"\"Create purchased gift cards.\"\"\"\n customer_user = order.user\n user_email = order.user_email\n gift_cards = []\n non_shippable_gift_cards = []\n expiry_date = calculate_expiry_date(settings)\n for line_data in gift_card_lines_info:\n order_line = line_data.order_line\n price = order_line.unit_price_gross\n line_gift_cards = [\n GiftCard( # type: ignore[misc] # see below:\n code=generate_promo_code(),\n initial_balance=price, # money field not supported by mypy_django_plugin # noqa: E501\n current_balance=price, # money field not supported by mypy_django_plugin # noqa: E501\n created_by=customer_user,\n created_by_email=user_email,\n product=line_data.variant.product if line_data.variant else None,\n fulfillment_line=line_data.fulfillment_line,\n expiry_date=expiry_date,\n )\n for _ in range(line_data.quantity)\n ]\n gift_cards.extend(line_gift_cards)\n if not order_line.is_shipping_required:\n non_shippable_gift_cards.extend(line_gift_cards)\n\n gift_cards = GiftCard.objects.bulk_create(gift_cards)\n events.gift_cards_bought_event(gift_cards, order, requestor_user, app)\n\n channel_slug = order.channel.slug\n # send to customer all non-shippable gift cards\n transaction.on_commit(\n lambda: send_gift_cards_to_customer(\n non_shippable_gift_cards,\n user_email,\n requestor_user,\n app,\n customer_user,\n manager,\n channel_slug,\n )\n )\n return gift_cards\n\n\ndef calculate_expiry_date(settings):\n \"\"\"Calculate expiry date based on gift card settings.\"\"\"\n today = timezone.now().date()\n expiry_date = None\n if settings.gift_card_expiry_type == GiftCardSettingsExpiryType.EXPIRY_PERIOD:\n expiry_period_type = settings.gift_card_expiry_period_type\n time_delta = {f\"{expiry_period_type}s\": settings.gift_card_expiry_period}\n expiry_date = today + relativedelta(**time_delta)\n return expiry_date\n\n\ndef send_gift_cards_to_customer(\n gift_cards: Iterable[GiftCard],\n user_email: str,\n requestor_user: Optional[\"User\"],\n app: Optional[\"App\"],\n customer_user: Optional[\"User\"],\n manager: \"PluginsManager\",\n channel_slug: str,\n):\n for gift_card in gift_cards:\n send_gift_card_notification(\n requestor_user,\n app,\n customer_user,\n user_email,\n gift_card,\n manager,\n channel_slug,\n resending=False,\n )\n\n\ndef deactivate_order_gift_cards(\n order_id: UUID, user: Optional[\"User\"], app: Optional[\"App\"]\n):\n gift_card_events = GiftCardEvent.objects.filter(\n type=GiftCardEvents.BOUGHT, order_id=order_id\n )\n gift_cards = GiftCard.objects.filter(\n Exists(gift_card_events.filter(gift_card_id=OuterRef(\"id\")))\n )\n gift_cards.update(is_active=False)\n 
events.gift_cards_deactivated_event(\n gift_cards.values_list(\"id\", flat=True), user, app\n )\n\n\ndef order_has_gift_card_lines(order):\n return any(order.lines.filter(is_gift_card=True))\n\n\ndef assign_user_gift_cards(user):\n GiftCard.objects.filter(used_by_email=user.email).update(used_by=user)\n GiftCard.objects.filter(created_by_email=user.email).update(created_by=user)\n\n\ndef is_gift_card_expired(gift_card: GiftCard):\n \"\"\"Return True when gift card expiry date pass.\"\"\"\n today = timezone.now().date()\n return bool(gift_card.expiry_date) and gift_card.expiry_date < today # type: ignore\n", "path": "saleor/giftcard/utils.py"}]}
| 3,437 | 234 |
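The gift-card fix above boils down to one pattern: after `bulk_create` returns the purchased cards, fire a plugin event for each of them so gift-card-created webhooks are actually delivered. Below is a minimal, self-contained sketch of that pattern; `GiftCard`, `DummyManager`, and `call_event` are placeholder stand-ins for Saleor's real model, plugin manager, and event helper, not the actual implementation.

```python
from dataclasses import dataclass
from typing import Callable, List


@dataclass
class GiftCard:                      # placeholder for saleor.giftcard.models.GiftCard
    code: str


class DummyManager:                  # placeholder for Saleor's plugins manager
    def gift_card_created(self, gift_card: GiftCard) -> None:
        print(f"webhook: gift_card_created for {gift_card.code}")


def call_event(handler: Callable, *args) -> None:
    # Saleor's helper defers the call until the DB transaction commits;
    # invoking it directly keeps this sketch runnable without Django.
    handler(*args)


def gift_cards_create(codes: List[str], manager: DummyManager) -> List[GiftCard]:
    gift_cards = [GiftCard(code=c) for c in codes]   # stands in for bulk_create()
    # The behaviour the patch adds: one created-event per purchased card.
    for gift_card in gift_cards:
        call_event(manager.gift_card_created, gift_card)
    return gift_cards


if __name__ == "__main__":
    gift_cards_create(["ABCD-1234", "EFGH-5678"], DummyManager())
```

Emitting the event per card (rather than once per order) keeps the webhook payload aligned with the object that was created, which is why the patch loops over the result of `bulk_create`.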
gh_patches_debug_31371
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-5177
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[pantsd] sending ctrl-\ in a `./pants repl` can lead to a hung pantsd-runner
repro:
```
[omerta pants-release (master)]$ ps -ef |grep pantsd-runner |grep -v grep
[omerta pants-release (master)]$ ./pants -q repl 3rdparty/python:psutil
Python 2.7.10 (default, Dec 16 2015, 14:09:45)
[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
(InteractiveConsole)
>>> ^\Quit: 3
[omerta pants-release (master)]$ ps -ef |grep pantsd-runner |grep -v grep
501 67669 1 0 10:37PM ?? 0:01.14 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67670 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67671 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67672 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67673 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67674 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67675 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67676 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
501 67677 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil]
[omerta pants-release (master)]$
```
we'll want to better handle `SIGQUIT` in the thin client side of the runner to avoid this.
</issue>
<code>
[start of src/python/pants/bin/remote_pants_runner.py]
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
6 unicode_literals, with_statement)
7
8 import logging
9 import signal
10 import sys
11 from contextlib import contextmanager
12
13 from pants.java.nailgun_client import NailgunClient
14 from pants.java.nailgun_protocol import NailgunProtocol
15 from pants.pantsd.pants_daemon import PantsDaemon
16 from pants.util.collections import combined_dict
17 from pants.util.memo import memoized_property
18
19
20 logger = logging.getLogger(__name__)
21
22
23 class RemotePantsRunner(object):
24 """A thin client variant of PantsRunner."""
25
26 class Fallback(Exception):
27 """Raised when fallback to an alternate execution mode is requested."""
28
29 class PortNotFound(Exception):
30 """Raised when the pailgun port can't be found."""
31
32 PANTS_COMMAND = 'pants'
33 RECOVERABLE_EXCEPTIONS = (PortNotFound, NailgunClient.NailgunConnectionError)
34
35 def __init__(self, exiter, args, env, bootstrap_options, stdin=None, stdout=None, stderr=None):
36 """
37 :param Exiter exiter: The Exiter instance to use for this run.
38 :param list args: The arguments (e.g. sys.argv) for this run.
39 :param dict env: The environment (e.g. os.environ) for this run.
40 :param Options bootstrap_options: The Options bag containing the bootstrap options.
41 :param file stdin: The stream representing stdin.
42 :param file stdout: The stream representing stdout.
43 :param file stderr: The stream representing stderr.
44 """
45 self._exiter = exiter
46 self._args = args
47 self._env = env
48 self._bootstrap_options = bootstrap_options
49 self._stdin = stdin or sys.stdin
50 self._stdout = stdout or sys.stdout
51 self._stderr = stderr or sys.stderr
52
53 @memoized_property
54 def pantsd(self):
55 return PantsDaemon.Factory.create(bootstrap_options=self._bootstrap_options)
56
57 @contextmanager
58 def _trapped_control_c(self, client):
59 """A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely."""
60 def handle_control_c(signum, frame):
61 client.send_control_c()
62
63 existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
64 signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.
65 try:
66 yield
67 finally:
68 signal.signal(signal.SIGINT, existing_sigint_handler)
69
70 def _setup_logging(self):
71 """Sets up basic stdio logging for the thin client."""
72 log_level = logging.getLevelName(self._bootstrap_options.for_global_scope().level.upper())
73
74 formatter = logging.Formatter('%(levelname)s] %(message)s')
75 handler = logging.StreamHandler(sys.stdout)
76 handler.setLevel(log_level)
77 handler.setFormatter(formatter)
78
79 root = logging.getLogger()
80 root.setLevel(log_level)
81 root.addHandler(handler)
82
83 def _connect_and_execute(self, port):
84 # Merge the nailgun TTY capability environment variables with the passed environment dict.
85 ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)
86 modified_env = combined_dict(self._env, ng_env)
87
88 assert isinstance(port, int), 'port {} is not an integer!'.format(port)
89
90 # Instantiate a NailgunClient.
91 client = NailgunClient(port=port,
92 ins=self._stdin,
93 out=self._stdout,
94 err=self._stderr,
95 exit_on_broken_pipe=True)
96
97 with self._trapped_control_c(client):
98 # Execute the command on the pailgun.
99 result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)
100
101 # Exit.
102 self._exiter.exit(result)
103
104 def run(self, args=None):
105 self._setup_logging()
106 port = self.pantsd.maybe_launch()
107
108 logger.debug('connecting to pailgun on port {}'.format(port))
109 try:
110 self._connect_and_execute(port)
111 except self.RECOVERABLE_EXCEPTIONS as e:
112 raise self.Fallback(e)
113
[end of src/python/pants/bin/remote_pants_runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/bin/remote_pants_runner.py b/src/python/pants/bin/remote_pants_runner.py
--- a/src/python/pants/bin/remote_pants_runner.py
+++ b/src/python/pants/bin/remote_pants_runner.py
@@ -55,17 +55,25 @@
return PantsDaemon.Factory.create(bootstrap_options=self._bootstrap_options)
@contextmanager
- def _trapped_control_c(self, client):
- """A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely."""
+ def _trapped_signals(self, client):
+ """A contextmanager that overrides the SIGINT (control-c) and SIGQUIT (control-\) handlers
+ and handles them remotely."""
def handle_control_c(signum, frame):
client.send_control_c()
existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
- signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.
+ # N.B. SIGQUIT will abruptly kill the pantsd-runner, which will shut down the other end
+ # of the Pailgun connection - so we send a gentler SIGINT here instead.
+ existing_sigquit_handler = signal.signal(signal.SIGQUIT, handle_control_c)
+
+ # Retry interrupted system calls.
+ signal.siginterrupt(signal.SIGINT, False)
+ signal.siginterrupt(signal.SIGQUIT, False)
try:
yield
finally:
signal.signal(signal.SIGINT, existing_sigint_handler)
+ signal.signal(signal.SIGQUIT, existing_sigquit_handler)
def _setup_logging(self):
"""Sets up basic stdio logging for the thin client."""
@@ -94,7 +102,7 @@
err=self._stderr,
exit_on_broken_pipe=True)
- with self._trapped_control_c(client):
+ with self._trapped_signals(client):
# Execute the command on the pailgun.
result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)
|
{"golden_diff": "diff --git a/src/python/pants/bin/remote_pants_runner.py b/src/python/pants/bin/remote_pants_runner.py\n--- a/src/python/pants/bin/remote_pants_runner.py\n+++ b/src/python/pants/bin/remote_pants_runner.py\n@@ -55,17 +55,25 @@\n return PantsDaemon.Factory.create(bootstrap_options=self._bootstrap_options)\n \n @contextmanager\n- def _trapped_control_c(self, client):\n- \"\"\"A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely.\"\"\"\n+ def _trapped_signals(self, client):\n+ \"\"\"A contextmanager that overrides the SIGINT (control-c) and SIGQUIT (control-\\) handlers\n+ and handles them remotely.\"\"\"\n def handle_control_c(signum, frame):\n client.send_control_c()\n \n existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)\n- signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.\n+ # N.B. SIGQUIT will abruptly kill the pantsd-runner, which will shut down the other end\n+ # of the Pailgun connection - so we send a gentler SIGINT here instead.\n+ existing_sigquit_handler = signal.signal(signal.SIGQUIT, handle_control_c)\n+\n+ # Retry interrupted system calls.\n+ signal.siginterrupt(signal.SIGINT, False)\n+ signal.siginterrupt(signal.SIGQUIT, False)\n try:\n yield\n finally:\n signal.signal(signal.SIGINT, existing_sigint_handler)\n+ signal.signal(signal.SIGQUIT, existing_sigquit_handler)\n \n def _setup_logging(self):\n \"\"\"Sets up basic stdio logging for the thin client.\"\"\"\n@@ -94,7 +102,7 @@\n err=self._stderr,\n exit_on_broken_pipe=True)\n \n- with self._trapped_control_c(client):\n+ with self._trapped_signals(client):\n # Execute the command on the pailgun.\n result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)\n", "issue": "[pantsd] sending ctrl-\\ in a `./pants repl` can lead to a hung pantsd-runner\nrepro:\r\n\r\n```\r\n[omerta pants-release (master)]$ ps -ef |grep pantsd-runner |grep -v grep\r\n[omerta pants-release (master)]$ ./pants -q repl 3rdparty/python:psutil\r\n\r\nPython 2.7.10 (default, Dec 16 2015, 14:09:45) \r\n[GCC 4.2.1 Compatible Apple LLVM 7.0.2 (clang-700.1.81)] on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n(InteractiveConsole)\r\n>>> ^\\Quit: 3\r\n[omerta pants-release (master)]$ ps -ef |grep pantsd-runner |grep -v grep\r\n 501 67669 1 0 10:37PM ?? 0:01.14 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67670 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67671 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67672 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67673 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67674 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67675 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67676 67669 0 10:37PM ?? 0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n 501 67677 67669 0 10:37PM ?? 
0:00.00 pantsd-runner [./pants -q repl 3rdparty/python:psutil] \r\n[omerta pants-release (master)]$ \r\n```\r\n\r\nwe'll want to better handle `SIGQUIT` in the thin client side of the runner to avoid this.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport signal\nimport sys\nfrom contextlib import contextmanager\n\nfrom pants.java.nailgun_client import NailgunClient\nfrom pants.java.nailgun_protocol import NailgunProtocol\nfrom pants.pantsd.pants_daemon import PantsDaemon\nfrom pants.util.collections import combined_dict\nfrom pants.util.memo import memoized_property\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass RemotePantsRunner(object):\n \"\"\"A thin client variant of PantsRunner.\"\"\"\n\n class Fallback(Exception):\n \"\"\"Raised when fallback to an alternate execution mode is requested.\"\"\"\n\n class PortNotFound(Exception):\n \"\"\"Raised when the pailgun port can't be found.\"\"\"\n\n PANTS_COMMAND = 'pants'\n RECOVERABLE_EXCEPTIONS = (PortNotFound, NailgunClient.NailgunConnectionError)\n\n def __init__(self, exiter, args, env, bootstrap_options, stdin=None, stdout=None, stderr=None):\n \"\"\"\n :param Exiter exiter: The Exiter instance to use for this run.\n :param list args: The arguments (e.g. sys.argv) for this run.\n :param dict env: The environment (e.g. os.environ) for this run.\n :param Options bootstrap_options: The Options bag containing the bootstrap options.\n :param file stdin: The stream representing stdin.\n :param file stdout: The stream representing stdout.\n :param file stderr: The stream representing stderr.\n \"\"\"\n self._exiter = exiter\n self._args = args\n self._env = env\n self._bootstrap_options = bootstrap_options\n self._stdin = stdin or sys.stdin\n self._stdout = stdout or sys.stdout\n self._stderr = stderr or sys.stderr\n\n @memoized_property\n def pantsd(self):\n return PantsDaemon.Factory.create(bootstrap_options=self._bootstrap_options)\n\n @contextmanager\n def _trapped_control_c(self, client):\n \"\"\"A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely.\"\"\"\n def handle_control_c(signum, frame):\n client.send_control_c()\n\n existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)\n signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.\n try:\n yield\n finally:\n signal.signal(signal.SIGINT, existing_sigint_handler)\n\n def _setup_logging(self):\n \"\"\"Sets up basic stdio logging for the thin client.\"\"\"\n log_level = logging.getLevelName(self._bootstrap_options.for_global_scope().level.upper())\n\n formatter = logging.Formatter('%(levelname)s] %(message)s')\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(log_level)\n handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(log_level)\n root.addHandler(handler)\n\n def _connect_and_execute(self, port):\n # Merge the nailgun TTY capability environment variables with the passed environment dict.\n ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)\n modified_env = combined_dict(self._env, ng_env)\n\n assert isinstance(port, int), 'port {} is not an integer!'.format(port)\n\n # Instantiate a NailgunClient.\n client = NailgunClient(port=port,\n ins=self._stdin,\n 
out=self._stdout,\n err=self._stderr,\n exit_on_broken_pipe=True)\n\n with self._trapped_control_c(client):\n # Execute the command on the pailgun.\n result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)\n\n # Exit.\n self._exiter.exit(result)\n\n def run(self, args=None):\n self._setup_logging()\n port = self.pantsd.maybe_launch()\n\n logger.debug('connecting to pailgun on port {}'.format(port))\n try:\n self._connect_and_execute(port)\n except self.RECOVERABLE_EXCEPTIONS as e:\n raise self.Fallback(e)\n", "path": "src/python/pants/bin/remote_pants_runner.py"}]}
| 2,420 | 443 |
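The pants patch above widens the thin client's signal trap from SIGINT alone to SIGINT plus SIGQUIT, forwards both to the remote run, and restores the previous handlers on exit. A stripped-down, standalone sketch of that context-manager pattern follows; `send_control_c` is a placeholder for the Nailgun client call, and `SIGQUIT`/`siginterrupt` are POSIX-only.

```python
import signal
from contextlib import contextmanager


@contextmanager
def trapped_signals(on_interrupt):
    """Route SIGINT and SIGQUIT (ctrl-c / ctrl-\\) to `on_interrupt`, then restore."""
    def handler(signum, frame):
        on_interrupt()

    old_int = signal.signal(signal.SIGINT, handler)
    old_quit = signal.signal(signal.SIGQUIT, handler)   # POSIX only
    # Retry interrupted system calls instead of surfacing EINTR.
    signal.siginterrupt(signal.SIGINT, False)
    signal.siginterrupt(signal.SIGQUIT, False)
    try:
        yield
    finally:
        signal.signal(signal.SIGINT, old_int)
        signal.signal(signal.SIGQUIT, old_quit)


if __name__ == "__main__":
    import time

    def send_control_c():            # placeholder for client.send_control_c()
        print("forwarding interrupt to the remote pantsd-runner")

    with trapped_signals(send_control_c):
        print("press ctrl-c or ctrl-\\ within 5 seconds...")
        time.sleep(5)
```

Sending a gentler interrupt instead of letting SIGQUIT kill the client is what keeps the server-side pantsd-runner processes from being orphaned.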
gh_patches_debug_66030
|
rasdani/github-patches
|
git_diff
|
pypa__pip-2810
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip install --allow-all-external in requirements.txt file fails
**With pip 7**
requirements.txt file:
``` txt
--allow-all-external
mysql-connector-python
```
On comandline
``` bash
pip install -r requirements.txt
pip: error: no such option: --allow-all-external
```
**With pip 6.1.1**
Collecting mysql-connector-python (from -r requirements.txt (line 2))
Downloading http://cdn.mysql.com/Downloads/Connector-Python/mysql-connector-python-2.0.3.zip (275kB)
100% |████████████████████████████████| 278kB 3.0MB/s
Installing collected packages: mysql-connector-python
Running setup.py install for mysql-connector-python
Successfully installed mysql-connector-python-2.0.3
</issue>
<code>
[start of pip/req/req_file.py]
1 """
2 Requirements file parsing
3 """
4
5 from __future__ import absolute_import
6
7 import os
8 import re
9 import shlex
10 import optparse
11
12 from pip._vendor.six.moves.urllib import parse as urllib_parse
13 from pip._vendor.six.moves import filterfalse
14
15 import pip
16 from pip.download import get_file_content
17 from pip.req.req_install import InstallRequirement
18 from pip.exceptions import (RequirementsFileParseError)
19 from pip.utils import normalize_name
20 from pip import cmdoptions
21
22 __all__ = ['parse_requirements']
23
24 SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
25 COMMENT_RE = re.compile(r'(^|\s)+#.*$')
26
27 SUPPORTED_OPTIONS = [
28 cmdoptions.editable,
29 cmdoptions.requirements,
30 cmdoptions.no_index,
31 cmdoptions.index_url,
32 cmdoptions.find_links,
33 cmdoptions.extra_index_url,
34 cmdoptions.allow_external,
35 cmdoptions.no_allow_external,
36 cmdoptions.allow_unsafe,
37 cmdoptions.no_allow_unsafe,
38 cmdoptions.use_wheel,
39 cmdoptions.no_use_wheel,
40 cmdoptions.always_unzip,
41 cmdoptions.no_binary,
42 cmdoptions.only_binary,
43 ]
44
45 # options to be passed to requirements
46 SUPPORTED_OPTIONS_REQ = [
47 cmdoptions.install_options,
48 cmdoptions.global_options
49 ]
50
51 # the 'dest' string values
52 SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]
53
54
55 def parse_requirements(filename, finder=None, comes_from=None, options=None,
56 session=None, wheel_cache=None):
57 """
58 Parse a requirements file and yield InstallRequirement instances.
59
60 :param filename: Path or url of requirements file.
61 :param finder: Instance of pip.index.PackageFinder.
62 :param comes_from: Origin description of requirements.
63 :param options: Global options.
64 :param session: Instance of pip.download.PipSession.
65 :param wheel_cache: Instance of pip.wheel.WheelCache
66 """
67 if session is None:
68 raise TypeError(
69 "parse_requirements() missing 1 required keyword argument: "
70 "'session'"
71 )
72
73 _, content = get_file_content(
74 filename, comes_from=comes_from, session=session
75 )
76
77 lines = content.splitlines()
78 lines = ignore_comments(lines)
79 lines = join_lines(lines)
80 lines = skip_regex(lines, options)
81
82 for line_number, line in enumerate(lines, 1):
83 req_iter = process_line(line, filename, line_number, finder,
84 comes_from, options, session, wheel_cache)
85 for req in req_iter:
86 yield req
87
88
89 def process_line(line, filename, line_number, finder=None, comes_from=None,
90 options=None, session=None, wheel_cache=None):
91 """Process a single requirements line; This can result in creating/yielding
92 requirements, or updating the finder.
93
94 For lines that contain requirements, the only options that have an effect
95 are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
96 requirement. Other options from SUPPORTED_OPTIONS may be present, but are
97 ignored.
98
99 For lines that do not contain requirements, the only options that have an
100 effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
101 be present, but are ignored. These lines may contain multiple options
102 (although our docs imply only one is supported), and all our parsed and
103 affect the finder.
104
105 """
106
107 parser = build_parser()
108 defaults = parser.get_default_values()
109 defaults.index_url = None
110 if finder:
111 # `finder.format_control` will be updated during parsing
112 defaults.format_control = finder.format_control
113 opts, args = parser.parse_args(shlex.split(line), defaults)
114
115 # yield a line requirement
116 if args:
117 args_line = ' '.join(args)
118 comes_from = '-r %s (line %s)' % (filename, line_number)
119 isolated = options.isolated_mode if options else False
120 if options:
121 cmdoptions.check_install_build_global(options, opts)
122 # get the options that apply to requirements
123 req_options = {}
124 for dest in SUPPORTED_OPTIONS_REQ_DEST:
125 if dest in opts.__dict__ and opts.__dict__[dest]:
126 req_options[dest] = opts.__dict__[dest]
127 yield InstallRequirement.from_line(
128 args_line, comes_from, isolated=isolated, options=req_options,
129 wheel_cache=wheel_cache
130 )
131
132 # yield an editable requirement
133 elif opts.editables:
134 comes_from = '-r %s (line %s)' % (filename, line_number)
135 isolated = options.isolated_mode if options else False
136 default_vcs = options.default_vcs if options else None
137 yield InstallRequirement.from_editable(
138 opts.editables[0], comes_from=comes_from,
139 default_vcs=default_vcs, isolated=isolated,
140 wheel_cache=wheel_cache
141 )
142
143 # parse a nested requirements file
144 elif opts.requirements:
145 req_path = opts.requirements[0]
146 # original file is over http
147 if SCHEME_RE.search(filename):
148 # do a url join so relative paths work
149 req_path = urllib_parse.urljoin(filename, req_path)
150 # original file and nested file are paths
151 elif not SCHEME_RE.search(req_path):
152 # do a join so relative paths work
153 req_dir = os.path.dirname(filename)
154 req_path = os.path.join(os.path.dirname(filename), req_path)
155 # TODO: Why not use `comes_from='-r {} (line {})'` here as well?
156 parser = parse_requirements(
157 req_path, finder, comes_from, options, session,
158 wheel_cache=wheel_cache
159 )
160 for req in parser:
161 yield req
162
163 # set finder options
164 elif finder:
165 if opts.index_url:
166 finder.index_urls = [opts.index_url]
167 if opts.use_wheel is False:
168 finder.use_wheel = False
169 pip.index.fmt_ctl_no_use_wheel(finder.format_control)
170 if opts.no_index is True:
171 finder.index_urls = []
172 if opts.allow_all_external:
173 finder.allow_all_external = opts.allow_all_external
174 if opts.extra_index_urls:
175 finder.index_urls.extend(opts.extra_index_urls)
176 if opts.allow_external:
177 finder.allow_external |= set(
178 [normalize_name(v).lower() for v in opts.allow_external])
179 if opts.allow_unverified:
180 # Remove after 7.0
181 finder.allow_unverified |= set(
182 [normalize_name(v).lower() for v in opts.allow_unverified])
183 if opts.find_links:
184 # FIXME: it would be nice to keep track of the source
185 # of the find_links: support a find-links local path
186 # relative to a requirements file.
187 value = opts.find_links[0]
188 req_dir = os.path.dirname(os.path.abspath(filename))
189 relative_to_reqs_file = os.path.join(req_dir, value)
190 if os.path.exists(relative_to_reqs_file):
191 value = relative_to_reqs_file
192 finder.find_links.append(value)
193
194
195 def build_parser():
196 """
197 Return a parser for parsing requirement lines
198 """
199 parser = optparse.OptionParser(add_help_option=False)
200
201 option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
202 for option_factory in option_factories:
203 option = option_factory()
204 parser.add_option(option)
205
206 # By default optparse sys.exits on parsing errors. We want to wrap
207 # that in our own exception.
208 def parser_exit(self, msg):
209 raise RequirementsFileParseError(msg)
210 parser.exit = parser_exit
211
212 return parser
213
214
215 def join_lines(iterator):
216 """
217 Joins a line ending in '\' with the previous line.
218 """
219 lines = []
220 for line in iterator:
221 if not line.endswith('\\'):
222 if lines:
223 lines.append(line)
224 yield ''.join(lines)
225 lines = []
226 else:
227 yield line
228 else:
229 lines.append(line.strip('\\'))
230
231 # TODO: handle space after '\'.
232 # TODO: handle '\' on last line.
233
234
235 def ignore_comments(iterator):
236 """
237 Strips and filters empty or commented lines.
238 """
239 for line in iterator:
240 line = COMMENT_RE.sub('', line)
241 line = line.strip()
242 if line:
243 yield line
244
245
246 def skip_regex(lines, options):
247 """
248 Optionally exclude lines that match '--skip-requirements-regex'
249 """
250 skip_regex = options.skip_requirements_regex if options else None
251 if skip_regex:
252 lines = filterfalse(re.compile(skip_regex).search, lines)
253 return lines
254
[end of pip/req/req_file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pip/req/req_file.py b/pip/req/req_file.py
--- a/pip/req/req_file.py
+++ b/pip/req/req_file.py
@@ -32,6 +32,7 @@
cmdoptions.find_links,
cmdoptions.extra_index_url,
cmdoptions.allow_external,
+ cmdoptions.allow_all_external,
cmdoptions.no_allow_external,
cmdoptions.allow_unsafe,
cmdoptions.no_allow_unsafe,
|
{"golden_diff": "diff --git a/pip/req/req_file.py b/pip/req/req_file.py\n--- a/pip/req/req_file.py\n+++ b/pip/req/req_file.py\n@@ -32,6 +32,7 @@\n cmdoptions.find_links,\n cmdoptions.extra_index_url,\n cmdoptions.allow_external,\n+ cmdoptions.allow_all_external,\n cmdoptions.no_allow_external,\n cmdoptions.allow_unsafe,\n cmdoptions.no_allow_unsafe,\n", "issue": "pip install --allow-all-external in requirements.txt file fails\n**With pip 7**\nrequirements.txt file:\n\n``` txt\n--allow-all-external\nmysql-connector-python\n```\n\nOn comandline\n\n``` bash\npip install -r requirements.txt \npip: error: no such option: --allow-all-external\n```\n\n**With pip 6.1.1**\nCollecting mysql-connector-python (from -r requirements.txt (line 2))\n Downloading http://cdn.mysql.com/Downloads/Connector-Python/mysql-connector-python-2.0.3.zip (275kB)\n 100% |\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 278kB 3.0MB/s \nInstalling collected packages: mysql-connector-python\n Running setup.py install for mysql-connector-python\nSuccessfully installed mysql-connector-python-2.0.3\n\n", "before_files": [{"content": "\"\"\"\nRequirements file parsing\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport os\nimport re\nimport shlex\nimport optparse\n\nfrom pip._vendor.six.moves.urllib import parse as urllib_parse\nfrom pip._vendor.six.moves import filterfalse\n\nimport pip\nfrom pip.download import get_file_content\nfrom pip.req.req_install import InstallRequirement\nfrom pip.exceptions import (RequirementsFileParseError)\nfrom pip.utils import normalize_name\nfrom pip import cmdoptions\n\n__all__ = ['parse_requirements']\n\nSCHEME_RE = re.compile(r'^(http|https|file):', re.I)\nCOMMENT_RE = re.compile(r'(^|\\s)+#.*$')\n\nSUPPORTED_OPTIONS = [\n cmdoptions.editable,\n cmdoptions.requirements,\n cmdoptions.no_index,\n cmdoptions.index_url,\n cmdoptions.find_links,\n cmdoptions.extra_index_url,\n cmdoptions.allow_external,\n cmdoptions.no_allow_external,\n cmdoptions.allow_unsafe,\n cmdoptions.no_allow_unsafe,\n cmdoptions.use_wheel,\n cmdoptions.no_use_wheel,\n cmdoptions.always_unzip,\n cmdoptions.no_binary,\n cmdoptions.only_binary,\n]\n\n# options to be passed to requirements\nSUPPORTED_OPTIONS_REQ = [\n cmdoptions.install_options,\n cmdoptions.global_options\n]\n\n# the 'dest' string values\nSUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ]\n\n\ndef parse_requirements(filename, finder=None, comes_from=None, options=None,\n session=None, wheel_cache=None):\n \"\"\"\n Parse a requirements file and yield InstallRequirement instances.\n\n :param filename: Path or url of requirements file.\n :param finder: Instance of pip.index.PackageFinder.\n :param comes_from: Origin description of requirements.\n :param options: Global options.\n :param session: Instance of pip.download.PipSession.\n :param wheel_cache: Instance of pip.wheel.WheelCache\n \"\"\"\n if session is None:\n raise TypeError(\n \"parse_requirements() missing 1 required keyword argument: \"\n \"'session'\"\n )\n\n _, content = get_file_content(\n filename, comes_from=comes_from, session=session\n )\n\n lines = content.splitlines()\n lines = ignore_comments(lines)\n lines = join_lines(lines)\n lines = skip_regex(lines, options)\n\n for line_number, line in enumerate(lines, 1):\n req_iter = process_line(line, filename, line_number, finder,\n comes_from, options, session, 
wheel_cache)\n for req in req_iter:\n yield req\n\n\ndef process_line(line, filename, line_number, finder=None, comes_from=None,\n options=None, session=None, wheel_cache=None):\n \"\"\"Process a single requirements line; This can result in creating/yielding\n requirements, or updating the finder.\n\n For lines that contain requirements, the only options that have an effect\n are from SUPPORTED_OPTIONS_REQ, and they are scoped to the\n requirement. Other options from SUPPORTED_OPTIONS may be present, but are\n ignored.\n\n For lines that do not contain requirements, the only options that have an\n effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may\n be present, but are ignored. These lines may contain multiple options\n (although our docs imply only one is supported), and all our parsed and\n affect the finder.\n\n \"\"\"\n\n parser = build_parser()\n defaults = parser.get_default_values()\n defaults.index_url = None\n if finder:\n # `finder.format_control` will be updated during parsing\n defaults.format_control = finder.format_control\n opts, args = parser.parse_args(shlex.split(line), defaults)\n\n # yield a line requirement\n if args:\n args_line = ' '.join(args)\n comes_from = '-r %s (line %s)' % (filename, line_number)\n isolated = options.isolated_mode if options else False\n if options:\n cmdoptions.check_install_build_global(options, opts)\n # get the options that apply to requirements\n req_options = {}\n for dest in SUPPORTED_OPTIONS_REQ_DEST:\n if dest in opts.__dict__ and opts.__dict__[dest]:\n req_options[dest] = opts.__dict__[dest]\n yield InstallRequirement.from_line(\n args_line, comes_from, isolated=isolated, options=req_options,\n wheel_cache=wheel_cache\n )\n\n # yield an editable requirement\n elif opts.editables:\n comes_from = '-r %s (line %s)' % (filename, line_number)\n isolated = options.isolated_mode if options else False\n default_vcs = options.default_vcs if options else None\n yield InstallRequirement.from_editable(\n opts.editables[0], comes_from=comes_from,\n default_vcs=default_vcs, isolated=isolated,\n wheel_cache=wheel_cache\n )\n\n # parse a nested requirements file\n elif opts.requirements:\n req_path = opts.requirements[0]\n # original file is over http\n if SCHEME_RE.search(filename):\n # do a url join so relative paths work\n req_path = urllib_parse.urljoin(filename, req_path)\n # original file and nested file are paths\n elif not SCHEME_RE.search(req_path):\n # do a join so relative paths work\n req_dir = os.path.dirname(filename)\n req_path = os.path.join(os.path.dirname(filename), req_path)\n # TODO: Why not use `comes_from='-r {} (line {})'` here as well?\n parser = parse_requirements(\n req_path, finder, comes_from, options, session,\n wheel_cache=wheel_cache\n )\n for req in parser:\n yield req\n\n # set finder options\n elif finder:\n if opts.index_url:\n finder.index_urls = [opts.index_url]\n if opts.use_wheel is False:\n finder.use_wheel = False\n pip.index.fmt_ctl_no_use_wheel(finder.format_control)\n if opts.no_index is True:\n finder.index_urls = []\n if opts.allow_all_external:\n finder.allow_all_external = opts.allow_all_external\n if opts.extra_index_urls:\n finder.index_urls.extend(opts.extra_index_urls)\n if opts.allow_external:\n finder.allow_external |= set(\n [normalize_name(v).lower() for v in opts.allow_external])\n if opts.allow_unverified:\n # Remove after 7.0\n finder.allow_unverified |= set(\n [normalize_name(v).lower() for v in opts.allow_unverified])\n if opts.find_links:\n # FIXME: it would be nice 
to keep track of the source\n # of the find_links: support a find-links local path\n # relative to a requirements file.\n value = opts.find_links[0]\n req_dir = os.path.dirname(os.path.abspath(filename))\n relative_to_reqs_file = os.path.join(req_dir, value)\n if os.path.exists(relative_to_reqs_file):\n value = relative_to_reqs_file\n finder.find_links.append(value)\n\n\ndef build_parser():\n \"\"\"\n Return a parser for parsing requirement lines\n \"\"\"\n parser = optparse.OptionParser(add_help_option=False)\n\n option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ\n for option_factory in option_factories:\n option = option_factory()\n parser.add_option(option)\n\n # By default optparse sys.exits on parsing errors. We want to wrap\n # that in our own exception.\n def parser_exit(self, msg):\n raise RequirementsFileParseError(msg)\n parser.exit = parser_exit\n\n return parser\n\n\ndef join_lines(iterator):\n \"\"\"\n Joins a line ending in '\\' with the previous line.\n \"\"\"\n lines = []\n for line in iterator:\n if not line.endswith('\\\\'):\n if lines:\n lines.append(line)\n yield ''.join(lines)\n lines = []\n else:\n yield line\n else:\n lines.append(line.strip('\\\\'))\n\n # TODO: handle space after '\\'.\n # TODO: handle '\\' on last line.\n\n\ndef ignore_comments(iterator):\n \"\"\"\n Strips and filters empty or commented lines.\n \"\"\"\n for line in iterator:\n line = COMMENT_RE.sub('', line)\n line = line.strip()\n if line:\n yield line\n\n\ndef skip_regex(lines, options):\n \"\"\"\n Optionally exclude lines that match '--skip-requirements-regex'\n \"\"\"\n skip_regex = options.skip_requirements_regex if options else None\n if skip_regex:\n lines = filterfalse(re.compile(skip_regex).search, lines)\n return lines\n", "path": "pip/req/req_file.py"}]}
| 3,223 | 105 |
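The one-line pip fix works because `build_parser()` assembles its optparse parser purely from the `SUPPORTED_OPTIONS` list, so any flag missing from that list is rejected the moment it appears in a requirements file. The sketch below reproduces that mechanism in isolation; the two option factories are simplified stand-ins, not pip's actual `cmdoptions` definitions.

```python
import optparse
import shlex


def allow_external():
    return optparse.Option("--allow-external", dest="allow_external",
                           action="append", default=[])


def allow_all_external():
    return optparse.Option("--allow-all-external", dest="allow_all_external",
                           action="store_true", default=False)


# Whatever is in this list is exactly what a requirements-file line may use.
SUPPORTED_OPTIONS = [allow_external, allow_all_external]


def build_parser():
    parser = optparse.OptionParser(add_help_option=False)
    for factory in SUPPORTED_OPTIONS:
        parser.add_option(factory())

    def parser_exit(self, msg):      # surface parse errors as exceptions
        raise ValueError(msg)

    parser.exit = parser_exit
    return parser


if __name__ == "__main__":
    parser = build_parser()
    opts, args = parser.parse_args(shlex.split("--allow-all-external"))
    print(opts.allow_all_external)   # True once the option is registered
```

Dropping `allow_all_external` from the list (as in the buggy version) makes `parse_args` raise "no such option: --allow-all-external", which is the exact failure reported in the issue.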
gh_patches_debug_6675
|
rasdani/github-patches
|
git_diff
|
fal-ai__dbt-fal-197
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Too many messages received before initialization
> mmeasic: Hey, I get this log message on dbt version 0.21.0:
```Logged from file /Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/parser/manifest.py, line 792
Traceback (most recent call last):
File "/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/logbook/handlers.py", line 216, in handle
self.emit(record)
File "/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/logger.py", line 478, in emit
assert len(self._msg_buffer) < self._bufmax, \
AssertionError: too many messages received before initilization!
```
*****
> jstrom40: did your job run after it gave you this error message? i have had this problem when i have had too many threads set up in dbt. i also had it when i tried to run the fal tool but my actual job still ran after it popped out this message
*****
> mmeasic: It did run.
> I actually have 4 threads set for the target
[Thread link](https://discord.com/channels/908693336280432750/908693336280432755/930791100803850283)
</issue>
<code>
[start of src/fal/cli/cli.py]
1 from typing import List
2 import sys
3 from dbt.logger import log_manager, GLOBAL_LOGGER as logger
4 from fal.cli.flow_runner import fal_flow_run
5 from faldbt.lib import DBT_VCURRENT, DBT_V1
6 from .args import parse_args
7 from .fal_runner import fal_run
8 from fal.telemetry import telemetry
9
10
11 @telemetry.log_call("cli")
12 def cli(argv: List[str] = sys.argv):
13 parsed = parse_args(argv[1:])
14
15 # TODO: remove `action="extend"` to match exactly what dbt does
16 selects_count = (
17 argv.count("-s")
18 + argv.count("--select")
19 + argv.count("-m")
20 + argv.count("--model")
21 )
22 exclude_count = argv.count("--exclude")
23 script_count = argv.count("--script")
24
25 if parsed.disable_logging:
26 logger.disable()
27 # Re-enable logging for 1.0.0 through old API of logger
28 elif DBT_VCURRENT.compare(DBT_V1) >= 0:
29 if logger.disabled:
30 logger.enable()
31
32 with log_manager.applicationbound():
33 if parsed.debug:
34 log_manager.set_debug()
35
36 if parsed.command == "flow":
37 if parsed.flow_command == "run":
38 fal_flow_run(parsed)
39
40 elif parsed.command == "run":
41 fal_run(
42 parsed,
43 selects_count=selects_count,
44 exclude_count=exclude_count,
45 script_count=script_count,
46 )
47
[end of src/fal/cli/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/fal/cli/cli.py b/src/fal/cli/cli.py
--- a/src/fal/cli/cli.py
+++ b/src/fal/cli/cli.py
@@ -20,6 +20,10 @@
exclude_count = argv.count("--exclude")
script_count = argv.count("--script")
+ # Disabling the dbt.logger.DelayedFileHandler manually
+ # since we do not use the new dbt logging system
+ # This fixes issue https://github.com/fal-ai/fal/issues/97
+ log_manager.set_path(None)
if parsed.disable_logging:
logger.disable()
# Re-enable logging for 1.0.0 through old API of logger
|
{"golden_diff": "diff --git a/src/fal/cli/cli.py b/src/fal/cli/cli.py\n--- a/src/fal/cli/cli.py\n+++ b/src/fal/cli/cli.py\n@@ -20,6 +20,10 @@\n exclude_count = argv.count(\"--exclude\")\n script_count = argv.count(\"--script\")\n \n+ # Disabling the dbt.logger.DelayedFileHandler manually\n+ # since we do not use the new dbt logging system\n+ # This fixes issue https://github.com/fal-ai/fal/issues/97\n+ log_manager.set_path(None)\n if parsed.disable_logging:\n logger.disable()\n # Re-enable logging for 1.0.0 through old API of logger\n", "issue": "[Bug] Too many messages received before initialization\n> mmeasic: Hey, I get this log message on dbt version 0.21.0:\r\n\r\n```Logged from file /Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/parser/manifest.py, line 792\r\nTraceback (most recent call last):\r\n File \"/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/logbook/handlers.py\", line 216, in handle\r\n self.emit(record)\r\n File \"/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/logger.py\", line 478, in emit\r\n assert len(self._msg_buffer) < self._bufmax, \\\r\nAssertionError: too many messages received before initilization!\r\n```\r\n\r\n*****\r\n\r\n> jstrom40: did your job run after it gave you this error message? i have had this problem when i have had too many threads set up in dbt. i also had it when i tried to run the fal tool but my actual job still ran after it popped out this message\r\n\r\n*****\r\n\r\n> mmeasic: It did run.\r\n> I actually have 4 threads set for the target\r\n\r\n[Thread link](https://discord.com/channels/908693336280432750/908693336280432755/930791100803850283)\n", "before_files": [{"content": "from typing import List\nimport sys\nfrom dbt.logger import log_manager, GLOBAL_LOGGER as logger\nfrom fal.cli.flow_runner import fal_flow_run\nfrom faldbt.lib import DBT_VCURRENT, DBT_V1\nfrom .args import parse_args\nfrom .fal_runner import fal_run\nfrom fal.telemetry import telemetry\n\n\[email protected]_call(\"cli\")\ndef cli(argv: List[str] = sys.argv):\n parsed = parse_args(argv[1:])\n\n # TODO: remove `action=\"extend\"` to match exactly what dbt does\n selects_count = (\n argv.count(\"-s\")\n + argv.count(\"--select\")\n + argv.count(\"-m\")\n + argv.count(\"--model\")\n )\n exclude_count = argv.count(\"--exclude\")\n script_count = argv.count(\"--script\")\n\n if parsed.disable_logging:\n logger.disable()\n # Re-enable logging for 1.0.0 through old API of logger\n elif DBT_VCURRENT.compare(DBT_V1) >= 0:\n if logger.disabled:\n logger.enable()\n\n with log_manager.applicationbound():\n if parsed.debug:\n log_manager.set_debug()\n\n if parsed.command == \"flow\":\n if parsed.flow_command == \"run\":\n fal_flow_run(parsed)\n\n elif parsed.command == \"run\":\n fal_run(\n parsed,\n selects_count=selects_count,\n exclude_count=exclude_count,\n script_count=script_count,\n )\n", "path": "src/fal/cli/cli.py"}]}
| 1,276 | 155 |
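The fal patch sidesteps the assertion by initializing dbt's delayed file handler (via `set_path(None)`) before any logging happens, so its message buffer never gets a chance to overflow. The toy handler below imitates that buffer-until-initialized behaviour so the failure mode and the fix are visible; it is an illustration built on the standard `logging` module, not dbt's real logbook-based handler.

```python
import logging


class DelayedBufferHandler(logging.Handler):
    """Toy stand-in for a handler that buffers records until it knows where to write."""

    def __init__(self, bufmax=10):
        super().__init__()
        self._buffer = []
        self._bufmax = bufmax
        self._initialized = False

    def set_path(self, path):
        # Initializing the handler (even with path=None) stops the buffer from
        # growing, which is the behaviour the one-line fix relies on.
        self._initialized = True
        self._buffer.clear()

    def emit(self, record):
        if self._initialized:
            return                   # a real handler would write to its file here
        assert len(self._buffer) < self._bufmax, \
            "too many messages received before initialization!"
        self._buffer.append(record)


if __name__ == "__main__":
    log = logging.getLogger("demo")
    log.propagate = False
    buffered = DelayedBufferHandler(bufmax=10)
    log.addHandler(buffered)

    # Without this call, the loop below trips the assertion after 10 records.
    buffered.set_path(None)

    for i in range(100):
        log.warning("message %d", i)
    print("logged 100 records without overflowing the buffer")
```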
gh_patches_debug_23183
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-3067
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'PathManagerBase' object has no attribute 'makedirs'
In attempting to create the tensorboard directory with PathManager we're calling a nonexistent function.
To repro:
```bash
$ python -m parlai.scripts.train_model -t personachat -m transformer/ranker -mf /tmp/model_tr6 --n-layers 1 --embedding-size 300 --ffn-size 600 --n-heads 4 --num-epochs 2 -veps 0.25 -bs 64 -lr 0.001 --dropout 0.1 --embedding-type fasttext_cc --candidates batch --tensorboard-log true
```
Exception hit:
```
File "/Users/spoff/ParlAI/parlai/core/logs.py", line 56, in __init__
PathManager.makedirs(tbpath)
AttributeError: 'PathManagerBase' object has no attribute 'makedirs'
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7
8 import sys
9
10 from setuptools import setup, find_packages
11
12 VERSION = '0.9.1' # if you update, update parlai/__init__.py too!
13
14 if sys.version_info < (3, 6):
15 sys.exit('Sorry, Python >=3.6 is required for ParlAI.')
16
17 with open('README.md', encoding="utf8") as f:
18 # strip the header and badges etc
19 readme = f.read().split('--------------------')[-1]
20
21 with open('requirements.txt') as f:
22 reqs = []
23 for line in f:
24 line = line.strip()
25 reqs.append(line.split('==')[0])
26
27
28 if __name__ == '__main__':
29 setup(
30 name='parlai',
31 version=VERSION,
32 description='Unified platform for dialogue research.',
33 long_description=readme,
34 long_description_content_type='text/markdown',
35 url='http://parl.ai/',
36 python_requires='>=3.6',
37 packages=find_packages(
38 exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal*')
39 ),
40 install_requires=reqs,
41 include_package_data=True,
42 package_data={'': ['*.txt', '*.md']},
43 entry_points={
44 "flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"],
45 "console_scripts": ["parlai=parlai.__main__:main"],
46 },
47 classifiers=[
48 "Programming Language :: Python :: 3",
49 "License :: OSI Approved :: MIT License",
50 "Topic :: Scientific/Engineering :: Artificial Intelligence",
51 "Natural Language :: English",
52 ],
53 )
54
[end of setup.py]
[start of parlai/core/logs.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """
7 Log metrics to tensorboard.
8
9 This file provides interface to log any metrics in tensorboard, could be
10 extended to any other tool like visdom.
11
12 .. code-block: none
13
14 tensorboard --logdir <PARLAI_DATA/tensorboard> --port 8888.
15 """
16
17 import json
18 import numbers
19 from parlai.core.opt import Opt
20 from parlai.core.metrics import Metric
21 from parlai.utils.io import PathManager
22 import parlai.utils.logging as logging
23
24
25 class TensorboardLogger(object):
26 """
27 Log objects to tensorboard.
28 """
29
30 @staticmethod
31 def add_cmdline_args(argparser):
32 """
33 Add tensorboard CLI args.
34 """
35 logger = argparser.add_argument_group('Tensorboard Arguments')
36 logger.add_argument(
37 '-tblog',
38 '--tensorboard-log',
39 type='bool',
40 default=False,
41 help="Tensorboard logging of metrics, default is %(default)s",
42 hidden=False,
43 )
44
45 def __init__(self, opt: Opt):
46 try:
47 # tensorboard is a very expensive thing to import. Wait until the
48 # last second to import it.
49 from tensorboardX import SummaryWriter
50 except ImportError:
51 raise ImportError('Please run `pip install tensorboard tensorboardX`.')
52
53 tbpath = opt['model_file'] + '.tensorboard'
54 logging.debug(f'Saving tensorboard logs to: {tbpath}')
55 if not PathManager.exists(tbpath):
56 PathManager.makedirs(tbpath)
57 self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))
58
59 def log_metrics(self, setting, step, report):
60 """
61 Add all metrics from tensorboard_metrics opt key.
62
63 :param setting:
64 One of train/valid/test. Will be used as the title for the graph.
65 :param step:
66 Number of parleys
67 :param report:
68 The report to log
69 """
70 for k, v in report.items():
71 if isinstance(v, numbers.Number):
72 self.writer.add_scalar(f'{k}/{setting}', v, global_step=step)
73 elif isinstance(v, Metric):
74 self.writer.add_scalar(f'{k}/{setting}', v.value(), global_step=step)
75 else:
76 logging.error(f'k {k} v {v} is not a number')
77
78 def flush(self):
79 self.writer.flush()
80
[end of parlai/core/logs.py]
[start of parlai/__init__.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 __version__ = '0.9.1'
8
[end of parlai/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/parlai/__init__.py b/parlai/__init__.py
--- a/parlai/__init__.py
+++ b/parlai/__init__.py
@@ -4,4 +4,4 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-__version__ = '0.9.1'
+__version__ = '0.9.2'
diff --git a/parlai/core/logs.py b/parlai/core/logs.py
--- a/parlai/core/logs.py
+++ b/parlai/core/logs.py
@@ -53,7 +53,7 @@
tbpath = opt['model_file'] + '.tensorboard'
logging.debug(f'Saving tensorboard logs to: {tbpath}')
if not PathManager.exists(tbpath):
- PathManager.makedirs(tbpath)
+ PathManager.mkdirs(tbpath)
self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))
def log_metrics(self, setting, step, report):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
from setuptools import setup, find_packages
-VERSION = '0.9.1' # if you update, update parlai/__init__.py too!
+VERSION = '0.9.2' # if you update, update parlai/__init__.py too!
if sys.version_info < (3, 6):
sys.exit('Sorry, Python >=3.6 is required for ParlAI.')
|
{"golden_diff": "diff --git a/parlai/__init__.py b/parlai/__init__.py\n--- a/parlai/__init__.py\n+++ b/parlai/__init__.py\n@@ -4,4 +4,4 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n \n-__version__ = '0.9.1'\n+__version__ = '0.9.2'\ndiff --git a/parlai/core/logs.py b/parlai/core/logs.py\n--- a/parlai/core/logs.py\n+++ b/parlai/core/logs.py\n@@ -53,7 +53,7 @@\n tbpath = opt['model_file'] + '.tensorboard'\n logging.debug(f'Saving tensorboard logs to: {tbpath}')\n if not PathManager.exists(tbpath):\n- PathManager.makedirs(tbpath)\n+ PathManager.mkdirs(tbpath)\n self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))\n \n def log_metrics(self, setting, step, report):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n \n from setuptools import setup, find_packages\n \n-VERSION = '0.9.1' # if you update, update parlai/__init__.py too!\n+VERSION = '0.9.2' # if you update, update parlai/__init__.py too!\n \n if sys.version_info < (3, 6):\n sys.exit('Sorry, Python >=3.6 is required for ParlAI.')\n", "issue": "'PathManagerBase' object has no attribute 'makedirs'\nIn attempting to create the tensorboard directory with PathManager we're calling a nonexistent function.\r\n\r\nTo repro:\r\n```bash\r\n$ python -m parlai.scripts.train_model -t personachat -m transformer/ranker -mf /tmp/model_tr6 --n-layers 1 --embedding-size 300 --ffn-size 600 --n-heads 4 --num-epochs 2 -veps 0.25 -bs 64 -lr 0.001 --dropout 0.1 --embedding-type fasttext_cc --candidates batch --tensorboard-log true\r\n```\r\n\r\nException hit:\r\n```\r\nFile \"/Users/spoff/ParlAI/parlai/core/logs.py\", line 56, in __init__\r\n PathManager.makedirs(tbpath)\r\nAttributeError: 'PathManagerBase' object has no attribute 'makedirs'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\nVERSION = '0.9.1' # if you update, update parlai/__init__.py too!\n\nif sys.version_info < (3, 6):\n sys.exit('Sorry, Python >=3.6 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n # strip the header and badges etc\n readme = f.read().split('--------------------')[-1]\n\nwith open('requirements.txt') as f:\n reqs = []\n for line in f:\n line = line.strip()\n reqs.append(line.split('==')[0])\n\n\nif __name__ == '__main__':\n setup(\n name='parlai',\n version=VERSION,\n description='Unified platform for dialogue research.',\n long_description=readme,\n long_description_content_type='text/markdown',\n url='http://parl.ai/',\n python_requires='>=3.6',\n packages=find_packages(\n exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal*')\n ),\n install_requires=reqs,\n include_package_data=True,\n package_data={'': ['*.txt', '*.md']},\n entry_points={\n \"flake8.extension\": [\"PAI = parlai.utils.flake8:ParlAIChecker\"],\n \"console_scripts\": [\"parlai=parlai.__main__:main\"],\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Natural Language :: English\",\n ],\n )\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nLog metrics to tensorboard.\n\nThis file provides interface to log any metrics in tensorboard, could be\nextended to any other tool like visdom.\n\n.. code-block: none\n\n tensorboard --logdir <PARLAI_DATA/tensorboard> --port 8888.\n\"\"\"\n\nimport json\nimport numbers\nfrom parlai.core.opt import Opt\nfrom parlai.core.metrics import Metric\nfrom parlai.utils.io import PathManager\nimport parlai.utils.logging as logging\n\n\nclass TensorboardLogger(object):\n \"\"\"\n Log objects to tensorboard.\n \"\"\"\n\n @staticmethod\n def add_cmdline_args(argparser):\n \"\"\"\n Add tensorboard CLI args.\n \"\"\"\n logger = argparser.add_argument_group('Tensorboard Arguments')\n logger.add_argument(\n '-tblog',\n '--tensorboard-log',\n type='bool',\n default=False,\n help=\"Tensorboard logging of metrics, default is %(default)s\",\n hidden=False,\n )\n\n def __init__(self, opt: Opt):\n try:\n # tensorboard is a very expensive thing to import. Wait until the\n # last second to import it.\n from tensorboardX import SummaryWriter\n except ImportError:\n raise ImportError('Please run `pip install tensorboard tensorboardX`.')\n\n tbpath = opt['model_file'] + '.tensorboard'\n logging.debug(f'Saving tensorboard logs to: {tbpath}')\n if not PathManager.exists(tbpath):\n PathManager.makedirs(tbpath)\n self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))\n\n def log_metrics(self, setting, step, report):\n \"\"\"\n Add all metrics from tensorboard_metrics opt key.\n\n :param setting:\n One of train/valid/test. Will be used as the title for the graph.\n :param step:\n Number of parleys\n :param report:\n The report to log\n \"\"\"\n for k, v in report.items():\n if isinstance(v, numbers.Number):\n self.writer.add_scalar(f'{k}/{setting}', v, global_step=step)\n elif isinstance(v, Metric):\n self.writer.add_scalar(f'{k}/{setting}', v.value(), global_step=step)\n else:\n logging.error(f'k {k} v {v} is not a number')\n\n def flush(self):\n self.writer.flush()\n", "path": "parlai/core/logs.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n__version__ = '0.9.1'\n", "path": "parlai/__init__.py"}]}
| 2,033 | 356 |
gh_patches_debug_17281
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-3303
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't create package for windows with kivy 1.9 portable
I'm looking to port an existing kivy 1.8 project to kivy 1.9. I've just downloaded the portable version and have the application working.
However when packaging the app using pyinstaller and the instructions on http://kivy.org/docs/guide/packaging-windows.html the app packages, but on execution immediately fails with error:
```
Traceback (most recent call last):
File "<string>", line 34, in <module>
ImportError: No module named pygame.pkgdata
```
I've tried using my old .spec file and generating a new one with exactly the same results.
I'm a bit mystified where this is coming from as pygame isn't imported anywhere in my application and I thought it had been replaced with sdl2 in kivy 1.9. I'm also confused that the application works when run directly.
Anyone come across this issue or can point me in the right direction?
</issue>
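A minimal sketch of one way to keep the runtime hook import-safe when pygame is absent: wrap the monkey-patch in a try/except ImportError. This mirrors the guarded-patch idea and is illustrative rather than the project's verbatim fix; names follow the hook listed below.

```python
# Sketch: only patch pygame's resource lookup when pygame is actually bundled,
# so SDL2-only packages do not fail with "No module named pygame.pkgdata".
def _patched_get_resource(identifier, *args, **kwargs):
    # Pretend the bundled window icon is missing (see kivy issue #1638).
    if identifier == 'pygame_icon.tiff':
        raise IOError()
    return _original_get_resource(identifier, *args, **kwargs)

try:
    import pygame.pkgdata
    _original_get_resource = pygame.pkgdata.getResource
    pygame.pkgdata.getResource = _patched_get_resource
except ImportError:
    pass  # no pygame in the bundle: nothing to patch
```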
<code>
[start of kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py]
1 from os.path import join, dirname
2 from os import environ, chdir, putenv
3 import sys
4
5 root = 'kivy_install'
6 if hasattr(sys, '_MEIPASS'):
7 # PyInstaller >= 1.6
8 chdir(sys._MEIPASS)
9 root = join(sys._MEIPASS, root)
10 elif '_MEIPASS2' in environ:
11 # PyInstaller < 1.6 (tested on 1.5 only)
12 chdir(environ['_MEIPASS2'])
13 root = join(environ['_MEIPASS2'], root)
14 else:
15 chdir(dirname(sys.argv[0]))
16 root = join(dirname(sys.argv[0]), root)
17
18
19 sys.path += [join(root, '_libs')]
20
21 if sys.platform == 'darwin':
22 sitepackages = join(root, '..', 'sitepackages')
23 sys.path += [sitepackages, join(sitepackages, 'gst-0.10')]
24 putenv('GST_REGISTRY_FORK', 'no')
25
26 environ['GST_PLUGIN_PATH'] = join(root, '..', 'gst-plugins')
27 environ['KIVY_DATA_DIR'] = join(root, 'data')
28 environ['KIVY_EXTS_DIR'] = join(root, 'extensions')
29 environ['KIVY_MODULES_DIR'] = join(root, 'modules')
30 environ['KIVY_EMBED'] = '1'
31
32 # Monkey-patch pygame to get around an issue with Pygame window icon and
33 # PyInstaller 2.1. See kivy issue #1638
34 # Uncomment the following to package pygame
35 #import pygame.pkgdata
36 #_original_getResource = pygame.pkgdata.getResource
37 #
38 #
39 #def getResource(identifier, *args, **kwargs):
40 # if identifier == 'pygame_icon.tiff':
41 # raise IOError()
42 # return _original_getResource(identifier, *args, **kwargs)
43 #pygame.pkgdata.getResource = getResource
44
[end of kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py
--- a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py
+++ b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py
@@ -29,15 +29,17 @@
environ['KIVY_MODULES_DIR'] = join(root, 'modules')
environ['KIVY_EMBED'] = '1'
+
# Monkey-patch pygame to get around an issue with Pygame window icon and
# PyInstaller 2.1. See kivy issue #1638
-# Uncomment the following to package pygame
-#import pygame.pkgdata
-#_original_getResource = pygame.pkgdata.getResource
-#
-#
-#def getResource(identifier, *args, **kwargs):
-# if identifier == 'pygame_icon.tiff':
-# raise IOError()
-# return _original_getResource(identifier, *args, **kwargs)
-#pygame.pkgdata.getResource = getResource
+def getResource(identifier, *args, **kwargs):
+ if identifier == 'pygame_icon.tiff':
+ raise IOError()
+ return _original_getResource(identifier, *args, **kwargs)
+
+try:
+ import pygame.pkgdata
+ _original_getResource = pygame.pkgdata.getResource
+ pygame.pkgdata.getResource = getResource
+except ImportError:
+ pass
|
{"golden_diff": "diff --git a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py\n--- a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py\n+++ b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py\n@@ -29,15 +29,17 @@\n environ['KIVY_MODULES_DIR'] = join(root, 'modules')\n environ['KIVY_EMBED'] = '1'\n \n+\n # Monkey-patch pygame to get around an issue with Pygame window icon and\n # PyInstaller 2.1. See kivy issue #1638\n-# Uncomment the following to package pygame\n-#import pygame.pkgdata\n-#_original_getResource = pygame.pkgdata.getResource\n-#\n-#\n-#def getResource(identifier, *args, **kwargs):\n-# if identifier == 'pygame_icon.tiff':\n-# raise IOError()\n-# return _original_getResource(identifier, *args, **kwargs)\n-#pygame.pkgdata.getResource = getResource\n+def getResource(identifier, *args, **kwargs):\n+ if identifier == 'pygame_icon.tiff':\n+ raise IOError()\n+ return _original_getResource(identifier, *args, **kwargs)\n+\n+try:\n+ import pygame.pkgdata\n+ _original_getResource = pygame.pkgdata.getResource\n+ pygame.pkgdata.getResource = getResource\n+except ImportError:\n+ pass\n", "issue": "Can't create package for windows with kivy 1.9 portable\nI'm looking to port an existing kivy 1.8 project to kivy 1.9. I've just downloaded the portable version and have the application working.\n\nHowever when packaging the app using pyinstaller and the instructions on http://kivy.org/docs/guide/packaging-windows.html the app packages, but on execution immediately fails with error:\n\n```\nTraceback (most recent call last):\n File \"<string>\", line 34, in <module>\nImportError: No module named pygame.pkgdata\n```\n\nI've tried using my old .spec file and generating a new one with exactly the same results.\n\nI'm a bit mystified where this is coming from as pygame isn't imported anywhere in my application and I thought it had been replaced with sdl2 in kivy 1.9. I'm also confused that the application works when run directly.\n\nAnyone come across this issue or can point me in the right direction?\n\n", "before_files": [{"content": "from os.path import join, dirname\nfrom os import environ, chdir, putenv\nimport sys\n\nroot = 'kivy_install'\nif hasattr(sys, '_MEIPASS'):\n # PyInstaller >= 1.6\n chdir(sys._MEIPASS)\n root = join(sys._MEIPASS, root)\nelif '_MEIPASS2' in environ:\n # PyInstaller < 1.6 (tested on 1.5 only)\n chdir(environ['_MEIPASS2'])\n root = join(environ['_MEIPASS2'], root)\nelse:\n chdir(dirname(sys.argv[0]))\n root = join(dirname(sys.argv[0]), root)\n\n\nsys.path += [join(root, '_libs')]\n\nif sys.platform == 'darwin':\n sitepackages = join(root, '..', 'sitepackages')\n sys.path += [sitepackages, join(sitepackages, 'gst-0.10')]\n putenv('GST_REGISTRY_FORK', 'no')\n\nenviron['GST_PLUGIN_PATH'] = join(root, '..', 'gst-plugins')\nenviron['KIVY_DATA_DIR'] = join(root, 'data')\nenviron['KIVY_EXTS_DIR'] = join(root, 'extensions')\nenviron['KIVY_MODULES_DIR'] = join(root, 'modules')\nenviron['KIVY_EMBED'] = '1'\n\n# Monkey-patch pygame to get around an issue with Pygame window icon and\n# PyInstaller 2.1. 
See kivy issue #1638\n# Uncomment the following to package pygame\n#import pygame.pkgdata\n#_original_getResource = pygame.pkgdata.getResource\n#\n#\n#def getResource(identifier, *args, **kwargs):\n# if identifier == 'pygame_icon.tiff':\n# raise IOError()\n# return _original_getResource(identifier, *args, **kwargs)\n#pygame.pkgdata.getResource = getResource\n", "path": "kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py"}]}
| 1,238 | 319 |
gh_patches_debug_20296
|
rasdani/github-patches
|
git_diff
|
frappe__hrms-1584
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IFSC Code showing wrong value in Bank Remittance Report
### Information about bug
IFSC Code is showing the wrong value in the Bank Remittance Report. It shows the same IFSC Code for all the employees in the list.
### Module
Payroll
### Version
ERPNext: v14.52.1 (HEAD)
Frappe Framework: v14.57.0 (HEAD)
Frappe HR: v14.18.1 (HEAD)
### Installation method
FrappeCloud
### Relevant log output / Stack trace / Full Error Message.
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
</issue>
<code>
[start of hrms/payroll/report/bank_remittance/bank_remittance.py]
1 # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
2 # For license information, please see license.txt
3
4
5 import frappe
6 from frappe import _, get_all
7
8
9 def execute(filters=None):
10 columns = [
11 {
12 "label": _("Payroll Number"),
13 "fieldtype": "Link",
14 "fieldname": "payroll_no",
15 "options": "Payroll Entry",
16 "width": 150,
17 },
18 {
19 "label": _("Debit A/C Number"),
20 "fieldtype": "Int",
21 "fieldname": "debit_account",
22 "hidden": 1,
23 "width": 200,
24 },
25 {"label": _("Payment Date"), "fieldtype": "Data", "fieldname": "payment_date", "width": 100},
26 {
27 "label": _("Employee Name"),
28 "fieldtype": "Link",
29 "fieldname": "employee_name",
30 "options": "Employee",
31 "width": 200,
32 },
33 {"label": _("Bank Name"), "fieldtype": "Data", "fieldname": "bank_name", "width": 50},
34 {
35 "label": _("Employee A/C Number"),
36 "fieldtype": "Int",
37 "fieldname": "employee_account_no",
38 "width": 50,
39 },
40 ]
41
42 if frappe.db.has_column("Employee", "ifsc_code"):
43 columns.append(
44 {"label": _("IFSC Code"), "fieldtype": "Data", "fieldname": "bank_code", "width": 100}
45 )
46
47 columns += [
48 {"label": _("Currency"), "fieldtype": "Data", "fieldname": "currency", "width": 50},
49 {
50 "label": _("Net Salary Amount"),
51 "fieldtype": "Currency",
52 "options": "currency",
53 "fieldname": "amount",
54 "width": 100,
55 },
56 ]
57
58 data = []
59
60 accounts = get_bank_accounts()
61 payroll_entries = get_payroll_entries(accounts, filters)
62 salary_slips = get_salary_slips(payroll_entries)
63
64 if frappe.db.has_column("Employee", "ifsc_code"):
65 get_emp_bank_ifsc_code(salary_slips)
66
67 for salary in salary_slips:
68 if (
69 salary.bank_name
70 and salary.bank_account_no
71 and salary.debit_acc_no
72 and salary.status in ["Submitted", "Paid"]
73 ):
74 row = {
75 "payroll_no": salary.payroll_entry,
76 "debit_account": salary.debit_acc_no,
77 "payment_date": frappe.utils.formatdate(salary.modified.strftime("%Y-%m-%d")),
78 "bank_name": salary.bank_name,
79 "employee_account_no": salary.bank_account_no,
80 "bank_code": salary.ifsc_code,
81 "employee_name": salary.employee + ": " + salary.employee_name,
82 "currency": frappe.get_cached_value("Company", filters.company, "default_currency"),
83 "amount": salary.net_pay,
84 }
85 data.append(row)
86
87 return columns, data
88
89
90 def get_bank_accounts():
91 accounts = [d.name for d in get_all("Account", filters={"account_type": "Bank"})]
92 return accounts
93
94
95 def get_payroll_entries(accounts, filters):
96 payroll_filter = [
97 ("payment_account", "IN", accounts),
98 ("number_of_employees", ">", 0),
99 ("Company", "=", filters.company),
100 ]
101 if filters.to_date:
102 payroll_filter.append(("posting_date", "<", filters.to_date))
103
104 if filters.from_date:
105 payroll_filter.append(("posting_date", ">", filters.from_date))
106
107 entries = get_all("Payroll Entry", payroll_filter, ["name", "payment_account"])
108
109 payment_accounts = [d.payment_account for d in entries]
110 entries = set_company_account(payment_accounts, entries)
111 return entries
112
113
114 def get_salary_slips(payroll_entries):
115 payroll = [d.name for d in payroll_entries]
116 salary_slips = get_all(
117 "Salary Slip",
118 filters=[("payroll_entry", "IN", payroll)],
119 fields=[
120 "modified",
121 "net_pay",
122 "bank_name",
123 "bank_account_no",
124 "payroll_entry",
125 "employee",
126 "employee_name",
127 "status",
128 ],
129 )
130
131 payroll_entry_map = {}
132 for entry in payroll_entries:
133 payroll_entry_map[entry.name] = entry
134
135 # appending company debit accounts
136 for slip in salary_slips:
137 if slip.payroll_entry:
138 slip["debit_acc_no"] = payroll_entry_map[slip.payroll_entry]["company_account"]
139 else:
140 slip["debit_acc_no"] = None
141
142 return salary_slips
143
144
145 def get_emp_bank_ifsc_code(salary_slips):
146 emp_names = [d.employee for d in salary_slips]
147 ifsc_codes = get_all("Employee", [("name", "IN", emp_names)], ["ifsc_code", "name"])
148
149 ifsc_codes_map = {}
150 for code in ifsc_codes:
151 ifsc_codes_map[code.name] = code
152
153 for slip in salary_slips:
154 slip["ifsc_code"] = ifsc_codes_map[code.name]["ifsc_code"]
155
156 return salary_slips
157
158
159 def set_company_account(payment_accounts, payroll_entries):
160 company_accounts = get_all(
161 "Bank Account", [("account", "in", payment_accounts)], ["account", "bank_account_no"]
162 )
163 company_accounts_map = {}
164 for acc in company_accounts:
165 company_accounts_map[acc.account] = acc
166
167 for entry in payroll_entries:
168 company_account = ""
169 if entry.payment_account in company_accounts_map:
170 company_account = company_accounts_map[entry.payment_account]["bank_account_no"]
171 entry["company_account"] = company_account
172
173 return payroll_entries
174
[end of hrms/payroll/report/bank_remittance/bank_remittance.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hrms/payroll/report/bank_remittance/bank_remittance.py b/hrms/payroll/report/bank_remittance/bank_remittance.py
--- a/hrms/payroll/report/bank_remittance/bank_remittance.py
+++ b/hrms/payroll/report/bank_remittance/bank_remittance.py
@@ -22,7 +22,12 @@
"hidden": 1,
"width": 200,
},
- {"label": _("Payment Date"), "fieldtype": "Data", "fieldname": "payment_date", "width": 100},
+ {
+ "label": _("Payment Date"),
+ "fieldtype": "Data",
+ "fieldname": "payment_date",
+ "width": 100,
+ },
{
"label": _("Employee Name"),
"fieldtype": "Link",
@@ -146,12 +151,10 @@
emp_names = [d.employee for d in salary_slips]
ifsc_codes = get_all("Employee", [("name", "IN", emp_names)], ["ifsc_code", "name"])
- ifsc_codes_map = {}
- for code in ifsc_codes:
- ifsc_codes_map[code.name] = code
+ ifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}
for slip in salary_slips:
- slip["ifsc_code"] = ifsc_codes_map[code.name]["ifsc_code"]
+ slip["ifsc_code"] = ifsc_codes_map[slip.employee]
return salary_slips
|
{"golden_diff": "diff --git a/hrms/payroll/report/bank_remittance/bank_remittance.py b/hrms/payroll/report/bank_remittance/bank_remittance.py\n--- a/hrms/payroll/report/bank_remittance/bank_remittance.py\n+++ b/hrms/payroll/report/bank_remittance/bank_remittance.py\n@@ -22,7 +22,12 @@\n \t\t\t\"hidden\": 1,\n \t\t\t\"width\": 200,\n \t\t},\n-\t\t{\"label\": _(\"Payment Date\"), \"fieldtype\": \"Data\", \"fieldname\": \"payment_date\", \"width\": 100},\n+\t\t{\n+\t\t\t\"label\": _(\"Payment Date\"),\n+\t\t\t\"fieldtype\": \"Data\",\n+\t\t\t\"fieldname\": \"payment_date\",\n+\t\t\t\"width\": 100,\n+\t\t},\n \t\t{\n \t\t\t\"label\": _(\"Employee Name\"),\n \t\t\t\"fieldtype\": \"Link\",\n@@ -146,12 +151,10 @@\n \temp_names = [d.employee for d in salary_slips]\n \tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n \n-\tifsc_codes_map = {}\n-\tfor code in ifsc_codes:\n-\t\tifsc_codes_map[code.name] = code\n+\tifsc_codes_map = {code.name: code.ifsc_code for code in ifsc_codes}\n \n \tfor slip in salary_slips:\n-\t\tslip[\"ifsc_code\"] = ifsc_codes_map[code.name][\"ifsc_code\"]\n+\t\tslip[\"ifsc_code\"] = ifsc_codes_map[slip.employee]\n \n \treturn salary_slips\n", "issue": "IFSC Code showing wrong value in Bank Remittance Report\n### Information about bug\n\nIFSC Code showing wrong value in Bank Remittance Report. It is showing the same IFSC Code for all the employee in the list.\n\n### Module\n\nPayroll\n\n### Version\n\nERPNext: v14.52.1 (HEAD)\r\nFrappe Framework: v14.57.0 (HEAD)\r\nFrappe HR: v14.18.1 (HEAD)\n\n### Installation method\n\nFrappeCloud\n\n### Relevant log output / Stack trace / Full Error Message.\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. 
and contributors\n# For license information, please see license.txt\n\n\nimport frappe\nfrom frappe import _, get_all\n\n\ndef execute(filters=None):\n\tcolumns = [\n\t\t{\n\t\t\t\"label\": _(\"Payroll Number\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"payroll_no\",\n\t\t\t\"options\": \"Payroll Entry\",\n\t\t\t\"width\": 150,\n\t\t},\n\t\t{\n\t\t\t\"label\": _(\"Debit A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"debit_account\",\n\t\t\t\"hidden\": 1,\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Payment Date\"), \"fieldtype\": \"Data\", \"fieldname\": \"payment_date\", \"width\": 100},\n\t\t{\n\t\t\t\"label\": _(\"Employee Name\"),\n\t\t\t\"fieldtype\": \"Link\",\n\t\t\t\"fieldname\": \"employee_name\",\n\t\t\t\"options\": \"Employee\",\n\t\t\t\"width\": 200,\n\t\t},\n\t\t{\"label\": _(\"Bank Name\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_name\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Employee A/C Number\"),\n\t\t\t\"fieldtype\": \"Int\",\n\t\t\t\"fieldname\": \"employee_account_no\",\n\t\t\t\"width\": 50,\n\t\t},\n\t]\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tcolumns.append(\n\t\t\t{\"label\": _(\"IFSC Code\"), \"fieldtype\": \"Data\", \"fieldname\": \"bank_code\", \"width\": 100}\n\t\t)\n\n\tcolumns += [\n\t\t{\"label\": _(\"Currency\"), \"fieldtype\": \"Data\", \"fieldname\": \"currency\", \"width\": 50},\n\t\t{\n\t\t\t\"label\": _(\"Net Salary Amount\"),\n\t\t\t\"fieldtype\": \"Currency\",\n\t\t\t\"options\": \"currency\",\n\t\t\t\"fieldname\": \"amount\",\n\t\t\t\"width\": 100,\n\t\t},\n\t]\n\n\tdata = []\n\n\taccounts = get_bank_accounts()\n\tpayroll_entries = get_payroll_entries(accounts, filters)\n\tsalary_slips = get_salary_slips(payroll_entries)\n\n\tif frappe.db.has_column(\"Employee\", \"ifsc_code\"):\n\t\tget_emp_bank_ifsc_code(salary_slips)\n\n\tfor salary in salary_slips:\n\t\tif (\n\t\t\tsalary.bank_name\n\t\t\tand salary.bank_account_no\n\t\t\tand salary.debit_acc_no\n\t\t\tand salary.status in [\"Submitted\", \"Paid\"]\n\t\t):\n\t\t\trow = {\n\t\t\t\t\"payroll_no\": salary.payroll_entry,\n\t\t\t\t\"debit_account\": salary.debit_acc_no,\n\t\t\t\t\"payment_date\": frappe.utils.formatdate(salary.modified.strftime(\"%Y-%m-%d\")),\n\t\t\t\t\"bank_name\": salary.bank_name,\n\t\t\t\t\"employee_account_no\": salary.bank_account_no,\n\t\t\t\t\"bank_code\": salary.ifsc_code,\n\t\t\t\t\"employee_name\": salary.employee + \": \" + salary.employee_name,\n\t\t\t\t\"currency\": frappe.get_cached_value(\"Company\", filters.company, \"default_currency\"),\n\t\t\t\t\"amount\": salary.net_pay,\n\t\t\t}\n\t\t\tdata.append(row)\n\n\treturn columns, data\n\n\ndef get_bank_accounts():\n\taccounts = [d.name for d in get_all(\"Account\", filters={\"account_type\": \"Bank\"})]\n\treturn accounts\n\n\ndef get_payroll_entries(accounts, filters):\n\tpayroll_filter = [\n\t\t(\"payment_account\", \"IN\", accounts),\n\t\t(\"number_of_employees\", \">\", 0),\n\t\t(\"Company\", \"=\", filters.company),\n\t]\n\tif filters.to_date:\n\t\tpayroll_filter.append((\"posting_date\", \"<\", filters.to_date))\n\n\tif filters.from_date:\n\t\tpayroll_filter.append((\"posting_date\", \">\", filters.from_date))\n\n\tentries = get_all(\"Payroll Entry\", payroll_filter, [\"name\", \"payment_account\"])\n\n\tpayment_accounts = [d.payment_account for d in entries]\n\tentries = set_company_account(payment_accounts, entries)\n\treturn entries\n\n\ndef get_salary_slips(payroll_entries):\n\tpayroll = [d.name for d in 
payroll_entries]\n\tsalary_slips = get_all(\n\t\t\"Salary Slip\",\n\t\tfilters=[(\"payroll_entry\", \"IN\", payroll)],\n\t\tfields=[\n\t\t\t\"modified\",\n\t\t\t\"net_pay\",\n\t\t\t\"bank_name\",\n\t\t\t\"bank_account_no\",\n\t\t\t\"payroll_entry\",\n\t\t\t\"employee\",\n\t\t\t\"employee_name\",\n\t\t\t\"status\",\n\t\t],\n\t)\n\n\tpayroll_entry_map = {}\n\tfor entry in payroll_entries:\n\t\tpayroll_entry_map[entry.name] = entry\n\n\t# appending company debit accounts\n\tfor slip in salary_slips:\n\t\tif slip.payroll_entry:\n\t\t\tslip[\"debit_acc_no\"] = payroll_entry_map[slip.payroll_entry][\"company_account\"]\n\t\telse:\n\t\t\tslip[\"debit_acc_no\"] = None\n\n\treturn salary_slips\n\n\ndef get_emp_bank_ifsc_code(salary_slips):\n\temp_names = [d.employee for d in salary_slips]\n\tifsc_codes = get_all(\"Employee\", [(\"name\", \"IN\", emp_names)], [\"ifsc_code\", \"name\"])\n\n\tifsc_codes_map = {}\n\tfor code in ifsc_codes:\n\t\tifsc_codes_map[code.name] = code\n\n\tfor slip in salary_slips:\n\t\tslip[\"ifsc_code\"] = ifsc_codes_map[code.name][\"ifsc_code\"]\n\n\treturn salary_slips\n\n\ndef set_company_account(payment_accounts, payroll_entries):\n\tcompany_accounts = get_all(\n\t\t\"Bank Account\", [(\"account\", \"in\", payment_accounts)], [\"account\", \"bank_account_no\"]\n\t)\n\tcompany_accounts_map = {}\n\tfor acc in company_accounts:\n\t\tcompany_accounts_map[acc.account] = acc\n\n\tfor entry in payroll_entries:\n\t\tcompany_account = \"\"\n\t\tif entry.payment_account in company_accounts_map:\n\t\t\tcompany_account = company_accounts_map[entry.payment_account][\"bank_account_no\"]\n\t\tentry[\"company_account\"] = company_account\n\n\treturn payroll_entries\n", "path": "hrms/payroll/report/bank_remittance/bank_remittance.py"}]}
| 2,478 | 365 |
gh_patches_debug_33986
|
rasdani/github-patches
|
git_diff
|
facebookresearch__fairseq-190
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The raw output data cannot be used for training
When I use preprocess.py to output raw data with `--output-format raw`, the resulting files cannot be used by train.py with `--raw-text`. After looking through the source code, I worked around it by renaming the output files: 'train.src' to 'train.src-tgt.src' and 'train.tgt' to 'train.src-tgt.tgt' (assuming `--source-lang=src --target-lang=tgt`), and then training runs.
I think it's a bug and should be easy to fix :)
</issue>
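In other words, the reporter expects the raw-text loader's naming convention, where each split file carries the language pair. A small sketch of that convention, assuming `--source-lang=src --target-lang=tgt`:

```python
# Sketch: raw-text datasets are looked up as "<split>.<src>-<tgt>.<lang>",
# while the raw preprocess output described above was written as "<split>.<lang>".
def raw_output_name(split: str, source_lang: str, target_lang: str, lang: str) -> str:
    return f"{split}.{source_lang}-{target_lang}.{lang}"

# raw_output_name("train", "src", "tgt", "src") -> "train.src-tgt.src"
print(raw_output_name("train", "src", "tgt", "src"))
```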
<code>
[start of preprocess.py]
1 #!/usr/bin/env python3
2 # Copyright (c) 2017-present, Facebook, Inc.
3 # All rights reserved.
4 #
5 # This source code is licensed under the license found in the LICENSE file in
6 # the root directory of this source tree. An additional grant of patent rights
7 # can be found in the PATENTS file in the same directory.
8 #
9
10 import argparse
11 from itertools import zip_longest
12 import os
13 import shutil
14
15 from fairseq.data import indexed_dataset, dictionary
16 from fairseq.tokenizer import Tokenizer, tokenize_line
17
18
19 def get_parser():
20 parser = argparse.ArgumentParser(
21 description='Data pre-processing: Create dictionary and store data in binary format')
22 parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
23 parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
24 parser.add_argument('--trainpref', metavar='FP', default=None, help='target language')
25 parser.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid language prefixes')
26 parser.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test language prefixes')
27 parser.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
28 parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
29 help='map words appearing less than threshold times to unknown')
30 parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
31 help='map words appearing less than threshold times to unknown')
32 parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
33 parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
34 parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')
35 parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')
36 parser.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
37 parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],
38 help='output format (optional)')
39 parser.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
40 parser.add_argument('--only-source', action='store_true', help='Only process the source language')
41 parser.add_argument('--padding-factor', metavar='N', default=8, type=int,
42 help='Pad dictionary size to be multiple of N')
43 return parser
44
45
46 def main(args):
47 print(args)
48 os.makedirs(args.destdir, exist_ok=True)
49 target = not args.only_source
50
51 def build_dictionary(filenames):
52 d = dictionary.Dictionary()
53 for filename in filenames:
54 Tokenizer.add_file_to_dictionary(filename, d, tokenize_line)
55 return d
56
57 def train_path(lang):
58 return '{}{}'.format(args.trainpref, ('.' + lang) if lang else '')
59
60 def file_name(prefix, lang):
61 fname = prefix
62 if lang is not None:
63 fname += f'.{lang}'
64 return fname
65
66 def dest_path(prefix, lang):
67 return os.path.join(args.destdir, file_name(prefix, lang))
68
69 def dict_path(lang):
70 return dest_path('dict', lang) + '.txt'
71
72 def dataset_dest_path(output_prefix, lang, extension):
73 base = f'{args.destdir}/{output_prefix}'
74 lang_part = f'.{args.source_lang}-{args.target_lang}.{lang}' if lang is not None else ''
75 return f'{base}{lang_part}.{extension}'
76
77 if args.joined_dictionary:
78 assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'
79 assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'
80 src_dict = build_dictionary(set([
81 train_path(lang)
82 for lang in [args.source_lang, args.target_lang]
83 ]))
84 tgt_dict = src_dict
85 else:
86 if args.srcdict:
87 src_dict = dictionary.Dictionary.load(args.srcdict)
88 else:
89 assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
90 src_dict = build_dictionary([train_path(args.source_lang)])
91 if target:
92 if args.tgtdict:
93 tgt_dict = dictionary.Dictionary.load(args.tgtdict)
94 else:
95 assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
96 tgt_dict = build_dictionary([train_path(args.target_lang)])
97
98 src_dict.finalize(
99 threshold=args.thresholdsrc,
100 nwords=args.nwordssrc,
101 padding_factor=args.padding_factor,
102 )
103 src_dict.save(dict_path(args.source_lang))
104 if target:
105 if not args.joined_dictionary:
106 tgt_dict.finalize(
107 threshold=args.thresholdtgt,
108 nwords=args.nwordstgt,
109 padding_factor=args.padding_factor,
110 )
111 tgt_dict.save(dict_path(args.target_lang))
112
113 def make_binary_dataset(input_prefix, output_prefix, lang):
114 dict = dictionary.Dictionary.load(dict_path(lang))
115 print('| [{}] Dictionary: {} types'.format(lang, len(dict) - 1))
116
117 ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_path(output_prefix, lang, 'bin'))
118
119 def consumer(tensor):
120 ds.add_item(tensor)
121
122 input_file = '{}{}'.format(input_prefix, ('.' + lang) if lang is not None else '')
123 res = Tokenizer.binarize(input_file, dict, consumer)
124 print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
125 lang, input_file, res['nseq'], res['ntok'],
126 100 * res['nunk'] / res['ntok'], dict.unk_word))
127 ds.finalize(dataset_dest_path(output_prefix, lang, 'idx'))
128
129 def make_dataset(input_prefix, output_prefix, lang, output_format='binary'):
130 if output_format == 'binary':
131 make_binary_dataset(input_prefix, output_prefix, lang)
132 elif output_format == 'raw':
133 # Copy original text file to destination folder
134 output_text_file = dest_path(output_prefix, lang)
135 shutil.copyfile(file_name(input_prefix, lang), output_text_file)
136
137 def make_all(args, make_dataset, lang):
138 if args.trainpref:
139 make_dataset(args.trainpref, 'train', lang, args.output_format)
140 if args.validpref:
141 for k, validpref in enumerate(args.validpref.split(',')):
142 outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
143 make_dataset(validpref, outprefix, lang, args.output_format)
144 if args.testpref:
145 for k, testpref in enumerate(args.testpref.split(',')):
146 outprefix = 'test{}'.format(k) if k > 0 else 'test'
147 make_dataset(testpref, outprefix, lang, args.output_format)
148
149 make_all(args, make_dataset, args.source_lang)
150 if target:
151 make_all(args, make_dataset, args.target_lang)
152
153 print('| Wrote preprocessed data to {}'.format(args.destdir))
154
155 if args.alignfile:
156 assert args.trainpref, "--trainpref must be set if --alignfile is specified"
157 src_file_name = train_path(args.source_lang)
158 tgt_file_name = train_path(args.target_lang)
159 src_dict = dictionary.Dictionary.load(dict_path(args.source_lang))
160 tgt_dict = dictionary.Dictionary.load(dict_path(args.target_lang))
161 freq_map = {}
162 with open(args.alignfile, 'r') as align_file:
163 with open(src_file_name, 'r') as src_file:
164 with open(tgt_file_name, 'r') as tgt_file:
165 for a, s, t in zip_longest(align_file, src_file, tgt_file):
166 si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
167 ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
168 ai = list(map(lambda x: tuple(x.split('-')), a.split()))
169 for sai, tai in ai:
170 srcidx = si[int(sai)]
171 tgtidx = ti[int(tai)]
172 if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
173 assert srcidx != src_dict.pad()
174 assert srcidx != src_dict.eos()
175 assert tgtidx != tgt_dict.pad()
176 assert tgtidx != tgt_dict.eos()
177
178 if srcidx not in freq_map:
179 freq_map[srcidx] = {}
180 if tgtidx not in freq_map[srcidx]:
181 freq_map[srcidx][tgtidx] = 1
182 else:
183 freq_map[srcidx][tgtidx] += 1
184
185 align_dict = {}
186 for srcidx in freq_map.keys():
187 align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
188
189 with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(
190 args.source_lang, args.target_lang)), 'w') as f:
191 for k, v in align_dict.items():
192 print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
193
194
195 if __name__ == '__main__':
196 parser = get_parser()
197 args = parser.parse_args()
198 main(args)
199
[end of preprocess.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/preprocess.py b/preprocess.py
--- a/preprocess.py
+++ b/preprocess.py
@@ -126,29 +126,32 @@
100 * res['nunk'] / res['ntok'], dict.unk_word))
ds.finalize(dataset_dest_path(output_prefix, lang, 'idx'))
- def make_dataset(input_prefix, output_prefix, lang, output_format='binary'):
- if output_format == 'binary':
+ def make_dataset(input_prefix, output_prefix, lang):
+ if args.output_format == 'binary':
make_binary_dataset(input_prefix, output_prefix, lang)
- elif output_format == 'raw':
+ elif args.output_format == 'raw':
# Copy original text file to destination folder
- output_text_file = dest_path(output_prefix, lang)
+ output_text_file = dest_path(
+ output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang),
+ lang,
+ )
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
- def make_all(args, make_dataset, lang):
+ def make_all(lang):
if args.trainpref:
- make_dataset(args.trainpref, 'train', lang, args.output_format)
+ make_dataset(args.trainpref, 'train', lang)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(',')):
outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
- make_dataset(validpref, outprefix, lang, args.output_format)
+ make_dataset(validpref, outprefix, lang)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(',')):
outprefix = 'test{}'.format(k) if k > 0 else 'test'
- make_dataset(testpref, outprefix, lang, args.output_format)
+ make_dataset(testpref, outprefix, lang)
- make_all(args, make_dataset, args.source_lang)
+ make_all(args.source_lang)
if target:
- make_all(args, make_dataset, args.target_lang)
+ make_all(args.target_lang)
print('| Wrote preprocessed data to {}'.format(args.destdir))
|
{"golden_diff": "diff --git a/preprocess.py b/preprocess.py\n--- a/preprocess.py\n+++ b/preprocess.py\n@@ -126,29 +126,32 @@\n 100 * res['nunk'] / res['ntok'], dict.unk_word))\n ds.finalize(dataset_dest_path(output_prefix, lang, 'idx'))\n \n- def make_dataset(input_prefix, output_prefix, lang, output_format='binary'):\n- if output_format == 'binary':\n+ def make_dataset(input_prefix, output_prefix, lang):\n+ if args.output_format == 'binary':\n make_binary_dataset(input_prefix, output_prefix, lang)\n- elif output_format == 'raw':\n+ elif args.output_format == 'raw':\n # Copy original text file to destination folder\n- output_text_file = dest_path(output_prefix, lang)\n+ output_text_file = dest_path(\n+ output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang),\n+ lang,\n+ )\n shutil.copyfile(file_name(input_prefix, lang), output_text_file)\n \n- def make_all(args, make_dataset, lang):\n+ def make_all(lang):\n if args.trainpref:\n- make_dataset(args.trainpref, 'train', lang, args.output_format)\n+ make_dataset(args.trainpref, 'train', lang)\n if args.validpref:\n for k, validpref in enumerate(args.validpref.split(',')):\n outprefix = 'valid{}'.format(k) if k > 0 else 'valid'\n- make_dataset(validpref, outprefix, lang, args.output_format)\n+ make_dataset(validpref, outprefix, lang)\n if args.testpref:\n for k, testpref in enumerate(args.testpref.split(',')):\n outprefix = 'test{}'.format(k) if k > 0 else 'test'\n- make_dataset(testpref, outprefix, lang, args.output_format)\n+ make_dataset(testpref, outprefix, lang)\n \n- make_all(args, make_dataset, args.source_lang)\n+ make_all(args.source_lang)\n if target:\n- make_all(args, make_dataset, args.target_lang)\n+ make_all(args.target_lang)\n \n print('| Wrote preprocessed data to {}'.format(args.destdir))\n", "issue": "the raw output data can not be trained\nwhen I use preprocess.py to ouput raw data by use `--output-format raw`, the output data file can not be used to train.py which I also use `--raw-text`. Through looking the source code, I change the output data file mentioned above in this way: rename 'train.src' to 'train.src-tgt.src' and 'train.tgt' to 'train.src-tgt.tgt' (assume I use `--source-lang=src --target-lang=tgt` ) and this can run.\r\nI think it's a bug and is easy to fix :)\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. 
An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n#\n\nimport argparse\nfrom itertools import zip_longest\nimport os\nimport shutil\n\nfrom fairseq.data import indexed_dataset, dictionary\nfrom fairseq.tokenizer import Tokenizer, tokenize_line\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='Data pre-processing: Create dictionary and store data in binary format')\n parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')\n parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')\n parser.add_argument('--trainpref', metavar='FP', default=None, help='target language')\n parser.add_argument('--validpref', metavar='FP', default=None, help='comma separated, valid language prefixes')\n parser.add_argument('--testpref', metavar='FP', default=None, help='comma separated, test language prefixes')\n parser.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')\n parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,\n help='map words appearing less than threshold times to unknown')\n parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,\n help='map words appearing less than threshold times to unknown')\n parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')\n parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')\n parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')\n parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')\n parser.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')\n parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],\n help='output format (optional)')\n parser.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')\n parser.add_argument('--only-source', action='store_true', help='Only process the source language')\n parser.add_argument('--padding-factor', metavar='N', default=8, type=int,\n help='Pad dictionary size to be multiple of N')\n return parser\n\n\ndef main(args):\n print(args)\n os.makedirs(args.destdir, exist_ok=True)\n target = not args.only_source\n\n def build_dictionary(filenames):\n d = dictionary.Dictionary()\n for filename in filenames:\n Tokenizer.add_file_to_dictionary(filename, d, tokenize_line)\n return d\n\n def train_path(lang):\n return '{}{}'.format(args.trainpref, ('.' 
+ lang) if lang else '')\n\n def file_name(prefix, lang):\n fname = prefix\n if lang is not None:\n fname += f'.{lang}'\n return fname\n\n def dest_path(prefix, lang):\n return os.path.join(args.destdir, file_name(prefix, lang))\n\n def dict_path(lang):\n return dest_path('dict', lang) + '.txt'\n\n def dataset_dest_path(output_prefix, lang, extension):\n base = f'{args.destdir}/{output_prefix}'\n lang_part = f'.{args.source_lang}-{args.target_lang}.{lang}' if lang is not None else ''\n return f'{base}{lang_part}.{extension}'\n\n if args.joined_dictionary:\n assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'\n assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'\n src_dict = build_dictionary(set([\n train_path(lang)\n for lang in [args.source_lang, args.target_lang]\n ]))\n tgt_dict = src_dict\n else:\n if args.srcdict:\n src_dict = dictionary.Dictionary.load(args.srcdict)\n else:\n assert args.trainpref, \"--trainpref must be set if --srcdict is not specified\"\n src_dict = build_dictionary([train_path(args.source_lang)])\n if target:\n if args.tgtdict:\n tgt_dict = dictionary.Dictionary.load(args.tgtdict)\n else:\n assert args.trainpref, \"--trainpref must be set if --tgtdict is not specified\"\n tgt_dict = build_dictionary([train_path(args.target_lang)])\n\n src_dict.finalize(\n threshold=args.thresholdsrc,\n nwords=args.nwordssrc,\n padding_factor=args.padding_factor,\n )\n src_dict.save(dict_path(args.source_lang))\n if target:\n if not args.joined_dictionary:\n tgt_dict.finalize(\n threshold=args.thresholdtgt,\n nwords=args.nwordstgt,\n padding_factor=args.padding_factor,\n )\n tgt_dict.save(dict_path(args.target_lang))\n\n def make_binary_dataset(input_prefix, output_prefix, lang):\n dict = dictionary.Dictionary.load(dict_path(lang))\n print('| [{}] Dictionary: {} types'.format(lang, len(dict) - 1))\n\n ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_path(output_prefix, lang, 'bin'))\n\n def consumer(tensor):\n ds.add_item(tensor)\n\n input_file = '{}{}'.format(input_prefix, ('.' 
+ lang) if lang is not None else '')\n res = Tokenizer.binarize(input_file, dict, consumer)\n print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(\n lang, input_file, res['nseq'], res['ntok'],\n 100 * res['nunk'] / res['ntok'], dict.unk_word))\n ds.finalize(dataset_dest_path(output_prefix, lang, 'idx'))\n\n def make_dataset(input_prefix, output_prefix, lang, output_format='binary'):\n if output_format == 'binary':\n make_binary_dataset(input_prefix, output_prefix, lang)\n elif output_format == 'raw':\n # Copy original text file to destination folder\n output_text_file = dest_path(output_prefix, lang)\n shutil.copyfile(file_name(input_prefix, lang), output_text_file)\n\n def make_all(args, make_dataset, lang):\n if args.trainpref:\n make_dataset(args.trainpref, 'train', lang, args.output_format)\n if args.validpref:\n for k, validpref in enumerate(args.validpref.split(',')):\n outprefix = 'valid{}'.format(k) if k > 0 else 'valid'\n make_dataset(validpref, outprefix, lang, args.output_format)\n if args.testpref:\n for k, testpref in enumerate(args.testpref.split(',')):\n outprefix = 'test{}'.format(k) if k > 0 else 'test'\n make_dataset(testpref, outprefix, lang, args.output_format)\n\n make_all(args, make_dataset, args.source_lang)\n if target:\n make_all(args, make_dataset, args.target_lang)\n\n print('| Wrote preprocessed data to {}'.format(args.destdir))\n\n if args.alignfile:\n assert args.trainpref, \"--trainpref must be set if --alignfile is specified\"\n src_file_name = train_path(args.source_lang)\n tgt_file_name = train_path(args.target_lang)\n src_dict = dictionary.Dictionary.load(dict_path(args.source_lang))\n tgt_dict = dictionary.Dictionary.load(dict_path(args.target_lang))\n freq_map = {}\n with open(args.alignfile, 'r') as align_file:\n with open(src_file_name, 'r') as src_file:\n with open(tgt_file_name, 'r') as tgt_file:\n for a, s, t in zip_longest(align_file, src_file, tgt_file):\n si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)\n ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)\n ai = list(map(lambda x: tuple(x.split('-')), a.split()))\n for sai, tai in ai:\n srcidx = si[int(sai)]\n tgtidx = ti[int(tai)]\n if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():\n assert srcidx != src_dict.pad()\n assert srcidx != src_dict.eos()\n assert tgtidx != tgt_dict.pad()\n assert tgtidx != tgt_dict.eos()\n\n if srcidx not in freq_map:\n freq_map[srcidx] = {}\n if tgtidx not in freq_map[srcidx]:\n freq_map[srcidx][tgtidx] = 1\n else:\n freq_map[srcidx][tgtidx] += 1\n\n align_dict = {}\n for srcidx in freq_map.keys():\n align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)\n\n with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(\n args.source_lang, args.target_lang)), 'w') as f:\n for k, v in align_dict.items():\n print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)\n\n\nif __name__ == '__main__':\n parser = get_parser()\n args = parser.parse_args()\n main(args)\n", "path": "preprocess.py"}]}
| 3,170 | 485 |
gh_patches_debug_31802
|
rasdani/github-patches
|
git_diff
|
microsoft__botbuilder-python-1240
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
SkillHandler doesn't return ResourceResponse when forwarding activities (Python)
See [parent](https://github.com/microsoft/botframework-sdk/issues/5919)
</issue>
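One plausible direction, sketched here under assumptions rather than taken from the repository's actual fix, is to capture the ResourceResponse produced by send_activity inside the continue_conversation callback and return that instead of always minting a new id:

```python
# Sketch only: surface the channel's ResourceResponse from the forwarding path.
# Signatures mirror the handler listed below; this is not the shipped fix.
from uuid import uuid4
from botbuilder.schema import ResourceResponse

async def forward_activity(adapter, claims_identity, reference, activity, audience):
    captured = {}

    async def callback(context):
        # ...apply the conversation reference exactly as the handler below does...
        captured["response"] = await context.send_activity(activity)

    await adapter.continue_conversation(
        reference, callback, claims_identity=claims_identity, audience=audience
    )
    # Prefer the id the channel returned; fall back to a generated one.
    return captured.get("response") or ResourceResponse(id=str(uuid4()))
```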
<code>
[start of libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from uuid import uuid4
5
6 from botbuilder.core import Bot, BotAdapter, ChannelServiceHandler, TurnContext
7 from botbuilder.schema import (
8 Activity,
9 ActivityTypes,
10 ResourceResponse,
11 CallerIdConstants,
12 )
13 from botframework.connector.auth import (
14 AuthenticationConfiguration,
15 AuthenticationConstants,
16 ChannelProvider,
17 ClaimsIdentity,
18 CredentialProvider,
19 GovernmentConstants,
20 JwtTokenValidation,
21 )
22 from .skill_conversation_reference import SkillConversationReference
23 from .conversation_id_factory import ConversationIdFactoryBase
24
25
26 class SkillHandler(ChannelServiceHandler):
27
28 SKILL_CONVERSATION_REFERENCE_KEY = (
29 "botbuilder.core.skills.SkillConversationReference"
30 )
31
32 def __init__(
33 self,
34 adapter: BotAdapter,
35 bot: Bot,
36 conversation_id_factory: ConversationIdFactoryBase,
37 credential_provider: CredentialProvider,
38 auth_configuration: AuthenticationConfiguration,
39 channel_provider: ChannelProvider = None,
40 logger: object = None,
41 ):
42 super().__init__(credential_provider, auth_configuration, channel_provider)
43
44 if not adapter:
45 raise TypeError("adapter can't be None")
46 if not bot:
47 raise TypeError("bot can't be None")
48 if not conversation_id_factory:
49 raise TypeError("conversation_id_factory can't be None")
50
51 self._adapter = adapter
52 self._bot = bot
53 self._conversation_id_factory = conversation_id_factory
54 self._logger = logger
55
56 async def on_send_to_conversation(
57 self, claims_identity: ClaimsIdentity, conversation_id: str, activity: Activity,
58 ) -> ResourceResponse:
59 """
60 send_to_conversation() API for Skill
61
62 This method allows you to send an activity to the end of a conversation.
63
64 This is slightly different from ReplyToActivity().
65 * SendToConversation(conversationId) - will append the activity to the end
66 of the conversation according to the timestamp or semantics of the channel.
67 * ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply
68 to another activity, if the channel supports it. If the channel does not
69 support nested replies, ReplyToActivity falls back to SendToConversation.
70
71 Use ReplyToActivity when replying to a specific activity in the
72 conversation.
73
74 Use SendToConversation in all other cases.
75 :param claims_identity: Claims identity for the bot.
76 :type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`
77 :param conversation_id:The conversation ID.
78 :type conversation_id: str
79 :param activity: Activity to send.
80 :type activity: Activity
81 :return:
82 """
83 return await self._process_activity(
84 claims_identity, conversation_id, None, activity,
85 )
86
87 async def on_reply_to_activity(
88 self,
89 claims_identity: ClaimsIdentity,
90 conversation_id: str,
91 activity_id: str,
92 activity: Activity,
93 ) -> ResourceResponse:
94 """
95 reply_to_activity() API for Skill.
96
97 This method allows you to reply to an activity.
98
99 This is slightly different from SendToConversation().
100 * SendToConversation(conversationId) - will append the activity to the end
101 of the conversation according to the timestamp or semantics of the channel.
102 * ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply
103 to another activity, if the channel supports it. If the channel does not
104 support nested replies, ReplyToActivity falls back to SendToConversation.
105
106 Use ReplyToActivity when replying to a specific activity in the
107 conversation.
108
109 Use SendToConversation in all other cases.
110 :param claims_identity: Claims identity for the bot.
111 :type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`
112 :param conversation_id:The conversation ID.
113 :type conversation_id: str
114 :param activity: Activity to send.
115 :type activity: Activity
116 :return:
117 """
118 return await self._process_activity(
119 claims_identity, conversation_id, activity_id, activity,
120 )
121
122 async def _process_activity(
123 self,
124 claims_identity: ClaimsIdentity,
125 conversation_id: str,
126 reply_to_activity_id: str,
127 activity: Activity,
128 ) -> ResourceResponse:
129 # Get the SkillsConversationReference
130 conversation_reference_result = await self._conversation_id_factory.get_conversation_reference(
131 conversation_id
132 )
133
134 # ConversationIdFactory can return either a SkillConversationReference (the newer way),
135 # or a ConversationReference (the old way, but still here for compatibility). If a
136 # ConversationReference is returned, build a new SkillConversationReference to simplify
137 # the remainder of this method.
138 skill_conversation_reference: SkillConversationReference = None
139 if isinstance(conversation_reference_result, SkillConversationReference):
140 skill_conversation_reference = conversation_reference_result
141 else:
142 skill_conversation_reference = SkillConversationReference(
143 conversation_reference=conversation_reference_result,
144 oauth_scope=(
145 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
146 if self._channel_provider and self._channel_provider.is_government()
147 else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
148 ),
149 )
150
151 if not skill_conversation_reference:
152 raise KeyError("SkillConversationReference not found")
153
154 async def callback(context: TurnContext):
155 context.turn_state[
156 SkillHandler.SKILL_CONVERSATION_REFERENCE_KEY
157 ] = skill_conversation_reference
158
159 TurnContext.apply_conversation_reference(
160 activity, skill_conversation_reference.conversation_reference
161 )
162
163 context.activity.id = reply_to_activity_id
164
165 app_id = JwtTokenValidation.get_app_id_from_claims(claims_identity.claims)
166 context.activity.caller_id = (
167 f"{CallerIdConstants.bot_to_bot_prefix}{app_id}"
168 )
169
170 if activity.type == ActivityTypes.end_of_conversation:
171 await self._conversation_id_factory.delete_conversation_reference(
172 conversation_id
173 )
174 self._apply_eoc_to_turn_context_activity(context, activity)
175 await self._bot.on_turn(context)
176 elif activity.type == ActivityTypes.event:
177 self._apply_event_to_turn_context_activity(context, activity)
178 await self._bot.on_turn(context)
179 else:
180 await context.send_activity(activity)
181
182 await self._adapter.continue_conversation(
183 skill_conversation_reference.conversation_reference,
184 callback,
185 claims_identity=claims_identity,
186 audience=skill_conversation_reference.oauth_scope,
187 )
188 return ResourceResponse(id=str(uuid4()))
189
190 @staticmethod
191 def _apply_eoc_to_turn_context_activity(
192 context: TurnContext, end_of_conversation_activity: Activity
193 ):
194 context.activity.type = end_of_conversation_activity.type
195 context.activity.text = end_of_conversation_activity.text
196 context.activity.code = end_of_conversation_activity.code
197
198 context.activity.reply_to_id = end_of_conversation_activity.reply_to_id
199 context.activity.value = end_of_conversation_activity.value
200 context.activity.entities = end_of_conversation_activity.entities
201 context.activity.locale = end_of_conversation_activity.locale
202 context.activity.local_timestamp = end_of_conversation_activity.local_timestamp
203 context.activity.timestamp = end_of_conversation_activity.timestamp
204 context.activity.channel_data = end_of_conversation_activity.channel_data
205 context.activity.additional_properties = (
206 end_of_conversation_activity.additional_properties
207 )
208
209 @staticmethod
210 def _apply_event_to_turn_context_activity(
211 context: TurnContext, event_activity: Activity
212 ):
213 context.activity.type = event_activity.type
214 context.activity.name = event_activity.name
215 context.activity.value = event_activity.value
216 context.activity.relates_to = event_activity.relates_to
217
218 context.activity.reply_to_id = event_activity.reply_to_id
219 context.activity.value = event_activity.value
220 context.activity.entities = event_activity.entities
221 context.activity.locale = event_activity.locale
222 context.activity.local_timestamp = event_activity.local_timestamp
223 context.activity.timestamp = event_activity.timestamp
224 context.activity.channel_data = event_activity.channel_data
225 context.activity.additional_properties = event_activity.additional_properties
226
[end of libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py b/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py
--- a/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py
+++ b/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py
@@ -151,7 +151,14 @@
if not skill_conversation_reference:
raise KeyError("SkillConversationReference not found")
+ if not skill_conversation_reference.conversation_reference:
+ raise KeyError("conversationReference not found")
+
+ # If an activity is sent, return the ResourceResponse
+ resource_response: ResourceResponse = None
+
async def callback(context: TurnContext):
+ nonlocal resource_response
context.turn_state[
SkillHandler.SKILL_CONVERSATION_REFERENCE_KEY
] = skill_conversation_reference
@@ -177,7 +184,7 @@
self._apply_event_to_turn_context_activity(context, activity)
await self._bot.on_turn(context)
else:
- await context.send_activity(activity)
+ resource_response = await context.send_activity(activity)
await self._adapter.continue_conversation(
skill_conversation_reference.conversation_reference,
@@ -185,7 +192,11 @@
claims_identity=claims_identity,
audience=skill_conversation_reference.oauth_scope,
)
- return ResourceResponse(id=str(uuid4()))
+
+ if not resource_response:
+ resource_response = ResourceResponse(id=str(uuid4()))
+
+ return resource_response
@staticmethod
def _apply_eoc_to_turn_context_activity(
|
{"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py b/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py\n--- a/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py\n+++ b/libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py\n@@ -151,7 +151,14 @@\n if not skill_conversation_reference:\n raise KeyError(\"SkillConversationReference not found\")\n \n+ if not skill_conversation_reference.conversation_reference:\n+ raise KeyError(\"conversationReference not found\")\n+\n+ # If an activity is sent, return the ResourceResponse\n+ resource_response: ResourceResponse = None\n+\n async def callback(context: TurnContext):\n+ nonlocal resource_response\n context.turn_state[\n SkillHandler.SKILL_CONVERSATION_REFERENCE_KEY\n ] = skill_conversation_reference\n@@ -177,7 +184,7 @@\n self._apply_event_to_turn_context_activity(context, activity)\n await self._bot.on_turn(context)\n else:\n- await context.send_activity(activity)\n+ resource_response = await context.send_activity(activity)\n \n await self._adapter.continue_conversation(\n skill_conversation_reference.conversation_reference,\n@@ -185,7 +192,11 @@\n claims_identity=claims_identity,\n audience=skill_conversation_reference.oauth_scope,\n )\n- return ResourceResponse(id=str(uuid4()))\n+\n+ if not resource_response:\n+ resource_response = ResourceResponse(id=str(uuid4()))\n+\n+ return resource_response\n \n @staticmethod\n def _apply_eoc_to_turn_context_activity(\n", "issue": "SkillHandler doesn't return ResourceResponse when forwarding activities (Python)\nSee [parent](https://github.com/microsoft/botframework-sdk/issues/5919)\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom uuid import uuid4\n\nfrom botbuilder.core import Bot, BotAdapter, ChannelServiceHandler, TurnContext\nfrom botbuilder.schema import (\n Activity,\n ActivityTypes,\n ResourceResponse,\n CallerIdConstants,\n)\nfrom botframework.connector.auth import (\n AuthenticationConfiguration,\n AuthenticationConstants,\n ChannelProvider,\n ClaimsIdentity,\n CredentialProvider,\n GovernmentConstants,\n JwtTokenValidation,\n)\nfrom .skill_conversation_reference import SkillConversationReference\nfrom .conversation_id_factory import ConversationIdFactoryBase\n\n\nclass SkillHandler(ChannelServiceHandler):\n\n SKILL_CONVERSATION_REFERENCE_KEY = (\n \"botbuilder.core.skills.SkillConversationReference\"\n )\n\n def __init__(\n self,\n adapter: BotAdapter,\n bot: Bot,\n conversation_id_factory: ConversationIdFactoryBase,\n credential_provider: CredentialProvider,\n auth_configuration: AuthenticationConfiguration,\n channel_provider: ChannelProvider = None,\n logger: object = None,\n ):\n super().__init__(credential_provider, auth_configuration, channel_provider)\n\n if not adapter:\n raise TypeError(\"adapter can't be None\")\n if not bot:\n raise TypeError(\"bot can't be None\")\n if not conversation_id_factory:\n raise TypeError(\"conversation_id_factory can't be None\")\n\n self._adapter = adapter\n self._bot = bot\n self._conversation_id_factory = conversation_id_factory\n self._logger = logger\n\n async def on_send_to_conversation(\n self, claims_identity: ClaimsIdentity, conversation_id: str, activity: Activity,\n ) -> ResourceResponse:\n \"\"\"\n send_to_conversation() API for Skill\n\n This method allows you to send an activity to the end of a conversation.\n\n This is slightly different from ReplyToActivity().\n * 
SendToConversation(conversationId) - will append the activity to the end\n of the conversation according to the timestamp or semantics of the channel.\n * ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply\n to another activity, if the channel supports it. If the channel does not\n support nested replies, ReplyToActivity falls back to SendToConversation.\n\n Use ReplyToActivity when replying to a specific activity in the\n conversation.\n\n Use SendToConversation in all other cases.\n :param claims_identity: Claims identity for the bot.\n :type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`\n :param conversation_id:The conversation ID.\n :type conversation_id: str\n :param activity: Activity to send.\n :type activity: Activity\n :return:\n \"\"\"\n return await self._process_activity(\n claims_identity, conversation_id, None, activity,\n )\n\n async def on_reply_to_activity(\n self,\n claims_identity: ClaimsIdentity,\n conversation_id: str,\n activity_id: str,\n activity: Activity,\n ) -> ResourceResponse:\n \"\"\"\n reply_to_activity() API for Skill.\n\n This method allows you to reply to an activity.\n\n This is slightly different from SendToConversation().\n * SendToConversation(conversationId) - will append the activity to the end\n of the conversation according to the timestamp or semantics of the channel.\n * ReplyToActivity(conversationId,ActivityId) - adds the activity as a reply\n to another activity, if the channel supports it. If the channel does not\n support nested replies, ReplyToActivity falls back to SendToConversation.\n\n Use ReplyToActivity when replying to a specific activity in the\n conversation.\n\n Use SendToConversation in all other cases.\n :param claims_identity: Claims identity for the bot.\n :type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`\n :param conversation_id:The conversation ID.\n :type conversation_id: str\n :param activity: Activity to send.\n :type activity: Activity\n :return:\n \"\"\"\n return await self._process_activity(\n claims_identity, conversation_id, activity_id, activity,\n )\n\n async def _process_activity(\n self,\n claims_identity: ClaimsIdentity,\n conversation_id: str,\n reply_to_activity_id: str,\n activity: Activity,\n ) -> ResourceResponse:\n # Get the SkillsConversationReference\n conversation_reference_result = await self._conversation_id_factory.get_conversation_reference(\n conversation_id\n )\n\n # ConversationIdFactory can return either a SkillConversationReference (the newer way),\n # or a ConversationReference (the old way, but still here for compatibility). 
If a\n # ConversationReference is returned, build a new SkillConversationReference to simplify\n # the remainder of this method.\n skill_conversation_reference: SkillConversationReference = None\n if isinstance(conversation_reference_result, SkillConversationReference):\n skill_conversation_reference = conversation_reference_result\n else:\n skill_conversation_reference = SkillConversationReference(\n conversation_reference=conversation_reference_result,\n oauth_scope=(\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n if self._channel_provider and self._channel_provider.is_government()\n else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n ),\n )\n\n if not skill_conversation_reference:\n raise KeyError(\"SkillConversationReference not found\")\n\n async def callback(context: TurnContext):\n context.turn_state[\n SkillHandler.SKILL_CONVERSATION_REFERENCE_KEY\n ] = skill_conversation_reference\n\n TurnContext.apply_conversation_reference(\n activity, skill_conversation_reference.conversation_reference\n )\n\n context.activity.id = reply_to_activity_id\n\n app_id = JwtTokenValidation.get_app_id_from_claims(claims_identity.claims)\n context.activity.caller_id = (\n f\"{CallerIdConstants.bot_to_bot_prefix}{app_id}\"\n )\n\n if activity.type == ActivityTypes.end_of_conversation:\n await self._conversation_id_factory.delete_conversation_reference(\n conversation_id\n )\n self._apply_eoc_to_turn_context_activity(context, activity)\n await self._bot.on_turn(context)\n elif activity.type == ActivityTypes.event:\n self._apply_event_to_turn_context_activity(context, activity)\n await self._bot.on_turn(context)\n else:\n await context.send_activity(activity)\n\n await self._adapter.continue_conversation(\n skill_conversation_reference.conversation_reference,\n callback,\n claims_identity=claims_identity,\n audience=skill_conversation_reference.oauth_scope,\n )\n return ResourceResponse(id=str(uuid4()))\n\n @staticmethod\n def _apply_eoc_to_turn_context_activity(\n context: TurnContext, end_of_conversation_activity: Activity\n ):\n context.activity.type = end_of_conversation_activity.type\n context.activity.text = end_of_conversation_activity.text\n context.activity.code = end_of_conversation_activity.code\n\n context.activity.reply_to_id = end_of_conversation_activity.reply_to_id\n context.activity.value = end_of_conversation_activity.value\n context.activity.entities = end_of_conversation_activity.entities\n context.activity.locale = end_of_conversation_activity.locale\n context.activity.local_timestamp = end_of_conversation_activity.local_timestamp\n context.activity.timestamp = end_of_conversation_activity.timestamp\n context.activity.channel_data = end_of_conversation_activity.channel_data\n context.activity.additional_properties = (\n end_of_conversation_activity.additional_properties\n )\n\n @staticmethod\n def _apply_event_to_turn_context_activity(\n context: TurnContext, event_activity: Activity\n ):\n context.activity.type = event_activity.type\n context.activity.name = event_activity.name\n context.activity.value = event_activity.value\n context.activity.relates_to = event_activity.relates_to\n\n context.activity.reply_to_id = event_activity.reply_to_id\n context.activity.value = event_activity.value\n context.activity.entities = event_activity.entities\n context.activity.locale = event_activity.locale\n context.activity.local_timestamp = event_activity.local_timestamp\n context.activity.timestamp = event_activity.timestamp\n context.activity.channel_data = 
event_activity.channel_data\n context.activity.additional_properties = event_activity.additional_properties\n", "path": "libraries/botbuilder-core/botbuilder/core/skills/skill_handler.py"}]}
| 2,877 | 362 |
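A side note on the SkillHandler patch above: its core move is capturing the `ResourceResponse` produced inside the nested `async` callback (via `nonlocal`) and only generating a `uuid4` id when nothing was sent. Below is a minimal, self-contained sketch of that capture pattern; the fake adapter and the response dict are invented for illustration and are not part of the Bot Framework API.

```python
import asyncio
from uuid import uuid4


async def fake_continue_conversation(callback):
    # Stand-in for adapter.continue_conversation: it simply invokes the callback.
    await callback()


async def process_activity(send_something: bool) -> dict:
    resource_response = None  # filled in by the callback only if an activity is sent

    async def callback():
        nonlocal resource_response
        if send_something:
            resource_response = {"id": "id-returned-by-the-channel"}

    await fake_continue_conversation(callback)
    # Fall back to a generated id, mirroring the EndOfConversation/Event branches.
    return resource_response or {"id": str(uuid4())}


print(asyncio.run(process_activity(True)))   # captured channel response
print(asyncio.run(process_activity(False)))  # generated uuid fallback
```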
gh_patches_debug_35684
|
rasdani/github-patches
|
git_diff
|
ManageIQ__integration_tests-296
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better YAML overriding
Now it does not take just the root element into account, but crawls through the dictionary and only updates the values that are present in the new dictionary. It converts all dicts to Configs; values other than those specified in the override dict are not touched.
It also improves the `__getattribute__` behaviour - now it propagates the interface to the child nodes by converting all `dict` to `Config` before returning the value, so the dot operator can be used everywhere.
</issue>
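The deep-merge behaviour described above can be sketched independently of the Config/YAML machinery. A minimal version on plain dicts follows; the function name and the example data are illustrative only, not the project's API.

```python
def recursive_update(base: dict, override: dict) -> dict:
    """Merge `override` into `base` in place, recursing only into dict values."""
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            recursive_update(base[key], value)
        else:
            base[key] = value
    return base


original = {"something": {"somewhere": {"VM": {"a": 1, "b": 2, "name": "qwer", "c": 3}}}}
override = {"something": {"somewhere": {"VM": {"name": "tzui"}}}}
recursive_update(original, override)
assert original["something"]["somewhere"]["VM"] == {"a": 1, "b": 2, "name": "tzui", "c": 3}
```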
<code>
[start of utils/conf_loader.py]
1 import os
2 from collections import OrderedDict
3
4 import py.path
5 import yaml
6 from yaml.loader import Loader
7
8
9 class OrderedYamlLoader(Loader):
10 def construct_yaml_map(self, node):
11 data = OrderedDict()
12 yield data
13 value = self.construct_mapping(node)
14 data.update(value)
15
16
17 class ConfigNotFoundException(Exception):
18 pass
19
20
21 class Config(dict):
22 """A dict subclass with knowledge of conf yamls and how to load them
23
24 Also supports descriptor access, e.g. conf.configfile
25 (compared to the normal dict access, conf['configfile'])
26 """
27 # Stash the exception on the class for convenience, e.g.
28 # try:
29 # conf[does_not_exist]
30 # except conf.NotFoundException
31 # ...
32 NotFoundException = ConfigNotFoundException
33
34 # Support for descriptor access, e.g. instance.attrname
35 # Note that this is only on the get side, for support of nefarious things
36 # like setting and deleting, use the normal dict interface.
37 def __getattribute__(self, attr):
38 # Attempt normal object attr lookup; delegate to the dict interface if that fails
39 try:
40 return super(Config, self).__getattribute__(attr)
41 except AttributeError:
42 return self[attr]
43
44 def __getitem__(self, key):
45 # Attempt a normal dict lookup to pull a cached conf
46 try:
47 return super(Config, self).__getitem__(key)
48 except KeyError:
49 # Cache miss, load the requested yaml
50 yaml_dict = load_yaml(key)
51
52 # Graft in local yaml updates if they're available
53 try:
54 local_yaml = '%s.local' % key
55 local_yaml_dict = load_yaml(local_yaml)
56 yaml_dict.update(local_yaml_dict)
57 except ConfigNotFoundException:
58 pass
59
60 # Returning self[key] instead of yaml_dict as a small sanity check
61 self[key] = yaml_dict
62 return self[key]
63
64
65 def load_yaml(filename=None):
66 # Find the requested yaml in the config dir, relative to this file's location
67 # (aiming for cfme_tests/config)
68 this_file = os.path.abspath(__file__)
69 path = py.path.local(this_file).new(basename='../conf/%s.yaml' % filename)
70
71 if path.check():
72 with path.open() as config_fh:
73 return yaml.load(config_fh, Loader=OrderedYamlLoader)
74 else:
75 msg = 'Unable to load configuration file at %s' % path
76 raise ConfigNotFoundException(msg)
77
[end of utils/conf_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/utils/conf_loader.py b/utils/conf_loader.py
--- a/utils/conf_loader.py
+++ b/utils/conf_loader.py
@@ -1,17 +1,19 @@
import os
-from collections import OrderedDict
import py.path
import yaml
from yaml.loader import Loader
-class OrderedYamlLoader(Loader):
+class YamlConfigLoader(Loader):
+ # Override the root yaml node to be a RecursiveUpdateDict
def construct_yaml_map(self, node):
- data = OrderedDict()
+ data = RecursiveUpdateDict()
yield data
value = self.construct_mapping(node)
data.update(value)
+# Do the same for child nodes of the yaml mapping type
+YamlConfigLoader.add_constructor('tag:yaml.org,2002:map', YamlConfigLoader.construct_yaml_map)
class ConfigNotFoundException(Exception):
@@ -62,6 +64,43 @@
return self[key]
+class RecursiveUpdateDict(dict):
+ def update(self, new_data):
+ """ More intelligent dictionary update.
+
+ This method changes just data that have been changed. How does it work?
+ Imagine you want to change just VM name, other things should stay the same.
+
+ Original config:
+ something:
+ somewhere:
+ VM:
+ a: 1
+ b: 2
+ name: qwer
+ c: 3
+
+ Instead of copying the whole part from original to the override with just 'name' changed,
+ you will write this:
+
+ something:
+ somewhere:
+ VM:
+ name: tzui
+
+ This digging deeper affects only dictionary values. Lists are unaffected! And so do other
+ types.
+
+ Args:
+ new_data: Update data.
+ """
+ for key, value in new_data.iteritems():
+ if isinstance(value, type(self)) and key in self:
+ type(self).update(self[key], value)
+ else:
+ self[key] = new_data[key]
+
+
def load_yaml(filename=None):
# Find the requested yaml in the config dir, relative to this file's location
# (aiming for cfme_tests/config)
@@ -70,7 +109,7 @@
if path.check():
with path.open() as config_fh:
- return yaml.load(config_fh, Loader=OrderedYamlLoader)
+ return yaml.load(config_fh, Loader=YamlConfigLoader)
else:
msg = 'Unable to load configuration file at %s' % path
raise ConfigNotFoundException(msg)
|
{"golden_diff": "diff --git a/utils/conf_loader.py b/utils/conf_loader.py\n--- a/utils/conf_loader.py\n+++ b/utils/conf_loader.py\n@@ -1,17 +1,19 @@\n import os\n-from collections import OrderedDict\n \n import py.path\n import yaml\n from yaml.loader import Loader\n \n \n-class OrderedYamlLoader(Loader):\n+class YamlConfigLoader(Loader):\n+ # Override the root yaml node to be a RecursiveUpdateDict\n def construct_yaml_map(self, node):\n- data = OrderedDict()\n+ data = RecursiveUpdateDict()\n yield data\n value = self.construct_mapping(node)\n data.update(value)\n+# Do the same for child nodes of the yaml mapping type\n+YamlConfigLoader.add_constructor('tag:yaml.org,2002:map', YamlConfigLoader.construct_yaml_map)\n \n \n class ConfigNotFoundException(Exception):\n@@ -62,6 +64,43 @@\n return self[key]\n \n \n+class RecursiveUpdateDict(dict):\n+ def update(self, new_data):\n+ \"\"\" More intelligent dictionary update.\n+\n+ This method changes just data that have been changed. How does it work?\n+ Imagine you want to change just VM name, other things should stay the same.\n+\n+ Original config:\n+ something:\n+ somewhere:\n+ VM:\n+ a: 1\n+ b: 2\n+ name: qwer\n+ c: 3\n+\n+ Instead of copying the whole part from original to the override with just 'name' changed,\n+ you will write this:\n+\n+ something:\n+ somewhere:\n+ VM:\n+ name: tzui\n+\n+ This digging deeper affects only dictionary values. Lists are unaffected! And so do other\n+ types.\n+\n+ Args:\n+ new_data: Update data.\n+ \"\"\"\n+ for key, value in new_data.iteritems():\n+ if isinstance(value, type(self)) and key in self:\n+ type(self).update(self[key], value)\n+ else:\n+ self[key] = new_data[key]\n+\n+\n def load_yaml(filename=None):\n # Find the requested yaml in the config dir, relative to this file's location\n # (aiming for cfme_tests/config)\n@@ -70,7 +109,7 @@\n \n if path.check():\n with path.open() as config_fh:\n- return yaml.load(config_fh, Loader=OrderedYamlLoader)\n+ return yaml.load(config_fh, Loader=YamlConfigLoader)\n else:\n msg = 'Unable to load configuration file at %s' % path\n raise ConfigNotFoundException(msg)\n", "issue": "Better YAML overriding\nNow it does not take just the root element into the account, but it crawls throught the dictionary and only updates the values that are present in the new dictionary. It converts all dicts to Configs, other values than specified in override dict are not touched.\n\nIt also improves the `__getattribute__` behaviour - now it propagates the interface to the child nodes by converting all `dict` to `Config` before returning the value, so the dot operator can be used everywhere.\n\n", "before_files": [{"content": "import os\nfrom collections import OrderedDict\n\nimport py.path\nimport yaml\nfrom yaml.loader import Loader\n\n\nclass OrderedYamlLoader(Loader):\n def construct_yaml_map(self, node):\n data = OrderedDict()\n yield data\n value = self.construct_mapping(node)\n data.update(value)\n\n\nclass ConfigNotFoundException(Exception):\n pass\n\n\nclass Config(dict):\n \"\"\"A dict subclass with knowledge of conf yamls and how to load them\n\n Also supports descriptor access, e.g. conf.configfile\n (compared to the normal dict access, conf['configfile'])\n \"\"\"\n # Stash the exception on the class for convenience, e.g.\n # try:\n # conf[does_not_exist]\n # except conf.NotFoundException\n # ...\n NotFoundException = ConfigNotFoundException\n\n # Support for descriptor access, e.g. 
instance.attrname\n # Note that this is only on the get side, for support of nefarious things\n # like setting and deleting, use the normal dict interface.\n def __getattribute__(self, attr):\n # Attempt normal object attr lookup; delegate to the dict interface if that fails\n try:\n return super(Config, self).__getattribute__(attr)\n except AttributeError:\n return self[attr]\n\n def __getitem__(self, key):\n # Attempt a normal dict lookup to pull a cached conf\n try:\n return super(Config, self).__getitem__(key)\n except KeyError:\n # Cache miss, load the requested yaml\n yaml_dict = load_yaml(key)\n\n # Graft in local yaml updates if they're available\n try:\n local_yaml = '%s.local' % key\n local_yaml_dict = load_yaml(local_yaml)\n yaml_dict.update(local_yaml_dict)\n except ConfigNotFoundException:\n pass\n\n # Returning self[key] instead of yaml_dict as a small sanity check\n self[key] = yaml_dict\n return self[key]\n\n\ndef load_yaml(filename=None):\n # Find the requested yaml in the config dir, relative to this file's location\n # (aiming for cfme_tests/config)\n this_file = os.path.abspath(__file__)\n path = py.path.local(this_file).new(basename='../conf/%s.yaml' % filename)\n\n if path.check():\n with path.open() as config_fh:\n return yaml.load(config_fh, Loader=OrderedYamlLoader)\n else:\n msg = 'Unable to load configuration file at %s' % path\n raise ConfigNotFoundException(msg)\n", "path": "utils/conf_loader.py"}]}
| 1,317 | 563 |
gh_patches_debug_20213
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-1523
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[rllib] [docs] Document multi-agent support
We should document the new multi-agent support in rllib and have some examples in readthedocs. It would be good to cover the supported cases and which ones are not yet supported (or provide workarounds).
</issue>
<code>
[start of python/ray/rllib/examples/multiagent_pendulum_env.py]
1 from gym.spaces import Box, Tuple
2 from gym.utils import seeding
3 from gym.envs.classic_control.pendulum import PendulumEnv
4 import numpy as np
5
6 """
7 Multiagent pendulum that sums its torques to generate an action
8 """
9
10
11 class MultiAgentPendulumEnv(PendulumEnv):
12 metadata = {
13 'render.modes': ['human', 'rgb_array'],
14 'video.frames_per_second': 30
15 }
16
17 def __init__(self):
18 self.max_speed = 8
19 self.max_torque = 2.
20 self.dt = .05
21 self.viewer = None
22
23 high = np.array([1., 1., self.max_speed])
24 self.action_space = [Box(low=-self.max_torque / 2,
25 high=self.max_torque / 2, shape=(1,))
26 for _ in range(2)]
27 self.observation_space = Tuple(tuple(Box(low=-high, high=high)
28 for _ in range(2)))
29
30 self._seed()
31
32 def _seed(self, seed=None):
33 self.np_random, seed = seeding.np_random(seed)
34 return [seed]
35
36 def _step(self, u):
37 th, thdot = self.state # th := theta
38
39 summed_u = np.sum(u)
40 g = 10.
41 m = 1.
42 length = 1.
43 dt = self.dt
44
45 summed_u = np.clip(summed_u, -self.max_torque, self.max_torque)
46 self.last_u = summed_u # for rendering
47 costs = self.angle_normalize(th) ** 2 + .1 * thdot ** 2 + \
48 .001 * (summed_u ** 2)
49
50 newthdot = thdot + (-3 * g / (2 * length) * np.sin(th + np.pi) +
51 3. / (m * length ** 2) * summed_u) * dt
52 newth = th + newthdot * dt
53 newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
54
55 self.state = np.array([newth, newthdot])
56 return self._get_obs(), -costs, False, {}
57
58 def _reset(self):
59 high = np.array([np.pi, 1])
60 self.state = self.np_random.uniform(low=-high, high=high)
61 self.last_u = None
62 return self._get_obs()
63
64 def _get_obs(self):
65 theta, thetadot = self.state
66 return [np.array([np.cos(theta), np.sin(theta), thetadot])
67 for _ in range(2)]
68
69 def angle_normalize(self, x):
70 return (((x + np.pi) % (2 * np.pi)) - np.pi)
71
[end of python/ray/rllib/examples/multiagent_pendulum_env.py]
[start of python/ray/rllib/examples/multiagent_mountaincar_env.py]
1 import math
2 from gym.spaces import Box, Tuple, Discrete
3 import numpy as np
4 from gym.envs.classic_control.mountain_car import MountainCarEnv
5
6 """
7 Multiagent mountain car that sums and then
8 averages its actions to produce the velocity
9 """
10
11
12 class MultiAgentMountainCarEnv(MountainCarEnv):
13 def __init__(self):
14 self.min_position = -1.2
15 self.max_position = 0.6
16 self.max_speed = 0.07
17 self.goal_position = 0.5
18
19 self.low = np.array([self.min_position, -self.max_speed])
20 self.high = np.array([self.max_position, self.max_speed])
21
22 self.viewer = None
23
24 self.action_space = [Discrete(3) for _ in range(2)]
25 self.observation_space = Tuple(tuple(Box(self.low, self.high)
26 for _ in range(2)))
27
28 self._seed()
29 self.reset()
30
31 def _step(self, action):
32 summed_act = 0.5 * np.sum(action)
33
34 position, velocity = self.state
35 velocity += (summed_act - 1) * 0.001
36 velocity += math.cos(3 * position) * (-0.0025)
37 velocity = np.clip(velocity, -self.max_speed, self.max_speed)
38 position += velocity
39 position = np.clip(position, self.min_position, self.max_position)
40 if (position == self.min_position and velocity < 0):
41 velocity = 0
42
43 done = bool(position >= self.goal_position)
44
45 reward = position
46
47 self.state = (position, velocity)
48 return [np.array(self.state) for _ in range(2)], reward, done, {}
49
50 def _reset(self):
51 self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
52 return [np.array(self.state) for _ in range(2)]
53
[end of python/ray/rllib/examples/multiagent_mountaincar_env.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/ray/rllib/examples/multiagent_mountaincar_env.py b/python/ray/rllib/examples/multiagent_mountaincar_env.py
--- a/python/ray/rllib/examples/multiagent_mountaincar_env.py
+++ b/python/ray/rllib/examples/multiagent_mountaincar_env.py
@@ -22,8 +22,8 @@
self.viewer = None
self.action_space = [Discrete(3) for _ in range(2)]
- self.observation_space = Tuple(tuple(Box(self.low, self.high)
- for _ in range(2)))
+ self.observation_space = Tuple([
+ Box(self.low, self.high) for _ in range(2)])
self._seed()
self.reset()
diff --git a/python/ray/rllib/examples/multiagent_pendulum_env.py b/python/ray/rllib/examples/multiagent_pendulum_env.py
--- a/python/ray/rllib/examples/multiagent_pendulum_env.py
+++ b/python/ray/rllib/examples/multiagent_pendulum_env.py
@@ -24,8 +24,8 @@
self.action_space = [Box(low=-self.max_torque / 2,
high=self.max_torque / 2, shape=(1,))
for _ in range(2)]
- self.observation_space = Tuple(tuple(Box(low=-high, high=high)
- for _ in range(2)))
+ self.observation_space = Tuple([
+ Box(low=-high, high=high) for _ in range(2)])
self._seed()
|
{"golden_diff": "diff --git a/python/ray/rllib/examples/multiagent_mountaincar_env.py b/python/ray/rllib/examples/multiagent_mountaincar_env.py\n--- a/python/ray/rllib/examples/multiagent_mountaincar_env.py\n+++ b/python/ray/rllib/examples/multiagent_mountaincar_env.py\n@@ -22,8 +22,8 @@\n self.viewer = None\n \n self.action_space = [Discrete(3) for _ in range(2)]\n- self.observation_space = Tuple(tuple(Box(self.low, self.high)\n- for _ in range(2)))\n+ self.observation_space = Tuple([\n+ Box(self.low, self.high) for _ in range(2)])\n \n self._seed()\n self.reset()\ndiff --git a/python/ray/rllib/examples/multiagent_pendulum_env.py b/python/ray/rllib/examples/multiagent_pendulum_env.py\n--- a/python/ray/rllib/examples/multiagent_pendulum_env.py\n+++ b/python/ray/rllib/examples/multiagent_pendulum_env.py\n@@ -24,8 +24,8 @@\n self.action_space = [Box(low=-self.max_torque / 2,\n high=self.max_torque / 2, shape=(1,))\n for _ in range(2)]\n- self.observation_space = Tuple(tuple(Box(low=-high, high=high)\n- for _ in range(2)))\n+ self.observation_space = Tuple([\n+ Box(low=-high, high=high) for _ in range(2)])\n \n self._seed()\n", "issue": "[rllib] [docs] Document multi-agent support\nWe should document the new multi-agent support in rllib and have some examples in readthedocs. It would be good to cover the supported cases and which ones are not yet supported (or provide workarounds).\n", "before_files": [{"content": "from gym.spaces import Box, Tuple\nfrom gym.utils import seeding\nfrom gym.envs.classic_control.pendulum import PendulumEnv\nimport numpy as np\n\n\"\"\"\n Multiagent pendulum that sums its torques to generate an action\n\"\"\"\n\n\nclass MultiAgentPendulumEnv(PendulumEnv):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.max_speed = 8\n self.max_torque = 2.\n self.dt = .05\n self.viewer = None\n\n high = np.array([1., 1., self.max_speed])\n self.action_space = [Box(low=-self.max_torque / 2,\n high=self.max_torque / 2, shape=(1,))\n for _ in range(2)]\n self.observation_space = Tuple(tuple(Box(low=-high, high=high)\n for _ in range(2)))\n\n self._seed()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, u):\n th, thdot = self.state # th := theta\n\n summed_u = np.sum(u)\n g = 10.\n m = 1.\n length = 1.\n dt = self.dt\n\n summed_u = np.clip(summed_u, -self.max_torque, self.max_torque)\n self.last_u = summed_u # for rendering\n costs = self.angle_normalize(th) ** 2 + .1 * thdot ** 2 + \\\n .001 * (summed_u ** 2)\n\n newthdot = thdot + (-3 * g / (2 * length) * np.sin(th + np.pi) +\n 3. 
/ (m * length ** 2) * summed_u) * dt\n newth = th + newthdot * dt\n newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)\n\n self.state = np.array([newth, newthdot])\n return self._get_obs(), -costs, False, {}\n\n def _reset(self):\n high = np.array([np.pi, 1])\n self.state = self.np_random.uniform(low=-high, high=high)\n self.last_u = None\n return self._get_obs()\n\n def _get_obs(self):\n theta, thetadot = self.state\n return [np.array([np.cos(theta), np.sin(theta), thetadot])\n for _ in range(2)]\n\n def angle_normalize(self, x):\n return (((x + np.pi) % (2 * np.pi)) - np.pi)\n", "path": "python/ray/rllib/examples/multiagent_pendulum_env.py"}, {"content": "import math\nfrom gym.spaces import Box, Tuple, Discrete\nimport numpy as np\nfrom gym.envs.classic_control.mountain_car import MountainCarEnv\n\n\"\"\"\nMultiagent mountain car that sums and then\naverages its actions to produce the velocity\n\"\"\"\n\n\nclass MultiAgentMountainCarEnv(MountainCarEnv):\n def __init__(self):\n self.min_position = -1.2\n self.max_position = 0.6\n self.max_speed = 0.07\n self.goal_position = 0.5\n\n self.low = np.array([self.min_position, -self.max_speed])\n self.high = np.array([self.max_position, self.max_speed])\n\n self.viewer = None\n\n self.action_space = [Discrete(3) for _ in range(2)]\n self.observation_space = Tuple(tuple(Box(self.low, self.high)\n for _ in range(2)))\n\n self._seed()\n self.reset()\n\n def _step(self, action):\n summed_act = 0.5 * np.sum(action)\n\n position, velocity = self.state\n velocity += (summed_act - 1) * 0.001\n velocity += math.cos(3 * position) * (-0.0025)\n velocity = np.clip(velocity, -self.max_speed, self.max_speed)\n position += velocity\n position = np.clip(position, self.min_position, self.max_position)\n if (position == self.min_position and velocity < 0):\n velocity = 0\n\n done = bool(position >= self.goal_position)\n\n reward = position\n\n self.state = (position, velocity)\n return [np.array(self.state) for _ in range(2)], reward, done, {}\n\n def _reset(self):\n self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])\n return [np.array(self.state) for _ in range(2)]\n", "path": "python/ray/rllib/examples/multiagent_mountaincar_env.py"}]}
| 1,912 | 343 |
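For reference, the fix above only changes how the per-agent spaces are built: `gym.spaces.Tuple` receives a plain list of `Box` spaces instead of a `tuple(...)` wrapped around a generator. A small sketch of the resulting construction, assuming the same classic `gym` API these example environments already import:

```python
import numpy as np
from gym.spaces import Box, Discrete, Tuple

high = np.array([1.0, 1.0, 8.0])
# One observation Box per agent, passed to Tuple as a plain list, as in the patch.
observation_space = Tuple([Box(low=-high, high=high) for _ in range(2)])
action_space = [Discrete(3) for _ in range(2)]
print(observation_space, action_space)
```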
gh_patches_debug_10894
|
rasdani/github-patches
|
git_diff
|
obspy__obspy-2310
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
incorrect bitmask in libgcf
libgcf.py, line 107, masks the bottom 4 bits for the compression code:
` compression = compress & 0b00001111 # get compression code`
This should mask off only the bottom 3 bits (the 4th is allocated for something else in the near future):
` compression = compress & 0b00000111 # get compression code`
incorrect bitmask in libgcf
libgcf.py, line 107, masks the bottom 4 bits for the compression code:
` compression = compress & 0b00001111 # get compression code`
This should mask off only the bottom 3 bits (the 4th is allocated for something else in the near future):
` compression = compress & 0b00000111 # get compression code`
</issue>
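The difference only matters once the reserved 4th bit is ever set: the 4-bit mask would then be misread as a nonexistent compression code, while the 3-bit mask still recovers the right value. A quick illustration with a hypothetical compression byte (the value is made up; the masks are the ones quoted above, and the valid codes 1, 2 and 4 come from `COMPRESSION_D` in the file below):

```python
compress = 0b1010              # hypothetical: reserved bit set, 3-bit compression code = 2
print(compress & 0b00001111)   # 10 -> not a valid compression code (valid codes: 1, 2, 4)
print(compress & 0b00000111)   # 2  -> the intended compression code
```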
<code>
[start of obspy/io/gcf/libgcf.py]
1 # -*- coding: utf-8 -*-
2 # reads Guralp Compressed Format (GCF) Files
3 # By Ran Novitsky Nof @ BSL, 2016
4 # [email protected]
5 # Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)
6 # more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro
7 # last access: June, 2016
8 from __future__ import (absolute_import, division, print_function,
9 unicode_literals)
10 from future.builtins import * # NOQA
11
12 import numpy as np
13
14 from obspy import UTCDateTime
15
16 SPS_D = { # Table 3.1: special sample rates
17 157: 0.1,
18 161: 0.125,
19 162: 0.2,
20 164: 0.25,
21 167: 0.5,
22 171: 400,
23 174: 500,
24 176: 1000,
25 179: 2000,
26 181: 4000}
27 TIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator
28 171: 8.,
29 174: 2.,
30 176: 4.,
31 179: 8.,
32 181: 16.}
33 COMPRESSION_D = { # Table 3.2: format field to data type
34 1: '>i4',
35 2: '>i2',
36 4: '>i1'}
37
38
39 def is_gcf(f):
40 """
41 Test if file is GCF by reading at least 1 data block
42 """
43 header, data = read_data_block(f)
44
45
46 def decode36(data):
47 """
48 Converts an integer into a base36 string.
49 """
50 # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm
51 s = ''
52 while data:
53 imed = data % 36
54 if imed > 9:
55 c = chr(imed - 10 + ord('A'))
56 else:
57 c = chr(imed + ord('0'))
58 s = c + s
59 data = data // 36
60 return s
61
62
63 def decode_date_time(data):
64 """
65 Decode date and time field.
66
67 The date code is a 32 bit value specifying the start time of the block.
68 Bits 0-16 contain the number of seconds since midnight,
69 and bits 17-31 the number of days since 17th November 1989.
70 """
71 # prevent numpy array
72 days = int(data >> 17)
73 secs = int(data & 0x1FFFF)
74 starttime = UTCDateTime('1989-11-17') + days * 86400 + secs
75 return starttime
76
77
78 def read_data_block(f, headonly=False, channel_prefix="HH", **kwargs):
79 """
80 Read one data block from GCF file.
81
82 more details can be found here:
83 http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm
84 f - file object to read from
85 if skipData is True, Only header is returned.
86 if not a data block (SPS=0) - returns None.
87 """
88 # get ID
89 sysid = f.read(4)
90 if not sysid:
91 raise EOFError # got to EOF
92 sysid = np.frombuffer(sysid, count=1, dtype='>u4')
93 if sysid >> 31 & 0b1 > 0:
94 sysid = (sysid << 6) >> 6
95 sysid = decode36(sysid)
96 # get Stream ID
97 stid = np.frombuffer(f.read(4), count=1, dtype='>u4')
98 stid = decode36(stid)
99 # get Date & Time
100 data = np.frombuffer(f.read(4), count=1, dtype='>u4')
101 starttime = decode_date_time(data)
102 # get data format
103 # get reserved, SPS, data type compression,
104 # number of 32bit records (num_records)
105 reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,
106 dtype='>u1')
107 compression = compress & 0b00001111 # get compression code
108 t_offset = compress >> 4 # get time offset
109 if t_offset > 0:
110 starttime = starttime + t_offset / TIME_OFFSETS_D[sps]
111 if sps in SPS_D:
112 sps = SPS_D[sps] # get special SPS value if needed
113 if not sps:
114 f.seek(num_records * 4, 1) # skip if not a data block
115 if 1008 - num_records * 4 > 0:
116 # keep skipping to get 1008 record
117 f.seek(1008 - num_records * 4, 1)
118 return None
119 npts = num_records * compression # number of samples
120 header = {}
121 header['starttime'] = starttime
122 header['station'] = stid[:4]
123 header['channel'] = (channel_prefix[:2] + stid[4]).upper()
124 header['sampling_rate'] = float(sps)
125 header['npts'] = npts
126 if headonly:
127 f.seek(4 * (num_records + 2), 1) # skip data part (inc. FIC and RIC)
128 # skip to end of block if only partly filled with data
129 if 1000 - num_records * 4 > 0:
130 f.seek(1000 - num_records * 4, 1)
131 return header
132 else:
133 # get FIC
134 fic = np.frombuffer(f.read(4), count=1, dtype='>i4')
135 # get incremental data
136 data = np.frombuffer(f.read(4 * num_records), count=npts,
137 dtype=COMPRESSION_D[compression])
138 # construct time series
139 data = (fic + np.cumsum(data)).astype('i4')
140 # get RIC
141 ric = np.frombuffer(f.read(4), count=1, dtype='>i4')
142 # skip to end of block if only partly filled with data
143 if 1000 - num_records * 4 > 0:
144 f.seek(1000 - num_records * 4, 1)
145 # verify last data sample matches RIC
146 if not data[-1] == ric:
147 raise ValueError("Last sample mismatch with RIC")
148 return header, data
149
150
151 def read_header(f, **kwargs):
152 """
153 Reads header only from GCF file.
154 """
155 return read_data_block(f, headonly=True, **kwargs)
156
157
158 def read(f, **kwargs):
159 """
160 Reads header and data from GCF file.
161 """
162 return read_data_block(f, headonly=False, **kwargs)
163
[end of obspy/io/gcf/libgcf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/obspy/io/gcf/libgcf.py b/obspy/io/gcf/libgcf.py
--- a/obspy/io/gcf/libgcf.py
+++ b/obspy/io/gcf/libgcf.py
@@ -104,7 +104,7 @@
# number of 32bit records (num_records)
reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,
dtype='>u1')
- compression = compress & 0b00001111 # get compression code
+ compression = compress & 0b00000111 # get compression code
t_offset = compress >> 4 # get time offset
if t_offset > 0:
starttime = starttime + t_offset / TIME_OFFSETS_D[sps]
|
{"golden_diff": "diff --git a/obspy/io/gcf/libgcf.py b/obspy/io/gcf/libgcf.py\n--- a/obspy/io/gcf/libgcf.py\n+++ b/obspy/io/gcf/libgcf.py\n@@ -104,7 +104,7 @@\n # number of 32bit records (num_records)\n reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,\n dtype='>u1')\n- compression = compress & 0b00001111 # get compression code\n+ compression = compress & 0b00000111 # get compression code\n t_offset = compress >> 4 # get time offset\n if t_offset > 0:\n starttime = starttime + t_offset / TIME_OFFSETS_D[sps]\n", "issue": "incorrect bitmask in libgcf\nlibgcf.py, line 107, masks the bottom 4 bits for the compresssion code:\r\n` compression = compress & 0b00001111 # get compression code`\r\n\r\nThis should mask off only the bottom 3 bits (the 4th is allocated for something else in the near future):\r\n` compression = compress & 0b00000111 # get compression code`\r\n\nincorrect bitmask in libgcf\nlibgcf.py, line 107, masks the bottom 4 bits for the compresssion code:\r\n` compression = compress & 0b00001111 # get compression code`\r\n\r\nThis should mask off only the bottom 3 bits (the 4th is allocated for something else in the near future):\r\n` compression = compress & 0b00000111 # get compression code`\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# reads Guralp Compressed Format (GCF) Files\n# By Ran Novitsky Nof @ BSL, 2016\n# [email protected]\n# Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)\n# more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro\n# last access: June, 2016\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\n\nimport numpy as np\n\nfrom obspy import UTCDateTime\n\nSPS_D = { # Table 3.1: special sample rates\n 157: 0.1,\n 161: 0.125,\n 162: 0.2,\n 164: 0.25,\n 167: 0.5,\n 171: 400,\n 174: 500,\n 176: 1000,\n 179: 2000,\n 181: 4000}\nTIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator\n 171: 8.,\n 174: 2.,\n 176: 4.,\n 179: 8.,\n 181: 16.}\nCOMPRESSION_D = { # Table 3.2: format field to data type\n 1: '>i4',\n 2: '>i2',\n 4: '>i1'}\n\n\ndef is_gcf(f):\n \"\"\"\n Test if file is GCF by reading at least 1 data block\n \"\"\"\n header, data = read_data_block(f)\n\n\ndef decode36(data):\n \"\"\"\n Converts an integer into a base36 string.\n \"\"\"\n # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm\n s = ''\n while data:\n imed = data % 36\n if imed > 9:\n c = chr(imed - 10 + ord('A'))\n else:\n c = chr(imed + ord('0'))\n s = c + s\n data = data // 36\n return s\n\n\ndef decode_date_time(data):\n \"\"\"\n Decode date and time field.\n\n The date code is a 32 bit value specifying the start time of the block.\n Bits 0-16 contain the number of seconds since midnight,\n and bits 17-31 the number of days since 17th November 1989.\n \"\"\"\n # prevent numpy array\n days = int(data >> 17)\n secs = int(data & 0x1FFFF)\n starttime = UTCDateTime('1989-11-17') + days * 86400 + secs\n return starttime\n\n\ndef read_data_block(f, headonly=False, channel_prefix=\"HH\", **kwargs):\n \"\"\"\n Read one data block from GCF file.\n\n more details can be found here:\n http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm\n f - file object to read from\n if skipData is True, Only header is returned.\n if not a data block (SPS=0) - returns None.\n \"\"\"\n # get ID\n sysid = f.read(4)\n if not sysid:\n raise EOFError # got to EOF\n sysid = np.frombuffer(sysid, count=1, dtype='>u4')\n 
if sysid >> 31 & 0b1 > 0:\n sysid = (sysid << 6) >> 6\n sysid = decode36(sysid)\n # get Stream ID\n stid = np.frombuffer(f.read(4), count=1, dtype='>u4')\n stid = decode36(stid)\n # get Date & Time\n data = np.frombuffer(f.read(4), count=1, dtype='>u4')\n starttime = decode_date_time(data)\n # get data format\n # get reserved, SPS, data type compression,\n # number of 32bit records (num_records)\n reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,\n dtype='>u1')\n compression = compress & 0b00001111 # get compression code\n t_offset = compress >> 4 # get time offset\n if t_offset > 0:\n starttime = starttime + t_offset / TIME_OFFSETS_D[sps]\n if sps in SPS_D:\n sps = SPS_D[sps] # get special SPS value if needed\n if not sps:\n f.seek(num_records * 4, 1) # skip if not a data block\n if 1008 - num_records * 4 > 0:\n # keep skipping to get 1008 record\n f.seek(1008 - num_records * 4, 1)\n return None\n npts = num_records * compression # number of samples\n header = {}\n header['starttime'] = starttime\n header['station'] = stid[:4]\n header['channel'] = (channel_prefix[:2] + stid[4]).upper()\n header['sampling_rate'] = float(sps)\n header['npts'] = npts\n if headonly:\n f.seek(4 * (num_records + 2), 1) # skip data part (inc. FIC and RIC)\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n return header\n else:\n # get FIC\n fic = np.frombuffer(f.read(4), count=1, dtype='>i4')\n # get incremental data\n data = np.frombuffer(f.read(4 * num_records), count=npts,\n dtype=COMPRESSION_D[compression])\n # construct time series\n data = (fic + np.cumsum(data)).astype('i4')\n # get RIC\n ric = np.frombuffer(f.read(4), count=1, dtype='>i4')\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n # verify last data sample matches RIC\n if not data[-1] == ric:\n raise ValueError(\"Last sample mismatch with RIC\")\n return header, data\n\n\ndef read_header(f, **kwargs):\n \"\"\"\n Reads header only from GCF file.\n \"\"\"\n return read_data_block(f, headonly=True, **kwargs)\n\n\ndef read(f, **kwargs):\n \"\"\"\n Reads header and data from GCF file.\n \"\"\"\n return read_data_block(f, headonly=False, **kwargs)\n", "path": "obspy/io/gcf/libgcf.py"}]}
| 2,783 | 192 |
gh_patches_debug_2086
|
rasdani/github-patches
|
git_diff
|
google__timesketch-90
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing of JSON timelines creates duplicate timelines with same name.
Steps to reproduce
1) command line:
echo '[
{
"datetime": "2012-04-12T17:24:38-08:00",
"timestamp_desc": "Test",
"timestamp": 1334251478000000,
"message": "Test message"
}
]' > test_dupe.json
tsctl json2ts --name test_dupe --file test_dupe.json
tsctl json2ts --name test_dupe --file test_dupe.json
2) Create new sketch
3) Notice duplicate "test_dupe" timelines on the list to select from.
4) Add both
5) Explore, using "*" as filter.
6) Notice duplicate results.
</issue>
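The duplication in these steps comes from each `json2ts --name test_dupe` run inserting a new timeline row instead of reusing the existing one. As a generic illustration only (not Timesketch's actual fix; the model and field names are placeholders), a get-or-create lookup on the name is the usual way to avoid duplicate inserts with a SQLAlchemy-style session:

```python
def get_or_create(session, model, **filters):
    """Return the existing row matching `filters`, creating it only if absent."""
    instance = session.query(model).filter_by(**filters).first()
    if instance is None:
        instance = model(**filters)
        session.add(instance)
        session.commit()
    return instance


# e.g. get_or_create(db_session, Timeline, name="test_dupe") would reuse the first
# "test_dupe" timeline instead of inserting a second one with the same name.
```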
<code>
[start of wsgi.py]
1 #!/usr/bin/env python
2 # Copyright 2015 Google Inc. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """This module is for creating the app for a WSGI server.
16
17 Example with Gunicorn:
18 $ gunicorn -b 127.0.0.1:4000 --log-file - wsgi:application
19
20 Example configuration for Apache with mod_wsgi (a2enmod mod_wsgi):
21 <VirtualHost *:443>
22 ServerAdmin root@localhost
23 SSLEngine On
24 SSLCertificateFile /etc/apache2/cert.crt
25 SSLCertificateKeyFile /etc/apache2/cert.key
26 WSGIScriptAlias / /path/to/this/file/wsgi.py
27 </VirtualHost>
28 """
29
30 # If you installed Timesketch in a virtualenv you need to activate it.
31 # This needs to be before any imports in order to import from the virtualenv.
32 #activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'
33 #execfile(activate_virtualenv, dict(__file__=activate_virtualenv))
34
35 from timesketch import create_app
36 from timesketch.models import db_session
37
38 application = create_app()
39
40 # Remove the session after every request or app shutdown.
41 @application.teardown_appcontext
42 def shutdown_session(exception=None):
43 db_session.remove()
44
[end of wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wsgi.py b/wsgi.py
--- a/wsgi.py
+++ b/wsgi.py
@@ -37,7 +37,8 @@
application = create_app()
-# Remove the session after every request or app shutdown.
+# pylint: disable=unused-argument
@application.teardown_appcontext
def shutdown_session(exception=None):
+ """Remove the database session after every request or app shutdown."""
db_session.remove()
|
{"golden_diff": "diff --git a/wsgi.py b/wsgi.py\n--- a/wsgi.py\n+++ b/wsgi.py\n@@ -37,7 +37,8 @@\n \n application = create_app()\n \n-# Remove the session after every request or app shutdown.\n+# pylint: disable=unused-argument\n @application.teardown_appcontext\n def shutdown_session(exception=None):\n+ \"\"\"Remove the database session after every request or app shutdown.\"\"\"\n db_session.remove()\n", "issue": "Importing of JSON timelines creates duplicate timelines with same name.\nSteps to reproduce\n1) command line:\necho '[\n {\n \"datetime\": \"2012-04-12T17:24:38-08:00\",\n \"timestamp_desc\": \"Test\",\n \"timestamp\": 1334251478000000,\n \"message\": \"Test message\"\n }\n]' > test_dupe.json \ntsctl json2ts --name test_dupe --file test_dupe.json\ntsctl json2ts --name test_dupe --file test_dupe.json\n\n2) Create new sketch\n3) Notice duplicate \"test_dupe\" timelines on list to select from.\n4) Add both\n5) Explore, using \"*\" as filter.\n6) notice duplicate results.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is for creating the app for a WSGI server.\n\nExample with Gunicorn:\n$ gunicorn -b 127.0.0.1:4000 --log-file - wsgi:application\n\nExample configuration for Apache with mod_wsgi (a2enmod mod_wsgi):\n<VirtualHost *:443>\n ServerAdmin root@localhost\n SSLEngine On\n SSLCertificateFile /etc/apache2/cert.crt\n SSLCertificateKeyFile /etc/apache2/cert.key\n WSGIScriptAlias / /path/to/this/file/wsgi.py\n</VirtualHost>\n\"\"\"\n\n# If you installed Timesketch in a virtualenv you need to activate it.\n# This needs to be before any imports in order to import from the virtualenv.\n#activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'\n#execfile(activate_virtualenv, dict(__file__=activate_virtualenv))\n\nfrom timesketch import create_app\nfrom timesketch.models import db_session\n\napplication = create_app()\n\n# Remove the session after every request or app shutdown.\[email protected]_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n", "path": "wsgi.py"}]}
| 1,194 | 96 |
gh_patches_debug_57409
|
rasdani/github-patches
|
git_diff
|
kornia__kornia-1316
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing kornia causes `logging` to print to stderr?
### Describe the bug
I pip-installed the master version of kornia to access my latest PR and now my training scripts started to print all kinds of debug info. Could it be because importing kornia imports in turn `kornia.x.trainer` which has [this](https://github.com/kornia/kornia/blob/ed4eb7ab77218b021914f77cad426528a59bd780/kornia/x/trainer.py#L18) line? If so, how can I disable `x` when installing via `pip install git+https://github.com/kornia/kornia.git`?
### Reproduction steps
```bash
Import `kornia` in any script which uses `logging`.
```
### Expected behavior
Merely importing `kornia` should not toggle global settings of `logging`.
### Environment
```shell
PyTorch version: 1.9.0
Is debug build: False
CUDA used to build PyTorch: 11.1
ROCM used to build PyTorch: N/A
OS: Ubuntu 20.04.3 LTS (x86_64)
GCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
Clang version: Could not collect
CMake version: Could not collect
Libc version: glibc-2.31
Python version: 3.8.10 (default, Jun 4 2021, 15:09:15) [GCC 7.5.0] (64-bit runtime)
Python platform: Linux-5.11.0-1018-gcp-x86_64-with-glibc2.17
Is CUDA available: True
CUDA runtime version: Could not collect
GPU models and configuration:
GPU 0: Tesla V100-SXM2-16GB
GPU 1: Tesla V100-SXM2-16GB
GPU 2: Tesla V100-SXM2-16GB
GPU 3: Tesla V100-SXM2-16GB
GPU 4: Tesla V100-SXM2-16GB
GPU 5: Tesla V100-SXM2-16GB
GPU 6: Tesla V100-SXM2-16GB
GPU 7: Tesla V100-SXM2-16GB
Nvidia driver version: 470.57.02
cuDNN version: Could not collect
HIP runtime version: N/A
MIOpen runtime version: N/A
Versions of relevant libraries:
[pip3] mypy-extensions==0.4.3
[pip3] numpy==1.19.5
[pip3] pytorch-lightning==1.4.7
[pip3] torch==1.9.0
[pip3] torch-dimcheck==0.0.1
[pip3] torchaudio==0.9.0a0+33b2469
[pip3] torchmetrics==0.4.1
[pip3] torchvision==0.10.0
[conda] blas 1.0 mkl
[conda] cudatoolkit 11.1.74 h6bb024c_0 nvidia
[conda] ffmpeg 4.3 hf484d3e_0 pytorch
[conda] mkl 2021.3.0 h06a4308_520
[conda] mkl-service 2.4.0 py38h7f8727e_0
[conda] mkl_fft 1.3.0 py38h42c9631_2
[conda] mkl_random 1.2.2 py38h51133e4_0
[conda] mypy-extensions 0.4.3 pypi_0 pypi
[conda] numpy 1.19.5 pypi_0 pypi
[conda] pytorch 1.9.0 py3.8_cuda11.1_cudnn8.0.5_0 pytorch
[conda] pytorch-lightning 1.4.7 pypi_0 pypi
[conda] torchaudio 0.9.0 py38 pytorch
[conda] torchmetrics 0.4.1 pypi_0 pypi
[conda] torchvision 0.10.0 py38_cu111 pytorch
```
### Additional context
_No response_
</issue>
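A library-friendly alternative to the `logging.basicConfig` call flagged above is to keep the package's logger silent until the application opts in. The sketch below is illustrative and not taken from the kornia sources; the logger name `kornia.x` is only an assumption.

```python
import logging

# Library side: touch only the package logger, never the root logger.
logger = logging.getLogger("kornia.x")        # assumed name, for illustration
logger.addHandler(logging.NullHandler())      # stays silent unless the app configures logging


def train_step():
    # Log records pass through the package logger; the application decides
    # whether and where they are emitted.
    logger.info("running one training step")


if __name__ == "__main__":
    # Application side: enabling output is an explicit, app-level choice.
    logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
    train_step()
```

With this layout, importing the package would leave the host application's `logging` configuration untouched.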
<code>
[start of kornia/x/trainer.py]
1 import logging
2 from typing import Callable, Dict
3
4 import torch
5 import torch.nn as nn
6 from torch.utils.data import DataLoader
7
8 # the accelerator library is a requirement for the Trainer
9 # but it is optional for the general user of kornia.
10 try:
11 from accelerate import Accelerator
12 except ImportError:
13 Accelerator = None
14
15 from .metrics import AverageMeter
16 from .utils import Configuration, TrainerState
17
18 logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
19
20
21 callbacks_whitelist = [
22 "preprocess", "augmentations", "evaluate", "fit", "checkpoint", "terminate"
23 ]
24
25
26 class Trainer:
27 """Base class to train the different models in kornia.
28
29 .. warning::
30 The API is experimental and subject to be modified based on the needs of kornia models.
31
32 Args:
33 model: the nn.Module to be optimized.
34 train_dataloader: the data loader used in the training loop.
35 valid_dataloader: the data loader used in the validation loop.
36 criterion: the nn.Module with the function that computes the loss.
37 optimizer: the torch optimizer object to be used during the optimization.
38 scheduler: the torch scheduler object defining the scheduling strategy.
39 accelerator: the Accelerator object to distribute the training.
40 config: a TrainerConfiguration structure containing the experiment hyper parameters.
41 callbacks: a dictionary containing the pointers to the functions to overrides. The
42 main supported hooks are ``evaluate``, ``preprocess``, ``augmentations`` and ``fit``.
43
44 .. important::
45 The API heavily relies on `accelerate <https://github.com/huggingface/accelerate/>`_.
46 In order to use it, you must: ``pip install kornia[x]``
47
48 .. seealso::
49 Learn how to use the API in our documentation
50 `here <https://kornia.readthedocs.io/en/latest/get-started/training.html>`_.
51 """
52 def __init__(
53 self,
54 model: nn.Module,
55 train_dataloader: DataLoader,
56 valid_dataloader: DataLoader,
57 criterion: nn.Module,
58 optimizer: torch.optim.Optimizer,
59 scheduler: torch.optim.lr_scheduler.CosineAnnealingLR,
60 config: Configuration,
61 callbacks: Dict[str, Callable] = {},
62 ) -> None:
63 # setup the accelerator
64 if Accelerator is None:
65 raise ModuleNotFoundError(
66 "accelerate library is not installed: pip install kornia[x]")
67 self.accelerator = Accelerator()
68
69 # setup the data related objects
70 self.model = self.accelerator.prepare(model)
71 self.train_dataloader = self.accelerator.prepare(train_dataloader)
72 self.valid_dataloader = self.accelerator.prepare(valid_dataloader)
73 self.criterion = criterion.to(self.device)
74 self.optimizer = self.accelerator.prepare(optimizer)
75 self.scheduler = scheduler
76 self.config = config
77
78 # configure callbacks
79 for fn_name, fn in callbacks.items():
80 if fn_name not in callbacks_whitelist:
81 raise ValueError(f"Not supported: {fn_name}.")
82 setattr(self, fn_name, fn)
83
84 # hyper-params
85 self.num_epochs = config.num_epochs
86
87 self._logger = logging.getLogger('train')
88
89 @property
90 def device(self) -> torch.device:
91 return self.accelerator.device
92
93 def backward(self, loss: torch.Tensor) -> None:
94 self.accelerator.backward(loss)
95
96 def fit_epoch(self, epoch: int) -> None:
97 # train loop
98 self.model.train()
99 losses = AverageMeter()
100 for sample_id, sample in enumerate(self.train_dataloader):
101 source, target = sample # this might change with new pytorch dataset structure
102 self.optimizer.zero_grad()
103
104 # perform the preprocess and augmentations in batch
105 img = self.preprocess(source)
106 img = self.augmentations(img)
107 # make the actual inference
108 output = self.model(img)
109 loss = self.criterion(output, target)
110 self.backward(loss)
111 self.optimizer.step()
112
113 losses.update(loss.item(), img.shape[0])
114
115 if sample_id % 50 == 0:
116 self._logger.info(
117 f"Train: {epoch + 1}/{self.num_epochs} "
118 f"Sample: {sample_id + 1}/{len(self.train_dataloader)} "
119 f"Loss: {losses.val:.3f} {losses.avg:.3f}"
120 )
121
122 def fit(self,) -> None:
123 # execute the main loop
124 # NOTE: Do not change and keep this structure clear for readability.
125 for epoch in range(self.num_epochs):
126 # call internally the training loop
127 # NOTE: override to customize your evaluation routine
128 self.fit_epoch(epoch)
129
130 # call internally the evaluation loop
131 # NOTE: override to customize your evaluation routine
132 valid_stats = self.evaluate()
133
134 self.checkpoint(self.model, epoch, valid_stats)
135
136 state = self.terminate(self.model, epoch, valid_stats)
137 if state == TrainerState.TERMINATE:
138 break
139
140 # END OF THE EPOCH
141 self.scheduler.step()
142
143 ...
144
145 def evaluate(self):
146 ...
147
148 def preprocess(self, x):
149 return x
150
151 def augmentations(self, x):
152 return x
153
154 def checkpoint(self, *args, **kwargs):
155 ...
156
157 def terminate(self, *args, **kwargs):
158 ...
159
[end of kornia/x/trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kornia/x/trainer.py b/kornia/x/trainer.py
--- a/kornia/x/trainer.py
+++ b/kornia/x/trainer.py
@@ -15,9 +15,6 @@
from .metrics import AverageMeter
from .utils import Configuration, TrainerState
-logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
-
-
callbacks_whitelist = [
"preprocess", "augmentations", "evaluate", "fit", "checkpoint", "terminate"
]
|
{"golden_diff": "diff --git a/kornia/x/trainer.py b/kornia/x/trainer.py\n--- a/kornia/x/trainer.py\n+++ b/kornia/x/trainer.py\n@@ -15,9 +15,6 @@\n from .metrics import AverageMeter\n from .utils import Configuration, TrainerState\n \n-logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n-\n-\n callbacks_whitelist = [\n \"preprocess\", \"augmentations\", \"evaluate\", \"fit\", \"checkpoint\", \"terminate\"\n ]\n", "issue": "Importing kornia causes `logging` to print to stderr?\n### Describe the bug\n\nI pip-installed the master version of kornia to access my latest PR and now my training scripts started to print all kinds of debug info. Could it be because importing kornia imports in turn `kornia.x.trainer` which has [this](https://github.com/kornia/kornia/blob/ed4eb7ab77218b021914f77cad426528a59bd780/kornia/x/trainer.py#L18) line? If so, how can I disable `x` when installing via `pip install git+https://github.com/kornia/kornia.git`?\n\n### Reproduction steps\n\n```bash\nImport `kornia` in any script which uses `logging`.\n```\n\n\n### Expected behavior\n\nMerely importing `kornia` should not toggle global settings of `logging`.\n\n### Environment\n\n```shell\nPyTorch version: 1.9.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: 11.1\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.3 LTS (x86_64)\r\nGCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\nClang version: Could not collect\r\nCMake version: Could not collect\r\nLibc version: glibc-2.31\r\n\r\nPython version: 3.8.10 (default, Jun 4 2021, 15:09:15) [GCC 7.5.0] (64-bit runtime)\r\nPython platform: Linux-5.11.0-1018-gcp-x86_64-with-glibc2.17\r\nIs CUDA available: True\r\nCUDA runtime version: Could not collect\r\nGPU models and configuration: \r\nGPU 0: Tesla V100-SXM2-16GB\r\nGPU 1: Tesla V100-SXM2-16GB\r\nGPU 2: Tesla V100-SXM2-16GB\r\nGPU 3: Tesla V100-SXM2-16GB\r\nGPU 4: Tesla V100-SXM2-16GB\r\nGPU 5: Tesla V100-SXM2-16GB\r\nGPU 6: Tesla V100-SXM2-16GB\r\nGPU 7: Tesla V100-SXM2-16GB\r\n\r\nNvidia driver version: 470.57.02\r\ncuDNN version: Could not collect\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\n\r\nVersions of relevant libraries:\r\n[pip3] mypy-extensions==0.4.3\r\n[pip3] numpy==1.19.5\r\n[pip3] pytorch-lightning==1.4.7\r\n[pip3] torch==1.9.0\r\n[pip3] torch-dimcheck==0.0.1\r\n[pip3] torchaudio==0.9.0a0+33b2469\r\n[pip3] torchmetrics==0.4.1\r\n[pip3] torchvision==0.10.0\r\n[conda] blas 1.0 mkl \r\n[conda] cudatoolkit 11.1.74 h6bb024c_0 nvidia\r\n[conda] ffmpeg 4.3 hf484d3e_0 pytorch\r\n[conda] mkl 2021.3.0 h06a4308_520 \r\n[conda] mkl-service 2.4.0 py38h7f8727e_0 \r\n[conda] mkl_fft 1.3.0 py38h42c9631_2 \r\n[conda] mkl_random 1.2.2 py38h51133e4_0 \r\n[conda] mypy-extensions 0.4.3 pypi_0 pypi\r\n[conda] numpy 1.19.5 pypi_0 pypi\r\n[conda] pytorch 1.9.0 py3.8_cuda11.1_cudnn8.0.5_0 pytorch\r\n[conda] pytorch-lightning 1.4.7 pypi_0 pypi\r\n[conda] torchaudio 0.9.0 py38 pytorch\r\n[conda] torchmetrics 0.4.1 pypi_0 pypi\r\n[conda] torchvision 0.10.0 py38_cu111 pytorch\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import logging\nfrom typing import Callable, Dict\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\n# the accelerator library is a requirement for the Trainer\n# but it is optional for grousnd base user of kornia.\ntry:\n from accelerate import Accelerator\nexcept ImportError:\n Accelerator = None\n\nfrom .metrics import AverageMeter\nfrom .utils import Configuration, 
TrainerState\n\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)\n\n\ncallbacks_whitelist = [\n \"preprocess\", \"augmentations\", \"evaluate\", \"fit\", \"checkpoint\", \"terminate\"\n]\n\n\nclass Trainer:\n \"\"\"Base class to train the different models in kornia.\n\n .. warning::\n The API is experimental and subject to be modified based on the needs of kornia models.\n\n Args:\n model: the nn.Module to be optimized.\n train_dataloader: the data loader used in the training loop.\n valid_dataloader: the data loader used in the validation loop.\n criterion: the nn.Module with the function that computes the loss.\n optimizer: the torch optimizer object to be used during the optimization.\n scheduler: the torch scheduler object with defiing the scheduling strategy.\n accelerator: the Accelerator object to distribute the training.\n config: a TrainerConfiguration structure containing the experiment hyper parameters.\n callbacks: a dictionary containing the pointers to the functions to overrides. The\n main supported hooks are ``evaluate``, ``preprocess``, ``augmentations`` and ``fit``.\n\n .. important::\n The API heavily relies on `accelerate <https://github.com/huggingface/accelerate/>`_.\n In order to use it, you must: ``pip install kornia[x]``\n\n .. seealso::\n Learn how to use the API in our documentation\n `here <https://kornia.readthedocs.io/en/latest/get-started/training.html>`_.\n \"\"\"\n def __init__(\n self,\n model: nn.Module,\n train_dataloader: DataLoader,\n valid_dataloader: DataLoader,\n criterion: nn.Module,\n optimizer: torch.optim.Optimizer,\n scheduler: torch.optim.lr_scheduler.CosineAnnealingLR,\n config: Configuration,\n callbacks: Dict[str, Callable] = {},\n ) -> None:\n # setup the accelerator\n if Accelerator is None:\n raise ModuleNotFoundError(\n \"accelerate library is not installed: pip install kornia[x]\")\n self.accelerator = Accelerator()\n\n # setup the data related objects\n self.model = self.accelerator.prepare(model)\n self.train_dataloader = self.accelerator.prepare(train_dataloader)\n self.valid_dataloader = self.accelerator.prepare(valid_dataloader)\n self.criterion = criterion.to(self.device)\n self.optimizer = self.accelerator.prepare(optimizer)\n self.scheduler = scheduler\n self.config = config\n\n # configure callbacks\n for fn_name, fn in callbacks.items():\n if fn_name not in callbacks_whitelist:\n raise ValueError(f\"Not supported: {fn_name}.\")\n setattr(self, fn_name, fn)\n\n # hyper-params\n self.num_epochs = config.num_epochs\n\n self._logger = logging.getLogger('train')\n\n @property\n def device(self) -> torch.device:\n return self.accelerator.device\n\n def backward(self, loss: torch.Tensor) -> None:\n self.accelerator.backward(loss)\n\n def fit_epoch(self, epoch: int) -> None:\n # train loop\n self.model.train()\n losses = AverageMeter()\n for sample_id, sample in enumerate(self.train_dataloader):\n source, target = sample # this might change with new pytorch dataset structure\n self.optimizer.zero_grad()\n\n # perform the preprocess and augmentations in batch\n img = self.preprocess(source)\n img = self.augmentations(img)\n # make the actual inference\n output = self.model(img)\n loss = self.criterion(output, target)\n self.backward(loss)\n self.optimizer.step()\n\n losses.update(loss.item(), img.shape[0])\n\n if sample_id % 50 == 0:\n self._logger.info(\n f\"Train: {epoch + 1}/{self.num_epochs} \"\n f\"Sample: {sample_id + 1}/{len(self.train_dataloader)} \"\n f\"Loss: {losses.val:.3f} {losses.avg:.3f}\"\n 
)\n\n def fit(self,) -> None:\n # execute the main loop\n # NOTE: Do not change and keep this structure clear for readability.\n for epoch in range(self.num_epochs):\n # call internally the training loop\n # NOTE: override to customize your evaluation routine\n self.fit_epoch(epoch)\n\n # call internally the evaluation loop\n # NOTE: override to customize your evaluation routine\n valid_stats = self.evaluate()\n\n self.checkpoint(self.model, epoch, valid_stats)\n\n state = self.terminate(self.model, epoch, valid_stats)\n if state == TrainerState.TERMINATE:\n break\n\n # END OF THE EPOCH\n self.scheduler.step()\n\n ...\n\n def evaluate(self):\n ...\n\n def preprocess(self, x):\n return x\n\n def augmentations(self, x):\n return x\n\n def checkpoint(self, *args, **kwargs):\n ...\n\n def terminate(self, *args, **kwargs):\n ...\n", "path": "kornia/x/trainer.py"}]}
| 3,170 | 115 |
gh_patches_debug_39744
|
rasdani/github-patches
|
git_diff
|
Pycord-Development__pycord-1250
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BridgeOption not raising BadArgument when conversion fails
### Summary
BridgeOption does not raise the correct error when conversion fails for a built-in type.
### Reproduction Steps
Create a bridge command with an Option parameter of type `int` (other built-ins also work here).
Then use the prefixed command with an invalid value.
This will raise ValueError (in the case of int), whereas BadArgument should be raised instead.
### Minimal Reproducible Code
```python
@bot.bridge_command()
async def test(ctx, value: Option(int, name='value')):
await ctx.respond(str(value))
```
### Expected Results
BadArgument to be raised when using `-test a`
### Actual Results
ValueError is raised
```
Traceback (most recent call last):
File "site-packages\discord\ext\commands\converter.py", line 1071, in _actual_conversion
return await converter.convert(ctx, argument)
File "site-packages\discord\ext\bridge\core.py", line 161, in convert
converted = converter(argument)
ValueError: invalid literal for int() with base 10: 'a'
```
### Intents
default + message_content
### System Information
- Python v3.10.1-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b7
- aiohttp v3.8.1
- system info: Windows 10 10.0.19042
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
_No response_
</issue>
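For context, the usual way prefix-command converters surface bad input is to translate low-level exceptions into `BadArgument`, which the error-handling machinery understands. The converter below is a standalone sketch of that pattern, not the actual `BridgeOption` code; `IntOption` is a made-up name.

```python
from discord.ext.commands import BadArgument, Converter


class IntOption(Converter):
    """Illustrative converter that parses an int from the raw argument."""

    async def convert(self, ctx, argument):
        try:
            return int(argument)
        except ValueError as exc:
            # Re-raise as BadArgument so the framework reports a user error
            # instead of letting the raw ValueError propagate.
            raise BadArgument(f"{argument!r} is not a valid integer.") from exc
```

A command using such a converter would then go through the normal `on_command_error` path for inputs like `-test a`.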
<code>
[start of discord/ext/bridge/core.py]
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25 from typing import Union, Any
26
27 import discord.commands.options
28 from discord.commands import Option, SlashCommand
29 from discord.enums import SlashCommandOptionType
30
31 from ..commands import AutoShardedBot as ExtAutoShardedBot
32 from ..commands import BadArgument
33 from ..commands import Bot as ExtBot
34 from ..commands import (
35 Command,
36 Converter,
37 GuildChannelConverter,
38 RoleConverter,
39 UserConverter,
40 )
41
42 __all__ = ("BridgeCommand", "bridge_command", "BridgeExtCommand", "BridgeSlashCommand")
43
44 from ...utils import get
45
46
47 class BridgeSlashCommand(SlashCommand):
48 """
49 A subclass of :class:`.SlashCommand` that is used to implement bridge commands.
50 """
51 ...
52
53
54 class BridgeExtCommand(Command):
55 """
56 A subclass of :class:`.ext.commands.Command` that is used to implement bridge commands.
57 """
58 ...
59
60
61 class BridgeCommand:
62 def __init__(self, callback, **kwargs):
63 """
64 This is the base class for commands that are compatible with both traditional (prefix-based) commands and slash
65 commands.
66
67 Parameters
68 ----------
69 callback: Callable[[BridgeContext, ...], Awaitable[Any]]
70 The callback to invoke when the command is executed. The first argument will be a :class:`BridgeContext`,
71 and any additional arguments will be passed to the callback. This callback must be a coroutine.
72 kwargs: Optional[Dict[str, Any]]
73 Keyword arguments that are directly passed to the respective command constructors.
74 """
75 self.callback = callback
76 self.kwargs = kwargs
77
78 def get_ext_command(self):
79 """A method to get the ext.commands version of this command.
80
81 Returns
82 -------
83 :class:`BridgeExtCommand`
84 The respective traditional (prefix-based) version of the command.
85 """
86 command = BridgeExtCommand(self.callback, **self.kwargs)
87 return command
88
89 def get_application_command(self):
90 """A method to get the discord.commands version of this command.
91
92 Returns
93 -------
94 :class:`BridgeSlashCommand`
95 The respective slash command version of the command.
96 """
97 command = BridgeSlashCommand(self.callback, **self.kwargs)
98 return command
99
100 def add_to(self, bot: Union[ExtBot, ExtAutoShardedBot]) -> None:
101 """Adds the command to a bot.
102
103 Parameters
104 ----------
105 bot: Union[:class:`ExtBot`, :class:`ExtAutoShardedBot`]
106 The bot to add the command to.
107 """
108 bot.add_command(self.get_ext_command())
109 bot.add_application_command(self.get_application_command())
110
111
112 def bridge_command(**kwargs):
113 """A decorator that is used to wrap a function as a command.
114
115 Parameters
116 ----------
117 kwargs: Optional[Dict[str, Any]]
118 Keyword arguments that are directly passed to the respective command constructors.
119 """
120
121 def decorator(callback):
122 return BridgeCommand(callback, **kwargs)
123
124 return decorator
125
126
127 class MentionableConverter(Converter):
128 """A converter that can convert a mention to a user or a role."""
129
130 async def convert(self, ctx, argument):
131 try:
132 return await RoleConverter().convert(ctx, argument)
133 except BadArgument:
134 return await UserConverter().convert(ctx, argument)
135
136
137 def attachment_callback(*args): # pylint: disable=unused-argument
138 raise ValueError("Attachments are not supported for compatibility commands.")
139
140
141 class BridgeOption(Option, Converter):
142 async def convert(self, ctx, argument) -> Any:
143 if self.converter is not None:
144 converted = await self.converter.convert(ctx, argument)
145 else:
146 mapping = {
147 SlashCommandOptionType.string: str,
148 SlashCommandOptionType.integer: int,
149 SlashCommandOptionType.boolean: bool,
150 SlashCommandOptionType.user: UserConverter,
151 SlashCommandOptionType.channel: GuildChannelConverter,
152 SlashCommandOptionType.role: RoleConverter,
153 SlashCommandOptionType.mentionable: MentionableConverter,
154 SlashCommandOptionType.number: float,
155 SlashCommandOptionType.attachment: attachment_callback,
156 }
157 converter = mapping[self.input_type]
158 if issubclass(converter, Converter):
159 converted = await converter().convert(ctx, argument)
160 else:
161 converted = converter(argument)
162 if self.choices:
163 choices_names = [choice.name for choice in self.choices]
164 if converted in choices_names:
165 converted = get(self.choices, name=converted).value
166 else:
167 choices = [choice.value for choice in self.choices]
168 if converted not in choices:
169 print(self.choices)
170 raise ValueError(
171 f"{argument} is not a valid choice. Valid choices: {list(set(choices_names + choices))}"
172 )
173
174 return converted
175
176
177 discord.commands.options.Option = BridgeOption
178
[end of discord/ext/bridge/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/discord/ext/bridge/core.py b/discord/ext/bridge/core.py
--- a/discord/ext/bridge/core.py
+++ b/discord/ext/bridge/core.py
@@ -41,6 +41,8 @@
__all__ = ("BridgeCommand", "bridge_command", "BridgeExtCommand", "BridgeSlashCommand")
+from ..commands.converter import _convert_to_bool
+
from ...utils import get
@@ -140,38 +142,40 @@
class BridgeOption(Option, Converter):
async def convert(self, ctx, argument) -> Any:
- if self.converter is not None:
- converted = await self.converter.convert(ctx, argument)
- else:
- mapping = {
- SlashCommandOptionType.string: str,
- SlashCommandOptionType.integer: int,
- SlashCommandOptionType.boolean: bool,
- SlashCommandOptionType.user: UserConverter,
- SlashCommandOptionType.channel: GuildChannelConverter,
- SlashCommandOptionType.role: RoleConverter,
- SlashCommandOptionType.mentionable: MentionableConverter,
- SlashCommandOptionType.number: float,
- SlashCommandOptionType.attachment: attachment_callback,
- }
- converter = mapping[self.input_type]
- if issubclass(converter, Converter):
- converted = await converter().convert(ctx, argument)
- else:
- converted = converter(argument)
- if self.choices:
- choices_names = [choice.name for choice in self.choices]
- if converted in choices_names:
- converted = get(self.choices, name=converted).value
+ try:
+ if self.converter is not None:
+ converted = await self.converter.convert(ctx, argument)
else:
- choices = [choice.value for choice in self.choices]
- if converted not in choices:
- print(self.choices)
- raise ValueError(
- f"{argument} is not a valid choice. Valid choices: {list(set(choices_names + choices))}"
- )
-
- return converted
+ mapping = {
+ SlashCommandOptionType.string: str,
+ SlashCommandOptionType.integer: int,
+ SlashCommandOptionType.boolean: lambda val: _convert_to_bool(str(val)),
+ SlashCommandOptionType.user: UserConverter,
+ SlashCommandOptionType.channel: GuildChannelConverter,
+ SlashCommandOptionType.role: RoleConverter,
+ SlashCommandOptionType.mentionable: MentionableConverter,
+ SlashCommandOptionType.number: float,
+ SlashCommandOptionType.attachment: attachment_callback,
+ }
+ converter = mapping[self.input_type]
+ if issubclass(converter, Converter):
+ converted = await converter().convert(ctx, argument)
+ else:
+ converted = converter(argument)
+ if self.choices:
+ choices_names = [choice.name for choice in self.choices]
+ if converted in choices_names:
+ converted = get(self.choices, name=converted).value
+ else:
+ choices = [choice.value for choice in self.choices]
+ if converted not in choices:
+ raise ValueError(
+ f"{argument} is not a valid choice. Valid choices: {list(set(choices_names + choices))}"
+ )
+
+ return converted
+ except ValueError as exc:
+ raise BadArgument() from exc
discord.commands.options.Option = BridgeOption
|
{"golden_diff": "diff --git a/discord/ext/bridge/core.py b/discord/ext/bridge/core.py\n--- a/discord/ext/bridge/core.py\n+++ b/discord/ext/bridge/core.py\n@@ -41,6 +41,8 @@\n \n __all__ = (\"BridgeCommand\", \"bridge_command\", \"BridgeExtCommand\", \"BridgeSlashCommand\")\n \n+from ..commands.converter import _convert_to_bool\n+\n from ...utils import get\n \n \n@@ -140,38 +142,40 @@\n \n class BridgeOption(Option, Converter):\n async def convert(self, ctx, argument) -> Any:\n- if self.converter is not None:\n- converted = await self.converter.convert(ctx, argument)\n- else:\n- mapping = {\n- SlashCommandOptionType.string: str,\n- SlashCommandOptionType.integer: int,\n- SlashCommandOptionType.boolean: bool,\n- SlashCommandOptionType.user: UserConverter,\n- SlashCommandOptionType.channel: GuildChannelConverter,\n- SlashCommandOptionType.role: RoleConverter,\n- SlashCommandOptionType.mentionable: MentionableConverter,\n- SlashCommandOptionType.number: float,\n- SlashCommandOptionType.attachment: attachment_callback,\n- }\n- converter = mapping[self.input_type]\n- if issubclass(converter, Converter):\n- converted = await converter().convert(ctx, argument)\n- else:\n- converted = converter(argument)\n- if self.choices:\n- choices_names = [choice.name for choice in self.choices]\n- if converted in choices_names:\n- converted = get(self.choices, name=converted).value\n+ try:\n+ if self.converter is not None:\n+ converted = await self.converter.convert(ctx, argument)\n else:\n- choices = [choice.value for choice in self.choices]\n- if converted not in choices:\n- print(self.choices)\n- raise ValueError(\n- f\"{argument} is not a valid choice. Valid choices: {list(set(choices_names + choices))}\"\n- )\n-\n- return converted\n+ mapping = {\n+ SlashCommandOptionType.string: str,\n+ SlashCommandOptionType.integer: int,\n+ SlashCommandOptionType.boolean: lambda val: _convert_to_bool(str(val)),\n+ SlashCommandOptionType.user: UserConverter,\n+ SlashCommandOptionType.channel: GuildChannelConverter,\n+ SlashCommandOptionType.role: RoleConverter,\n+ SlashCommandOptionType.mentionable: MentionableConverter,\n+ SlashCommandOptionType.number: float,\n+ SlashCommandOptionType.attachment: attachment_callback,\n+ }\n+ converter = mapping[self.input_type]\n+ if issubclass(converter, Converter):\n+ converted = await converter().convert(ctx, argument)\n+ else:\n+ converted = converter(argument)\n+ if self.choices:\n+ choices_names = [choice.name for choice in self.choices]\n+ if converted in choices_names:\n+ converted = get(self.choices, name=converted).value\n+ else:\n+ choices = [choice.value for choice in self.choices]\n+ if converted not in choices:\n+ raise ValueError(\n+ f\"{argument} is not a valid choice. 
Valid choices: {list(set(choices_names + choices))}\"\n+ )\n+\n+ return converted\n+ except ValueError as exc:\n+ raise BadArgument() from exc\n \n \n discord.commands.options.Option = BridgeOption\n", "issue": "BridgeOption not raising BadArgument when conversion fails \n### Summary\n\nBridgeOption does not raise the correct error when conversion fails for a built-in type.\n\n### Reproduction Steps\n\nCreate a bridge command with an Option parameter of type `int` (other built-ins also work here).\r\nThen use the prefixed command with an invalid value.\r\nThis will raise ValueError (in the case of int), when BadArgument should be raised instead.\n\n### Minimal Reproducible Code\n\n```python\[email protected]_command()\r\nasync def test(ctx, value: Option(int, name='value')):\r\n await ctx.respond(str(value))\n```\n\n\n### Expected Results\n\nBadArgument to be raised when using `-test a`\n\n### Actual Results\n\nValueError is raised\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"site-packages\\discord\\ext\\commands\\converter.py\", line 1071, in _actual_conversion\r\n return await converter.convert(ctx, argument)\r\n File \"site-packages\\discord\\ext\\bridge\\core.py\", line 161, in convert\r\n converted = converter(argument)\r\nValueError: invalid literal for int() with base 10: 'a'\r\n```\n\n### Intents\n\ndefault + message_content\n\n### System Information\n\n- Python v3.10.1-final\r\n- py-cord v2.0.0-beta\r\n - py-cord pkg_resources: v2.0.0b7\r\n- aiohttp v3.8.1\r\n- system info: Windows 10 10.0.19042\n\n### Checklist\n\n- [X] I have searched the open issues for duplicates.\n- [X] I have shown the entire traceback, if possible.\n- [X] I have removed my token from display, if visible.\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom typing import Union, Any\n\nimport discord.commands.options\nfrom discord.commands import Option, SlashCommand\nfrom discord.enums import SlashCommandOptionType\n\nfrom ..commands import AutoShardedBot as ExtAutoShardedBot\nfrom ..commands import BadArgument\nfrom ..commands import Bot as ExtBot\nfrom ..commands import (\n Command,\n Converter,\n GuildChannelConverter,\n RoleConverter,\n UserConverter,\n)\n\n__all__ = (\"BridgeCommand\", \"bridge_command\", \"BridgeExtCommand\", \"BridgeSlashCommand\")\n\nfrom ...utils import get\n\n\nclass BridgeSlashCommand(SlashCommand):\n \"\"\"\n A subclass of :class:`.SlashCommand` that is used to implement bridge commands.\n \"\"\"\n ...\n\n\nclass BridgeExtCommand(Command):\n \"\"\"\n A subclass of :class:`.ext.commands.Command` that is used to implement bridge commands.\n \"\"\"\n ...\n\n\nclass BridgeCommand:\n def __init__(self, callback, **kwargs):\n \"\"\"\n This is the base class for commands that are compatible with both traditional (prefix-based) commands and slash\n commands.\n\n Parameters\n ----------\n callback: Callable[[BridgeContext, ...], Awaitable[Any]]\n The callback to invoke when the command is executed. The first argument will be a :class:`BridgeContext`,\n and any additional arguments will be passed to the callback. This callback must be a coroutine.\n kwargs: Optional[Dict[str, Any]]\n Keyword arguments that are directly passed to the respective command constructors.\n \"\"\"\n self.callback = callback\n self.kwargs = kwargs\n\n def get_ext_command(self):\n \"\"\"A method to get the ext.commands version of this command.\n\n Returns\n -------\n :class:`BridgeExtCommand`\n The respective traditional (prefix-based) version of the command.\n \"\"\"\n command = BridgeExtCommand(self.callback, **self.kwargs)\n return command\n\n def get_application_command(self):\n \"\"\"A method to get the discord.commands version of this command.\n\n Returns\n -------\n :class:`BridgeSlashCommand`\n The respective slash command version of the command.\n \"\"\"\n command = BridgeSlashCommand(self.callback, **self.kwargs)\n return command\n\n def add_to(self, bot: Union[ExtBot, ExtAutoShardedBot]) -> None:\n \"\"\"Adds the command to a bot.\n\n Parameters\n ----------\n bot: Union[:class:`ExtBot`, :class:`ExtAutoShardedBot`]\n The bot to add the command to.\n \"\"\"\n bot.add_command(self.get_ext_command())\n bot.add_application_command(self.get_application_command())\n\n\ndef bridge_command(**kwargs):\n \"\"\"A decorator that is used to wrap a function as a command.\n\n Parameters\n ----------\n kwargs: Optional[Dict[str, Any]]\n Keyword arguments that are directly passed to the respective command constructors.\n \"\"\"\n\n def decorator(callback):\n return BridgeCommand(callback, **kwargs)\n\n return decorator\n\n\nclass MentionableConverter(Converter):\n \"\"\"A converter that can convert a mention to a user or a role.\"\"\"\n\n async def convert(self, ctx, argument):\n try:\n return await RoleConverter().convert(ctx, argument)\n except BadArgument:\n return await UserConverter().convert(ctx, argument)\n\n\ndef attachment_callback(*args): # pylint: disable=unused-argument\n raise ValueError(\"Attachments are not supported for compatibility commands.\")\n\n\nclass 
BridgeOption(Option, Converter):\n async def convert(self, ctx, argument) -> Any:\n if self.converter is not None:\n converted = await self.converter.convert(ctx, argument)\n else:\n mapping = {\n SlashCommandOptionType.string: str,\n SlashCommandOptionType.integer: int,\n SlashCommandOptionType.boolean: bool,\n SlashCommandOptionType.user: UserConverter,\n SlashCommandOptionType.channel: GuildChannelConverter,\n SlashCommandOptionType.role: RoleConverter,\n SlashCommandOptionType.mentionable: MentionableConverter,\n SlashCommandOptionType.number: float,\n SlashCommandOptionType.attachment: attachment_callback,\n }\n converter = mapping[self.input_type]\n if issubclass(converter, Converter):\n converted = await converter().convert(ctx, argument)\n else:\n converted = converter(argument)\n if self.choices:\n choices_names = [choice.name for choice in self.choices]\n if converted in choices_names:\n converted = get(self.choices, name=converted).value\n else:\n choices = [choice.value for choice in self.choices]\n if converted not in choices:\n print(self.choices)\n raise ValueError(\n f\"{argument} is not a valid choice. Valid choices: {list(set(choices_names + choices))}\"\n )\n\n return converted\n\n\ndiscord.commands.options.Option = BridgeOption\n", "path": "discord/ext/bridge/core.py"}]}
| 2,596 | 729 |
gh_patches_debug_35392
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-3947
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The PDF option for `--post` should work with lowercase
Right now, to get a PDF out of nbconvert, you have to run
```
ipython nbconvert --to latex --post PDF foo.ipynb
```
Many users will try `pdf` instead and the error message is very confusing. We should just make it work with lowercase.
</issue>
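A forgiving lookup usually just normalises the user-supplied name before consulting a lowercase-keyed alias table. The snippet below sketches that idea using the alias paths quoted from the listing further down; it is not the nbconvert implementation itself.

```python
# Alias table keyed by lowercase names (paths as they appear in nbconvertapp.py).
POST_PROCESSOR_ALIASES = {
    'pdf': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',
    'serve': 'IPython.nbconvert.post_processors.serve.ServePostProcessor',
}


def resolve_post_processor(name):
    """Map --post PDF / pdf / Pdf to the same dotted class path."""
    key = name.lower()
    if key in POST_PROCESSOR_ALIASES:
        return POST_PROCESSOR_ALIASES[key]
    # Anything else is assumed to already be a dotted import path.
    return name


print(resolve_post_processor('PDF'))  # -> IPython.nbconvert.post_processors.pdf.PDFPostProcessor
```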
<code>
[start of IPython/nbconvert/nbconvertapp.py]
1 #!/usr/bin/env python
2 """NBConvert is a utility for conversion of .ipynb files.
3
4 Command-line interface for the NbConvert conversion utility.
5 """
6 #-----------------------------------------------------------------------------
7 #Copyright (c) 2013, the IPython Development Team.
8 #
9 #Distributed under the terms of the Modified BSD License.
10 #
11 #The full license is in the file COPYING.txt, distributed with this software.
12 #-----------------------------------------------------------------------------
13
14 #-----------------------------------------------------------------------------
15 #Imports
16 #-----------------------------------------------------------------------------
17
18 # Stdlib imports
19 from __future__ import print_function
20
21 import logging
22 import sys
23 import os
24 import glob
25
26 # From IPython
27 from IPython.core.application import BaseIPythonApplication, base_aliases, base_flags
28 from IPython.config import catch_config_error, Configurable
29 from IPython.utils.traitlets import (
30 Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum,
31 )
32 from IPython.utils.importstring import import_item
33 from IPython.utils.text import dedent
34
35 from .exporters.export import get_export_names, exporter_map
36 from IPython.nbconvert import exporters, transformers, writers, post_processors
37 from .utils.base import NbConvertBase
38 from .utils.exceptions import ConversionException
39
40 #-----------------------------------------------------------------------------
41 #Classes and functions
42 #-----------------------------------------------------------------------------
43
44 class DottedOrNone(DottedObjectName):
45 """
46 A string holding a valid dotted object name in Python, such as A.b3._c
47 Also allows for None type."""
48
49 default_value = u''
50
51 def validate(self, obj, value):
52 if value is not None and len(value) > 0:
53 return super(DottedOrNone, self).validate(obj, value)
54 else:
55 return value
56
57 nbconvert_aliases = {}
58 nbconvert_aliases.update(base_aliases)
59 nbconvert_aliases.update({
60 'to' : 'NbConvertApp.export_format',
61 'template' : 'Exporter.template_file',
62 'notebooks' : 'NbConvertApp.notebooks',
63 'writer' : 'NbConvertApp.writer_class',
64 'post': 'NbConvertApp.post_processor_class',
65 'output': 'NbConvertApp.output_base'
66 })
67
68 nbconvert_flags = {}
69 nbconvert_flags.update(base_flags)
70 nbconvert_flags.update({
71 'stdout' : (
72 {'NbConvertApp' : {'writer_class' : "StdoutWriter"}},
73 "Write notebook output to stdout instead of files."
74 )
75 })
76
77
78 class NbConvertApp(BaseIPythonApplication):
79 """Application used to convert to and from notebook file type (*.ipynb)"""
80
81 name = 'ipython-nbconvert'
82 aliases = nbconvert_aliases
83 flags = nbconvert_flags
84
85 def _log_level_default(self):
86 return logging.INFO
87
88 def _classes_default(self):
89 classes = [NbConvertBase]
90 for pkg in (exporters, transformers, writers):
91 for name in dir(pkg):
92 cls = getattr(pkg, name)
93 if isinstance(cls, type) and issubclass(cls, Configurable):
94 classes.append(cls)
95 return classes
96
97 description = Unicode(
98 u"""This application is used to convert notebook files (*.ipynb)
99 to various other formats.
100
101 WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.""")
102
103 output_base = Unicode('', config=True, help='''overwrite base name used for output files.
104 can only be used when converting one notebook at a time.
105 ''')
106
107 examples = Unicode(u"""
108 The simplest way to use nbconvert is
109
110 > ipython nbconvert mynotebook.ipynb
111
112 which will convert mynotebook.ipynb to the default format (probably HTML).
113
114 You can specify the export format with `--to`.
115 Options include {0}
116
117 > ipython nbconvert --to latex mynotebook.ipynb
118
119 Both HTML and LaTeX support multiple output templates. LaTeX includes
120 'basic', 'book', and 'article'. HTML includes 'basic' and 'full'. You
121 can specify the flavor of the format used.
122
123 > ipython nbconvert --to html --template basic mynotebook.ipynb
124
125 You can also pipe the output to stdout, rather than a file
126
127 > ipython nbconvert mynotebook.ipynb --stdout
128
129 A post-processor can be used to compile a PDF
130
131 > ipython nbconvert mynotebook.ipynb --to latex --post PDF
132
133 You can get (and serve) a Reveal.js-powered slideshow
134
135 > ipython nbconvert myslides.ipynb --to slides --post serve
136
137 Multiple notebooks can be given at the command line in a couple of
138 different ways:
139
140 > ipython nbconvert notebook*.ipynb
141 > ipython nbconvert notebook1.ipynb notebook2.ipynb
142
143 or you can specify the notebooks list in a config file, containing::
144
145 c.NbConvertApp.notebooks = ["my_notebook.ipynb"]
146
147 > ipython nbconvert --config mycfg.py
148 """.format(get_export_names()))
149
150 # Writer specific variables
151 writer = Instance('IPython.nbconvert.writers.base.WriterBase',
152 help="""Instance of the writer class used to write the
153 results of the conversion.""")
154 writer_class = DottedObjectName('FilesWriter', config=True,
155 help="""Writer class used to write the
156 results of the conversion""")
157 writer_aliases = {'FilesWriter': 'IPython.nbconvert.writers.files.FilesWriter',
158 'DebugWriter': 'IPython.nbconvert.writers.debug.DebugWriter',
159 'StdoutWriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}
160 writer_factory = Type()
161
162 def _writer_class_changed(self, name, old, new):
163 if new in self.writer_aliases:
164 new = self.writer_aliases[new]
165 self.writer_factory = import_item(new)
166
167 # Post-processor specific variables
168 post_processor = Instance('IPython.nbconvert.post_processors.base.PostProcessorBase',
169 help="""Instance of the PostProcessor class used to write the
170 results of the conversion.""")
171
172 post_processor_class = DottedOrNone(config=True,
173 help="""PostProcessor class used to write the
174 results of the conversion""")
175 post_processor_aliases = {'PDF': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',
176 'serve': 'IPython.nbconvert.post_processors.serve.ServePostProcessor'}
177 post_processor_factory = Type()
178
179 def _post_processor_class_changed(self, name, old, new):
180 if new in self.post_processor_aliases:
181 new = self.post_processor_aliases[new]
182 if new:
183 self.post_processor_factory = import_item(new)
184
185
186 # Other configurable variables
187 export_format = CaselessStrEnum(get_export_names(),
188 default_value="html",
189 config=True,
190 help="""The export format to be used."""
191 )
192
193 notebooks = List([], config=True, help="""List of notebooks to convert.
194 Wildcards are supported.
195 Filenames passed positionally will be added to the list.
196 """)
197
198 @catch_config_error
199 def initialize(self, argv=None):
200 super(NbConvertApp, self).initialize(argv)
201 self.init_syspath()
202 self.init_notebooks()
203 self.init_writer()
204 self.init_post_processor()
205
206
207
208 def init_syspath(self):
209 """
210 Add the cwd to the sys.path ($PYTHONPATH)
211 """
212 sys.path.insert(0, os.getcwd())
213
214
215 def init_notebooks(self):
216 """Construct the list of notebooks.
217 If notebooks are passed on the command-line,
218 they override notebooks specified in config files.
219 Glob each notebook to replace notebook patterns with filenames.
220 """
221
222 # Specifying notebooks on the command-line overrides (rather than adds)
223 # the notebook list
224 if self.extra_args:
225 patterns = self.extra_args
226 else:
227 patterns = self.notebooks
228
229 # Use glob to replace all the notebook patterns with filenames.
230 filenames = []
231 for pattern in patterns:
232
233 # Use glob to find matching filenames. Allow the user to convert
234 # notebooks without having to type the extension.
235 globbed_files = glob.glob(pattern)
236 globbed_files.extend(glob.glob(pattern + '.ipynb'))
237 if not globbed_files:
238 self.log.warn("pattern %r matched no files", pattern)
239
240 for filename in globbed_files:
241 if not filename in filenames:
242 filenames.append(filename)
243 self.notebooks = filenames
244
245 def init_writer(self):
246 """
247 Initialize the writer (which is stateless)
248 """
249 self._writer_class_changed(None, self.writer_class, self.writer_class)
250 self.writer = self.writer_factory(parent=self)
251
252 def init_post_processor(self):
253 """
254 Initialize the post_processor (which is stateless)
255 """
256 self._post_processor_class_changed(None, self.post_processor_class,
257 self.post_processor_class)
258 if self.post_processor_factory:
259 self.post_processor = self.post_processor_factory(parent=self)
260
261 def start(self):
262 """
263 Ran after initialization completed
264 """
265 super(NbConvertApp, self).start()
266 self.convert_notebooks()
267
268 def convert_notebooks(self):
269 """
270 Convert the notebooks in the self.notebook traitlet
271 """
272 # Export each notebook
273 conversion_success = 0
274
275 if self.output_base != '' and len(self.notebooks) > 1:
276 self.log.error(
277 """UsageError: --output flag or `NbConvertApp.output_base` config option
278 cannot be used when converting multiple notebooks.
279 """)
280 self.exit(1)
281
282 exporter = exporter_map[self.export_format](config=self.config)
283
284 for notebook_filename in self.notebooks:
285 self.log.info("Converting notebook %s to %s", notebook_filename, self.export_format)
286
287 # Get a unique key for the notebook and set it in the resources object.
288 basename = os.path.basename(notebook_filename)
289 notebook_name = basename[:basename.rfind('.')]
290 if self.output_base:
291 notebook_name = self.output_base
292 resources = {}
293 resources['unique_key'] = notebook_name
294 resources['output_files_dir'] = '%s_files' % notebook_name
295 self.log.info("Support files will be in %s", os.path.join(resources['output_files_dir'], ''))
296
297 # Try to export
298 try:
299 output, resources = exporter.from_filename(notebook_filename, resources=resources)
300 except ConversionException as e:
301 self.log.error("Error while converting '%s'", notebook_filename,
302 exc_info=True)
303 self.exit(1)
304 else:
305 write_resultes = self.writer.write(output, resources, notebook_name=notebook_name)
306
307 #Post-process if post processor has been defined.
308 if hasattr(self, 'post_processor') and self.post_processor:
309 self.post_processor(write_resultes)
310 conversion_success += 1
311
312 # If nothing was converted successfully, help the user.
313 if conversion_success == 0:
314 self.print_help()
315 sys.exit(-1)
316
317 #-----------------------------------------------------------------------------
318 # Main entry point
319 #-----------------------------------------------------------------------------
320
321 launch_new_instance = NbConvertApp.launch_instance
322
[end of IPython/nbconvert/nbconvertapp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/nbconvert/nbconvertapp.py b/IPython/nbconvert/nbconvertapp.py
--- a/IPython/nbconvert/nbconvertapp.py
+++ b/IPython/nbconvert/nbconvertapp.py
@@ -154,14 +154,14 @@
writer_class = DottedObjectName('FilesWriter', config=True,
help="""Writer class used to write the
results of the conversion""")
- writer_aliases = {'FilesWriter': 'IPython.nbconvert.writers.files.FilesWriter',
- 'DebugWriter': 'IPython.nbconvert.writers.debug.DebugWriter',
- 'StdoutWriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}
+ writer_aliases = {'fileswriter': 'IPython.nbconvert.writers.files.FilesWriter',
+ 'debugwriter': 'IPython.nbconvert.writers.debug.DebugWriter',
+ 'stdoutwriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}
writer_factory = Type()
def _writer_class_changed(self, name, old, new):
- if new in self.writer_aliases:
- new = self.writer_aliases[new]
+ if new.lower() in self.writer_aliases:
+ new = self.writer_aliases[new.lower()]
self.writer_factory = import_item(new)
# Post-processor specific variables
@@ -172,13 +172,13 @@
post_processor_class = DottedOrNone(config=True,
help="""PostProcessor class used to write the
results of the conversion""")
- post_processor_aliases = {'PDF': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',
+ post_processor_aliases = {'pdf': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',
'serve': 'IPython.nbconvert.post_processors.serve.ServePostProcessor'}
post_processor_factory = Type()
def _post_processor_class_changed(self, name, old, new):
- if new in self.post_processor_aliases:
- new = self.post_processor_aliases[new]
+ if new.lower() in self.post_processor_aliases:
+ new = self.post_processor_aliases[new.lower()]
if new:
self.post_processor_factory = import_item(new)
|
{"golden_diff": "diff --git a/IPython/nbconvert/nbconvertapp.py b/IPython/nbconvert/nbconvertapp.py\n--- a/IPython/nbconvert/nbconvertapp.py\n+++ b/IPython/nbconvert/nbconvertapp.py\n@@ -154,14 +154,14 @@\n writer_class = DottedObjectName('FilesWriter', config=True, \n help=\"\"\"Writer class used to write the \n results of the conversion\"\"\")\n- writer_aliases = {'FilesWriter': 'IPython.nbconvert.writers.files.FilesWriter',\n- 'DebugWriter': 'IPython.nbconvert.writers.debug.DebugWriter',\n- 'StdoutWriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}\n+ writer_aliases = {'fileswriter': 'IPython.nbconvert.writers.files.FilesWriter',\n+ 'debugwriter': 'IPython.nbconvert.writers.debug.DebugWriter',\n+ 'stdoutwriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}\n writer_factory = Type()\n \n def _writer_class_changed(self, name, old, new):\n- if new in self.writer_aliases:\n- new = self.writer_aliases[new]\n+ if new.lower() in self.writer_aliases:\n+ new = self.writer_aliases[new.lower()]\n self.writer_factory = import_item(new)\n \n # Post-processor specific variables\n@@ -172,13 +172,13 @@\n post_processor_class = DottedOrNone(config=True, \n help=\"\"\"PostProcessor class used to write the \n results of the conversion\"\"\")\n- post_processor_aliases = {'PDF': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',\n+ post_processor_aliases = {'pdf': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',\n 'serve': 'IPython.nbconvert.post_processors.serve.ServePostProcessor'}\n post_processor_factory = Type()\n \n def _post_processor_class_changed(self, name, old, new):\n- if new in self.post_processor_aliases:\n- new = self.post_processor_aliases[new]\n+ if new.lower() in self.post_processor_aliases:\n+ new = self.post_processor_aliases[new.lower()]\n if new:\n self.post_processor_factory = import_item(new)\n", "issue": "The PDF option for `--post` should work with lowercase \nRight now to get a PDF out of nbconvert you have to to\n\n```\nipython nbconvert --to latex --post PDF foo.ipynb\n```\n\nMany users will try `pdf` instead and the error message is very confusing. 
We should just make it work with lowercase.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"NBConvert is a utility for conversion of .ipynb files.\n\nCommand-line interface for the NbConvert conversion utility.\n\"\"\"\n#-----------------------------------------------------------------------------\n#Copyright (c) 2013, the IPython Development Team.\n#\n#Distributed under the terms of the Modified BSD License.\n#\n#The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n#Imports\n#-----------------------------------------------------------------------------\n\n# Stdlib imports\nfrom __future__ import print_function\n\nimport logging\nimport sys\nimport os\nimport glob\n\n# From IPython\nfrom IPython.core.application import BaseIPythonApplication, base_aliases, base_flags\nfrom IPython.config import catch_config_error, Configurable\nfrom IPython.utils.traitlets import (\n Unicode, List, Instance, DottedObjectName, Type, CaselessStrEnum,\n)\nfrom IPython.utils.importstring import import_item\nfrom IPython.utils.text import dedent\n\nfrom .exporters.export import get_export_names, exporter_map\nfrom IPython.nbconvert import exporters, transformers, writers, post_processors\nfrom .utils.base import NbConvertBase\nfrom .utils.exceptions import ConversionException\n\n#-----------------------------------------------------------------------------\n#Classes and functions\n#-----------------------------------------------------------------------------\n\nclass DottedOrNone(DottedObjectName):\n \"\"\"\n A string holding a valid dotted object name in Python, such as A.b3._c\n Also allows for None type.\"\"\"\n \n default_value = u''\n\n def validate(self, obj, value):\n if value is not None and len(value) > 0:\n return super(DottedOrNone, self).validate(obj, value)\n else:\n return value\n \nnbconvert_aliases = {}\nnbconvert_aliases.update(base_aliases)\nnbconvert_aliases.update({\n 'to' : 'NbConvertApp.export_format',\n 'template' : 'Exporter.template_file',\n 'notebooks' : 'NbConvertApp.notebooks',\n 'writer' : 'NbConvertApp.writer_class',\n 'post': 'NbConvertApp.post_processor_class',\n 'output': 'NbConvertApp.output_base'\n})\n\nnbconvert_flags = {}\nnbconvert_flags.update(base_flags)\nnbconvert_flags.update({\n 'stdout' : (\n {'NbConvertApp' : {'writer_class' : \"StdoutWriter\"}},\n \"Write notebook output to stdout instead of files.\"\n )\n})\n\n\nclass NbConvertApp(BaseIPythonApplication):\n \"\"\"Application used to convert to and from notebook file type (*.ipynb)\"\"\"\n\n name = 'ipython-nbconvert'\n aliases = nbconvert_aliases\n flags = nbconvert_flags\n \n def _log_level_default(self):\n return logging.INFO\n \n def _classes_default(self):\n classes = [NbConvertBase]\n for pkg in (exporters, transformers, writers):\n for name in dir(pkg):\n cls = getattr(pkg, name)\n if isinstance(cls, type) and issubclass(cls, Configurable):\n classes.append(cls)\n return classes\n\n description = Unicode(\n u\"\"\"This application is used to convert notebook files (*.ipynb)\n to various other formats.\n\n WARNING: THE COMMANDLINE INTERFACE MAY CHANGE IN FUTURE RELEASES.\"\"\")\n\n output_base = Unicode('', config=True, help='''overwrite base name use for output files.\n can only be use when converting one notebook at a time.\n ''')\n\n examples = Unicode(u\"\"\"\n The simplest way to use nbconvert is\n \n > 
ipython nbconvert mynotebook.ipynb\n \n which will convert mynotebook.ipynb to the default format (probably HTML).\n \n You can specify the export format with `--to`.\n Options include {0}\n \n > ipython nbconvert --to latex mynotebook.ipnynb\n\n Both HTML and LaTeX support multiple output templates. LaTeX includes\n 'basic', 'book', and 'article'. HTML includes 'basic' and 'full'. You \n can specify the flavor of the format used.\n\n > ipython nbconvert --to html --template basic mynotebook.ipynb\n \n You can also pipe the output to stdout, rather than a file\n \n > ipython nbconvert mynotebook.ipynb --stdout\n\n A post-processor can be used to compile a PDF\n\n > ipython nbconvert mynotebook.ipynb --to latex --post PDF\n \n You can get (and serve) a Reveal.js-powered slideshow\n \n > ipython nbconvert myslides.ipynb --to slides --post serve\n \n Multiple notebooks can be given at the command line in a couple of \n different ways:\n \n > ipython nbconvert notebook*.ipynb\n > ipython nbconvert notebook1.ipynb notebook2.ipynb\n \n or you can specify the notebooks list in a config file, containing::\n \n c.NbConvertApp.notebooks = [\"my_notebook.ipynb\"]\n \n > ipython nbconvert --config mycfg.py\n \"\"\".format(get_export_names()))\n\n # Writer specific variables\n writer = Instance('IPython.nbconvert.writers.base.WriterBase', \n help=\"\"\"Instance of the writer class used to write the \n results of the conversion.\"\"\")\n writer_class = DottedObjectName('FilesWriter', config=True, \n help=\"\"\"Writer class used to write the \n results of the conversion\"\"\")\n writer_aliases = {'FilesWriter': 'IPython.nbconvert.writers.files.FilesWriter',\n 'DebugWriter': 'IPython.nbconvert.writers.debug.DebugWriter',\n 'StdoutWriter': 'IPython.nbconvert.writers.stdout.StdoutWriter'}\n writer_factory = Type()\n\n def _writer_class_changed(self, name, old, new):\n if new in self.writer_aliases:\n new = self.writer_aliases[new]\n self.writer_factory = import_item(new)\n\n # Post-processor specific variables\n post_processor = Instance('IPython.nbconvert.post_processors.base.PostProcessorBase', \n help=\"\"\"Instance of the PostProcessor class used to write the \n results of the conversion.\"\"\")\n\n post_processor_class = DottedOrNone(config=True, \n help=\"\"\"PostProcessor class used to write the \n results of the conversion\"\"\")\n post_processor_aliases = {'PDF': 'IPython.nbconvert.post_processors.pdf.PDFPostProcessor',\n 'serve': 'IPython.nbconvert.post_processors.serve.ServePostProcessor'}\n post_processor_factory = Type()\n\n def _post_processor_class_changed(self, name, old, new):\n if new in self.post_processor_aliases:\n new = self.post_processor_aliases[new]\n if new:\n self.post_processor_factory = import_item(new)\n\n\n # Other configurable variables\n export_format = CaselessStrEnum(get_export_names(),\n default_value=\"html\",\n config=True,\n help=\"\"\"The export format to be used.\"\"\"\n )\n\n notebooks = List([], config=True, help=\"\"\"List of notebooks to convert.\n Wildcards are supported.\n Filenames passed positionally will be added to the list.\n \"\"\")\n\n @catch_config_error\n def initialize(self, argv=None):\n super(NbConvertApp, self).initialize(argv)\n self.init_syspath()\n self.init_notebooks()\n self.init_writer()\n self.init_post_processor()\n\n\n\n def init_syspath(self):\n \"\"\"\n Add the cwd to the sys.path ($PYTHONPATH)\n \"\"\"\n sys.path.insert(0, os.getcwd())\n \n\n def init_notebooks(self):\n \"\"\"Construct the list of notebooks.\n If notebooks are passed on 
the command-line,\n they override notebooks specified in config files.\n Glob each notebook to replace notebook patterns with filenames.\n \"\"\"\n\n # Specifying notebooks on the command-line overrides (rather than adds)\n # the notebook list\n if self.extra_args:\n patterns = self.extra_args\n else:\n patterns = self.notebooks\n\n # Use glob to replace all the notebook patterns with filenames.\n filenames = []\n for pattern in patterns:\n \n # Use glob to find matching filenames. Allow the user to convert \n # notebooks without having to type the extension.\n globbed_files = glob.glob(pattern)\n globbed_files.extend(glob.glob(pattern + '.ipynb'))\n if not globbed_files:\n self.log.warn(\"pattern %r matched no files\", pattern)\n\n for filename in globbed_files:\n if not filename in filenames:\n filenames.append(filename)\n self.notebooks = filenames\n\n def init_writer(self):\n \"\"\"\n Initialize the writer (which is stateless)\n \"\"\"\n self._writer_class_changed(None, self.writer_class, self.writer_class)\n self.writer = self.writer_factory(parent=self)\n\n def init_post_processor(self):\n \"\"\"\n Initialize the post_processor (which is stateless)\n \"\"\"\n self._post_processor_class_changed(None, self.post_processor_class, \n self.post_processor_class)\n if self.post_processor_factory:\n self.post_processor = self.post_processor_factory(parent=self)\n\n def start(self):\n \"\"\"\n Ran after initialization completed\n \"\"\"\n super(NbConvertApp, self).start()\n self.convert_notebooks()\n\n def convert_notebooks(self):\n \"\"\"\n Convert the notebooks in the self.notebook traitlet\n \"\"\"\n # Export each notebook\n conversion_success = 0\n\n if self.output_base != '' and len(self.notebooks) > 1:\n self.log.error(\n \"\"\"UsageError: --output flag or `NbConvertApp.output_base` config option\n cannot be used when converting multiple notebooks.\n \"\"\")\n self.exit(1)\n \n exporter = exporter_map[self.export_format](config=self.config)\n\n for notebook_filename in self.notebooks:\n self.log.info(\"Converting notebook %s to %s\", notebook_filename, self.export_format)\n\n # Get a unique key for the notebook and set it in the resources object.\n basename = os.path.basename(notebook_filename)\n notebook_name = basename[:basename.rfind('.')]\n if self.output_base:\n notebook_name = self.output_base\n resources = {}\n resources['unique_key'] = notebook_name\n resources['output_files_dir'] = '%s_files' % notebook_name\n self.log.info(\"Support files will be in %s\", os.path.join(resources['output_files_dir'], ''))\n\n # Try to export\n try:\n output, resources = exporter.from_filename(notebook_filename, resources=resources)\n except ConversionException as e:\n self.log.error(\"Error while converting '%s'\", notebook_filename,\n exc_info=True)\n self.exit(1)\n else:\n write_resultes = self.writer.write(output, resources, notebook_name=notebook_name)\n\n #Post-process if post processor has been defined.\n if hasattr(self, 'post_processor') and self.post_processor:\n self.post_processor(write_resultes)\n conversion_success += 1\n\n # If nothing was converted successfully, help the user.\n if conversion_success == 0:\n self.print_help()\n sys.exit(-1)\n \n#-----------------------------------------------------------------------------\n# Main entry point\n#-----------------------------------------------------------------------------\n\nlaunch_new_instance = NbConvertApp.launch_instance\n", "path": "IPython/nbconvert/nbconvertapp.py"}]}
| 3,853 | 474 |
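The diff in the row above fixes the lookup by storing the writer and post-processor aliases in lowercase and lower-casing the user-supplied name before the lookup. A minimal standalone sketch of that case-insensitive alias pattern, assuming illustrative names (`ALIASES`, `resolve_writer`) that are not part of the nbconvert codebase:

```python
# Case-insensitive alias resolution, mirroring the golden diff above:
# keys are stored lowercase and the lookup key is normalised the same way.
ALIASES = {
    "fileswriter": "IPython.nbconvert.writers.files.FilesWriter",
    "debugwriter": "IPython.nbconvert.writers.debug.DebugWriter",
    "stdoutwriter": "IPython.nbconvert.writers.stdout.StdoutWriter",
}

def resolve_writer(name: str) -> str:
    """Return the dotted import path for an alias, accepting any capitalisation."""
    return ALIASES.get(name.lower(), name)

# "FilesWriter", "fileswriter" and "FILESWRITER" all resolve to the same class path.
assert resolve_writer("FilesWriter") == resolve_writer("FILESWRITER")
```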
gh_patches_debug_16870
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-4088
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OSError: Failed to load return from the HEKClient.
When trying to search for the flares on 2014/10/24 to /25, I get a "Failed to Load Return" error.
Sunpy Version: 1.1.3
Here's a minimal reproducible example:
```
from sunpy.net import hek
client = hek.HEKClient()
tstart = '2014/10/24 20:50'
tend = '2014/10/25 00:14'
event_type = 'FL'
client.search(hek.attrs.Time(tstart,tend),hek.attrs.EventType(event_type))
```
```python
Traceback (most recent call last):
File "/home/user/anaconda3/envs/pytorch/lib/python3.8/site-packages/sunpy/net/hek/hek.py", line 69, in _download
result = json.load(fd)
File "/home/user/anaconda3/envs/pytorch/lib/python3.8/json/__init__.py", line 293, in load
return loads(fp.read(),
File "/home/user/anaconda3/envs/pytorch/lib/python3.8/json/__init__.py", line 343, in loads
s = s.decode(detect_encoding(s), 'surrogatepass')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc5 in position 33279: invalid continuation byte
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/home/user/anaconda3/envs/pytorch/lib/python3.8/site-packages/sunpy/net/hek/hek.py", line 99, in search
return self._download(ndata[0])
File "/home/user/anaconda3/envs/pytorch/lib/python3.8/site-packages/sunpy/net/hek/hek.py", line 71, in _download
raise IOError("Failed to load return from the HEKClient.") from e
OSError: Failed to load return from the HEKClient.
```
</issue>
<code>
[start of sunpy/net/hek/hek.py]
1 """
2 Facilities to interface with the Heliophysics Events Knowledgebase.
3 """
4
5 import json
6
7 import urllib
8 from itertools import chain
9
10 from astropy.table import Table, Row, Column
11 from astropy.time import Time
12
13 from sunpy.net import attr
14 from sunpy.util import dict_keys_same, unique
15 from sunpy.net.hek import attrs
16 import sunpy.net._attrs as core_attrs
17 from sunpy.util.xml import xml_to_dict
18
19
20 __all__ = ['HEKClient']
21
22 DEFAULT_URL = 'https://www.lmsal.com/hek/her?'
23
24
25 def _freeze(obj):
26 """ Create hashable representation of result dict. """
27 if isinstance(obj, dict):
28 return tuple((k, _freeze(v)) for k, v in obj.items())
29 if isinstance(obj, list):
30 return tuple(_freeze(elem) for elem in obj)
31 return obj
32
33
34 class HEKClient:
35 """ Client to interact with the Heliophysics Event Knowledgebase (HEK).
36 The HEK stores solar feature and event data generated by algorithms and
37 human observers."""
38 # FIXME: Expose fields in .attrs with the right types
39 # that is, not all StringParamWrapper!
40
41 default = {
42 'cosec': '2',
43 'cmd': 'search',
44 'type': 'column',
45 'event_type': '**',
46 }
47 # Default to full disk.
48 attrs.walker.apply(attrs.SpatialRegion(), {}, default)
49
50 def __init__(self, url=DEFAULT_URL):
51 self.url = url
52
53 def _download(self, data):
54 """ Download all data, even if paginated. """
55 page = 1
56 results = []
57
58 while True:
59 data['page'] = page
60 fd = urllib.request.urlopen(self.url+urllib.parse.urlencode(data))
61 try:
62 result = json.load(fd)
63 except Exception as e:
64 raise IOError("Failed to load return from the HEKClient.") from e
65 finally:
66 fd.close()
67 results.extend(result['result'])
68
69 if not result['overmax']:
70 if len(results) > 0:
71 return HEKTable(dict_keys_same(results))
72 else:
73 return HEKTable()
74
75 page += 1
76
77 def search(self, *query):
78 """ Retrieves information about HEK records matching the criteria
79 given in the query expression. If multiple arguments are passed,
80 they are connected with AND. The result of a query is a list of
81 unique HEK Response objects that fulfill the criteria."""
82 query = attr.and_(*query)
83
84 data = attrs.walker.create(query, {})
85 ndata = []
86 for elem in data:
87 new = self.default.copy()
88 new.update(elem)
89 ndata.append(new)
90
91 if len(ndata) == 1:
92 return self._download(ndata[0])
93 else:
94 return self._merge(self._download(data) for data in ndata)
95
96 def _merge(self, responses):
97 """ Merge responses, removing duplicates. """
98 return list(unique(chain.from_iterable(responses), _freeze))
99
100
101 class HEKTable(Table):
102 def __getitem__(self, item):
103 table_item = super().__getitem__(item)
104
105 if table_item.__class__ == Column:
106 table_item.__class__ = HEKColumn
107 elif table_item.__class__ == Row:
108 table_item.__class__ = HEKRow
109
110 return table_item
111
112
113 class HEKColumn(Column):
114 pass
115
116
117 class HEKRow(Row):
118 """
119 Handles the response from the HEK. Each HEKRow object is a subclass
120 of `astropy.Table.row`. The column-row key-value pairs correspond to the
121 HEK feature/event properties and their values, for that record from the
122 HEK. Each HEKRow object also has extra properties that relate HEK
123 concepts to VSO concepts.
124 """
125 @property
126 def vso_time(self):
127 return core_attrs.Time(
128 Time.strptime(self['event_starttime'], "%Y-%m-%dT%H:%M:%S"),
129 Time.strptime(self['event_endtime'], "%Y-%m-%dT%H:%M:%S")
130 )
131
132 @property
133 def vso_instrument(self):
134 if self['obs_instrument'] == 'HEK':
135 raise ValueError("No instrument contained.")
136 return core_attrs.Instrument(self['obs_instrument'])
137
138 @property
139 def vso_all(self):
140 return attr.and_(self.vso_time, self.vso_instrument)
141
142 def get_voevent(self, as_dict=True,
143 base_url="http://www.lmsal.com/hek/her?"):
144 """Retrieves the VOEvent object associated with a given event and
145 returns it as either a Python dictionary or an XML string."""
146
147 # Build URL
148 params = {
149 "cmd": "export-voevent",
150 "cosec": 1,
151 "ivorn": self['kb_archivid']
152 }
153 url = base_url + urllib.parse.urlencode(params)
154
155 # Query and read response
156 response = urllib.request.urlopen(url).read()
157
158 # Return a string or dict
159 if as_dict:
160 return xml_to_dict(response)
161 else:
162 return response
163
164 def get(self, key, default=None):
165 try:
166 return self[key]
167 except KeyError:
168 return default
169
[end of sunpy/net/hek/hek.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sunpy/net/hek/hek.py b/sunpy/net/hek/hek.py
--- a/sunpy/net/hek/hek.py
+++ b/sunpy/net/hek/hek.py
@@ -1,8 +1,8 @@
"""
Facilities to interface with the Heliophysics Events Knowledgebase.
"""
-
import json
+import codecs
import urllib
from itertools import chain
@@ -59,7 +59,8 @@
data['page'] = page
fd = urllib.request.urlopen(self.url+urllib.parse.urlencode(data))
try:
- result = json.load(fd)
+ result = codecs.decode(fd.read(), encoding='utf-8', errors='replace')
+ result = json.loads(result)
except Exception as e:
raise IOError("Failed to load return from the HEKClient.") from e
finally:
|
{"golden_diff": "diff --git a/sunpy/net/hek/hek.py b/sunpy/net/hek/hek.py\n--- a/sunpy/net/hek/hek.py\n+++ b/sunpy/net/hek/hek.py\n@@ -1,8 +1,8 @@\n \"\"\"\n Facilities to interface with the Heliophysics Events Knowledgebase.\n \"\"\"\n-\n import json\n+import codecs\n \n import urllib\n from itertools import chain\n@@ -59,7 +59,8 @@\n data['page'] = page\n fd = urllib.request.urlopen(self.url+urllib.parse.urlencode(data))\n try:\n- result = json.load(fd)\n+ result = codecs.decode(fd.read(), encoding='utf-8', errors='replace')\n+ result = json.loads(result)\n except Exception as e:\n raise IOError(\"Failed to load return from the HEKClient.\") from e\n finally:\n", "issue": "OSError: Failed to load return from the HEKClient.\nWhen trying to search for the flares on 2014/10/24 to /25, I get a \"Failed to Load Return\" error. \r\nSunpy Version: 1.1.3\r\n\r\nHere's a minimal reproducible example:\r\n\r\n```\r\nfrom sunpy.net import hek\r\nclient = hek.HEKClient()\r\ntstart = '2014/10/24 20:50'\r\ntend = '2014/10/25 00:14'\r\nevent_type = 'FL'\r\nclient.search(hek.attrs.Time(tstart,tend),hek.attrs.EventType(event_type))\r\n```\r\n\r\n\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/home/user/anaconda3/envs/pytorch/lib/python3.8/site-packages/sunpy/net/hek/hek.py\", line 69, in _download\r\n result = json.load(fd)\r\n File \"/home/user/anaconda3/envs/pytorch/lib/python3.8/json/__init__.py\", line 293, in load\r\n return loads(fp.read(),\r\n File \"/home/user/anaconda3/envs/pytorch/lib/python3.8/json/__init__.py\", line 343, in loads\r\n s = s.decode(detect_encoding(s), 'surrogatepass')\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xc5 in position 33279: invalid continuation byte\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"/home/user/anaconda3/envs/pytorch/lib/python3.8/site-packages/sunpy/net/hek/hek.py\", line 99, in search\r\n return self._download(ndata[0])\r\n File \"/home/user/anaconda3/envs/pytorch/lib/python3.8/site-packages/sunpy/net/hek/hek.py\", line 71, in _download\r\n raise IOError(\"Failed to load return from the HEKClient.\") from e\r\nOSError: Failed to load return from the HEKClient.\r\n```\n", "before_files": [{"content": "\"\"\"\nFacilities to interface with the Heliophysics Events Knowledgebase.\n\"\"\"\n\nimport json\n\nimport urllib\nfrom itertools import chain\n\nfrom astropy.table import Table, Row, Column\nfrom astropy.time import Time\n\nfrom sunpy.net import attr\nfrom sunpy.util import dict_keys_same, unique\nfrom sunpy.net.hek import attrs\nimport sunpy.net._attrs as core_attrs\nfrom sunpy.util.xml import xml_to_dict\n\n\n__all__ = ['HEKClient']\n\nDEFAULT_URL = 'https://www.lmsal.com/hek/her?'\n\n\ndef _freeze(obj):\n \"\"\" Create hashable representation of result dict. 
\"\"\"\n if isinstance(obj, dict):\n return tuple((k, _freeze(v)) for k, v in obj.items())\n if isinstance(obj, list):\n return tuple(_freeze(elem) for elem in obj)\n return obj\n\n\nclass HEKClient:\n \"\"\" Client to interact with the Heliophysics Event Knowledgebase (HEK).\n The HEK stores solar feature and event data generated by algorithms and\n human observers.\"\"\"\n # FIXME: Expose fields in .attrs with the right types\n # that is, not all StringParamWrapper!\n\n default = {\n 'cosec': '2',\n 'cmd': 'search',\n 'type': 'column',\n 'event_type': '**',\n }\n # Default to full disk.\n attrs.walker.apply(attrs.SpatialRegion(), {}, default)\n\n def __init__(self, url=DEFAULT_URL):\n self.url = url\n\n def _download(self, data):\n \"\"\" Download all data, even if paginated. \"\"\"\n page = 1\n results = []\n\n while True:\n data['page'] = page\n fd = urllib.request.urlopen(self.url+urllib.parse.urlencode(data))\n try:\n result = json.load(fd)\n except Exception as e:\n raise IOError(\"Failed to load return from the HEKClient.\") from e\n finally:\n fd.close()\n results.extend(result['result'])\n\n if not result['overmax']:\n if len(results) > 0:\n return HEKTable(dict_keys_same(results))\n else:\n return HEKTable()\n\n page += 1\n\n def search(self, *query):\n \"\"\" Retrieves information about HEK records matching the criteria\n given in the query expression. If multiple arguments are passed,\n they are connected with AND. The result of a query is a list of\n unique HEK Response objects that fulfill the criteria.\"\"\"\n query = attr.and_(*query)\n\n data = attrs.walker.create(query, {})\n ndata = []\n for elem in data:\n new = self.default.copy()\n new.update(elem)\n ndata.append(new)\n\n if len(ndata) == 1:\n return self._download(ndata[0])\n else:\n return self._merge(self._download(data) for data in ndata)\n\n def _merge(self, responses):\n \"\"\" Merge responses, removing duplicates. \"\"\"\n return list(unique(chain.from_iterable(responses), _freeze))\n\n\nclass HEKTable(Table):\n def __getitem__(self, item):\n table_item = super().__getitem__(item)\n\n if table_item.__class__ == Column:\n table_item.__class__ = HEKColumn\n elif table_item.__class__ == Row:\n table_item.__class__ = HEKRow\n\n return table_item\n\n\nclass HEKColumn(Column):\n pass\n\n\nclass HEKRow(Row):\n \"\"\"\n Handles the response from the HEK. Each HEKRow object is a subclass\n of `astropy.Table.row`. The column-row key-value pairs correspond to the\n HEK feature/event properties and their values, for that record from the\n HEK. 
Each HEKRow object also has extra properties that relate HEK\n concepts to VSO concepts.\n \"\"\"\n @property\n def vso_time(self):\n return core_attrs.Time(\n Time.strptime(self['event_starttime'], \"%Y-%m-%dT%H:%M:%S\"),\n Time.strptime(self['event_endtime'], \"%Y-%m-%dT%H:%M:%S\")\n )\n\n @property\n def vso_instrument(self):\n if self['obs_instrument'] == 'HEK':\n raise ValueError(\"No instrument contained.\")\n return core_attrs.Instrument(self['obs_instrument'])\n\n @property\n def vso_all(self):\n return attr.and_(self.vso_time, self.vso_instrument)\n\n def get_voevent(self, as_dict=True,\n base_url=\"http://www.lmsal.com/hek/her?\"):\n \"\"\"Retrieves the VOEvent object associated with a given event and\n returns it as either a Python dictionary or an XML string.\"\"\"\n\n # Build URL\n params = {\n \"cmd\": \"export-voevent\",\n \"cosec\": 1,\n \"ivorn\": self['kb_archivid']\n }\n url = base_url + urllib.parse.urlencode(params)\n\n # Query and read response\n response = urllib.request.urlopen(url).read()\n\n # Return a string or dict\n if as_dict:\n return xml_to_dict(response)\n else:\n return response\n\n def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default\n", "path": "sunpy/net/hek/hek.py"}]}
| 2,591 | 189 |
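The patch in the sunpy row above reads the raw HEK bytes and decodes them with `errors='replace'` before handing the text to the JSON parser, which is what prevents the `UnicodeDecodeError` in the traceback. A small sketch of that approach, assuming a hard-coded byte payload rather than a live HEK response:

```python
import codecs
import json

# Simulated HEK payload containing a byte sequence that is not valid UTF-8
# (a 0xc5 lead byte without a continuation byte, as in the reported traceback).
raw = b'{"result": [{"frm_name": "SWPC", "note": "bad byte -> \xc5Z"}], "overmax": false}'

# Parsing these bytes directly fails while decoding; decoding explicitly with
# errors="replace" substitutes U+FFFD for the invalid sequence so parsing succeeds.
text = codecs.decode(raw, encoding="utf-8", errors="replace")
data = json.loads(text)
print(data["result"][0]["note"])
```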
gh_patches_debug_11147
|
rasdani/github-patches
|
git_diff
|
dask__dask-10113
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Removal of dask.compatibility.entry_points has broken dask-kubernetes
It looks like `dask.compatibility.entry_points` was removed in #10070 without warning. This was being used in `dask-kubernetes` so CI is now failing.
https://github.com/dask/dask-kubernetes/actions/runs/4499027159/jobs/7916366189?pr=683
cc @graingert @jrbourbeau
</issue>
<code>
[start of dask/compatibility.py]
1 import sys
2
3 from packaging.version import parse as parse_version
4
5 _PY_VERSION = parse_version(".".join(map(str, sys.version_info[:3])))
6
7 _EMSCRIPTEN = sys.platform == "emscripten"
8
[end of dask/compatibility.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/dask/compatibility.py b/dask/compatibility.py
--- a/dask/compatibility.py
+++ b/dask/compatibility.py
@@ -1,7 +1,19 @@
import sys
+import warnings
+from importlib_metadata import entry_points as _entry_points
from packaging.version import parse as parse_version
_PY_VERSION = parse_version(".".join(map(str, sys.version_info[:3])))
_EMSCRIPTEN = sys.platform == "emscripten"
+
+
+def entry_points(group=None):
+ warnings.warn(
+ "`dask.compatibility.entry_points` has been replaced by `importlib_metadata.entry_points` and will be removed "
+ "in a future version. Please use `importlib_metadata.entry_points` instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _entry_points(group=group)
|
{"golden_diff": "diff --git a/dask/compatibility.py b/dask/compatibility.py\n--- a/dask/compatibility.py\n+++ b/dask/compatibility.py\n@@ -1,7 +1,19 @@\n import sys\n+import warnings\n \n+from importlib_metadata import entry_points as _entry_points\n from packaging.version import parse as parse_version\n \n _PY_VERSION = parse_version(\".\".join(map(str, sys.version_info[:3])))\n \n _EMSCRIPTEN = sys.platform == \"emscripten\"\n+\n+\n+def entry_points(group=None):\n+ warnings.warn(\n+ \"`dask.compatibility.entry_points` has been replaced by `importlib_metadata.entry_points` and will be removed \"\n+ \"in a future version. Please use `importlib_metadata.entry_points` instead.\",\n+ DeprecationWarning,\n+ stacklevel=2,\n+ )\n+ return _entry_points(group=group)\n", "issue": "Removal of dask.compatibility.entry_points has broken dask-kubernetes\nIt looks like `dask.compatibility.entry_points` was removed in #10070 without warning. This was being used in `dask-kubernetes` so CI is now failing.\r\n\r\nhttps://github.com/dask/dask-kubernetes/actions/runs/4499027159/jobs/7916366189?pr=683\r\n\r\ncc @graingert @jrbourbeau \n", "before_files": [{"content": "import sys\n\nfrom packaging.version import parse as parse_version\n\n_PY_VERSION = parse_version(\".\".join(map(str, sys.version_info[:3])))\n\n_EMSCRIPTEN = sys.platform == \"emscripten\"\n", "path": "dask/compatibility.py"}]}
| 703 | 194 |
gh_patches_debug_2563
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-297
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to launch the debugger
Getting the following error in master when debugging in VSC:
```
Could not connect to None: 60857
Traceback (most recent call last):
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py", line 1620, in main
debugger.connect(host, port)
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py", line 326, in connect
s = start_server(port)
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py", line 1766, in start_server
server = _create_server(port)
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py", line 1701, in _create_server
server.bind(('127.0.0.1', port))
OSError: [Errno 48] Address already in u
```
</issue>
<code>
[start of ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 from ptvsd.__main__ import run_module, run_file
6
7
8 __author__ = "Microsoft Corporation <[email protected]>"
9 __version__ = "4.0.0a5"
10
11 # TODO: not needed?
12 DONT_DEBUG = []
13
14
15 def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):
16 # TODO: docstring
17 address = (None, port_num)
18 if run_as == 'module':
19 run_module(address, filename, **kwargs)
20 else:
21 run_file(address, filename, **kwargs)
22
[end of ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py
--- a/ptvsd/debugger.py
+++ b/ptvsd/debugger.py
@@ -14,7 +14,7 @@
def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):
# TODO: docstring
- address = (None, port_num)
+ address = ('localhost', port_num)
if run_as == 'module':
run_module(address, filename, **kwargs)
else:
|
{"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -14,7 +14,7 @@\n \n def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):\n # TODO: docstring\n- address = (None, port_num)\n+ address = ('localhost', port_num)\n if run_as == 'module':\n run_module(address, filename, **kwargs)\n else:\n", "issue": "Unable to launch the debugger\nGetting the following error in master when debugging in VSC:\r\n```\r\nCould not connect to None: 60857\r\nTraceback (most recent call last):\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py\", line 1620, in main\r\n debugger.connect(host, port)\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py\", line 326, in connect\r\n s = start_server(port)\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py\", line 1766, in start_server\r\n server = _create_server(port)\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py\", line 1701, in _create_server\r\n server.bind(('127.0.0.1', port))\r\nOSError: [Errno 48] Address already in u\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom ptvsd.__main__ import run_module, run_file\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a5\"\n\n# TODO: not needed?\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n run_module(address, filename, **kwargs)\n else:\n run_file(address, filename, **kwargs)\n", "path": "ptvsd/debugger.py"}]}
| 983 | 120 |
gh_patches_debug_31566
|
rasdani/github-patches
|
git_diff
|
getsentry__sentry-python-141
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log more extra data for Celery
The old integration in celery used to log arguments to the task and more. Add that to our celery integration
</issue>
<code>
[start of sentry_sdk/integrations/celery.py]
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.signals import task_failure, task_prerun, task_postrun
6 from celery.exceptions import SoftTimeLimitExceeded
7
8 from sentry_sdk.hub import Hub
9 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
10 from sentry_sdk.integrations import Integration
11 from sentry_sdk.integrations.logging import ignore_logger
12
13
14 class CeleryIntegration(Integration):
15 identifier = "celery"
16
17 @staticmethod
18 def setup_once():
19 task_prerun.connect(_handle_task_prerun, weak=False)
20 task_postrun.connect(_handle_task_postrun, weak=False)
21 task_failure.connect(_process_failure_signal, weak=False)
22
23 # This logger logs every status of every task that ran on the worker.
24 # Meaning that every task's breadcrumbs are full of stuff like "Task
25 # <foo> raised unexpected <bar>".
26 ignore_logger("celery.worker.job")
27
28
29 def _process_failure_signal(sender, task_id, einfo, **kw):
30 # einfo from celery is not reliable
31 exc_info = sys.exc_info()
32
33 hub = Hub.current
34 integration = hub.get_integration(CeleryIntegration)
35 if integration is None:
36 return
37
38 if hasattr(sender, "throws") and isinstance(einfo.exception, sender.throws):
39 return
40
41 if isinstance(einfo.exception, SoftTimeLimitExceeded):
42 # TODO: Move this into event processor
43 with hub.push_scope() as scope:
44 scope.fingerprint = [
45 "celery",
46 "SoftTimeLimitExceeded",
47 getattr(sender, "name", sender),
48 ]
49 _capture_event(hub, exc_info)
50 else:
51 _capture_event(hub, exc_info)
52
53
54 def _handle_task_prerun(sender, task, **kw):
55 hub = Hub.current
56 if hub.get_integration(CeleryIntegration) is not None:
57 scope = hub.push_scope().__enter__()
58 with capture_internal_exceptions():
59 scope.transaction = task.name
60
61
62 def _handle_task_postrun(sender, task_id, task, **kw):
63 hub = Hub.current
64 if hub.get_integration(CeleryIntegration) is not None:
65 hub.pop_scope_unsafe()
66
67
68 def _capture_event(hub, exc_info):
69 event, hint = event_from_exception(
70 exc_info,
71 client_options=hub.client.options,
72 mechanism={"type": "celery", "handled": False},
73 )
74 hub.capture_event(event, hint=hint)
75
[end of sentry_sdk/integrations/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -35,28 +35,48 @@
if integration is None:
return
- if hasattr(sender, "throws") and isinstance(einfo.exception, sender.throws):
- return
-
- if isinstance(einfo.exception, SoftTimeLimitExceeded):
- # TODO: Move this into event processor
- with hub.push_scope() as scope:
- scope.fingerprint = [
- "celery",
- "SoftTimeLimitExceeded",
- getattr(sender, "name", sender),
- ]
- _capture_event(hub, exc_info)
- else:
- _capture_event(hub, exc_info)
+ _capture_event(hub, exc_info)
-def _handle_task_prerun(sender, task, **kw):
+def _handle_task_prerun(sender, task, args, kwargs, **_):
hub = Hub.current
if hub.get_integration(CeleryIntegration) is not None:
scope = hub.push_scope().__enter__()
+ scope.add_event_processor(_make_event_processor(args, kwargs, task))
+
+
+def _make_event_processor(args, kwargs, task):
+ def event_processor(event, hint):
+ with capture_internal_exceptions():
+ if "transaction" not in event:
+ event["transaction"] = task.name
+
with capture_internal_exceptions():
- scope.transaction = task.name
+ extra = event.setdefault("extra", {})
+ extra["celery-job"] = {
+ "task_name": task.name,
+ "args": args,
+ "kwargs": kwargs,
+ }
+
+ if "exc_info" in hint:
+ with capture_internal_exceptions():
+ if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
+ event["fingerprint"] = [
+ "celery",
+ "SoftTimeLimitExceeded",
+ getattr(task, "name", task),
+ ]
+
+ with capture_internal_exceptions():
+ if hasattr(task, "throws") and isinstance(
+ hint["exc_info"][1], task.throws
+ ):
+ return None
+
+ return event
+
+ return event_processor
def _handle_task_postrun(sender, task_id, task, **kw):
|
{"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -35,28 +35,48 @@\n if integration is None:\n return\n \n- if hasattr(sender, \"throws\") and isinstance(einfo.exception, sender.throws):\n- return\n-\n- if isinstance(einfo.exception, SoftTimeLimitExceeded):\n- # TODO: Move this into event processor\n- with hub.push_scope() as scope:\n- scope.fingerprint = [\n- \"celery\",\n- \"SoftTimeLimitExceeded\",\n- getattr(sender, \"name\", sender),\n- ]\n- _capture_event(hub, exc_info)\n- else:\n- _capture_event(hub, exc_info)\n+ _capture_event(hub, exc_info)\n \n \n-def _handle_task_prerun(sender, task, **kw):\n+def _handle_task_prerun(sender, task, args, kwargs, **_):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n scope = hub.push_scope().__enter__()\n+ scope.add_event_processor(_make_event_processor(args, kwargs, task))\n+\n+\n+def _make_event_processor(args, kwargs, task):\n+ def event_processor(event, hint):\n+ with capture_internal_exceptions():\n+ if \"transaction\" not in event:\n+ event[\"transaction\"] = task.name\n+\n with capture_internal_exceptions():\n- scope.transaction = task.name\n+ extra = event.setdefault(\"extra\", {})\n+ extra[\"celery-job\"] = {\n+ \"task_name\": task.name,\n+ \"args\": args,\n+ \"kwargs\": kwargs,\n+ }\n+\n+ if \"exc_info\" in hint:\n+ with capture_internal_exceptions():\n+ if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n+ event[\"fingerprint\"] = [\n+ \"celery\",\n+ \"SoftTimeLimitExceeded\",\n+ getattr(task, \"name\", task),\n+ ]\n+\n+ with capture_internal_exceptions():\n+ if hasattr(task, \"throws\") and isinstance(\n+ hint[\"exc_info\"][1], task.throws\n+ ):\n+ return None\n+\n+ return event\n+\n+ return event_processor\n \n \n def _handle_task_postrun(sender, task_id, task, **kw):\n", "issue": "Log more extra data for Celery\nThe old integration in celery used to log arguments to the task and more. 
Add that to our celery integration\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.signals import task_failure, task_prerun, task_postrun\nfrom celery.exceptions import SoftTimeLimitExceeded\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n task_prerun.connect(_handle_task_prerun, weak=False)\n task_postrun.connect(_handle_task_postrun, weak=False)\n task_failure.connect(_process_failure_signal, weak=False)\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _process_failure_signal(sender, task_id, einfo, **kw):\n # einfo from celery is not reliable\n exc_info = sys.exc_info()\n\n hub = Hub.current\n integration = hub.get_integration(CeleryIntegration)\n if integration is None:\n return\n\n if hasattr(sender, \"throws\") and isinstance(einfo.exception, sender.throws):\n return\n\n if isinstance(einfo.exception, SoftTimeLimitExceeded):\n # TODO: Move this into event processor\n with hub.push_scope() as scope:\n scope.fingerprint = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(sender, \"name\", sender),\n ]\n _capture_event(hub, exc_info)\n else:\n _capture_event(hub, exc_info)\n\n\ndef _handle_task_prerun(sender, task, **kw):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n scope = hub.push_scope().__enter__()\n with capture_internal_exceptions():\n scope.transaction = task.name\n\n\ndef _handle_task_postrun(sender, task_id, task, **kw):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.pop_scope_unsafe()\n\n\ndef _capture_event(hub, exc_info):\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n", "path": "sentry_sdk/integrations/celery.py"}]}
| 1,259 | 542 |
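The sentry-python diff above drops the signal-time special cases and instead registers a scope event processor that attaches the task name, positional arguments and keyword arguments under `extra`. A stripped-down sketch of that processor pattern, independent of Celery and of the SDK's real `Scope` object; the `event` dictionary layout here is a simplified stand-in:

```python
def make_event_processor(task_name, args, kwargs):
    """Build a callback that enriches an event dict with the captured task context."""
    def event_processor(event, hint):
        event.setdefault("transaction", task_name)
        extra = event.setdefault("extra", {})
        extra["celery-job"] = {
            "task_name": task_name,
            "args": args,
            "kwargs": kwargs,
        }
        return event
    return event_processor

processor = make_event_processor("app.tasks.resize", ("image.png",), {"width": 640})
print(processor({"exception": {"values": []}}, hint=None))
```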
gh_patches_debug_5102
|
rasdani/github-patches
|
git_diff
|
encode__starlette-623
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GraphQL response should not include error key if no error occured
The [GraphQL Spec](https://graphql.github.io/graphql-spec/June2018/#sec-Errors) states that:
> If no errors were encountered during the requested operation, the errors entry should not be present in the result.
Currently, if no errors are encountered, starlette will return `{"data": {...}, "errors": null}`.
This is only a small thing, but enough to break some clients.
I have a PR for this incoming.
</issue>
<code>
[start of starlette/graphql.py]
1 import json
2 import typing
3
4 from starlette import status
5 from starlette.background import BackgroundTasks
6 from starlette.concurrency import run_in_threadpool
7 from starlette.requests import Request
8 from starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response
9 from starlette.types import Receive, Scope, Send
10
11 try:
12 import graphene
13 from graphql.execution.executors.asyncio import AsyncioExecutor
14 from graphql.error import format_error as format_graphql_error
15 from graphql.error import GraphQLError
16 except ImportError: # pragma: nocover
17 graphene = None # type: ignore
18 AsyncioExecutor = None # type: ignore
19 format_graphql_error = None # type: ignore
20 GraphQLError = None # type: ignore
21
22
23 class GraphQLApp:
24 def __init__(
25 self,
26 schema: "graphene.Schema",
27 executor: typing.Any = None,
28 executor_class: type = None,
29 graphiql: bool = True,
30 ) -> None:
31 self.schema = schema
32 self.graphiql = graphiql
33 if executor is None:
34 # New style in 0.10.0. Use 'executor_class'.
35 # See issue https://github.com/encode/starlette/issues/242
36 self.executor = executor
37 self.executor_class = executor_class
38 self.is_async = executor_class is not None and issubclass(
39 executor_class, AsyncioExecutor
40 )
41 else:
42 # Old style. Use 'executor'.
43 # We should remove this in the next median/major version bump.
44 self.executor = executor
45 self.executor_class = None
46 self.is_async = isinstance(executor, AsyncioExecutor)
47
48 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
49 if self.executor is None and self.executor_class is not None:
50 self.executor = self.executor_class()
51
52 request = Request(scope, receive=receive)
53 response = await self.handle_graphql(request)
54 await response(scope, receive, send)
55
56 async def handle_graphql(self, request: Request) -> Response:
57 if request.method in ("GET", "HEAD"):
58 if "text/html" in request.headers.get("Accept", ""):
59 if not self.graphiql:
60 return PlainTextResponse(
61 "Not Found", status_code=status.HTTP_404_NOT_FOUND
62 )
63 return await self.handle_graphiql(request)
64
65 data = request.query_params # type: typing.Mapping[str, typing.Any]
66
67 elif request.method == "POST":
68 content_type = request.headers.get("Content-Type", "")
69
70 if "application/json" in content_type:
71 data = await request.json()
72 elif "application/graphql" in content_type:
73 body = await request.body()
74 text = body.decode()
75 data = {"query": text}
76 elif "query" in request.query_params:
77 data = request.query_params
78 else:
79 return PlainTextResponse(
80 "Unsupported Media Type",
81 status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
82 )
83
84 else:
85 return PlainTextResponse(
86 "Method Not Allowed", status_code=status.HTTP_405_METHOD_NOT_ALLOWED
87 )
88
89 try:
90 query = data["query"]
91 variables = data.get("variables")
92 operation_name = data.get("operationName")
93 except KeyError:
94 return PlainTextResponse(
95 "No GraphQL query found in the request",
96 status_code=status.HTTP_400_BAD_REQUEST,
97 )
98
99 background = BackgroundTasks()
100 context = {"request": request, "background": background}
101
102 result = await self.execute(
103 query, variables=variables, context=context, operation_name=operation_name
104 )
105 error_data = (
106 [format_graphql_error(err) for err in result.errors]
107 if result.errors
108 else None
109 )
110 response_data = {"data": result.data, "errors": error_data}
111 status_code = (
112 status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK
113 )
114
115 return JSONResponse(
116 response_data, status_code=status_code, background=background
117 )
118
119 async def execute( # type: ignore
120 self, query, variables=None, context=None, operation_name=None
121 ):
122 if self.is_async:
123 return await self.schema.execute(
124 query,
125 variables=variables,
126 operation_name=operation_name,
127 executor=self.executor,
128 return_promise=True,
129 context=context,
130 )
131 else:
132 return await run_in_threadpool(
133 self.schema.execute,
134 query,
135 variables=variables,
136 operation_name=operation_name,
137 context=context,
138 )
139
140 async def handle_graphiql(self, request: Request) -> Response:
141 text = GRAPHIQL.replace("{{REQUEST_PATH}}", json.dumps(request.url.path))
142 return HTMLResponse(text)
143
144
145 GRAPHIQL = """
146 <!--
147 * Copyright (c) Facebook, Inc.
148 * All rights reserved.
149 *
150 * This source code is licensed under the license found in the
151 * LICENSE file in the root directory of this source tree.
152 -->
153 <!DOCTYPE html>
154 <html>
155 <head>
156 <style>
157 body {
158 height: 100%;
159 margin: 0;
160 width: 100%;
161 overflow: hidden;
162 }
163 #graphiql {
164 height: 100vh;
165 }
166 </style>
167 <!--
168 This GraphiQL example depends on Promise and fetch, which are available in
169 modern browsers, but can be "polyfilled" for older browsers.
170 GraphiQL itself depends on React DOM.
171 If you do not want to rely on a CDN, you can host these files locally or
172 include them directly in your favored resource bunder.
173 -->
174 <link href="//cdn.jsdelivr.net/npm/[email protected]/graphiql.css" rel="stylesheet"/>
175 <script src="//cdn.jsdelivr.net/npm/[email protected]/fetch.min.js"></script>
176 <script src="//cdn.jsdelivr.net/npm/[email protected]/umd/react.production.min.js"></script>
177 <script src="//cdn.jsdelivr.net/npm/[email protected]/umd/react-dom.production.min.js"></script>
178 <script src="//cdn.jsdelivr.net/npm/[email protected]/graphiql.min.js"></script>
179 </head>
180 <body>
181 <div id="graphiql">Loading...</div>
182 <script>
183 /**
184 * This GraphiQL example illustrates how to use some of GraphiQL's props
185 * in order to enable reading and updating the URL parameters, making
186 * link sharing of queries a little bit easier.
187 *
188 * This is only one example of this kind of feature, GraphiQL exposes
189 * various React params to enable interesting integrations.
190 */
191 // Parse the search string to get url parameters.
192 var search = window.location.search;
193 var parameters = {};
194 search.substr(1).split('&').forEach(function (entry) {
195 var eq = entry.indexOf('=');
196 if (eq >= 0) {
197 parameters[decodeURIComponent(entry.slice(0, eq))] =
198 decodeURIComponent(entry.slice(eq + 1));
199 }
200 });
201 // if variables was provided, try to format it.
202 if (parameters.variables) {
203 try {
204 parameters.variables =
205 JSON.stringify(JSON.parse(parameters.variables), null, 2);
206 } catch (e) {
207 // Do nothing, we want to display the invalid JSON as a string, rather
208 // than present an error.
209 }
210 }
211 // When the query and variables string is edited, update the URL bar so
212 // that it can be easily shared
213 function onEditQuery(newQuery) {
214 parameters.query = newQuery;
215 updateURL();
216 }
217 function onEditVariables(newVariables) {
218 parameters.variables = newVariables;
219 updateURL();
220 }
221 function onEditOperationName(newOperationName) {
222 parameters.operationName = newOperationName;
223 updateURL();
224 }
225 function updateURL() {
226 var newSearch = '?' + Object.keys(parameters).filter(function (key) {
227 return Boolean(parameters[key]);
228 }).map(function (key) {
229 return encodeURIComponent(key) + '=' +
230 encodeURIComponent(parameters[key]);
231 }).join('&');
232 history.replaceState(null, null, newSearch);
233 }
234 // Defines a GraphQL fetcher using the fetch API. You're not required to
235 // use fetch, and could instead implement graphQLFetcher however you like,
236 // as long as it returns a Promise or Observable.
237 function graphQLFetcher(graphQLParams) {
238 // This example expects a GraphQL server at the path /graphql.
239 // Change this to point wherever you host your GraphQL server.
240 return fetch({{REQUEST_PATH}}, {
241 method: 'post',
242 headers: {
243 'Accept': 'application/json',
244 'Content-Type': 'application/json',
245 },
246 body: JSON.stringify(graphQLParams),
247 credentials: 'include',
248 }).then(function (response) {
249 return response.text();
250 }).then(function (responseBody) {
251 try {
252 return JSON.parse(responseBody);
253 } catch (error) {
254 return responseBody;
255 }
256 });
257 }
258 // Render <GraphiQL /> into the body.
259 // See the README in the top level of this module to learn more about
260 // how you can customize GraphiQL by providing different values or
261 // additional child elements.
262 ReactDOM.render(
263 React.createElement(GraphiQL, {
264 fetcher: graphQLFetcher,
265 query: parameters.query,
266 variables: parameters.variables,
267 operationName: parameters.operationName,
268 onEditQuery: onEditQuery,
269 onEditVariables: onEditVariables,
270 onEditOperationName: onEditOperationName
271 }),
272 document.getElementById('graphiql')
273 );
274 </script>
275 </body>
276 </html>
277 """
278
[end of starlette/graphql.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/starlette/graphql.py b/starlette/graphql.py
--- a/starlette/graphql.py
+++ b/starlette/graphql.py
@@ -107,7 +107,9 @@
if result.errors
else None
)
- response_data = {"data": result.data, "errors": error_data}
+ response_data = {"data": result.data}
+ if error_data:
+ response_data["errors"] = error_data
status_code = (
status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK
)
|
{"golden_diff": "diff --git a/starlette/graphql.py b/starlette/graphql.py\n--- a/starlette/graphql.py\n+++ b/starlette/graphql.py\n@@ -107,7 +107,9 @@\n if result.errors\n else None\n )\n- response_data = {\"data\": result.data, \"errors\": error_data}\n+ response_data = {\"data\": result.data}\n+ if error_data:\n+ response_data[\"errors\"] = error_data\n status_code = (\n status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK\n )\n", "issue": "GraphQL response should not include error key if no error occured\nThe [GraphQL Spec](https://graphql.github.io/graphql-spec/June2018/#sec-Errors) states that:\r\n> If no errors were encountered during the requested operation, the errors entry should not be present in the result.\r\n\r\nCurrently, if no errors are encountered, starlette will return `{\"data\": {...}, \"errors\": null}`. \r\nThis is only a small thing, but enough to break some clients.\r\n\r\nI have a PR for this incoming.\n", "before_files": [{"content": "import json\nimport typing\n\nfrom starlette import status\nfrom starlette.background import BackgroundTasks\nfrom starlette.concurrency import run_in_threadpool\nfrom starlette.requests import Request\nfrom starlette.responses import HTMLResponse, JSONResponse, PlainTextResponse, Response\nfrom starlette.types import Receive, Scope, Send\n\ntry:\n import graphene\n from graphql.execution.executors.asyncio import AsyncioExecutor\n from graphql.error import format_error as format_graphql_error\n from graphql.error import GraphQLError\nexcept ImportError: # pragma: nocover\n graphene = None # type: ignore\n AsyncioExecutor = None # type: ignore\n format_graphql_error = None # type: ignore\n GraphQLError = None # type: ignore\n\n\nclass GraphQLApp:\n def __init__(\n self,\n schema: \"graphene.Schema\",\n executor: typing.Any = None,\n executor_class: type = None,\n graphiql: bool = True,\n ) -> None:\n self.schema = schema\n self.graphiql = graphiql\n if executor is None:\n # New style in 0.10.0. Use 'executor_class'.\n # See issue https://github.com/encode/starlette/issues/242\n self.executor = executor\n self.executor_class = executor_class\n self.is_async = executor_class is not None and issubclass(\n executor_class, AsyncioExecutor\n )\n else:\n # Old style. 
Use 'executor'.\n # We should remove this in the next median/major version bump.\n self.executor = executor\n self.executor_class = None\n self.is_async = isinstance(executor, AsyncioExecutor)\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if self.executor is None and self.executor_class is not None:\n self.executor = self.executor_class()\n\n request = Request(scope, receive=receive)\n response = await self.handle_graphql(request)\n await response(scope, receive, send)\n\n async def handle_graphql(self, request: Request) -> Response:\n if request.method in (\"GET\", \"HEAD\"):\n if \"text/html\" in request.headers.get(\"Accept\", \"\"):\n if not self.graphiql:\n return PlainTextResponse(\n \"Not Found\", status_code=status.HTTP_404_NOT_FOUND\n )\n return await self.handle_graphiql(request)\n\n data = request.query_params # type: typing.Mapping[str, typing.Any]\n\n elif request.method == \"POST\":\n content_type = request.headers.get(\"Content-Type\", \"\")\n\n if \"application/json\" in content_type:\n data = await request.json()\n elif \"application/graphql\" in content_type:\n body = await request.body()\n text = body.decode()\n data = {\"query\": text}\n elif \"query\" in request.query_params:\n data = request.query_params\n else:\n return PlainTextResponse(\n \"Unsupported Media Type\",\n status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,\n )\n\n else:\n return PlainTextResponse(\n \"Method Not Allowed\", status_code=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n\n try:\n query = data[\"query\"]\n variables = data.get(\"variables\")\n operation_name = data.get(\"operationName\")\n except KeyError:\n return PlainTextResponse(\n \"No GraphQL query found in the request\",\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n\n background = BackgroundTasks()\n context = {\"request\": request, \"background\": background}\n\n result = await self.execute(\n query, variables=variables, context=context, operation_name=operation_name\n )\n error_data = (\n [format_graphql_error(err) for err in result.errors]\n if result.errors\n else None\n )\n response_data = {\"data\": result.data, \"errors\": error_data}\n status_code = (\n status.HTTP_400_BAD_REQUEST if result.errors else status.HTTP_200_OK\n )\n\n return JSONResponse(\n response_data, status_code=status_code, background=background\n )\n\n async def execute( # type: ignore\n self, query, variables=None, context=None, operation_name=None\n ):\n if self.is_async:\n return await self.schema.execute(\n query,\n variables=variables,\n operation_name=operation_name,\n executor=self.executor,\n return_promise=True,\n context=context,\n )\n else:\n return await run_in_threadpool(\n self.schema.execute,\n query,\n variables=variables,\n operation_name=operation_name,\n context=context,\n )\n\n async def handle_graphiql(self, request: Request) -> Response:\n text = GRAPHIQL.replace(\"{{REQUEST_PATH}}\", json.dumps(request.url.path))\n return HTMLResponse(text)\n\n\nGRAPHIQL = \"\"\"\n<!--\n * Copyright (c) Facebook, Inc.\n * All rights reserved.\n *\n * This source code is licensed under the license found in the\n * LICENSE file in the root directory of this source tree.\n-->\n<!DOCTYPE html>\n<html>\n <head>\n <style>\n body {\n height: 100%;\n margin: 0;\n width: 100%;\n overflow: hidden;\n }\n #graphiql {\n height: 100vh;\n }\n </style>\n <!--\n This GraphiQL example depends on Promise and fetch, which are available in\n modern browsers, but can be \"polyfilled\" for older browsers.\n GraphiQL itself depends on React 
DOM.\n If you do not want to rely on a CDN, you can host these files locally or\n include them directly in your favored resource bunder.\n -->\n <link href=\"//cdn.jsdelivr.net/npm/[email protected]/graphiql.css\" rel=\"stylesheet\"/>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/fetch.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/umd/react.production.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/umd/react-dom.production.min.js\"></script>\n <script src=\"//cdn.jsdelivr.net/npm/[email protected]/graphiql.min.js\"></script>\n </head>\n <body>\n <div id=\"graphiql\">Loading...</div>\n <script>\n /**\n * This GraphiQL example illustrates how to use some of GraphiQL's props\n * in order to enable reading and updating the URL parameters, making\n * link sharing of queries a little bit easier.\n *\n * This is only one example of this kind of feature, GraphiQL exposes\n * various React params to enable interesting integrations.\n */\n // Parse the search string to get url parameters.\n var search = window.location.search;\n var parameters = {};\n search.substr(1).split('&').forEach(function (entry) {\n var eq = entry.indexOf('=');\n if (eq >= 0) {\n parameters[decodeURIComponent(entry.slice(0, eq))] =\n decodeURIComponent(entry.slice(eq + 1));\n }\n });\n // if variables was provided, try to format it.\n if (parameters.variables) {\n try {\n parameters.variables =\n JSON.stringify(JSON.parse(parameters.variables), null, 2);\n } catch (e) {\n // Do nothing, we want to display the invalid JSON as a string, rather\n // than present an error.\n }\n }\n // When the query and variables string is edited, update the URL bar so\n // that it can be easily shared\n function onEditQuery(newQuery) {\n parameters.query = newQuery;\n updateURL();\n }\n function onEditVariables(newVariables) {\n parameters.variables = newVariables;\n updateURL();\n }\n function onEditOperationName(newOperationName) {\n parameters.operationName = newOperationName;\n updateURL();\n }\n function updateURL() {\n var newSearch = '?' + Object.keys(parameters).filter(function (key) {\n return Boolean(parameters[key]);\n }).map(function (key) {\n return encodeURIComponent(key) + '=' +\n encodeURIComponent(parameters[key]);\n }).join('&');\n history.replaceState(null, null, newSearch);\n }\n // Defines a GraphQL fetcher using the fetch API. 
You're not required to\n // use fetch, and could instead implement graphQLFetcher however you like,\n // as long as it returns a Promise or Observable.\n function graphQLFetcher(graphQLParams) {\n // This example expects a GraphQL server at the path /graphql.\n // Change this to point wherever you host your GraphQL server.\n return fetch({{REQUEST_PATH}}, {\n method: 'post',\n headers: {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify(graphQLParams),\n credentials: 'include',\n }).then(function (response) {\n return response.text();\n }).then(function (responseBody) {\n try {\n return JSON.parse(responseBody);\n } catch (error) {\n return responseBody;\n }\n });\n }\n // Render <GraphiQL /> into the body.\n // See the README in the top level of this module to learn more about\n // how you can customize GraphiQL by providing different values or\n // additional child elements.\n ReactDOM.render(\n React.createElement(GraphiQL, {\n fetcher: graphQLFetcher,\n query: parameters.query,\n variables: parameters.variables,\n operationName: parameters.operationName,\n onEditQuery: onEditQuery,\n onEditVariables: onEditVariables,\n onEditOperationName: onEditOperationName\n }),\n document.getElementById('graphiql')\n );\n </script>\n </body>\n</html>\n\"\"\"\n", "path": "starlette/graphql.py"}]}
| 3,508 | 126 |
gh_patches_debug_34657
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-14125
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages)
**Describe the bug**
Created a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem.
Essentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error
> ResolveError: Directory 'backend' does not contain any BUILD files
After a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets.
For example, in the repo above, I have 3 identical projects (A, B, C) that differ only by the `pex_binary` `output_path` (with this location updated in the associated Dockerfile), and one of the projects refuses to compile.
As per the README in the repo:
```bash
# Should create a pex at dist/backend/projecta/projecta.pex
# Docker image created successfully as projecta-container:latest
./pants package backend/projecta::
# Should create a pex at dist/backend.projectc/projectc.pex
# Docker image created successfully as projectc-container:latest
./pants package backend/projectc::
```
```bash
# Should create a pex at dist/backend/projectb.pex
./pants package backend/projectb:projectb
# FAILS: With ResolveError
./pants package backend/projectb:projectb-container
```
So, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`.
This isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message.
**Pants version**
- 2.8.0
- 2.9.0rc1
**OS**
macOS 12.1
Untested on Linux
</issue>
<code>
[start of src/python/pants/backend/docker/util_rules/dependencies.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
5 from pants.backend.docker.target_types import DockerDependenciesField
6 from pants.core.goals.package import PackageFieldSet
7 from pants.engine.addresses import Addresses, UnparsedAddressInputs
8 from pants.engine.rules import Get, collect_rules, rule
9 from pants.engine.target import (
10 FieldSetsPerTarget,
11 FieldSetsPerTargetRequest,
12 InjectDependenciesRequest,
13 InjectedDependencies,
14 Targets,
15 )
16 from pants.engine.unions import UnionRule
17
18
19 class InjectDockerDependencies(InjectDependenciesRequest):
20 inject_for = DockerDependenciesField
21
22
23 @rule
24 async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:
25 """Inspects COPY instructions in the Dockerfile for references to known targets."""
26 dockerfile_info = await Get(
27 DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)
28 )
29
30 targets = await Get(
31 Targets,
32 UnparsedAddressInputs(
33 dockerfile_info.putative_target_addresses,
34 owning_address=dockerfile_info.address,
35 ),
36 )
37 package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))
38 referenced_targets = (
39 field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
40 )
41 return InjectedDependencies(Addresses(referenced_targets))
42
43
44 def rules():
45 return [
46 *collect_rules(),
47 UnionRule(InjectDependenciesRequest, InjectDockerDependencies),
48 ]
49
[end of src/python/pants/backend/docker/util_rules/dependencies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py
--- a/src/python/pants/backend/docker/util_rules/dependencies.py
+++ b/src/python/pants/backend/docker/util_rules/dependencies.py
@@ -3,6 +3,7 @@
from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
from pants.backend.docker.target_types import DockerDependenciesField
+from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses
from pants.core.goals.package import PackageFieldSet
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.rules import Get, collect_rules, rule
@@ -22,18 +23,28 @@
@rule
async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:
- """Inspects COPY instructions in the Dockerfile for references to known targets."""
+ """Inspects COPY instructions in the Dockerfile for references to known packagable targets."""
dockerfile_info = await Get(
DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)
)
- targets = await Get(
- Targets,
+ # Parse all putative target addresses.
+ putative_addresses = await Get(
+ Addresses,
UnparsedAddressInputs(
dockerfile_info.putative_target_addresses,
owning_address=dockerfile_info.address,
),
)
+
+ # Get the target for those addresses that are known.
+ directories = {address.spec_path for address in putative_addresses}
+ all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))
+ targets = await Get(
+ Targets, Addresses((address for address in putative_addresses if address in all_addresses))
+ )
+
+ # Only keep those targets that we can "package".
package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))
referenced_targets = (
field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
|
{"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py\n--- a/src/python/pants/backend/docker/util_rules/dependencies.py\n+++ b/src/python/pants/backend/docker/util_rules/dependencies.py\n@@ -3,6 +3,7 @@\n \n from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\n from pants.backend.docker.target_types import DockerDependenciesField\n+from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses\n from pants.core.goals.package import PackageFieldSet\n from pants.engine.addresses import Addresses, UnparsedAddressInputs\n from pants.engine.rules import Get, collect_rules, rule\n@@ -22,18 +23,28 @@\n \n @rule\n async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n- \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n+ \"\"\"Inspects COPY instructions in the Dockerfile for references to known packagable targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n \n- targets = await Get(\n- Targets,\n+ # Parse all putative target addresses.\n+ putative_addresses = await Get(\n+ Addresses,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=dockerfile_info.address,\n ),\n )\n+\n+ # Get the target for those addresses that are known.\n+ directories = {address.spec_path for address in putative_addresses}\n+ all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))\n+ targets = await Get(\n+ Targets, Addresses((address for address in putative_addresses if address in all_addresses))\n+ )\n+\n+ # Only keep those targets that we can \"package\".\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n", "issue": "ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages)\n**Describe the bug**\r\n\r\nCreated a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem. \r\n\r\nEssentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error \r\n\r\n> ResolveError: Directory 'backend' does not contain any BUILD files\r\n\r\nAfter a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets. 
\r\n\r\nFor example, in the repo above, I have 3 identical projects (A, B, C) - where they only differ by the `pex_binary` `output_path` (and this location updated in the associated Dockerfile), and one of the projects refuses to compile.\r\n\r\nAs per the README in the repo:\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projecta/projecta.pex\r\n# Docker image created successfully as projecta-container:latest\r\n./pants package backend/projecta::\r\n\r\n# Should create a pex at dist/backend.projectc/projectc.pex\r\n# Docker image created successfully as projectc-container:latest\r\n./pants package backend/projectc::\r\n```\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projectb.pex\r\n./pants package backend/projectb:projectb\r\n\r\n# FAILS: With ResolveError\r\n./pants package backend/projectb:projectb-container \r\n```\r\n\r\nSo, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`.\r\n\r\nThis isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message.\r\n\r\n**Pants version**\r\n\r\n- 2.8.0\r\n- 2.9.0rc1\r\n\r\n**OS**\r\n\r\nmacOS 12.1\r\nUntested on Linux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\nfrom pants.backend.docker.target_types import DockerDependenciesField\nfrom pants.core.goals.package import PackageFieldSet\nfrom pants.engine.addresses import Addresses, UnparsedAddressInputs\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import (\n FieldSetsPerTarget,\n FieldSetsPerTargetRequest,\n InjectDependenciesRequest,\n InjectedDependencies,\n Targets,\n)\nfrom pants.engine.unions import UnionRule\n\n\nclass InjectDockerDependencies(InjectDependenciesRequest):\n inject_for = DockerDependenciesField\n\n\n@rule\nasync def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n\n targets = await Get(\n Targets,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=dockerfile_info.address,\n ),\n )\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n )\n return InjectedDependencies(Addresses(referenced_targets))\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(InjectDependenciesRequest, InjectDockerDependencies),\n ]\n", "path": "src/python/pants/backend/docker/util_rules/dependencies.py"}]}
| 1,458 | 452 |
gh_patches_debug_39570
|
rasdani/github-patches
|
git_diff
|
ibis-project__ibis-3117
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
perf: add fast path for simple selections in dask backend
For simple selections, we don't need to bother with the `dd.concat` here: https://github.com/ibis-project/ibis/blob/master/ibis/backends/dask/execution/selection.py#L154 and should probably select on the data directly.
</issue>
<code>
[start of ibis/backends/dask/execution/selection.py]
1 """Dispatching code for Selection operations.
2 """
3
4
5 import functools
6 import operator
7 from typing import Optional
8
9 import dask.dataframe as dd
10 import pandas
11 from toolz import concatv
12
13 import ibis.expr.operations as ops
14 import ibis.expr.types as ir
15 from ibis.backends.pandas.execution.selection import (
16 compute_projection,
17 compute_projection_table_expr,
18 map_new_column_names_to_data,
19 remap_overlapping_column_names,
20 )
21 from ibis.expr.scope import Scope
22 from ibis.expr.typing import TimeContext
23
24 from ..core import execute
25 from ..dispatch import execute_node
26 from ..execution import constants
27 from ..execution.util import (
28 add_partitioned_sorted_column,
29 coerce_to_output,
30 compute_sorted_frame,
31 )
32
33
34 @compute_projection.register(ir.ScalarExpr, ops.Selection, dd.DataFrame)
35 def compute_projection_scalar_expr(
36 expr,
37 parent,
38 data,
39 scope: Scope,
40 timecontext: Optional[TimeContext] = None,
41 **kwargs,
42 ):
43 name = expr._name
44 assert name is not None, 'Scalar selection name is None'
45
46 op = expr.op()
47 parent_table_op = parent.table.op()
48
49 data_columns = frozenset(data.columns)
50
51 scope = scope.merge_scopes(
52 Scope(
53 {
54 t: map_new_column_names_to_data(
55 remap_overlapping_column_names(
56 parent_table_op, t, data_columns
57 ),
58 data,
59 )
60 },
61 timecontext,
62 )
63 for t in op.root_tables()
64 )
65 scalar = execute(expr, scope=scope, **kwargs)
66 return data.assign(**{name: scalar})[name]
67
68
69 @compute_projection.register(ir.ColumnExpr, ops.Selection, dd.DataFrame)
70 def compute_projection_column_expr(
71 expr,
72 parent,
73 data,
74 scope: Scope,
75 timecontext: Optional[TimeContext],
76 **kwargs,
77 ):
78 result_name = getattr(expr, '_name', None)
79 op = expr.op()
80 parent_table_op = parent.table.op()
81
82 if isinstance(op, ops.TableColumn):
83 # slightly faster path for simple column selection
84 name = op.name
85
86 if name in data:
87 return data[name].rename(result_name or name)
88
89 if not isinstance(parent_table_op, ops.Join):
90 raise KeyError(name)
91 (root_table,) = op.root_tables()
92 left_root, right_root = ops.distinct_roots(
93 parent_table_op.left, parent_table_op.right
94 )
95 suffixes = {
96 left_root: constants.LEFT_JOIN_SUFFIX,
97 right_root: constants.RIGHT_JOIN_SUFFIX,
98 }
99 return data.loc[:, name + suffixes[root_table]].rename(
100 result_name or name
101 )
102
103 data_columns = frozenset(data.columns)
104
105 scope = scope.merge_scopes(
106 Scope(
107 {
108 t: map_new_column_names_to_data(
109 remap_overlapping_column_names(
110 parent_table_op, t, data_columns
111 ),
112 data,
113 )
114 },
115 timecontext,
116 )
117 for t in op.root_tables()
118 )
119
120 result = execute(expr, scope=scope, timecontext=timecontext, **kwargs)
121 result = coerce_to_output(result, expr, data.index)
122 assert result_name is not None, 'Column selection name is None'
123
124 return result
125
126
127 compute_projection.register(ir.TableExpr, ops.Selection, dd.DataFrame)(
128 compute_projection_table_expr
129 )
130
131
132 @execute_node.register(ops.Selection, dd.DataFrame)
133 def execute_selection_dataframe(
134 op, data, scope: Scope, timecontext: Optional[TimeContext], **kwargs
135 ):
136 selections = op.selections
137 predicates = op.predicates
138 sort_keys = op.sort_keys
139 result = data
140
141 # Build up the individual dask structures from column expressions
142 if selections:
143 # Create a unique row identifier and set it as the index. This is used
144 # in dd.concat to merge the pieces back together.
145 data = add_partitioned_sorted_column(data)
146 data_pieces = []
147 for selection in selections:
148 dask_object = compute_projection(
149 selection,
150 op,
151 data,
152 scope=scope,
153 timecontext=timecontext,
154 **kwargs,
155 )
156 data_pieces.append(dask_object)
157
158 result = dd.concat(data_pieces, axis=1)
159 result.reset_index(drop=True)
160
161 if predicates:
162 predicates = _compute_predicates(
163 op.table.op(), predicates, data, scope, timecontext, **kwargs
164 )
165 predicate = functools.reduce(operator.and_, predicates)
166 result = result.loc[predicate]
167
168 if sort_keys:
169 if len(sort_keys) > 1:
170 raise NotImplementedError(
171 """
172 Multi-key sorting is not implemented for the Dask backend
173 """
174 )
175 sort_key = sort_keys[0]
176 ascending = getattr(sort_key.op(), 'ascending', True)
177 if not ascending:
178 raise NotImplementedError(
179 "Descending sort is not supported for the Dask backend"
180 )
181 result = compute_sorted_frame(
182 result,
183 order_by=sort_key,
184 scope=scope,
185 timecontext=timecontext,
186 **kwargs,
187 )
188
189 return result
190 else:
191 grouping_keys = ordering_keys = ()
192
193 # return early if we do not have any temporary grouping or ordering columns
194 assert not grouping_keys, 'group by should never show up in Selection'
195 if not ordering_keys:
196 return result
197
198 # create a sequence of columns that we need to drop
199 temporary_columns = pandas.Index(
200 concatv(grouping_keys, ordering_keys)
201 ).difference(data.columns)
202
203 # no reason to call drop if we don't need to
204 if temporary_columns.empty:
205 return result
206
207 # drop every temporary column we created for ordering or grouping
208 return result.drop(temporary_columns, axis=1)
209
210
211 def _compute_predicates(
212 table_op,
213 predicates,
214 data,
215 scope: Scope,
216 timecontext: Optional[TimeContext],
217 **kwargs,
218 ):
219 """Compute the predicates for a table operation.
220
221 Parameters
222 ----------
223 table_op : TableNode
224 predicates : List[ir.ColumnExpr]
225 data : pd.DataFrame
226 scope : Scope
227 timecontext: Optional[TimeContext]
228 kwargs : dict
229
230 Returns
231 -------
232 computed_predicate : pd.Series[bool]
233
234 Notes
235 -----
236 This handles the cases where the predicates are computed columns, in
237 addition to the simple case of named columns coming directly from the input
238 table.
239 """
240 for predicate in predicates:
241 # Map each root table of the predicate to the data so that we compute
242 # predicates on the result instead of any left or right tables if the
243 # Selection is on a Join. Project data to only inlude columns from
244 # the root table.
245 root_tables = predicate.op().root_tables()
246
247 # handle suffixes
248 data_columns = frozenset(data.columns)
249
250 additional_scope = Scope()
251 for root_table in root_tables:
252 mapping = remap_overlapping_column_names(
253 table_op, root_table, data_columns
254 )
255 if mapping is not None:
256 new_data = data.loc[:, mapping.keys()].rename(columns=mapping)
257 else:
258 new_data = data
259 additional_scope = additional_scope.merge_scope(
260 Scope({root_table: new_data}, timecontext)
261 )
262
263 scope = scope.merge_scope(additional_scope)
264 yield execute(predicate, scope=scope, **kwargs)
265
[end of ibis/backends/dask/execution/selection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ibis/backends/dask/execution/selection.py b/ibis/backends/dask/execution/selection.py
--- a/ibis/backends/dask/execution/selection.py
+++ b/ibis/backends/dask/execution/selection.py
@@ -4,7 +4,7 @@
import functools
import operator
-from typing import Optional
+from typing import List, Optional
import dask.dataframe as dd
import pandas
@@ -47,7 +47,6 @@
parent_table_op = parent.table.op()
data_columns = frozenset(data.columns)
-
scope = scope.merge_scopes(
Scope(
{
@@ -129,6 +128,42 @@
)
+def build_df_from_selection(
+ selections: List[ir.ColumnExpr], data: dd.DataFrame
+) -> dd.DataFrame:
+ """Build up a df by doing direct selections, renaming if necessary."""
+ cols = [
+ (s.op().name, getattr(s, "_name", s.op().name)) for s in selections
+ ]
+ renamed_cols = {
+ col: renamed_col for col, renamed_col in cols if col != renamed_col
+ }
+
+ result = data[[col for col, _ in cols]]
+ if renamed_cols:
+ result = result.rename(columns=renamed_cols)
+
+ return result
+
+
+def build_df_from_projection(
+ selections: List[ir.Expr], op: ops.Selection, data: dd.DataFrame, **kwargs
+) -> dd.DataFrame:
+ """
+ Build up a df from individual pieces by dispatching to `compute_projection`
+ for each expression.
+ """
+
+ # Create a unique row identifier and set it as the index. This is
+ # used in dd.concat to merge the pieces back together.
+ data = add_partitioned_sorted_column(data)
+ data_pieces = [
+ compute_projection(s, op, data, **kwargs) for s in selections
+ ]
+
+ return dd.concat(data_pieces, axis=1).reset_index(drop=True)
+
+
@execute_node.register(ops.Selection, dd.DataFrame)
def execute_selection_dataframe(
op, data, scope: Scope, timecontext: Optional[TimeContext], **kwargs
@@ -138,25 +173,22 @@
sort_keys = op.sort_keys
result = data
- # Build up the individual dask structures from column expressions
if selections:
- # Create a unique row identifier and set it as the index. This is used
- # in dd.concat to merge the pieces back together.
- data = add_partitioned_sorted_column(data)
- data_pieces = []
- for selection in selections:
- dask_object = compute_projection(
- selection,
+ # if we are just performing select operations and all columns are in
+ # the table we can do a direct selection
+ if all(isinstance(s.op(), ops.TableColumn) for s in selections) and {
+ s.op().name for s in selections
+ }.issubset(set(result.columns)):
+ result = build_df_from_selection(selections, data)
+ else:
+ result = build_df_from_projection(
+ selections,
op,
data,
scope=scope,
timecontext=timecontext,
**kwargs,
)
- data_pieces.append(dask_object)
-
- result = dd.concat(data_pieces, axis=1)
- result.reset_index(drop=True)
if predicates:
predicates = _compute_predicates(
|
{"golden_diff": "diff --git a/ibis/backends/dask/execution/selection.py b/ibis/backends/dask/execution/selection.py\n--- a/ibis/backends/dask/execution/selection.py\n+++ b/ibis/backends/dask/execution/selection.py\n@@ -4,7 +4,7 @@\n \n import functools\n import operator\n-from typing import Optional\n+from typing import List, Optional\n \n import dask.dataframe as dd\n import pandas\n@@ -47,7 +47,6 @@\n parent_table_op = parent.table.op()\n \n data_columns = frozenset(data.columns)\n-\n scope = scope.merge_scopes(\n Scope(\n {\n@@ -129,6 +128,42 @@\n )\n \n \n+def build_df_from_selection(\n+ selections: List[ir.ColumnExpr], data: dd.DataFrame\n+) -> dd.DataFrame:\n+ \"\"\"Build up a df by doing direct selections, renaming if necessary.\"\"\"\n+ cols = [\n+ (s.op().name, getattr(s, \"_name\", s.op().name)) for s in selections\n+ ]\n+ renamed_cols = {\n+ col: renamed_col for col, renamed_col in cols if col != renamed_col\n+ }\n+\n+ result = data[[col for col, _ in cols]]\n+ if renamed_cols:\n+ result = result.rename(columns=renamed_cols)\n+\n+ return result\n+\n+\n+def build_df_from_projection(\n+ selections: List[ir.Expr], op: ops.Selection, data: dd.DataFrame, **kwargs\n+) -> dd.DataFrame:\n+ \"\"\"\n+ Build up a df from individual pieces by dispatching to `compute_projection`\n+ for each expression.\n+ \"\"\"\n+\n+ # Create a unique row identifier and set it as the index. This is\n+ # used in dd.concat to merge the pieces back together.\n+ data = add_partitioned_sorted_column(data)\n+ data_pieces = [\n+ compute_projection(s, op, data, **kwargs) for s in selections\n+ ]\n+\n+ return dd.concat(data_pieces, axis=1).reset_index(drop=True)\n+\n+\n @execute_node.register(ops.Selection, dd.DataFrame)\n def execute_selection_dataframe(\n op, data, scope: Scope, timecontext: Optional[TimeContext], **kwargs\n@@ -138,25 +173,22 @@\n sort_keys = op.sort_keys\n result = data\n \n- # Build up the individual dask structures from column expressions\n if selections:\n- # Create a unique row identifier and set it as the index. 
This is used\n- # in dd.concat to merge the pieces back together.\n- data = add_partitioned_sorted_column(data)\n- data_pieces = []\n- for selection in selections:\n- dask_object = compute_projection(\n- selection,\n+ # if we are just performing select operations and all columns are in\n+ # the table we can do a direct selection\n+ if all(isinstance(s.op(), ops.TableColumn) for s in selections) and {\n+ s.op().name for s in selections\n+ }.issubset(set(result.columns)):\n+ result = build_df_from_selection(selections, data)\n+ else:\n+ result = build_df_from_projection(\n+ selections,\n op,\n data,\n scope=scope,\n timecontext=timecontext,\n **kwargs,\n )\n- data_pieces.append(dask_object)\n-\n- result = dd.concat(data_pieces, axis=1)\n- result.reset_index(drop=True)\n \n if predicates:\n predicates = _compute_predicates(\n", "issue": "perf: add fast path for simple selections in dask backend\nFor simple selections we don't need to bother with the `dd.concat` here: https://github.com/ibis-project/ibis/blob/master/ibis/backends/dask/execution/selection.py#L154 and should probably select on the data directly \n", "before_files": [{"content": "\"\"\"Dispatching code for Selection operations.\n\"\"\"\n\n\nimport functools\nimport operator\nfrom typing import Optional\n\nimport dask.dataframe as dd\nimport pandas\nfrom toolz import concatv\n\nimport ibis.expr.operations as ops\nimport ibis.expr.types as ir\nfrom ibis.backends.pandas.execution.selection import (\n compute_projection,\n compute_projection_table_expr,\n map_new_column_names_to_data,\n remap_overlapping_column_names,\n)\nfrom ibis.expr.scope import Scope\nfrom ibis.expr.typing import TimeContext\n\nfrom ..core import execute\nfrom ..dispatch import execute_node\nfrom ..execution import constants\nfrom ..execution.util import (\n add_partitioned_sorted_column,\n coerce_to_output,\n compute_sorted_frame,\n)\n\n\n@compute_projection.register(ir.ScalarExpr, ops.Selection, dd.DataFrame)\ndef compute_projection_scalar_expr(\n expr,\n parent,\n data,\n scope: Scope,\n timecontext: Optional[TimeContext] = None,\n **kwargs,\n):\n name = expr._name\n assert name is not None, 'Scalar selection name is None'\n\n op = expr.op()\n parent_table_op = parent.table.op()\n\n data_columns = frozenset(data.columns)\n\n scope = scope.merge_scopes(\n Scope(\n {\n t: map_new_column_names_to_data(\n remap_overlapping_column_names(\n parent_table_op, t, data_columns\n ),\n data,\n )\n },\n timecontext,\n )\n for t in op.root_tables()\n )\n scalar = execute(expr, scope=scope, **kwargs)\n return data.assign(**{name: scalar})[name]\n\n\n@compute_projection.register(ir.ColumnExpr, ops.Selection, dd.DataFrame)\ndef compute_projection_column_expr(\n expr,\n parent,\n data,\n scope: Scope,\n timecontext: Optional[TimeContext],\n **kwargs,\n):\n result_name = getattr(expr, '_name', None)\n op = expr.op()\n parent_table_op = parent.table.op()\n\n if isinstance(op, ops.TableColumn):\n # slightly faster path for simple column selection\n name = op.name\n\n if name in data:\n return data[name].rename(result_name or name)\n\n if not isinstance(parent_table_op, ops.Join):\n raise KeyError(name)\n (root_table,) = op.root_tables()\n left_root, right_root = ops.distinct_roots(\n parent_table_op.left, parent_table_op.right\n )\n suffixes = {\n left_root: constants.LEFT_JOIN_SUFFIX,\n right_root: constants.RIGHT_JOIN_SUFFIX,\n }\n return data.loc[:, name + suffixes[root_table]].rename(\n result_name or name\n )\n\n data_columns = frozenset(data.columns)\n\n scope = 
scope.merge_scopes(\n Scope(\n {\n t: map_new_column_names_to_data(\n remap_overlapping_column_names(\n parent_table_op, t, data_columns\n ),\n data,\n )\n },\n timecontext,\n )\n for t in op.root_tables()\n )\n\n result = execute(expr, scope=scope, timecontext=timecontext, **kwargs)\n result = coerce_to_output(result, expr, data.index)\n assert result_name is not None, 'Column selection name is None'\n\n return result\n\n\ncompute_projection.register(ir.TableExpr, ops.Selection, dd.DataFrame)(\n compute_projection_table_expr\n)\n\n\n@execute_node.register(ops.Selection, dd.DataFrame)\ndef execute_selection_dataframe(\n op, data, scope: Scope, timecontext: Optional[TimeContext], **kwargs\n):\n selections = op.selections\n predicates = op.predicates\n sort_keys = op.sort_keys\n result = data\n\n # Build up the individual dask structures from column expressions\n if selections:\n # Create a unique row identifier and set it as the index. This is used\n # in dd.concat to merge the pieces back together.\n data = add_partitioned_sorted_column(data)\n data_pieces = []\n for selection in selections:\n dask_object = compute_projection(\n selection,\n op,\n data,\n scope=scope,\n timecontext=timecontext,\n **kwargs,\n )\n data_pieces.append(dask_object)\n\n result = dd.concat(data_pieces, axis=1)\n result.reset_index(drop=True)\n\n if predicates:\n predicates = _compute_predicates(\n op.table.op(), predicates, data, scope, timecontext, **kwargs\n )\n predicate = functools.reduce(operator.and_, predicates)\n result = result.loc[predicate]\n\n if sort_keys:\n if len(sort_keys) > 1:\n raise NotImplementedError(\n \"\"\"\n Multi-key sorting is not implemented for the Dask backend\n \"\"\"\n )\n sort_key = sort_keys[0]\n ascending = getattr(sort_key.op(), 'ascending', True)\n if not ascending:\n raise NotImplementedError(\n \"Descending sort is not supported for the Dask backend\"\n )\n result = compute_sorted_frame(\n result,\n order_by=sort_key,\n scope=scope,\n timecontext=timecontext,\n **kwargs,\n )\n\n return result\n else:\n grouping_keys = ordering_keys = ()\n\n # return early if we do not have any temporary grouping or ordering columns\n assert not grouping_keys, 'group by should never show up in Selection'\n if not ordering_keys:\n return result\n\n # create a sequence of columns that we need to drop\n temporary_columns = pandas.Index(\n concatv(grouping_keys, ordering_keys)\n ).difference(data.columns)\n\n # no reason to call drop if we don't need to\n if temporary_columns.empty:\n return result\n\n # drop every temporary column we created for ordering or grouping\n return result.drop(temporary_columns, axis=1)\n\n\ndef _compute_predicates(\n table_op,\n predicates,\n data,\n scope: Scope,\n timecontext: Optional[TimeContext],\n **kwargs,\n):\n \"\"\"Compute the predicates for a table operation.\n\n Parameters\n ----------\n table_op : TableNode\n predicates : List[ir.ColumnExpr]\n data : pd.DataFrame\n scope : Scope\n timecontext: Optional[TimeContext]\n kwargs : dict\n\n Returns\n -------\n computed_predicate : pd.Series[bool]\n\n Notes\n -----\n This handles the cases where the predicates are computed columns, in\n addition to the simple case of named columns coming directly from the input\n table.\n \"\"\"\n for predicate in predicates:\n # Map each root table of the predicate to the data so that we compute\n # predicates on the result instead of any left or right tables if the\n # Selection is on a Join. 
Project data to only inlude columns from\n # the root table.\n root_tables = predicate.op().root_tables()\n\n # handle suffixes\n data_columns = frozenset(data.columns)\n\n additional_scope = Scope()\n for root_table in root_tables:\n mapping = remap_overlapping_column_names(\n table_op, root_table, data_columns\n )\n if mapping is not None:\n new_data = data.loc[:, mapping.keys()].rename(columns=mapping)\n else:\n new_data = data\n additional_scope = additional_scope.merge_scope(\n Scope({root_table: new_data}, timecontext)\n )\n\n scope = scope.merge_scope(additional_scope)\n yield execute(predicate, scope=scope, **kwargs)\n", "path": "ibis/backends/dask/execution/selection.py"}]}
| 2,893 | 779 |
gh_patches_debug_1339
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-7520
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kivy.uix.Video._on_eos might be called after unload.
**Software Versions**
* Python: 3.7
* OS: linux
* Kivy: 2.0.0
* Kivy installation method: pip
**Describe the bug**
When using the ffpyplayer-based video implementation, it's possible that ``eos`` gets set from the frame-fetching thread after the video has been unloaded, which results in an ``AttributeError``, since ``self._video`` gets set to ``None`` in ``kivy.uix.Video.unload``.
**Proposed fix**
Add an additional check that ``self._video`` is still set in ``_on_eos`` (https://github.com/kivy/kivy/blob/master/kivy/uix/video.py#L260)
```python
    def _on_eos(self, *largs):
        if not self._video or self._video.eos != 'loop':
            self.state = 'stop'
            self.eos = True
```
Any objections? Otherwise I'd create a PR for this.
</issue>
<code>
[start of kivy/uix/video.py]
1 '''
2 Video
3 =====
4
5 The :class:`Video` widget is used to display video files and streams.
6 Depending on your Video core provider, platform, and plugins, you will
7 be able to play different formats. For example, the pygame video
8 provider only supports MPEG1 on Linux and OSX. GStreamer is more
9 versatile, and can read many video containers and codecs such as MKV,
10 OGV, AVI, MOV, FLV (if the correct gstreamer plugins are installed). Our
11 :class:`~kivy.core.video.VideoBase` implementation is used under the
12 hood.
13
14 Video loading is asynchronous - many properties are not available until
15 the video is loaded (when the texture is created)::
16
17 def on_position_change(instance, value):
18 print('The position in the video is', value)
19
20 def on_duration_change(instance, value):
21 print('The duration of the video is', value)
22
23 video = Video(source='PandaSneezes.avi')
24 video.bind(
25 position=on_position_change,
26 duration=on_duration_change
27 )
28
29 One can define a preview image which gets displayed until the video is
30 started/loaded by passing ``preview`` to the constructor::
31
32 video = Video(
33 source='PandaSneezes.avi',
34 preview='PandaSneezes_preview.png'
35 )
36
37 One can display the placeholder image when the video stops by reacting on eos::
38
39 def on_eos_change(self, inst, val):
40 if val and self.preview:
41 self.set_texture_from_resource(self.preview)
42
43 video.bind(eos=on_eos_change)
44 '''
45
46 __all__ = ('Video', )
47
48 from kivy.clock import Clock
49 from kivy.uix.image import Image
50 from kivy.core.video import Video as CoreVideo
51 from kivy.resources import resource_find
52 from kivy.properties import (BooleanProperty, NumericProperty, ObjectProperty,
53 OptionProperty, StringProperty)
54
55
56 class Video(Image):
57 '''Video class. See module documentation for more information.
58 '''
59
60 preview = StringProperty(None, allownone=True)
61 '''Filename / source of a preview image displayed before video starts.
62
63 :attr:`preview` is a :class:`~kivy.properties.StringProperty` and
64 defaults to None.
65
66 If set, it gets displayed until the video is loaded/started.
67
68 .. versionadded:: 2.1.0
69 '''
70
71 state = OptionProperty('stop', options=('play', 'pause', 'stop'))
72 '''String, indicates whether to play, pause, or stop the video::
73
74 # start playing the video at creation
75 video = Video(source='movie.mkv', state='play')
76
77 # create the video, and start later
78 video = Video(source='movie.mkv')
79 # and later
80 video.state = 'play'
81
82 :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
83 to 'stop'.
84 '''
85
86 play = BooleanProperty(False, deprecated=True)
87 '''
88 .. deprecated:: 1.4.0
89 Use :attr:`state` instead.
90
91 Boolean, indicates whether the video is playing or not.
92 You can start/stop the video by setting this property::
93
94 # start playing the video at creation
95 video = Video(source='movie.mkv', play=True)
96
97 # create the video, and start later
98 video = Video(source='movie.mkv')
99 # and later
100 video.play = True
101
102 :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to
103 False.
104
105 .. deprecated:: 1.4.0
106 Use :attr:`state` instead.
107 '''
108
109 eos = BooleanProperty(False)
110 '''Boolean, indicates whether the video has finished playing or not
111 (reached the end of the stream).
112
113 :attr:`eos` is a :class:`~kivy.properties.BooleanProperty` and defaults to
114 False.
115 '''
116
117 loaded = BooleanProperty(False)
118 '''Boolean, indicates whether the video is loaded and ready for playback
119 or not.
120
121 .. versionadded:: 1.6.0
122
123 :attr:`loaded` is a :class:`~kivy.properties.BooleanProperty` and defaults
124 to False.
125 '''
126
127 position = NumericProperty(-1)
128 '''Position of the video between 0 and :attr:`duration`. The position
129 defaults to -1 and is set to a real position when the video is loaded.
130
131 :attr:`position` is a :class:`~kivy.properties.NumericProperty` and
132 defaults to -1.
133 '''
134
135 duration = NumericProperty(-1)
136 '''Duration of the video. The duration defaults to -1, and is set to a real
137 duration when the video is loaded.
138
139 :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and
140 defaults to -1.
141 '''
142
143 volume = NumericProperty(1.)
144 '''Volume of the video, in the range 0-1. 1 means full volume, 0
145 means mute.
146
147 :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults
148 to 1.
149 '''
150
151 options = ObjectProperty({})
152 '''Options to pass at Video core object creation.
153
154 .. versionadded:: 1.0.4
155
156 :attr:`options` is an :class:`kivy.properties.ObjectProperty` and defaults
157 to {}.
158 '''
159
160 _video_load_event = None
161
162 def __init__(self, **kwargs):
163 self._video = None
164 super(Video, self).__init__(**kwargs)
165 self.fbind('source', self._trigger_video_load)
166
167 if "eos" in kwargs:
168 self.options["eos"] = kwargs["eos"]
169 if self.source:
170 self._trigger_video_load()
171
172 def texture_update(self, *largs):
173 if self.preview:
174 self.set_texture_from_resource(self.preview)
175 else:
176 self.set_texture_from_resource(self.source)
177
178 def seek(self, percent, precise=True):
179 '''Change the position to a percentage (strictly, a proportion)
180 of duration.
181
182 :Parameters:
183 `percent`: float or int
184 Position to seek as a proportion of the total duration,
185 must be between 0-1.
186 `precise`: bool, defaults to True
187 Precise seeking is slower, but seeks to exact requested
188 percent.
189
190 .. warning::
191 Calling seek() before the video is loaded has no effect.
192
193 .. versionadded:: 1.2.0
194
195 .. versionchanged:: 1.10.1
196 The `precise` keyword argument has been added.
197 '''
198 if self._video is None:
199 raise Exception('Video not loaded.')
200 self._video.seek(percent, precise=precise)
201
202 def _trigger_video_load(self, *largs):
203 ev = self._video_load_event
204 if ev is None:
205 ev = self._video_load_event = Clock.schedule_once(
206 self._do_video_load, -1)
207 ev()
208
209 def _do_video_load(self, *largs):
210 if CoreVideo is None:
211 return
212 self.unload()
213 if not self.source:
214 self._video = None
215 self.texture = None
216 else:
217 filename = self.source
218 # Check if filename is not url
219 if '://' not in filename:
220 filename = resource_find(filename)
221 self._video = CoreVideo(filename=filename, **self.options)
222 self._video.volume = self.volume
223 self._video.bind(on_load=self._on_load,
224 on_frame=self._on_video_frame,
225 on_eos=self._on_eos)
226 if self.state == 'play' or self.play:
227 self._video.play()
228 self.duration = 1.
229 self.position = 0.
230
231 def on_play(self, instance, value):
232 value = 'play' if value else 'stop'
233 return self.on_state(instance, value)
234
235 def on_state(self, instance, value):
236 if not self._video:
237 return
238 if value == 'play':
239 if self.eos:
240 self._video.stop()
241 self._video.position = 0.
242 self.eos = False
243 self._video.play()
244 elif value == 'pause':
245 self._video.pause()
246 else:
247 self._video.stop()
248 self._video.position = 0
249
250 def _on_video_frame(self, *largs):
251 video = self._video
252 if not video:
253 return
254 self.duration = video.duration
255 self.position = video.position
256 self.texture = video.texture
257 self.canvas.ask_update()
258
259 def _on_eos(self, *largs):
260 if self._video.eos != 'loop':
261 self.state = 'stop'
262 self.eos = True
263
264 def _on_load(self, *largs):
265 self.loaded = True
266 self._on_video_frame(largs)
267
268 def on_volume(self, instance, value):
269 if self._video:
270 self._video.volume = value
271
272 def unload(self):
273 '''Unload the video. The playback will be stopped.
274
275 .. versionadded:: 1.8.0
276 '''
277 if self._video:
278 self._video.stop()
279 self._video.unload()
280 self._video = None
281 self.loaded = False
282
283
284 if __name__ == '__main__':
285 from kivy.app import App
286 import sys
287
288 if len(sys.argv) != 2:
289 print("usage: %s file" % sys.argv[0])
290 sys.exit(1)
291
292 class VideoApp(App):
293 def build(self):
294 self.v = Video(source=sys.argv[1], state='play')
295 self.v.bind(state=self.replay)
296 return self.v
297
298 def replay(self, *args):
299 if self.v.state == 'stop':
300 self.v.state = 'play'
301
302 VideoApp().run()
303
[end of kivy/uix/video.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kivy/uix/video.py b/kivy/uix/video.py
--- a/kivy/uix/video.py
+++ b/kivy/uix/video.py
@@ -257,7 +257,7 @@
self.canvas.ask_update()
def _on_eos(self, *largs):
- if self._video.eos != 'loop':
+ if not self._video or self._video.eos != 'loop':
self.state = 'stop'
self.eos = True
|
{"golden_diff": "diff --git a/kivy/uix/video.py b/kivy/uix/video.py\n--- a/kivy/uix/video.py\n+++ b/kivy/uix/video.py\n@@ -257,7 +257,7 @@\n self.canvas.ask_update()\n \n def _on_eos(self, *largs):\n- if self._video.eos != 'loop':\n+ if not self._video or self._video.eos != 'loop':\n self.state = 'stop'\n self.eos = True\n", "issue": "kivy.uix.Video._on_eos might be called after unoad.\n**Software Versions**\r\n* Python: 3.7\r\n* OS: linux\r\n* Kivy: 2.0.0\r\n* Kivy installation method: pip\r\n\r\n**Describe the bug**\r\nWhen using ffpyplayer based video implementation, it's possible that ``eos`` gets set from frame fetching thread after the video has been unload, which results in an ``AttributeError``, since ``self._video`` gets set to ``None`` in ``kivy.uix.Video.unload``.\r\n\r\n**Proposed fix**\r\nAdd additional check whether ``self._video`` is set in ``_do_eos`` (https://github.com/kivy/kivy/blob/master/kivy/uix/video.py#L260)\r\n\r\n```python\r\n def _on_eos(self, *largs):\r\n if not self._video or self._video.eos != 'loop':\r\n self.state = 'stop'\r\n self.eos = True\r\n```\r\n\r\nAny objections? Otherwise i'd create a PR for this.\n", "before_files": [{"content": "'''\nVideo\n=====\n\nThe :class:`Video` widget is used to display video files and streams.\nDepending on your Video core provider, platform, and plugins, you will\nbe able to play different formats. For example, the pygame video\nprovider only supports MPEG1 on Linux and OSX. GStreamer is more\nversatile, and can read many video containers and codecs such as MKV,\nOGV, AVI, MOV, FLV (if the correct gstreamer plugins are installed). Our\n:class:`~kivy.core.video.VideoBase` implementation is used under the\nhood.\n\nVideo loading is asynchronous - many properties are not available until\nthe video is loaded (when the texture is created)::\n\n def on_position_change(instance, value):\n print('The position in the video is', value)\n\n def on_duration_change(instance, value):\n print('The duration of the video is', value)\n\n video = Video(source='PandaSneezes.avi')\n video.bind(\n position=on_position_change,\n duration=on_duration_change\n )\n\nOne can define a preview image which gets displayed until the video is\nstarted/loaded by passing ``preview`` to the constructor::\n\n video = Video(\n source='PandaSneezes.avi',\n preview='PandaSneezes_preview.png'\n )\n\nOne can display the placeholder image when the video stops by reacting on eos::\n\n def on_eos_change(self, inst, val):\n if val and self.preview:\n self.set_texture_from_resource(self.preview)\n\n video.bind(eos=on_eos_change)\n'''\n\n__all__ = ('Video', )\n\nfrom kivy.clock import Clock\nfrom kivy.uix.image import Image\nfrom kivy.core.video import Video as CoreVideo\nfrom kivy.resources import resource_find\nfrom kivy.properties import (BooleanProperty, NumericProperty, ObjectProperty,\n OptionProperty, StringProperty)\n\n\nclass Video(Image):\n '''Video class. See module documentation for more information.\n '''\n\n preview = StringProperty(None, allownone=True)\n '''Filename / source of a preview image displayed before video starts.\n\n :attr:`preview` is a :class:`~kivy.properties.StringProperty` and\n defaults to None.\n\n If set, it gets displayed until the video is loaded/started.\n\n .. 
versionadded:: 2.1.0\n '''\n\n state = OptionProperty('stop', options=('play', 'pause', 'stop'))\n '''String, indicates whether to play, pause, or stop the video::\n\n # start playing the video at creation\n video = Video(source='movie.mkv', state='play')\n\n # create the video, and start later\n video = Video(source='movie.mkv')\n # and later\n video.state = 'play'\n\n :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults\n to 'stop'.\n '''\n\n play = BooleanProperty(False, deprecated=True)\n '''\n .. deprecated:: 1.4.0\n Use :attr:`state` instead.\n\n Boolean, indicates whether the video is playing or not.\n You can start/stop the video by setting this property::\n\n # start playing the video at creation\n video = Video(source='movie.mkv', play=True)\n\n # create the video, and start later\n video = Video(source='movie.mkv')\n # and later\n video.play = True\n\n :attr:`play` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.\n\n .. deprecated:: 1.4.0\n Use :attr:`state` instead.\n '''\n\n eos = BooleanProperty(False)\n '''Boolean, indicates whether the video has finished playing or not\n (reached the end of the stream).\n\n :attr:`eos` is a :class:`~kivy.properties.BooleanProperty` and defaults to\n False.\n '''\n\n loaded = BooleanProperty(False)\n '''Boolean, indicates whether the video is loaded and ready for playback\n or not.\n\n .. versionadded:: 1.6.0\n\n :attr:`loaded` is a :class:`~kivy.properties.BooleanProperty` and defaults\n to False.\n '''\n\n position = NumericProperty(-1)\n '''Position of the video between 0 and :attr:`duration`. The position\n defaults to -1 and is set to a real position when the video is loaded.\n\n :attr:`position` is a :class:`~kivy.properties.NumericProperty` and\n defaults to -1.\n '''\n\n duration = NumericProperty(-1)\n '''Duration of the video. The duration defaults to -1, and is set to a real\n duration when the video is loaded.\n\n :attr:`duration` is a :class:`~kivy.properties.NumericProperty` and\n defaults to -1.\n '''\n\n volume = NumericProperty(1.)\n '''Volume of the video, in the range 0-1. 1 means full volume, 0\n means mute.\n\n :attr:`volume` is a :class:`~kivy.properties.NumericProperty` and defaults\n to 1.\n '''\n\n options = ObjectProperty({})\n '''Options to pass at Video core object creation.\n\n .. versionadded:: 1.0.4\n\n :attr:`options` is an :class:`kivy.properties.ObjectProperty` and defaults\n to {}.\n '''\n\n _video_load_event = None\n\n def __init__(self, **kwargs):\n self._video = None\n super(Video, self).__init__(**kwargs)\n self.fbind('source', self._trigger_video_load)\n\n if \"eos\" in kwargs:\n self.options[\"eos\"] = kwargs[\"eos\"]\n if self.source:\n self._trigger_video_load()\n\n def texture_update(self, *largs):\n if self.preview:\n self.set_texture_from_resource(self.preview)\n else:\n self.set_texture_from_resource(self.source)\n\n def seek(self, percent, precise=True):\n '''Change the position to a percentage (strictly, a proportion)\n of duration.\n\n :Parameters:\n `percent`: float or int\n Position to seek as a proportion of the total duration,\n must be between 0-1.\n `precise`: bool, defaults to True\n Precise seeking is slower, but seeks to exact requested\n percent.\n\n .. warning::\n Calling seek() before the video is loaded has no effect.\n\n .. versionadded:: 1.2.0\n\n .. 
versionchanged:: 1.10.1\n The `precise` keyword argument has been added.\n '''\n if self._video is None:\n raise Exception('Video not loaded.')\n self._video.seek(percent, precise=precise)\n\n def _trigger_video_load(self, *largs):\n ev = self._video_load_event\n if ev is None:\n ev = self._video_load_event = Clock.schedule_once(\n self._do_video_load, -1)\n ev()\n\n def _do_video_load(self, *largs):\n if CoreVideo is None:\n return\n self.unload()\n if not self.source:\n self._video = None\n self.texture = None\n else:\n filename = self.source\n # Check if filename is not url\n if '://' not in filename:\n filename = resource_find(filename)\n self._video = CoreVideo(filename=filename, **self.options)\n self._video.volume = self.volume\n self._video.bind(on_load=self._on_load,\n on_frame=self._on_video_frame,\n on_eos=self._on_eos)\n if self.state == 'play' or self.play:\n self._video.play()\n self.duration = 1.\n self.position = 0.\n\n def on_play(self, instance, value):\n value = 'play' if value else 'stop'\n return self.on_state(instance, value)\n\n def on_state(self, instance, value):\n if not self._video:\n return\n if value == 'play':\n if self.eos:\n self._video.stop()\n self._video.position = 0.\n self.eos = False\n self._video.play()\n elif value == 'pause':\n self._video.pause()\n else:\n self._video.stop()\n self._video.position = 0\n\n def _on_video_frame(self, *largs):\n video = self._video\n if not video:\n return\n self.duration = video.duration\n self.position = video.position\n self.texture = video.texture\n self.canvas.ask_update()\n\n def _on_eos(self, *largs):\n if self._video.eos != 'loop':\n self.state = 'stop'\n self.eos = True\n\n def _on_load(self, *largs):\n self.loaded = True\n self._on_video_frame(largs)\n\n def on_volume(self, instance, value):\n if self._video:\n self._video.volume = value\n\n def unload(self):\n '''Unload the video. The playback will be stopped.\n\n .. versionadded:: 1.8.0\n '''\n if self._video:\n self._video.stop()\n self._video.unload()\n self._video = None\n self.loaded = False\n\n\nif __name__ == '__main__':\n from kivy.app import App\n import sys\n\n if len(sys.argv) != 2:\n print(\"usage: %s file\" % sys.argv[0])\n sys.exit(1)\n\n class VideoApp(App):\n def build(self):\n self.v = Video(source=sys.argv[1], state='play')\n self.v.bind(state=self.replay)\n return self.v\n\n def replay(self, *args):\n if self.v.state == 'stop':\n self.v.state = 'play'\n\n VideoApp().run()\n", "path": "kivy/uix/video.py"}]}
| 3,728 | 112 |
gh_patches_debug_12409
|
rasdani/github-patches
|
git_diff
|
conan-io__conan-7262
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Unable to `python_requires_extend` a package with a '.' in the name
### Environment Details (include every applicable attribute)
* Operating System+version: Windows 10
* Conan version: 1.25.1
* Python version: 3.8.1
### Steps to reproduce (Include if Applicable)
- Create a package with a '.' in the name (E.g. my.package/1.2.3)
- Create a recipe for another package with
```
python_requires = 'my.package/1.2.3'
python_requires_extend = 'my.package.MyPackage'
```
- Receive the error ERROR: too many values to unpack (expected 2)
</issue>
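To make the failure concrete, here is a standalone sketch (not taken from the Conan sources; the dotted name is the hypothetical `my.package` example from the report). It shows why splitting on every dot breaks the two-way unpack, and why splitting only on the last dot keeps the package name intact, which is the shape of the fix shown in the diff below:

```python
# Standalone illustration of the unpacking failure described in the issue.
extend = "my.package.MyPackage"

try:
    pkg_name, base_class_name = extend.split(".")
except ValueError as exc:
    print(exc)  # too many values to unpack (expected 2)

# Splitting only on the last dot keeps 'my.package' together.
pkg_name, base_class_name = extend.rsplit(".", 1)
print(pkg_name, base_class_name)  # my.package MyPackage
```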
<code>
[start of conans/client/graph/python_requires.py]
1 import os
2 from collections import namedtuple
3 from contextlib import contextmanager
4
5 from conans.client.loader import parse_conanfile
6 from conans.client.recorder.action_recorder import ActionRecorder
7 from conans.errors import ConanException, NotFoundException
8 from conans.model.ref import ConanFileReference
9 from conans.model.requires import Requirement
10 from conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR
11 from conans.util.conan_v2_mode import conan_v2_behavior
12
13 PythonRequire = namedtuple("PythonRequire", ["ref", "module", "conanfile",
14 "exports_folder", "exports_sources_folder"])
15
16
17 class PyRequire(object):
18 def __init__(self, module, conanfile, ref, path):
19 self.module = module
20 self.conanfile = conanfile
21 self.ref = ref
22 self.path = path
23
24
25 class PyRequires(object):
26 """ this is the object that replaces the declared conanfile.py_requires"""
27 def __init__(self):
28 self._pyrequires = {} # {pkg-name: PythonRequire}
29 self._transitive = {}
30
31 def update_transitive(self, conanfile):
32 transitive = getattr(conanfile, "python_requires", None)
33 if not transitive:
34 return
35 for name, transitive_py_require in transitive.all_items():
36 existing = self._pyrequires.get(name)
37 if existing and existing.ref != transitive_py_require.ref:
38 raise ConanException("Conflict in py_requires %s - %s"
39 % (existing.ref, transitive_py_require.ref))
40 self._transitive[name] = transitive_py_require
41
42 def all_items(self):
43 new_dict = self._pyrequires.copy()
44 new_dict.update(self._transitive)
45 return new_dict.items()
46
47 def all_refs(self):
48 return ([r.ref for r in self._pyrequires.values()] +
49 [r.ref for r in self._transitive.values()])
50
51 def items(self):
52 return self._pyrequires.items()
53
54 def __getitem__(self, item):
55 try:
56 return self._pyrequires[item]
57 except KeyError:
58 raise ConanException("'%s' is not a python_require" % item)
59
60 def __setitem__(self, key, value):
61 # single item assignment, direct
62 existing = self._pyrequires.get(key)
63 if existing:
64 raise ConanException("The python_require '%s' already exists" % key)
65 self._pyrequires[key] = value
66
67
68 class PyRequireLoader(object):
69 def __init__(self, proxy, range_resolver):
70 self._proxy = proxy
71 self._range_resolver = range_resolver
72 self._cached_py_requires = {}
73
74 def enable_remotes(self, check_updates=False, update=False, remotes=None):
75 self._check_updates = check_updates
76 self._update = update
77 self._remotes = remotes
78
79 @contextmanager
80 def capture_requires(self):
81 # DO nothing, just to stay compatible with the interface of python_requires
82 yield []
83
84 def load_py_requires(self, conanfile, lock_python_requires, loader):
85 if not hasattr(conanfile, "python_requires") or isinstance(conanfile.python_requires, dict):
86 return
87 py_requires_refs = conanfile.python_requires
88 if isinstance(py_requires_refs, str):
89 py_requires_refs = [py_requires_refs, ]
90
91 py_requires = self._resolve_py_requires(py_requires_refs, lock_python_requires, loader)
92 if hasattr(conanfile, "python_requires_extend"):
93 py_requires_extend = conanfile.python_requires_extend
94 if isinstance(py_requires_extend, str):
95 py_requires_extend = [py_requires_extend, ]
96 for p in py_requires_extend:
97 pkg_name, base_class_name = p.split(".")
98 base_class = getattr(py_requires[pkg_name].module, base_class_name)
99 conanfile.__bases__ = (base_class,) + conanfile.__bases__
100 conanfile.python_requires = py_requires
101
102 def _resolve_py_requires(self, py_requires_refs, lock_python_requires, loader):
103 result = PyRequires()
104 for py_requires_ref in py_requires_refs:
105 py_requires_ref = self._resolve_ref(py_requires_ref, lock_python_requires)
106 try:
107 py_require = self._cached_py_requires[py_requires_ref]
108 except KeyError:
109 conanfile, module, new_ref, path = self._load_pyreq_conanfile(loader,
110 lock_python_requires,
111 py_requires_ref)
112 py_require = PyRequire(module, conanfile, new_ref, path)
113 self._cached_py_requires[py_requires_ref] = py_require
114 result[py_require.ref.name] = py_require
115 # Update transitive and check conflicts
116 result.update_transitive(py_require.conanfile)
117 return result
118
119 def _resolve_ref(self, py_requires_ref, lock_python_requires):
120 ref = ConanFileReference.loads(py_requires_ref)
121 if lock_python_requires:
122 locked = {r.name: r for r in lock_python_requires}[ref.name]
123 ref = locked
124 else:
125 requirement = Requirement(ref)
126 self._range_resolver.resolve(requirement, "py_require", update=self._update,
127 remotes=self._remotes)
128 ref = requirement.ref
129 return ref
130
131 def _load_pyreq_conanfile(self, loader, lock_python_requires, ref):
132 recipe = self._proxy.get_recipe(ref, self._check_updates, self._update,
133 remotes=self._remotes, recorder=ActionRecorder())
134 path, _, _, new_ref = recipe
135 conanfile, module = loader.load_basic_module(path, lock_python_requires, user=new_ref.user,
136 channel=new_ref.channel)
137 conanfile.name = new_ref.name
138 conanfile.version = str(new_ref.version) \
139 if os.environ.get(CONAN_V2_MODE_ENVVAR, False) else new_ref.version
140
141 if getattr(conanfile, "alias", None):
142 ref = ConanFileReference.loads(conanfile.alias)
143 conanfile, module, new_ref, path = self._load_pyreq_conanfile(loader,
144 lock_python_requires,
145 ref)
146 return conanfile, module, new_ref, os.path.dirname(path)
147
148
149 class ConanPythonRequire(object):
150 def __init__(self, proxy, range_resolver):
151 self._cached_requires = {} # {reference: PythonRequire}
152 self._proxy = proxy
153 self._range_resolver = range_resolver
154 self._requires = None
155 self.valid = True
156 self._check_updates = False
157 self._update = False
158 self._remote_name = None
159 self.locked_versions = None
160
161 def enable_remotes(self, check_updates=False, update=False, remotes=None):
162 self._check_updates = check_updates
163 self._update = update
164 self._remotes = remotes
165
166 @contextmanager
167 def capture_requires(self):
168 old_requires = self._requires
169 self._requires = []
170 yield self._requires
171 self._requires = old_requires
172
173 def _look_for_require(self, reference):
174 ref = ConanFileReference.loads(reference)
175 ref = self.locked_versions[ref.name] if self.locked_versions is not None else ref
176 try:
177 python_require = self._cached_requires[ref]
178 except KeyError:
179 requirement = Requirement(ref)
180 self._range_resolver.resolve(requirement, "python_require", update=self._update,
181 remotes=self._remotes)
182 ref = requirement.ref
183 result = self._proxy.get_recipe(ref, self._check_updates, self._update,
184 remotes=self._remotes,
185 recorder=ActionRecorder())
186 path, _, _, new_ref = result
187 module, conanfile = parse_conanfile(conanfile_path=path, python_requires=self)
188
189 # Check for alias
190 if getattr(conanfile, "alias", None):
191 # Will register also the aliased
192 python_require = self._look_for_require(conanfile.alias)
193 else:
194 package_layout = self._proxy._cache.package_layout(new_ref, conanfile.short_paths)
195 exports_sources_folder = package_layout.export_sources()
196 exports_folder = package_layout.export()
197 python_require = PythonRequire(new_ref, module, conanfile,
198 exports_folder, exports_sources_folder)
199 self._cached_requires[ref] = python_require
200
201 return python_require
202
203 def __call__(self, reference):
204 conan_v2_behavior("Old syntax for python_requires is deprecated")
205 if not self.valid:
206 raise ConanException("Invalid use of python_requires(%s)" % reference)
207 try:
208 python_req = self._look_for_require(reference)
209 self._requires.append(python_req)
210 return python_req.module
211 except NotFoundException:
212 raise ConanException('Unable to find python_requires("{}") in remotes'.format(reference))
213
[end of conans/client/graph/python_requires.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conans/client/graph/python_requires.py b/conans/client/graph/python_requires.py
--- a/conans/client/graph/python_requires.py
+++ b/conans/client/graph/python_requires.py
@@ -94,7 +94,7 @@
if isinstance(py_requires_extend, str):
py_requires_extend = [py_requires_extend, ]
for p in py_requires_extend:
- pkg_name, base_class_name = p.split(".")
+ pkg_name, base_class_name = p.rsplit(".", 1)
base_class = getattr(py_requires[pkg_name].module, base_class_name)
conanfile.__bases__ = (base_class,) + conanfile.__bases__
conanfile.python_requires = py_requires
|
{"golden_diff": "diff --git a/conans/client/graph/python_requires.py b/conans/client/graph/python_requires.py\n--- a/conans/client/graph/python_requires.py\n+++ b/conans/client/graph/python_requires.py\n@@ -94,7 +94,7 @@\n if isinstance(py_requires_extend, str):\n py_requires_extend = [py_requires_extend, ]\n for p in py_requires_extend:\n- pkg_name, base_class_name = p.split(\".\")\n+ pkg_name, base_class_name = p.rsplit(\".\", 1)\n base_class = getattr(py_requires[pkg_name].module, base_class_name)\n conanfile.__bases__ = (base_class,) + conanfile.__bases__\n conanfile.python_requires = py_requires\n", "issue": "[bug] Unable to `python_requires_extend` a package with a '.' in the name\n### Environment Details (include every applicable attribute)\r\n * Operating System+version: Windows 10\r\n * Conan version: 1.25.1\r\n * Python version: 3.8.1\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n- Create a package with a '.' in the name (E.g. my.package/1.2.3)\r\n- Create a recipe for another package with \r\n```\r\npython_requires = 'my.package/1.2.3'\r\npython_requires_extend = 'my.package.MyPackage'\r\n```\r\n\r\n- Receive the error ERROR: too many values to unpack (expected 2)\r\n\n", "before_files": [{"content": "import os\nfrom collections import namedtuple\nfrom contextlib import contextmanager\n\nfrom conans.client.loader import parse_conanfile\nfrom conans.client.recorder.action_recorder import ActionRecorder\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.model.ref import ConanFileReference\nfrom conans.model.requires import Requirement\nfrom conans.util.conan_v2_mode import CONAN_V2_MODE_ENVVAR\nfrom conans.util.conan_v2_mode import conan_v2_behavior\n\nPythonRequire = namedtuple(\"PythonRequire\", [\"ref\", \"module\", \"conanfile\",\n \"exports_folder\", \"exports_sources_folder\"])\n\n\nclass PyRequire(object):\n def __init__(self, module, conanfile, ref, path):\n self.module = module\n self.conanfile = conanfile\n self.ref = ref\n self.path = path\n\n\nclass PyRequires(object):\n \"\"\" this is the object that replaces the declared conanfile.py_requires\"\"\"\n def __init__(self):\n self._pyrequires = {} # {pkg-name: PythonRequire}\n self._transitive = {}\n\n def update_transitive(self, conanfile):\n transitive = getattr(conanfile, \"python_requires\", None)\n if not transitive:\n return\n for name, transitive_py_require in transitive.all_items():\n existing = self._pyrequires.get(name)\n if existing and existing.ref != transitive_py_require.ref:\n raise ConanException(\"Conflict in py_requires %s - %s\"\n % (existing.ref, transitive_py_require.ref))\n self._transitive[name] = transitive_py_require\n\n def all_items(self):\n new_dict = self._pyrequires.copy()\n new_dict.update(self._transitive)\n return new_dict.items()\n\n def all_refs(self):\n return ([r.ref for r in self._pyrequires.values()] +\n [r.ref for r in self._transitive.values()])\n\n def items(self):\n return self._pyrequires.items()\n\n def __getitem__(self, item):\n try:\n return self._pyrequires[item]\n except KeyError:\n raise ConanException(\"'%s' is not a python_require\" % item)\n\n def __setitem__(self, key, value):\n # single item assignment, direct\n existing = self._pyrequires.get(key)\n if existing:\n raise ConanException(\"The python_require '%s' already exists\" % key)\n self._pyrequires[key] = value\n\n\nclass PyRequireLoader(object):\n def __init__(self, proxy, range_resolver):\n self._proxy = proxy\n self._range_resolver = range_resolver\n self._cached_py_requires 
= {}\n\n def enable_remotes(self, check_updates=False, update=False, remotes=None):\n self._check_updates = check_updates\n self._update = update\n self._remotes = remotes\n\n @contextmanager\n def capture_requires(self):\n # DO nothing, just to stay compatible with the interface of python_requires\n yield []\n\n def load_py_requires(self, conanfile, lock_python_requires, loader):\n if not hasattr(conanfile, \"python_requires\") or isinstance(conanfile.python_requires, dict):\n return\n py_requires_refs = conanfile.python_requires\n if isinstance(py_requires_refs, str):\n py_requires_refs = [py_requires_refs, ]\n\n py_requires = self._resolve_py_requires(py_requires_refs, lock_python_requires, loader)\n if hasattr(conanfile, \"python_requires_extend\"):\n py_requires_extend = conanfile.python_requires_extend\n if isinstance(py_requires_extend, str):\n py_requires_extend = [py_requires_extend, ]\n for p in py_requires_extend:\n pkg_name, base_class_name = p.split(\".\")\n base_class = getattr(py_requires[pkg_name].module, base_class_name)\n conanfile.__bases__ = (base_class,) + conanfile.__bases__\n conanfile.python_requires = py_requires\n\n def _resolve_py_requires(self, py_requires_refs, lock_python_requires, loader):\n result = PyRequires()\n for py_requires_ref in py_requires_refs:\n py_requires_ref = self._resolve_ref(py_requires_ref, lock_python_requires)\n try:\n py_require = self._cached_py_requires[py_requires_ref]\n except KeyError:\n conanfile, module, new_ref, path = self._load_pyreq_conanfile(loader,\n lock_python_requires,\n py_requires_ref)\n py_require = PyRequire(module, conanfile, new_ref, path)\n self._cached_py_requires[py_requires_ref] = py_require\n result[py_require.ref.name] = py_require\n # Update transitive and check conflicts\n result.update_transitive(py_require.conanfile)\n return result\n\n def _resolve_ref(self, py_requires_ref, lock_python_requires):\n ref = ConanFileReference.loads(py_requires_ref)\n if lock_python_requires:\n locked = {r.name: r for r in lock_python_requires}[ref.name]\n ref = locked\n else:\n requirement = Requirement(ref)\n self._range_resolver.resolve(requirement, \"py_require\", update=self._update,\n remotes=self._remotes)\n ref = requirement.ref\n return ref\n\n def _load_pyreq_conanfile(self, loader, lock_python_requires, ref):\n recipe = self._proxy.get_recipe(ref, self._check_updates, self._update,\n remotes=self._remotes, recorder=ActionRecorder())\n path, _, _, new_ref = recipe\n conanfile, module = loader.load_basic_module(path, lock_python_requires, user=new_ref.user,\n channel=new_ref.channel)\n conanfile.name = new_ref.name\n conanfile.version = str(new_ref.version) \\\n if os.environ.get(CONAN_V2_MODE_ENVVAR, False) else new_ref.version\n\n if getattr(conanfile, \"alias\", None):\n ref = ConanFileReference.loads(conanfile.alias)\n conanfile, module, new_ref, path = self._load_pyreq_conanfile(loader,\n lock_python_requires,\n ref)\n return conanfile, module, new_ref, os.path.dirname(path)\n\n\nclass ConanPythonRequire(object):\n def __init__(self, proxy, range_resolver):\n self._cached_requires = {} # {reference: PythonRequire}\n self._proxy = proxy\n self._range_resolver = range_resolver\n self._requires = None\n self.valid = True\n self._check_updates = False\n self._update = False\n self._remote_name = None\n self.locked_versions = None\n\n def enable_remotes(self, check_updates=False, update=False, remotes=None):\n self._check_updates = check_updates\n self._update = update\n self._remotes = remotes\n\n @contextmanager\n 
def capture_requires(self):\n old_requires = self._requires\n self._requires = []\n yield self._requires\n self._requires = old_requires\n\n def _look_for_require(self, reference):\n ref = ConanFileReference.loads(reference)\n ref = self.locked_versions[ref.name] if self.locked_versions is not None else ref\n try:\n python_require = self._cached_requires[ref]\n except KeyError:\n requirement = Requirement(ref)\n self._range_resolver.resolve(requirement, \"python_require\", update=self._update,\n remotes=self._remotes)\n ref = requirement.ref\n result = self._proxy.get_recipe(ref, self._check_updates, self._update,\n remotes=self._remotes,\n recorder=ActionRecorder())\n path, _, _, new_ref = result\n module, conanfile = parse_conanfile(conanfile_path=path, python_requires=self)\n\n # Check for alias\n if getattr(conanfile, \"alias\", None):\n # Will register also the aliased\n python_require = self._look_for_require(conanfile.alias)\n else:\n package_layout = self._proxy._cache.package_layout(new_ref, conanfile.short_paths)\n exports_sources_folder = package_layout.export_sources()\n exports_folder = package_layout.export()\n python_require = PythonRequire(new_ref, module, conanfile,\n exports_folder, exports_sources_folder)\n self._cached_requires[ref] = python_require\n\n return python_require\n\n def __call__(self, reference):\n conan_v2_behavior(\"Old syntax for python_requires is deprecated\")\n if not self.valid:\n raise ConanException(\"Invalid use of python_requires(%s)\" % reference)\n try:\n python_req = self._look_for_require(reference)\n self._requires.append(python_req)\n return python_req.module\n except NotFoundException:\n raise ConanException('Unable to find python_requires(\"{}\") in remotes'.format(reference))\n", "path": "conans/client/graph/python_requires.py"}]}
| 3,121 | 155 |
gh_patches_debug_21630
|
rasdani/github-patches
|
git_diff
|
ipython__ipython-4599
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Qtconsole docstring pop-up fails on method containing defaulted enum argument
[We've found](http://trac.mantidproject.org/mantid/ticket/8422) that an error is generated in the (admittedly rare) situation where a C++ enum is exposed to Python via boost python and included in a python function as a default to an argument. This is seen in IPython 1.1 and the current tip of master.
Here's the simplest example I could come up with, though it does still require compiling C++ and linking to boost python. In a C++ file:
``` c++
#include <boost/python.hpp>
enum MyEnum
{
Red,
Yellow,
Blue
};
BOOST_PYTHON_MODULE(enum_test)
{
using namespace boost::python;
enum_<MyEnum>("MyEnum")
.value("Red", Red)
.value("Yellow", Yellow)
.value("Blue", Blue);
}
```
This should be compiled to a shared library with something like `gcc -fPIC -I /usr/include/python2.6 -shared -o enum_test.so enum_test.cpp -lboost_python`
Then, in the IPython qtconsole enter:
```
In [1]: import enum_test
In [2]: def MyFunc(color = enum_test.MyEnum.Red):
...: pass
...:
In [3]: MyFunc(
```
On typing the opening parenthesis a stack trace will appear that culminates in:
```
File "/usr/lib/python2.6/site-packages/IPython/kernel/zmq/session.py", line 83, in <lambda>
json_unpacker = lambda s: extract_dates(jsonapi.loads(s))
File "/usr/lib64/python2.6/site-packages/zmq/utils/jsonapi.py", line 81, in loads
return jsonmod.loads(s, **kwargs)
File "/usr/lib64/python2.6/site-packages/simplejson/__init__.py", line 307, in loads
return _default_decoder.decode(s)
File "/usr/lib64/python2.6/site-packages/simplejson/decoder.py", line 335, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "/usr/lib64/python2.6/site-packages/simplejson/decoder.py", line 353, in raw_decode
raise ValueError("No JSON object could be decoded")
```
The string that's going into the raw_decode function of decoder.py and leads to the exception is:
```
{"base_class":"<type 'function'>","init_definition":null,"type_name":"function","name":"MyFunc","isclass":null,"namespace":"Interactive","isalias":false,"init_docstring":null,"argspec":{"args":["color"],"varkw":null,"defaults":[Red],"varargs":null},"source":null,"length":null,"call_def":null,"call_docstring":null,"file":"/home/enumproblem/<ipython-input-2-b6e10dea3e06>","string_form":"<function MyFunc at 0x16a8de8>","found":true,"class_docstring":null,"definition":"\u001b[0mMyFunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcolor\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0menum_test\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMyEnum\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mRed\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n","docstring":"<no docstring>","ismagic":false}
```
</issue>
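A pure-Python sketch of the same failure, with no boost compilation required. `MyEnumValue` below is a hypothetical stand-in for a boost enum member: an `int` subclass whose string form is the member name rather than a number. Because `json_clean` (listed below) treats any `int` instance as atomic and returns it unchanged, the enum value reaches the JSON encoder untouched, and on the Python and simplejson versions in the report the encoder emitted its string form, leaving a bare `Red` token in the output. Casting down to a plain `int` avoids the subclass entirely:

```python
class MyEnumValue(int):
    """Hypothetical stand-in for a boost::python enum value."""
    def __str__(self):
        return "Red"
    __repr__ = __str__

red = MyEnumValue(0)

# What the serialized argspec ends up containing when the subclass leaks through:
print('{"defaults": [%s]}' % red)       # {"defaults": [Red]}  -> not decodable as JSON

# Casting to a plain int before encoding produces valid JSON again.
print('{"defaults": [%s]}' % int(red))  # {"defaults": [0]}    -> decodable
```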
<code>
[start of IPython/utils/jsonutil.py]
1 """Utilities to manipulate JSON objects.
2 """
3 #-----------------------------------------------------------------------------
4 # Copyright (C) 2010-2011 The IPython Development Team
5 #
6 # Distributed under the terms of the BSD License. The full license is in
7 # the file COPYING.txt, distributed as part of this software.
8 #-----------------------------------------------------------------------------
9
10 #-----------------------------------------------------------------------------
11 # Imports
12 #-----------------------------------------------------------------------------
13 # stdlib
14 import math
15 import re
16 import types
17 from datetime import datetime
18
19 try:
20 # base64.encodestring is deprecated in Python 3.x
21 from base64 import encodebytes
22 except ImportError:
23 # Python 2.x
24 from base64 import encodestring as encodebytes
25
26 from IPython.utils import py3compat
27 from IPython.utils.py3compat import string_types, unicode_type, iteritems
28 from IPython.utils.encoding import DEFAULT_ENCODING
29 next_attr_name = '__next__' if py3compat.PY3 else 'next'
30
31 #-----------------------------------------------------------------------------
32 # Globals and constants
33 #-----------------------------------------------------------------------------
34
35 # timestamp formats
36 ISO8601 = "%Y-%m-%dT%H:%M:%S.%f"
37 ISO8601_PAT=re.compile(r"^(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{1,6})Z?([\+\-]\d{2}:?\d{2})?$")
38
39 #-----------------------------------------------------------------------------
40 # Classes and functions
41 #-----------------------------------------------------------------------------
42
43 def rekey(dikt):
44 """Rekey a dict that has been forced to use str keys where there should be
45 ints by json."""
46 for k in dikt:
47 if isinstance(k, string_types):
48 ik=fk=None
49 try:
50 ik = int(k)
51 except ValueError:
52 try:
53 fk = float(k)
54 except ValueError:
55 continue
56 if ik is not None:
57 nk = ik
58 else:
59 nk = fk
60 if nk in dikt:
61 raise KeyError("already have key %r"%nk)
62 dikt[nk] = dikt.pop(k)
63 return dikt
64
65 def parse_date(s):
66 """parse an ISO8601 date string
67
68 If it is None or not a valid ISO8601 timestamp,
69 it will be returned unmodified.
70 Otherwise, it will return a datetime object.
71 """
72 if s is None:
73 return s
74 m = ISO8601_PAT.match(s)
75 if m:
76 # FIXME: add actual timezone support
77 # this just drops the timezone info
78 notz = m.groups()[0]
79 return datetime.strptime(notz, ISO8601)
80 return s
81
82 def extract_dates(obj):
83 """extract ISO8601 dates from unpacked JSON"""
84 if isinstance(obj, dict):
85 new_obj = {} # don't clobber
86 for k,v in iteritems(obj):
87 new_obj[k] = extract_dates(v)
88 obj = new_obj
89 elif isinstance(obj, (list, tuple)):
90 obj = [ extract_dates(o) for o in obj ]
91 elif isinstance(obj, string_types):
92 obj = parse_date(obj)
93 return obj
94
95 def squash_dates(obj):
96 """squash datetime objects into ISO8601 strings"""
97 if isinstance(obj, dict):
98 obj = dict(obj) # don't clobber
99 for k,v in iteritems(obj):
100 obj[k] = squash_dates(v)
101 elif isinstance(obj, (list, tuple)):
102 obj = [ squash_dates(o) for o in obj ]
103 elif isinstance(obj, datetime):
104 obj = obj.isoformat()
105 return obj
106
107 def date_default(obj):
108 """default function for packing datetime objects in JSON."""
109 if isinstance(obj, datetime):
110 return obj.isoformat()
111 else:
112 raise TypeError("%r is not JSON serializable"%obj)
113
114
115 # constants for identifying png/jpeg data
116 PNG = b'\x89PNG\r\n\x1a\n'
117 # front of PNG base64-encoded
118 PNG64 = b'iVBORw0KG'
119 JPEG = b'\xff\xd8'
120 # front of JPEG base64-encoded
121 JPEG64 = b'/9'
122
123 def encode_images(format_dict):
124 """b64-encodes images in a displaypub format dict
125
126 Perhaps this should be handled in json_clean itself?
127
128 Parameters
129 ----------
130
131 format_dict : dict
132 A dictionary of display data keyed by mime-type
133
134 Returns
135 -------
136
137 format_dict : dict
138 A copy of the same dictionary,
139 but binary image data ('image/png' or 'image/jpeg')
140 is base64-encoded.
141
142 """
143 encoded = format_dict.copy()
144
145 pngdata = format_dict.get('image/png')
146 if isinstance(pngdata, bytes):
147 # make sure we don't double-encode
148 if not pngdata.startswith(PNG64):
149 pngdata = encodebytes(pngdata)
150 encoded['image/png'] = pngdata.decode('ascii')
151
152 jpegdata = format_dict.get('image/jpeg')
153 if isinstance(jpegdata, bytes):
154 # make sure we don't double-encode
155 if not jpegdata.startswith(JPEG64):
156 jpegdata = encodebytes(jpegdata)
157 encoded['image/jpeg'] = jpegdata.decode('ascii')
158
159 return encoded
160
161
162 def json_clean(obj):
163 """Clean an object to ensure it's safe to encode in JSON.
164
165 Atomic, immutable objects are returned unmodified. Sets and tuples are
166 converted to lists, lists are copied and dicts are also copied.
167
168 Note: dicts whose keys could cause collisions upon encoding (such as a dict
169 with both the number 1 and the string '1' as keys) will cause a ValueError
170 to be raised.
171
172 Parameters
173 ----------
174 obj : any python object
175
176 Returns
177 -------
178 out : object
179
180 A version of the input which will not cause an encoding error when
181 encoded as JSON. Note that this function does not *encode* its inputs,
182 it simply sanitizes it so that there will be no encoding errors later.
183
184 Examples
185 --------
186 >>> json_clean(4)
187 4
188 >>> json_clean(list(range(10)))
189 [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
190 >>> sorted(json_clean(dict(x=1, y=2)).items())
191 [('x', 1), ('y', 2)]
192 >>> sorted(json_clean(dict(x=1, y=2, z=[1,2,3])).items())
193 [('x', 1), ('y', 2), ('z', [1, 2, 3])]
194 >>> json_clean(True)
195 True
196 """
197 # types that are 'atomic' and ok in json as-is. bool doesn't need to be
198 # listed explicitly because bools pass as int instances
199 atomic_ok = (unicode_type, int, type(None))
200
201 # containers that we need to convert into lists
202 container_to_list = (tuple, set, types.GeneratorType)
203
204 if isinstance(obj, float):
205 # cast out-of-range floats to their reprs
206 if math.isnan(obj) or math.isinf(obj):
207 return repr(obj)
208 return obj
209
210 if isinstance(obj, atomic_ok):
211 return obj
212
213 if isinstance(obj, bytes):
214 return obj.decode(DEFAULT_ENCODING, 'replace')
215
216 if isinstance(obj, container_to_list) or (
217 hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):
218 obj = list(obj)
219
220 if isinstance(obj, list):
221 return [json_clean(x) for x in obj]
222
223 if isinstance(obj, dict):
224 # First, validate that the dict won't lose data in conversion due to
225 # key collisions after stringification. This can happen with keys like
226 # True and 'true' or 1 and '1', which collide in JSON.
227 nkeys = len(obj)
228 nkeys_collapsed = len(set(map(str, obj)))
229 if nkeys != nkeys_collapsed:
230 raise ValueError('dict can not be safely converted to JSON: '
231 'key collision would lead to dropped values')
232 # If all OK, proceed by making the new dict that will be json-safe
233 out = {}
234 for k,v in iteritems(obj):
235 out[str(k)] = json_clean(v)
236 return out
237
238 # If we get here, we don't know how to handle the object, so we just get
239 # its repr and return that. This will catch lambdas, open sockets, class
240 # objects, and any other complicated contraption that json can't encode
241 return repr(obj)
242
[end of IPython/utils/jsonutil.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/IPython/utils/jsonutil.py b/IPython/utils/jsonutil.py
--- a/IPython/utils/jsonutil.py
+++ b/IPython/utils/jsonutil.py
@@ -194,9 +194,8 @@
>>> json_clean(True)
True
"""
- # types that are 'atomic' and ok in json as-is. bool doesn't need to be
- # listed explicitly because bools pass as int instances
- atomic_ok = (unicode_type, int, type(None))
+ # types that are 'atomic' and ok in json as-is.
+ atomic_ok = (unicode_type, type(None))
# containers that we need to convert into lists
container_to_list = (tuple, set, types.GeneratorType)
@@ -205,7 +204,14 @@
# cast out-of-range floats to their reprs
if math.isnan(obj) or math.isinf(obj):
return repr(obj)
- return obj
+ return float(obj)
+
+ if isinstance(obj, int):
+ # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)
+ if isinstance(obj, bool):
+ # bools are ints, but we don't want to cast them to 0,1
+ return obj
+ return int(obj)
if isinstance(obj, atomic_ok):
return obj
|
{"golden_diff": "diff --git a/IPython/utils/jsonutil.py b/IPython/utils/jsonutil.py\n--- a/IPython/utils/jsonutil.py\n+++ b/IPython/utils/jsonutil.py\n@@ -194,9 +194,8 @@\n >>> json_clean(True)\n True\n \"\"\"\n- # types that are 'atomic' and ok in json as-is. bool doesn't need to be\n- # listed explicitly because bools pass as int instances\n- atomic_ok = (unicode_type, int, type(None))\n+ # types that are 'atomic' and ok in json as-is.\n+ atomic_ok = (unicode_type, type(None))\n \n # containers that we need to convert into lists\n container_to_list = (tuple, set, types.GeneratorType)\n@@ -205,7 +204,14 @@\n # cast out-of-range floats to their reprs\n if math.isnan(obj) or math.isinf(obj):\n return repr(obj)\n- return obj\n+ return float(obj)\n+ \n+ if isinstance(obj, int):\n+ # cast int to int, in case subclasses override __str__ (e.g. boost enum, #4598)\n+ if isinstance(obj, bool):\n+ # bools are ints, but we don't want to cast them to 0,1\n+ return obj\n+ return int(obj)\n \n if isinstance(obj, atomic_ok):\n return obj\n", "issue": "Qtconsole docstring pop-up fails on method containing defaulted enum argument\n[We've found](http://trac.mantidproject.org/mantid/ticket/8422) that an error is generated in the (admittedly rare) situation where a C++ enum is exposed to Python via boost python and included in a python function as a default to an argument. This is seen in IPython 1.1 and the current tip of master.\n\nHere's the simplest example I could come up with, though it does still require compiling C++ and linking to boost python. In a C++ file:\n\n``` c++\n#include <boost/python.hpp>\n\nenum MyEnum\n{\n Red,\n Yellow,\n Blue\n};\n\nBOOST_PYTHON_MODULE(enum_test)\n{\n using namespace boost::python;\n\n enum_<MyEnum>(\"MyEnum\")\n .value(\"Red\", Red)\n .value(\"Yellow\", Yellow)\n .value(\"Blue\", Blue);\n}\n```\n\nThis should be compiled to a shared library with something like `gcc -fPIC -I /usr/include/python2.6 -shared -o enum_test.so enum_test.cpp -lboost_python`\n\nThen, in the IPython qtconsole enter:\n\n```\nIn [1]: import enum_test\n\nIn [2]: def MyFunc(color = enum_test.MyEnum.Red):\n ...: pass\n ...: \n\nIn [3]: MyFunc(\n```\n\nOn typing the opening parenthesis a stack trace will appear that culminates in:\n\n```\n File \"/usr/lib/python2.6/site-packages/IPython/kernel/zmq/session.py\", line 83, in <lambda>\n json_unpacker = lambda s: extract_dates(jsonapi.loads(s))\n File \"/usr/lib64/python2.6/site-packages/zmq/utils/jsonapi.py\", line 81, in loads\n return jsonmod.loads(s, **kwargs)\n File \"/usr/lib64/python2.6/site-packages/simplejson/__init__.py\", line 307, in loads\n return _default_decoder.decode(s)\n File \"/usr/lib64/python2.6/site-packages/simplejson/decoder.py\", line 335, in decode\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\n File \"/usr/lib64/python2.6/site-packages/simplejson/decoder.py\", line 353, in raw_decode\n raise ValueError(\"No JSON object could be decoded\")\n```\n\nThe string that's going into the raw_decode function of decoder.py and leads to the exception is:\n\n```\n{\"base_class\":\"<type 'function'>\",\"init_definition\":null,\"type_name\":\"function\",\"name\":\"MyFunc\",\"isclass\":null,\"namespace\":\"Interactive\",\"isalias\":false,\"init_docstring\":null,\"argspec\":{\"args\":[\"color\"],\"varkw\":null,\"defaults\":[Red],\"varargs\":null},\"source\":null,\"length\":null,\"call_def\":null,\"call_docstring\":null,\"file\":\"/home/enumproblem/<ipython-input-2-b6e10dea3e06>\",\"string_form\":\"<function MyFunc at 
0x16a8de8>\",\"found\":true,\"class_docstring\":null,\"definition\":\"\\u001b[0mMyFunc\\u001b[0m\\u001b[1;33m(\\u001b[0m\\u001b[0mcolor\\u001b[0m\\u001b[1;33m=\\u001b[0m\\u001b[0menum_test\\u001b[0m\\u001b[1;33m.\\u001b[0m\\u001b[0mMyEnum\\u001b[0m\\u001b[1;33m.\\u001b[0m\\u001b[0mRed\\u001b[0m\\u001b[1;33m)\\u001b[0m\\u001b[1;33m\\u001b[0m\\u001b[0m\\n\",\"docstring\":\"<no docstring>\",\"ismagic\":false}\n```\n\n", "before_files": [{"content": "\"\"\"Utilities to manipulate JSON objects.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING.txt, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n# stdlib\nimport math\nimport re\nimport types\nfrom datetime import datetime\n\ntry:\n # base64.encodestring is deprecated in Python 3.x\n from base64 import encodebytes\nexcept ImportError:\n # Python 2.x\n from base64 import encodestring as encodebytes\n\nfrom IPython.utils import py3compat\nfrom IPython.utils.py3compat import string_types, unicode_type, iteritems\nfrom IPython.utils.encoding import DEFAULT_ENCODING\nnext_attr_name = '__next__' if py3compat.PY3 else 'next'\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n# timestamp formats\nISO8601 = \"%Y-%m-%dT%H:%M:%S.%f\"\nISO8601_PAT=re.compile(r\"^(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{1,6})Z?([\\+\\-]\\d{2}:?\\d{2})?$\")\n\n#-----------------------------------------------------------------------------\n# Classes and functions\n#-----------------------------------------------------------------------------\n\ndef rekey(dikt):\n \"\"\"Rekey a dict that has been forced to use str keys where there should be\n ints by json.\"\"\"\n for k in dikt:\n if isinstance(k, string_types):\n ik=fk=None\n try:\n ik = int(k)\n except ValueError:\n try:\n fk = float(k)\n except ValueError:\n continue\n if ik is not None:\n nk = ik\n else:\n nk = fk\n if nk in dikt:\n raise KeyError(\"already have key %r\"%nk)\n dikt[nk] = dikt.pop(k)\n return dikt\n\ndef parse_date(s):\n \"\"\"parse an ISO8601 date string\n \n If it is None or not a valid ISO8601 timestamp,\n it will be returned unmodified.\n Otherwise, it will return a datetime object.\n \"\"\"\n if s is None:\n return s\n m = ISO8601_PAT.match(s)\n if m:\n # FIXME: add actual timezone support\n # this just drops the timezone info\n notz = m.groups()[0]\n return datetime.strptime(notz, ISO8601)\n return s\n\ndef extract_dates(obj):\n \"\"\"extract ISO8601 dates from unpacked JSON\"\"\"\n if isinstance(obj, dict):\n new_obj = {} # don't clobber\n for k,v in iteritems(obj):\n new_obj[k] = extract_dates(v)\n obj = new_obj\n elif isinstance(obj, (list, tuple)):\n obj = [ extract_dates(o) for o in obj ]\n elif isinstance(obj, string_types):\n obj = parse_date(obj)\n return obj\n\ndef squash_dates(obj):\n \"\"\"squash datetime objects into ISO8601 strings\"\"\"\n if isinstance(obj, dict):\n obj = dict(obj) # don't clobber\n for k,v in iteritems(obj):\n obj[k] = squash_dates(v)\n elif isinstance(obj, (list, tuple)):\n obj = [ squash_dates(o) 
for o in obj ]\n elif isinstance(obj, datetime):\n obj = obj.isoformat()\n return obj\n\ndef date_default(obj):\n \"\"\"default function for packing datetime objects in JSON.\"\"\"\n if isinstance(obj, datetime):\n return obj.isoformat()\n else:\n raise TypeError(\"%r is not JSON serializable\"%obj)\n\n\n# constants for identifying png/jpeg data\nPNG = b'\\x89PNG\\r\\n\\x1a\\n'\n# front of PNG base64-encoded\nPNG64 = b'iVBORw0KG'\nJPEG = b'\\xff\\xd8'\n# front of JPEG base64-encoded\nJPEG64 = b'/9'\n\ndef encode_images(format_dict):\n \"\"\"b64-encodes images in a displaypub format dict\n\n Perhaps this should be handled in json_clean itself?\n\n Parameters\n ----------\n\n format_dict : dict\n A dictionary of display data keyed by mime-type\n\n Returns\n -------\n\n format_dict : dict\n A copy of the same dictionary,\n but binary image data ('image/png' or 'image/jpeg')\n is base64-encoded.\n\n \"\"\"\n encoded = format_dict.copy()\n\n pngdata = format_dict.get('image/png')\n if isinstance(pngdata, bytes):\n # make sure we don't double-encode\n if not pngdata.startswith(PNG64):\n pngdata = encodebytes(pngdata)\n encoded['image/png'] = pngdata.decode('ascii')\n\n jpegdata = format_dict.get('image/jpeg')\n if isinstance(jpegdata, bytes):\n # make sure we don't double-encode\n if not jpegdata.startswith(JPEG64):\n jpegdata = encodebytes(jpegdata)\n encoded['image/jpeg'] = jpegdata.decode('ascii')\n\n return encoded\n\n\ndef json_clean(obj):\n \"\"\"Clean an object to ensure it's safe to encode in JSON.\n\n Atomic, immutable objects are returned unmodified. Sets and tuples are\n converted to lists, lists are copied and dicts are also copied.\n\n Note: dicts whose keys could cause collisions upon encoding (such as a dict\n with both the number 1 and the string '1' as keys) will cause a ValueError\n to be raised.\n\n Parameters\n ----------\n obj : any python object\n\n Returns\n -------\n out : object\n\n A version of the input which will not cause an encoding error when\n encoded as JSON. Note that this function does not *encode* its inputs,\n it simply sanitizes it so that there will be no encoding errors later.\n\n Examples\n --------\n >>> json_clean(4)\n 4\n >>> json_clean(list(range(10)))\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> sorted(json_clean(dict(x=1, y=2)).items())\n [('x', 1), ('y', 2)]\n >>> sorted(json_clean(dict(x=1, y=2, z=[1,2,3])).items())\n [('x', 1), ('y', 2), ('z', [1, 2, 3])]\n >>> json_clean(True)\n True\n \"\"\"\n # types that are 'atomic' and ok in json as-is. bool doesn't need to be\n # listed explicitly because bools pass as int instances\n atomic_ok = (unicode_type, int, type(None))\n\n # containers that we need to convert into lists\n container_to_list = (tuple, set, types.GeneratorType)\n\n if isinstance(obj, float):\n # cast out-of-range floats to their reprs\n if math.isnan(obj) or math.isinf(obj):\n return repr(obj)\n return obj\n\n if isinstance(obj, atomic_ok):\n return obj\n\n if isinstance(obj, bytes):\n return obj.decode(DEFAULT_ENCODING, 'replace')\n\n if isinstance(obj, container_to_list) or (\n hasattr(obj, '__iter__') and hasattr(obj, next_attr_name)):\n obj = list(obj)\n\n if isinstance(obj, list):\n return [json_clean(x) for x in obj]\n\n if isinstance(obj, dict):\n # First, validate that the dict won't lose data in conversion due to\n # key collisions after stringification. 
This can happen with keys like\n # True and 'true' or 1 and '1', which collide in JSON.\n nkeys = len(obj)\n nkeys_collapsed = len(set(map(str, obj)))\n if nkeys != nkeys_collapsed:\n raise ValueError('dict can not be safely converted to JSON: '\n 'key collision would lead to dropped values')\n # If all OK, proceed by making the new dict that will be json-safe\n out = {}\n for k,v in iteritems(obj):\n out[str(k)] = json_clean(v)\n return out\n\n # If we get here, we don't know how to handle the object, so we just get\n # its repr and return that. This will catch lambdas, open sockets, class\n # objects, and any other complicated contraption that json can't encode\n return repr(obj)\n", "path": "IPython/utils/jsonutil.py"}]}
| 3,962 | 314 |
gh_patches_debug_1116
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-895
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs build broken with Sphinx v3.1.0
# Description
Today (2020-06-08) [Sphinx `v3.1.0`](https://github.com/sphinx-doc/sphinx/releases/tag/v3.1.0) was released which now classifies pyhf's particular usages of the "autoclass" directive as an Error in the docs generated for [`interpolators/code0.py`](https://github.com/scikit-hep/pyhf/blob/62becc2e469f89babf75534a2decfb3ace6ff179/src/pyhf/interpolators/code0.py)
```
Warning, treated as error:
/home/runner/work/pyhf/pyhf/docs/_generated/pyhf.interpolators.code0.rst:8:Error in "autoclass" directive:
1 argument(s) required, 0 supplied.
.. autoclass::
:show-inheritance:
.. rubric:: Methods
.. automethod:: .__init__
##[error]Process completed with exit code 1.
```
</issue>
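The failing directive lives in a generated stub page (`docs/_generated/pyhf.interpolators.code0.rst`), where `autoclass` ends up with no class argument; Sphinx 3.1.0 started rejecting that instead of warning. A minimal mitigation, assuming the docs dependencies are declared as extras the way the `setup.py` below does, is to exclude just the broken release rather than pin an exact version:

```python
# Hypothetical excerpt of a docs extras declaration that skips only the
# release that rejects empty-argument autoclass stubs.
docs_extras = sorted(
    {
        "sphinx!=3.1.0",  # 3.1.0 raises "1 argument(s) required, 0 supplied"
        "sphinxcontrib-bibtex",
        "sphinx_rtd_theme",
        "nbsphinx",
    }
)
print(docs_extras)
```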
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
5 'torch': ['torch~=1.2'],
6 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
7 'xmlio': ['uproot'],
8 'minuit': ['iminuit'],
9 }
10 extras_require['backends'] = sorted(
11 set(
12 extras_require['tensorflow']
13 + extras_require['torch']
14 + extras_require['jax']
15 + extras_require['minuit']
16 )
17 )
18 extras_require['contrib'] = sorted(set(['matplotlib']))
19
20 extras_require['test'] = sorted(
21 set(
22 extras_require['backends']
23 + extras_require['xmlio']
24 + extras_require['contrib']
25 + [
26 'pyflakes',
27 'pytest~=3.5',
28 'pytest-cov>=2.5.1',
29 'pytest-mock',
30 'pytest-benchmark[histogram]',
31 'pytest-console-scripts',
32 'pytest-mpl',
33 'pydocstyle',
34 'coverage>=4.0', # coveralls
35 'papermill~=2.0',
36 'nteract-scrapbook~=0.2',
37 'check-manifest',
38 'jupyter',
39 'uproot~=3.3',
40 'graphviz',
41 'jsonpatch',
42 'black',
43 ]
44 )
45 )
46 extras_require['docs'] = sorted(
47 set(
48 [
49 'sphinx',
50 'sphinxcontrib-bibtex',
51 'sphinx-click',
52 'sphinx_rtd_theme',
53 'nbsphinx',
54 'ipywidgets',
55 'sphinx-issues',
56 'sphinx-copybutton>0.2.9',
57 ]
58 )
59 )
60 extras_require['develop'] = sorted(
61 set(
62 extras_require['docs']
63 + extras_require['test']
64 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
65 )
66 )
67 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
68
69
70 setup(
71 extras_require=extras_require,
72 use_scm_version=lambda: {'local_scheme': lambda version: ''},
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
extras_require['docs'] = sorted(
set(
[
- 'sphinx',
+ 'sphinx!=3.1.0',
'sphinxcontrib-bibtex',
'sphinx-click',
'sphinx_rtd_theme',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n extras_require['docs'] = sorted(\n set(\n [\n- 'sphinx',\n+ 'sphinx!=3.1.0',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n", "issue": "Docs build broken with Sphinx v3.1.0\n# Description\r\n\r\nToday (2020-06-08) [Sphinx `v3.1.0`](https://github.com/sphinx-doc/sphinx/releases/tag/v3.1.0) was released which now classifies pyhf's particular usages of the \"autoclass\" directive as an Error in the docs generated for [`interpolators/code0.py`](https://github.com/scikit-hep/pyhf/blob/62becc2e469f89babf75534a2decfb3ace6ff179/src/pyhf/interpolators/code0.py)\r\n\r\n```\r\nWarning, treated as error:\r\n/home/runner/work/pyhf/pyhf/docs/_generated/pyhf.interpolators.code0.rst:8:Error in \"autoclass\" directive:\r\n1 argument(s) required, 0 supplied.\r\n\r\n.. autoclass::\r\n :show-inheritance:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n .. rubric:: Methods\r\n\r\n\r\n\r\n .. automethod:: .__init__\r\n##[error]Process completed with exit code 1.\r\n```\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
| 1,399 | 86 |
gh_patches_debug_64733
|
rasdani/github-patches
|
git_diff
|
python-gitlab__python-gitlab-1099
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duplicated code in gitlab/config.py
## Description of the problem, including code/CLI snippet
Duplicated code found in gitlab/config.py . I think one should be get from 'global'.
```python
self.http_username = None
self.http_password = None
try:
self.http_username = self._config.get(self.gitlab_id, "http_username")
self.http_password = self._config.get(self.gitlab_id, "http_password")
except Exception:
pass
self.http_username = None
self.http_password = None
try:
self.http_username = self._config.get(self.gitlab_id, "http_username")
self.http_password = self._config.get(self.gitlab_id, "http_password")
except Exception:
pass
```
## Expected Behavior
## Actual Behavior
## Specifications
- python-gitlab version: python-gitlab==2.2.0
- API version you are using (v3/v4): v4
- Gitlab server version (or gitlab.com):
</issue>
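For context, other settings in the listing below (for example `timeout`) are read from the "global" section first and then overridden by the per-`gitlab_id` section, which is presumably what the reporter means by "one should be get from 'global'". A sketch of that pattern as a standalone helper is shown here (hypothetical, not part of python-gitlab; an equally valid fix is simply dropping the repeated block, as the patch later in this row does):

```python
import configparser


def read_http_credentials(config: configparser.ConfigParser, gitlab_id: str):
    """Hypothetical helper: read from 'global' first, then the per-gitlab_id override."""
    username = password = None
    for section in ("global", gitlab_id):
        try:
            username = config.get(section, "http_username")
            password = config.get(section, "http_password")
        except (configparser.NoSectionError, configparser.NoOptionError):
            pass
    return username, password
```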
<code>
[start of gitlab/config.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Lesser General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU Lesser General Public License for more details.
14 #
15 # You should have received a copy of the GNU Lesser General Public License
16 # along with this program. If not, see <http://www.gnu.org/licenses/>.
17
18 import os
19 import configparser
20
21
22 def _env_config():
23 if "PYTHON_GITLAB_CFG" in os.environ:
24 return [os.environ["PYTHON_GITLAB_CFG"]]
25 return []
26
27
28 _DEFAULT_FILES = _env_config() + [
29 "/etc/python-gitlab.cfg",
30 os.path.expanduser("~/.python-gitlab.cfg"),
31 ]
32
33
34 class ConfigError(Exception):
35 pass
36
37
38 class GitlabIDError(ConfigError):
39 pass
40
41
42 class GitlabDataError(ConfigError):
43 pass
44
45
46 class GitlabConfigMissingError(ConfigError):
47 pass
48
49
50 class GitlabConfigParser(object):
51 def __init__(self, gitlab_id=None, config_files=None):
52 self.gitlab_id = gitlab_id
53 _files = config_files or _DEFAULT_FILES
54 file_exist = False
55 for file in _files:
56 if os.path.exists(file):
57 file_exist = True
58 if not file_exist:
59 raise GitlabConfigMissingError(
60 "Config file not found. \nPlease create one in "
61 "one of the following locations: {} \nor "
62 "specify a config file using the '-c' parameter.".format(
63 ", ".join(_DEFAULT_FILES)
64 )
65 )
66
67 self._config = configparser.ConfigParser()
68 self._config.read(_files)
69
70 if self.gitlab_id is None:
71 try:
72 self.gitlab_id = self._config.get("global", "default")
73 except Exception as e:
74 raise GitlabIDError(
75 "Impossible to get the gitlab id (not specified in config file)"
76 ) from e
77
78 try:
79 self.url = self._config.get(self.gitlab_id, "url")
80 except Exception as e:
81 raise GitlabDataError(
82 "Impossible to get gitlab informations from "
83 "configuration (%s)" % self.gitlab_id
84 ) from e
85
86 self.ssl_verify = True
87 try:
88 self.ssl_verify = self._config.getboolean("global", "ssl_verify")
89 except ValueError:
90 # Value Error means the option exists but isn't a boolean.
91 # Get as a string instead as it should then be a local path to a
92 # CA bundle.
93 try:
94 self.ssl_verify = self._config.get("global", "ssl_verify")
95 except Exception:
96 pass
97 except Exception:
98 pass
99 try:
100 self.ssl_verify = self._config.getboolean(self.gitlab_id, "ssl_verify")
101 except ValueError:
102 # Value Error means the option exists but isn't a boolean.
103 # Get as a string instead as it should then be a local path to a
104 # CA bundle.
105 try:
106 self.ssl_verify = self._config.get(self.gitlab_id, "ssl_verify")
107 except Exception:
108 pass
109 except Exception:
110 pass
111
112 self.timeout = 60
113 try:
114 self.timeout = self._config.getint("global", "timeout")
115 except Exception:
116 pass
117 try:
118 self.timeout = self._config.getint(self.gitlab_id, "timeout")
119 except Exception:
120 pass
121
122 self.private_token = None
123 try:
124 self.private_token = self._config.get(self.gitlab_id, "private_token")
125 except Exception:
126 pass
127
128 self.oauth_token = None
129 try:
130 self.oauth_token = self._config.get(self.gitlab_id, "oauth_token")
131 except Exception:
132 pass
133
134 self.job_token = None
135 try:
136 self.job_token = self._config.get(self.gitlab_id, "job_token")
137 except Exception:
138 pass
139
140 self.http_username = None
141 self.http_password = None
142 try:
143 self.http_username = self._config.get(self.gitlab_id, "http_username")
144 self.http_password = self._config.get(self.gitlab_id, "http_password")
145 except Exception:
146 pass
147
148 self.http_username = None
149 self.http_password = None
150 try:
151 self.http_username = self._config.get(self.gitlab_id, "http_username")
152 self.http_password = self._config.get(self.gitlab_id, "http_password")
153 except Exception:
154 pass
155
156 self.api_version = "4"
157 try:
158 self.api_version = self._config.get("global", "api_version")
159 except Exception:
160 pass
161 try:
162 self.api_version = self._config.get(self.gitlab_id, "api_version")
163 except Exception:
164 pass
165 if self.api_version not in ("4",):
166 raise GitlabDataError("Unsupported API version: %s" % self.api_version)
167
168 self.per_page = None
169 for section in ["global", self.gitlab_id]:
170 try:
171 self.per_page = self._config.getint(section, "per_page")
172 except Exception:
173 pass
174 if self.per_page is not None and not 0 <= self.per_page <= 100:
175 raise GitlabDataError("Unsupported per_page number: %s" % self.per_page)
176
177 self.pagination = None
178 try:
179 self.pagination = self._config.get(self.gitlab_id, "pagination")
180 except Exception:
181 pass
182
183 self.order_by = None
184 try:
185 self.order_by = self._config.get(self.gitlab_id, "order_by")
186 except Exception:
187 pass
188
[end of gitlab/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gitlab/config.py b/gitlab/config.py
--- a/gitlab/config.py
+++ b/gitlab/config.py
@@ -145,14 +145,6 @@
except Exception:
pass
- self.http_username = None
- self.http_password = None
- try:
- self.http_username = self._config.get(self.gitlab_id, "http_username")
- self.http_password = self._config.get(self.gitlab_id, "http_password")
- except Exception:
- pass
-
self.api_version = "4"
try:
self.api_version = self._config.get("global", "api_version")
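The removed lines were an exact repeat of the block kept just above, so behaviour is unchanged. For reference, a small self-contained sketch of the lookup-with-silent-fallback pattern the file relies on (the section and option names here are made up for illustration):

```python
import configparser

config = configparser.ConfigParser()
config.read_string("""
[global]
default = example

[example]
url = https://gitlab.example.com
http_username = bot
""")

http_username = None
http_password = None
try:
    http_username = config.get("example", "http_username")  # found: 'bot'
    http_password = config.get("example", "http_password")  # missing: raises NoOptionError
except Exception:
    pass  # swallow it, keeping whatever defaults are still in place

print(http_username, http_password)  # bot None
```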
|
{"golden_diff": "diff --git a/gitlab/config.py b/gitlab/config.py\n--- a/gitlab/config.py\n+++ b/gitlab/config.py\n@@ -145,14 +145,6 @@\n except Exception:\n pass\n \n- self.http_username = None\n- self.http_password = None\n- try:\n- self.http_username = self._config.get(self.gitlab_id, \"http_username\")\n- self.http_password = self._config.get(self.gitlab_id, \"http_password\")\n- except Exception:\n- pass\n-\n self.api_version = \"4\"\n try:\n self.api_version = self._config.get(\"global\", \"api_version\")\n", "issue": "Duplicated code in gitlab/config.py\n## Description of the problem, including code/CLI snippet\r\nDuplicated code found in gitlab/config.py . I think one should be get from 'global'.\r\n```python\r\n self.http_username = None\r\n self.http_password = None\r\n try:\r\n self.http_username = self._config.get(self.gitlab_id, \"http_username\")\r\n self.http_password = self._config.get(self.gitlab_id, \"http_password\")\r\n except Exception:\r\n pass\r\n\r\n self.http_username = None\r\n self.http_password = None\r\n try:\r\n self.http_username = self._config.get(self.gitlab_id, \"http_username\")\r\n self.http_password = self._config.get(self.gitlab_id, \"http_password\")\r\n except Exception:\r\n pass\r\n```\r\n\r\n## Expected Behavior\r\n\r\n\r\n## Actual Behavior\r\n\r\n\r\n## Specifications\r\n\r\n - python-gitlab version: python-gitlab==2.2.0\r\n - API version you are using (v3/v4): v4\r\n - Gitlab server version (or gitlab.com): \r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2013-2017 Gauvain Pocentek <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\nimport configparser\n\n\ndef _env_config():\n if \"PYTHON_GITLAB_CFG\" in os.environ:\n return [os.environ[\"PYTHON_GITLAB_CFG\"]]\n return []\n\n\n_DEFAULT_FILES = _env_config() + [\n \"/etc/python-gitlab.cfg\",\n os.path.expanduser(\"~/.python-gitlab.cfg\"),\n]\n\n\nclass ConfigError(Exception):\n pass\n\n\nclass GitlabIDError(ConfigError):\n pass\n\n\nclass GitlabDataError(ConfigError):\n pass\n\n\nclass GitlabConfigMissingError(ConfigError):\n pass\n\n\nclass GitlabConfigParser(object):\n def __init__(self, gitlab_id=None, config_files=None):\n self.gitlab_id = gitlab_id\n _files = config_files or _DEFAULT_FILES\n file_exist = False\n for file in _files:\n if os.path.exists(file):\n file_exist = True\n if not file_exist:\n raise GitlabConfigMissingError(\n \"Config file not found. 
\\nPlease create one in \"\n \"one of the following locations: {} \\nor \"\n \"specify a config file using the '-c' parameter.\".format(\n \", \".join(_DEFAULT_FILES)\n )\n )\n\n self._config = configparser.ConfigParser()\n self._config.read(_files)\n\n if self.gitlab_id is None:\n try:\n self.gitlab_id = self._config.get(\"global\", \"default\")\n except Exception as e:\n raise GitlabIDError(\n \"Impossible to get the gitlab id (not specified in config file)\"\n ) from e\n\n try:\n self.url = self._config.get(self.gitlab_id, \"url\")\n except Exception as e:\n raise GitlabDataError(\n \"Impossible to get gitlab informations from \"\n \"configuration (%s)\" % self.gitlab_id\n ) from e\n\n self.ssl_verify = True\n try:\n self.ssl_verify = self._config.getboolean(\"global\", \"ssl_verify\")\n except ValueError:\n # Value Error means the option exists but isn't a boolean.\n # Get as a string instead as it should then be a local path to a\n # CA bundle.\n try:\n self.ssl_verify = self._config.get(\"global\", \"ssl_verify\")\n except Exception:\n pass\n except Exception:\n pass\n try:\n self.ssl_verify = self._config.getboolean(self.gitlab_id, \"ssl_verify\")\n except ValueError:\n # Value Error means the option exists but isn't a boolean.\n # Get as a string instead as it should then be a local path to a\n # CA bundle.\n try:\n self.ssl_verify = self._config.get(self.gitlab_id, \"ssl_verify\")\n except Exception:\n pass\n except Exception:\n pass\n\n self.timeout = 60\n try:\n self.timeout = self._config.getint(\"global\", \"timeout\")\n except Exception:\n pass\n try:\n self.timeout = self._config.getint(self.gitlab_id, \"timeout\")\n except Exception:\n pass\n\n self.private_token = None\n try:\n self.private_token = self._config.get(self.gitlab_id, \"private_token\")\n except Exception:\n pass\n\n self.oauth_token = None\n try:\n self.oauth_token = self._config.get(self.gitlab_id, \"oauth_token\")\n except Exception:\n pass\n\n self.job_token = None\n try:\n self.job_token = self._config.get(self.gitlab_id, \"job_token\")\n except Exception:\n pass\n\n self.http_username = None\n self.http_password = None\n try:\n self.http_username = self._config.get(self.gitlab_id, \"http_username\")\n self.http_password = self._config.get(self.gitlab_id, \"http_password\")\n except Exception:\n pass\n\n self.http_username = None\n self.http_password = None\n try:\n self.http_username = self._config.get(self.gitlab_id, \"http_username\")\n self.http_password = self._config.get(self.gitlab_id, \"http_password\")\n except Exception:\n pass\n\n self.api_version = \"4\"\n try:\n self.api_version = self._config.get(\"global\", \"api_version\")\n except Exception:\n pass\n try:\n self.api_version = self._config.get(self.gitlab_id, \"api_version\")\n except Exception:\n pass\n if self.api_version not in (\"4\",):\n raise GitlabDataError(\"Unsupported API version: %s\" % self.api_version)\n\n self.per_page = None\n for section in [\"global\", self.gitlab_id]:\n try:\n self.per_page = self._config.getint(section, \"per_page\")\n except Exception:\n pass\n if self.per_page is not None and not 0 <= self.per_page <= 100:\n raise GitlabDataError(\"Unsupported per_page number: %s\" % self.per_page)\n\n self.pagination = None\n try:\n self.pagination = self._config.get(self.gitlab_id, \"pagination\")\n except Exception:\n pass\n\n self.order_by = None\n try:\n self.order_by = self._config.get(self.gitlab_id, \"order_by\")\n except Exception:\n pass\n", "path": "gitlab/config.py"}]}
| 2,550 | 145 |
gh_patches_debug_11412
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-3108
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The modprobe combiner is raising AttributeError exceptions in production.
The AllModProbe combiner is throwing a number of AttributeError("'bool' object has no attribute 'append'",) exceptions in production.
</issue>
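For context, a minimal reproduction of the failure mode. The data layout is assumed from the traceback and from the combiner code below: a blacklisted module is stored with the boolean `True` as its value (not a list), so merging a second file that blacklists the same module calls `.append` on a bool.

```python
# The value half of a ModProbeValue for a blacklist entry is the bool True.
data = {"blacklist": {"i8xx_tco": (True, "/etc/modprobe.conf")}}

# A second modprobe file blacklists the same module; the combiner tries to append.
section, name, value = "blacklist", "i8xx_tco", True
try:
    if name in data[section]:
        data[section][name][0].append(value)
except AttributeError as err:
    print(err)  # 'bool' object has no attribute 'append'
```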
<code>
[start of insights/combiners/modprobe.py]
1 """
2 Modprobe configuration
3 ======================
4
5 The modprobe configuration files are normally available to rules as a list of
6 ModProbe objects. This combiner turns those into one set of data, preserving
7 the original file name that defined modprobe configuration line using a tuple.
8
9 """
10
11 from insights.core.plugins import combiner
12 from insights.parsers.modprobe import ModProbe
13 from .. import LegacyItemAccess
14
15 from collections import namedtuple
16
17
18 ModProbeValue = namedtuple("ModProbeValue", ['value', 'source'])
19 """
20 A value from a ModProbe source
21 """
22
23
24 @combiner(ModProbe)
25 class AllModProbe(LegacyItemAccess):
26 """
27 Combiner for accessing all the modprobe configuration files in one
28 structure.
29
30 It's important for our reporting and information purposes to know not
31 only what the configuration was but where it was defined. Therefore, the
32 format of the data in this combiner is slightly different compared to the
33 ModProbe parser. Here, each 'value' is actually a 2-tuple, with the
34 actual data first and the file name from whence the value came second.
35 This does mean that you need to pull the value out of each item - e.g.
36 using a list comprehension - but it means that every item is associated
37 with the file it was defined in.
38
39 In line with the ModProbe configuration parser, the actual value is
40 usually a list of the space-separated parts on the line, and the
41 definitions for each module are similarly kept in a list, which makes
42
43 Thanks to the LegacyItemAccess class, this can also be treated as a
44 dictionary for look-ups of data in the `data` attribute.
45
46 Attributes:
47 data (dict): The combined data structures, with each item as a
48 2-tuple, as described above.
49 bad_lines(list): The list of unparseable lines from all files, with
50 each line as a 2-tuple as described above.
51
52 Sample data files::
53
54 /etc/modprobe.conf:
55 # watchdog drivers
56 blacklist i8xx_tco
57
58 # Don't install the Firewire ethernet driver
59 install eth1394 /bin/true
60
61 /etc/modprobe.conf.d/no_ipv6.conf:
62 options ipv6 disable=1
63 install ipv6 /bin/true
64
65 Examples:
66 >>> all_modprobe = shared[AllModProbe]
67 >>> all_modprobe['alias']
68 []
69 >>> all_modprobe['blacklist']
70 {'i8xx_tco': ModProbeValue(True, '/etc/modprobe.conf')}
71 >>> all_modprobe['install']
72 {'eth1394': ModProbeValue(['/bin/true'], '/etc/modprobe.conf'),
73 'ipv6': ModProbeValue(['/bin/true'], '/etc/modprobe.conf.d/no_ipv6.conf')}
74 """
75 def __init__(self, modprobe):
76 self.data = {}
77 self.bad_lines = []
78 for mod in modprobe:
79 filename = mod.file_path # relative path inside archive
80 # Copy data section
81 for section, sectdict in mod.data.items():
82 if section not in self.data:
83 self.data[section] = {}
84 for name, value in sectdict.items():
85 if name in self.data[section]:
86 # append to this module's value - should only
87 # happen for aliases.
88 self.data[section][name][0].append(value)
89 else:
90 # create new tuple
91 self.data[section][name] = ModProbeValue(value=value, source=filename)
92 # Copy bad lines, if any
93 if mod.bad_lines:
94 self.bad_lines.extend(
95 [ModProbeValue(value=line, source=filename) for line in mod.bad_lines]
96 )
97 super(AllModProbe, self).__init__()
98
[end of insights/combiners/modprobe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/insights/combiners/modprobe.py b/insights/combiners/modprobe.py
--- a/insights/combiners/modprobe.py
+++ b/insights/combiners/modprobe.py
@@ -82,7 +82,7 @@
if section not in self.data:
self.data[section] = {}
for name, value in sectdict.items():
- if name in self.data[section]:
+ if name in self.data[section] and type(self.data[section][name][0]) == list:
# append to this module's value - should only
# happen for aliases.
self.data[section][name][0].append(value)
|
{"golden_diff": "diff --git a/insights/combiners/modprobe.py b/insights/combiners/modprobe.py\n--- a/insights/combiners/modprobe.py\n+++ b/insights/combiners/modprobe.py\n@@ -82,7 +82,7 @@\n if section not in self.data:\n self.data[section] = {}\n for name, value in sectdict.items():\n- if name in self.data[section]:\n+ if name in self.data[section] and type(self.data[section][name][0]) == list:\n # append to this module's value - should only\n # happen for aliases.\n self.data[section][name][0].append(value)\n", "issue": "The modprobe combiner is raising AttributeError exceptions in production.\nThe AllModProbe combiner is throwing a number of the exception AttributeError(\"'bool' object has no attribute 'append'\",) in production.\n", "before_files": [{"content": "\"\"\"\nModprobe configuration\n======================\n\nThe modprobe configuration files are normally available to rules as a list of\nModProbe objects. This combiner turns those into one set of data, preserving\nthe original file name that defined modprobe configuration line using a tuple.\n\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.modprobe import ModProbe\nfrom .. import LegacyItemAccess\n\nfrom collections import namedtuple\n\n\nModProbeValue = namedtuple(\"ModProbeValue\", ['value', 'source'])\n\"\"\"\nA value from a ModProbe source\n\"\"\"\n\n\n@combiner(ModProbe)\nclass AllModProbe(LegacyItemAccess):\n \"\"\"\n Combiner for accessing all the modprobe configuration files in one\n structure.\n\n It's important for our reporting and information purposes to know not\n only what the configuration was but where it was defined. Therefore, the\n format of the data in this combiner is slightly different compared to the\n ModProbe parser. Here, each 'value' is actually a 2-tuple, with the\n actual data first and the file name from whence the value came second.\n This does mean that you need to pull the value out of each item - e.g.\n using a list comprehension - but it means that every item is associated\n with the file it was defined in.\n\n In line with the ModProbe configuration parser, the actual value is\n usually a list of the space-separated parts on the line, and the\n definitions for each module are similarly kept in a list, which makes\n\n Thanks to the LegacyItemAccess class, this can also be treated as a\n dictionary for look-ups of data in the `data` attribute.\n\n Attributes:\n data (dict): The combined data structures, with each item as a\n 2-tuple, as described above.\n bad_lines(list): The list of unparseable lines from all files, with\n each line as a 2-tuple as described above.\n\n Sample data files::\n\n /etc/modprobe.conf:\n # watchdog drivers\n blacklist i8xx_tco\n\n # Don't install the Firewire ethernet driver\n install eth1394 /bin/true\n\n /etc/modprobe.conf.d/no_ipv6.conf:\n options ipv6 disable=1\n install ipv6 /bin/true\n\n Examples:\n >>> all_modprobe = shared[AllModProbe]\n >>> all_modprobe['alias']\n []\n >>> all_modprobe['blacklist']\n {'i8xx_tco': ModProbeValue(True, '/etc/modprobe.conf')}\n >>> all_modprobe['install']\n {'eth1394': ModProbeValue(['/bin/true'], '/etc/modprobe.conf'),\n 'ipv6': ModProbeValue(['/bin/true'], '/etc/modprobe.conf.d/no_ipv6.conf')}\n \"\"\"\n def __init__(self, modprobe):\n self.data = {}\n self.bad_lines = []\n for mod in modprobe:\n filename = mod.file_path # relative path inside archive\n # Copy data section\n for section, sectdict in mod.data.items():\n if section not in self.data:\n self.data[section] = {}\n for name, value in 
sectdict.items():\n if name in self.data[section]:\n # append to this module's value - should only\n # happen for aliases.\n self.data[section][name][0].append(value)\n else:\n # create new tuple\n self.data[section][name] = ModProbeValue(value=value, source=filename)\n # Copy bad lines, if any\n if mod.bad_lines:\n self.bad_lines.extend(\n [ModProbeValue(value=line, source=filename) for line in mod.bad_lines]\n )\n super(AllModProbe, self).__init__()\n", "path": "insights/combiners/modprobe.py"}]}
| 1,582 | 146 |
gh_patches_debug_33260
|
rasdani/github-patches
|
git_diff
|
apache__airflow-1056
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError in bash_operator.py
Hi,
I see a lot of these errors when running `airflow backfill` :
```
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 467, in format
s = self._fmt % record.__dict__
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 13: ordinal not in range(128)
Logged from file bash_operator.py, line 72
```
</issue>
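The traceback surfaces in Python 2's logging internals, but the underlying trigger is subprocess output that is not plain ASCII. A small, self-contained sketch (the byte string is invented) of why decoding with an explicit, configurable encoding matters:

```python
raw = b'Jos\xe9\n'  # latin-1 bytes read from a subprocess pipe

print(raw.decode('latin-1').strip())  # José - the right codec decodes cleanly
try:
    raw.decode('utf-8')
except UnicodeDecodeError as err:
    print(err)  # 0xe9 is not a valid UTF-8 sequence at that position
```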
<code>
[start of airflow/operators/bash_operator.py]
1
2 from builtins import bytes
3 import logging
4 import sys
5 from subprocess import Popen, STDOUT, PIPE
6 from tempfile import gettempdir, NamedTemporaryFile
7
8 from airflow.utils import AirflowException
9 from airflow.models import BaseOperator
10 from airflow.utils import apply_defaults, TemporaryDirectory
11
12
13 class BashOperator(BaseOperator):
14 """
15 Execute a Bash script, command or set of commands.
16
17 :param bash_command: The command, set of commands or reference to a
18 bash script (must be '.sh') to be executed.
19 :type bash_command: string
20 :param env: If env is not None, it must be a mapping that defines the
21 environment variables for the new process; these are used instead
22 of inheriting the current process environment, which is the default
23 behavior.
24 :type env: dict
25 """
26 template_fields = ('bash_command', 'env')
27 template_ext = ('.sh', '.bash',)
28 ui_color = '#f0ede4'
29
30 @apply_defaults
31 def __init__(
32 self,
33 bash_command,
34 xcom_push=False,
35 env=None,
36 *args, **kwargs):
37 """
38 If xcom_push is True, the last line written to stdout will also
39 be pushed to an XCom when the bash command completes.
40 """
41 super(BashOperator, self).__init__(*args, **kwargs)
42 self.bash_command = bash_command
43 self.env = env
44 self.xcom_push_flag = xcom_push
45
46 def execute(self, context):
47 """
48 Execute the bash command in a temporary directory
49 which will be cleaned afterwards
50 """
51 bash_command = self.bash_command
52 logging.info("tmp dir root location: \n" + gettempdir())
53 with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
54 with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
55
56 f.write(bytes(bash_command, 'utf_8'))
57 f.flush()
58 fname = f.name
59 script_location = tmp_dir + "/" + fname
60 logging.info("Temporary script "
61 "location :{0}".format(script_location))
62 logging.info("Running command: " + bash_command)
63 sp = Popen(
64 ['bash', fname],
65 stdout=PIPE, stderr=STDOUT,
66 cwd=tmp_dir, env=self.env)
67
68 self.sp = sp
69
70 logging.info("Output:")
71 line = ''
72 for line in iter(sp.stdout.readline, b''):
73 line = line.decode().strip()
74 logging.info(line)
75 sp.wait()
76 logging.info("Command exited with "
77 "return code {0}".format(sp.returncode))
78
79 if sp.returncode:
80 raise AirflowException("Bash command failed")
81
82 if self.xcom_push_flag:
83 return line
84
85 def on_kill(self):
86 logging.info('Sending SIGTERM signal to bash subprocess')
87 self.sp.terminate()
88
[end of airflow/operators/bash_operator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/airflow/operators/bash_operator.py b/airflow/operators/bash_operator.py
--- a/airflow/operators/bash_operator.py
+++ b/airflow/operators/bash_operator.py
@@ -1,7 +1,6 @@
from builtins import bytes
import logging
-import sys
from subprocess import Popen, STDOUT, PIPE
from tempfile import gettempdir, NamedTemporaryFile
@@ -22,6 +21,7 @@
of inheriting the current process environment, which is the default
behavior.
:type env: dict
+ :type output_encoding: output encoding of bash command
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
@@ -33,6 +33,7 @@
bash_command,
xcom_push=False,
env=None,
+ output_encoding='utf-8',
*args, **kwargs):
"""
If xcom_push is True, the last line written to stdout will also
@@ -42,6 +43,7 @@
self.bash_command = bash_command
self.env = env
self.xcom_push_flag = xcom_push
+ self.output_encoding = output_encoding
def execute(self, context):
"""
@@ -70,7 +72,7 @@
logging.info("Output:")
line = ''
for line in iter(sp.stdout.readline, b''):
- line = line.decode().strip()
+ line = line.decode(self.output_encoding).strip()
logging.info(line)
sp.wait()
logging.info("Command exited with "
|
{"golden_diff": "diff --git a/airflow/operators/bash_operator.py b/airflow/operators/bash_operator.py\n--- a/airflow/operators/bash_operator.py\n+++ b/airflow/operators/bash_operator.py\n@@ -1,7 +1,6 @@\n \n from builtins import bytes\n import logging\n-import sys\n from subprocess import Popen, STDOUT, PIPE\n from tempfile import gettempdir, NamedTemporaryFile\n \n@@ -22,6 +21,7 @@\n of inheriting the current process environment, which is the default\n behavior.\n :type env: dict\n+ :type output_encoding: output encoding of bash command\n \"\"\"\n template_fields = ('bash_command', 'env')\n template_ext = ('.sh', '.bash',)\n@@ -33,6 +33,7 @@\n bash_command,\n xcom_push=False,\n env=None,\n+ output_encoding='utf-8',\n *args, **kwargs):\n \"\"\"\n If xcom_push is True, the last line written to stdout will also\n@@ -42,6 +43,7 @@\n self.bash_command = bash_command\n self.env = env\n self.xcom_push_flag = xcom_push\n+ self.output_encoding = output_encoding\n \n def execute(self, context):\n \"\"\"\n@@ -70,7 +72,7 @@\n logging.info(\"Output:\")\n line = ''\n for line in iter(sp.stdout.readline, b''):\n- line = line.decode().strip()\n+ line = line.decode(self.output_encoding).strip()\n logging.info(line)\n sp.wait()\n logging.info(\"Command exited with \"\n", "issue": "UnicodeDecodeError in bash_operator.py\nHi,\n\nI see a lot of these errors when running `airflow backfill` : \n\n```\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/logging/__init__.py\", line 851, in emit\n msg = self.format(record)\n File \"/usr/lib/python2.7/logging/__init__.py\", line 724, in format\n return fmt.format(record)\n File \"/usr/lib/python2.7/logging/__init__.py\", line 467, in format\n s = self._fmt % record.__dict__\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 13: ordinal not in range(128)\nLogged from file bash_operator.py, line 72\n```\n\n", "before_files": [{"content": "\nfrom builtins import bytes\nimport logging\nimport sys\nfrom subprocess import Popen, STDOUT, PIPE\nfrom tempfile import gettempdir, NamedTemporaryFile\n\nfrom airflow.utils import AirflowException\nfrom airflow.models import BaseOperator\nfrom airflow.utils import apply_defaults, TemporaryDirectory\n\n\nclass BashOperator(BaseOperator):\n \"\"\"\n Execute a Bash script, command or set of commands.\n\n :param bash_command: The command, set of commands or reference to a\n bash script (must be '.sh') to be executed.\n :type bash_command: string\n :param env: If env is not None, it must be a mapping that defines the\n environment variables for the new process; these are used instead\n of inheriting the current process environment, which is the default\n behavior.\n :type env: dict\n \"\"\"\n template_fields = ('bash_command', 'env')\n template_ext = ('.sh', '.bash',)\n ui_color = '#f0ede4'\n\n @apply_defaults\n def __init__(\n self,\n bash_command,\n xcom_push=False,\n env=None,\n *args, **kwargs):\n \"\"\"\n If xcom_push is True, the last line written to stdout will also\n be pushed to an XCom when the bash command completes.\n \"\"\"\n super(BashOperator, self).__init__(*args, **kwargs)\n self.bash_command = bash_command\n self.env = env\n self.xcom_push_flag = xcom_push\n\n def execute(self, context):\n \"\"\"\n Execute the bash command in a temporary directory\n which will be cleaned afterwards\n \"\"\"\n bash_command = self.bash_command\n logging.info(\"tmp dir root location: \\n\" + gettempdir())\n with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:\n with NamedTemporaryFile(dir=tmp_dir, 
prefix=self.task_id) as f:\n\n f.write(bytes(bash_command, 'utf_8'))\n f.flush()\n fname = f.name\n script_location = tmp_dir + \"/\" + fname\n logging.info(\"Temporary script \"\n \"location :{0}\".format(script_location))\n logging.info(\"Running command: \" + bash_command)\n sp = Popen(\n ['bash', fname],\n stdout=PIPE, stderr=STDOUT,\n cwd=tmp_dir, env=self.env)\n\n self.sp = sp\n\n logging.info(\"Output:\")\n line = ''\n for line in iter(sp.stdout.readline, b''):\n line = line.decode().strip()\n logging.info(line)\n sp.wait()\n logging.info(\"Command exited with \"\n \"return code {0}\".format(sp.returncode))\n\n if sp.returncode:\n raise AirflowException(\"Bash command failed\")\n\n if self.xcom_push_flag:\n return line\n\n def on_kill(self):\n logging.info('Sending SIGTERM signal to bash subprocess')\n self.sp.terminate()\n", "path": "airflow/operators/bash_operator.py"}]}
| 1,505 | 346 |
gh_patches_debug_10161
|
rasdani/github-patches
|
git_diff
|
onnx__sklearn-onnx-598
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
saving model works with binary classifier, fails with multiclass classifier
I am using Sklearn's SVC for a classifier, with TfidfVectorizer as the feature embedding method. Following #478 I am using `skl2onnx` version 1.7.1. My code is here:
```
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
import numpy as np
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
#import json
#with open('fake_data.json', 'r') as f:
# dataset = json.load(f)
#docs = [doc for (doc, _) in dataset]
#labels = [label for (_, label) in dataset]
data = [
["schedule a meeting", 0],
["schedule a sync with the team", 0],
["slot in a meeting", 0],
["call ron", 1],
["make a phone call", 1],
["call in on the phone", 2] # changing this from 2 to 1 will allow code to successfully run
]
docs = [doc for (doc, _) in data]
labels = [label for (_, label) in data]
vectorizer = TfidfVectorizer()
vectorizer.fit_transform(docs)
embeddings = vectorizer.transform(docs)
dim = embeddings.shape[1]
#embeddings = np.vstack(embeddings)
clf = SVC()
clf.fit(embeddings, labels)
initial_type = [('float_input', FloatTensorType([1, dim]))]
onnx_model = convert_sklearn(clf, initial_types=initial_type) # this is line 37, where the crash occurs
with open('model.onnx', 'wb') as f:
f.write(onnx_model.SerializeToString())
```
When I run the above, I get:
```
Traceback (most recent call last):
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\common\_container.py", line 536, in add_node
node = make_node(op_type, inputs, outputs, name=name,
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\proto\onnx_helper_modified.py", line 66, in make_node
node.attribute.extend(
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\google\protobuf\internal\containers.py", line 410, in extend
for message in elem_seq:
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\proto\onnx_helper_modified.py", line 67, in <genexpr>
make_attribute(key, value, dtype=_dtype, domain=domain)
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\proto\onnx_helper_modified.py", line 175, in make_attribute
raise ValueError(
ValueError: You passed in an iterable attribute but I cannot figure out its applicable type, key='coefficients', type=<class 'numpy.matrix'>, dtype=float32, types=[<class 'numpy.matrix'>, <class 'numpy.matrix'>].
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File ".\dust.py", line 37, in <module>
onnx_model = convert_sklearn(clf, initial_types=initial_type)
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\convert.py", line 160, in convert_sklearn
onnx_model = convert_topology(topology, name, doc_string, target_opset,
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\common\_topology.py", line 1069, in convert_topology
conv(scope, operator, container)
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\common\_registration.py", line 29, in __call__
return self._fct(*args)
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\operator_converters\support_vector_machines.py", line 214, in convert_sklearn_svm_classifier
container.add_node(
File "C:\Users\Stefan Larson\AppData\Local\Programs\Python\Python38\lib\site-packages\skl2onnx\common\_container.py", line 539, in add_node
raise ValueError("Unable to create node '{}' with name='{}'."
ValueError: Unable to create node 'SVMClassifier' with name='SVMc'.
```
The code crashes at the `convert_sklearn` function call. However, when I alter the dataset to have only two classes ('1', and '0'), the code runs successfully.
</issue>
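The attribute the converter chokes on is a `numpy.matrix` (that is what the multiclass SVC coefficients arrive as, per the error message). A brief illustration of the type involved and of flattening it into the plain ndarray shape that `make_attribute` knows how to serialize:

```python
import numpy as np

coef = np.matrix([[0.5, -0.5], [1.0, 0.0]])  # stand-in for the 'coefficients' attribute
print(type(coef))                            # <class 'numpy.matrix'>

flat = np.asarray(coef).ravel()              # plain 1-D ndarray: [ 0.5 -0.5  1.   0. ]
print(type(flat), flat.shape)                # <class 'numpy.ndarray'> (4,)
```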
<code>
[start of skl2onnx/proto/onnx_helper_modified.py]
1 # Modified file from
2 # https://github.com/onnx/onnx/blob/master/onnx/helper.py.
3 import collections
4 import numbers
5
6 from onnx import (
7 TensorProto, AttributeProto,
8 NodeProto, GraphProto
9 )
10 from onnx.helper import ( # noqa
11 make_tensor, make_model, make_graph, _to_bytes_or_false,
12 make_tensor_value_info, ValueInfoProto
13 )
14
15 try:
16 from onnx import SparseTensorProto
17 from onnx.helper import make_sparse_tensor # noqa
18 except ImportError:
19 # onnx is too old.
20 SparseTensorProto = None
21
22 from onnx.numpy_helper import from_array # noqa
23 from typing import (
24 Text, Sequence, Any, Optional,
25 List, cast
26 )
27 import numpy as np # type: ignore
28
29
30 def make_node(
31 op_type, # type: Text
32 inputs, # type: Sequence[Text]
33 outputs, # type: Sequence[Text]
34 name=None, # type: Optional[Text]
35 doc_string=None, # type: Optional[Text]
36 domain=None, # type: Optional[Text]
37 _dtype=None, # type: [np.float32, np.float64]
38 **kwargs # type: Any
39 ): # type: (...) -> NodeProto
40 """Construct a NodeProto.
41
42 Arguments:
43 op_type (string): The name of the operator to construct
44 inputs (list of string): list of input names
45 outputs (list of string): list of output names
46 name (string, default None): optional unique identifier for NodeProto
47 doc_string (string, default None): optional documentation
48 string for NodeProto
49 dtype: dtype for double used to infer
50 domain (string, default None): optional domain for NodeProto.
51 If it's None, we will just use default domain (which is empty)
52 **kwargs (dict): the attributes of the node. The acceptable values
53 are documented in :func:`make_attribute`.
54 """
55 node = NodeProto()
56 node.op_type = op_type
57 node.input.extend(inputs)
58 node.output.extend(outputs)
59 if name:
60 node.name = name
61 if doc_string:
62 node.doc_string = doc_string
63 if domain is not None:
64 node.domain = domain
65 if kwargs:
66 node.attribute.extend(
67 make_attribute(key, value, dtype=_dtype, domain=domain)
68 for key, value in sorted(kwargs.items()))
69 return node
70
71
72 def make_attribute(
73 key, # type: Text
74 value, # type: Any
75 dtype=None, # type: [np.float32, np.float64]
76 domain='', # type: Text
77 doc_string=None # type: Optional[Text]
78 ): # type: (...) -> AttributeProto
79 """Makes an AttributeProto based on the value type."""
80 attr = AttributeProto()
81 attr.name = key
82 if doc_string:
83 attr.doc_string = doc_string
84
85 is_iterable = isinstance(value, collections.abc.Iterable)
86 bytes_or_false = _to_bytes_or_false(value)
87
88 use_float64 = dtype == np.float64 and domain not in ('', 'ai.onnx.ml')
89
90 if isinstance(value, np.float32):
91 attr.f = value
92 attr.type = AttributeProto.FLOAT
93 elif isinstance(value, (float, np.float64)):
94 if use_float64:
95 attr.type = AttributeProto.TENSOR
96 attr.t.CopyFrom(
97 make_tensor(
98 key, TensorProto.DOUBLE, (1, ), [value]))
99 else:
100 attr.f = value
101 attr.type = AttributeProto.FLOAT
102 elif isinstance(value, np.int32):
103 attr.i = value
104 attr.type = AttributeProto.INT
105 elif isinstance(value, np.int64):
106 attr.i = value
107 attr.type = AttributeProto.INT
108 elif isinstance(value, numbers.Integral):
109 attr.i = value
110 attr.type = AttributeProto.INT
111 # string
112 elif bytes_or_false is not False:
113 assert isinstance(bytes_or_false, bytes)
114 attr.s = bytes_or_false
115 attr.type = AttributeProto.STRING
116 elif isinstance(value, TensorProto):
117 attr.t.CopyFrom(value)
118 attr.type = AttributeProto.TENSOR
119 elif (SparseTensorProto is not None and
120 isinstance(value, SparseTensorProto)):
121 attr.sparse_tensor.CopyFrom(value)
122 attr.type = AttributeProto.SPARSE_TENSOR
123 elif isinstance(value, GraphProto):
124 attr.g.CopyFrom(value)
125 attr.type = AttributeProto.GRAPH
126 # third, iterable cases
127 elif is_iterable:
128 byte_array = [_to_bytes_or_false(v) for v in value]
129 if all(isinstance(v, np.float32) for v in value):
130 attr.floats.extend(value)
131 attr.type = AttributeProto.FLOATS
132 elif all(isinstance(v, np.float64) for v in value):
133 if use_float64:
134 attr.type = AttributeProto.TENSOR
135 attr.t.CopyFrom(
136 make_tensor(
137 key, TensorProto.DOUBLE, (len(value), ), value))
138 else:
139 attr.floats.extend(value)
140 attr.type = AttributeProto.FLOATS
141 elif all(isinstance(v, float) for v in value):
142 if use_float64:
143 attr.type = AttributeProto.TENSOR
144 attr.t.CopyFrom(
145 make_tensor(
146 key, TensorProto.DOUBLE, (len(value), ), value))
147 else:
148 attr.floats.extend(value)
149 attr.type = AttributeProto.FLOATS
150 elif all(isinstance(v, np.int32) for v in value):
151 attr.ints.extend(int(v) for v in value)
152 attr.type = AttributeProto.INTS
153 elif all(isinstance(v, np.int64) for v in value):
154 attr.ints.extend(int(v) for v in value)
155 attr.type = AttributeProto.INTS
156 elif all(isinstance(v, numbers.Integral) for v in value):
157 # Turn np.int32/64 into Python built-in int.
158 attr.ints.extend(int(v) for v in value)
159 attr.type = AttributeProto.INTS
160 elif all(map(lambda bytes_or_false: bytes_or_false is not False,
161 byte_array)):
162 attr.strings.extend(cast(List[bytes], byte_array))
163 attr.type = AttributeProto.STRINGS
164 elif all(isinstance(v, TensorProto) for v in value):
165 attr.tensors.extend(value)
166 attr.type = AttributeProto.TENSORS
167 elif (SparseTensorProto is not None and
168 all(isinstance(v, SparseTensorProto) for v in value)):
169 attr.sparse_tensors.extend(value)
170 attr.type = AttributeProto.SPARSE_TENSORS
171 elif all(isinstance(v, GraphProto) for v in value):
172 attr.graphs.extend(value)
173 attr.type = AttributeProto.GRAPHS
174 else:
175 raise ValueError(
176 "You passed in an iterable attribute but I cannot figure out "
177 "its applicable type, key='{}', type={}, dtype={}, "
178 "types={}.".format(
179 key, type(value), dtype,
180 [type(_) for _, __ in zip(value, range(0, 5))]))
181 else:
182 raise ValueError(
183 "Value '{}' is not valid attribute data type for attribute "
184 "'{}'.".format(value, key))
185 return attr
186
187
188 def get_attribute_value(attr): # type: (AttributeProto) -> Any
189 if attr.type == AttributeProto.FLOAT:
190 return attr.f
191 elif attr.type == AttributeProto.INT:
192 return attr.i
193 elif attr.type == AttributeProto.STRING:
194 return attr.s
195 elif attr.type == AttributeProto.TENSOR:
196 return attr.t
197 elif attr.type == AttributeProto.GRAPH:
198 return attr.g
199 elif attr.type == AttributeProto.FLOATS:
200 return list(attr.floats)
201 elif attr.type == AttributeProto.INTS:
202 return list(attr.ints)
203 elif attr.type == AttributeProto.STRINGS:
204 return list(attr.strings)
205 elif attr.type == AttributeProto.TENSORS:
206 return list(attr.tensors)
207 elif attr.type == AttributeProto.GRAPHS:
208 return list(attr.graphs)
209 else:
210 raise ValueError("Unsupported ONNX attribute: {}".format(attr))
211
[end of skl2onnx/proto/onnx_helper_modified.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/skl2onnx/proto/onnx_helper_modified.py b/skl2onnx/proto/onnx_helper_modified.py
--- a/skl2onnx/proto/onnx_helper_modified.py
+++ b/skl2onnx/proto/onnx_helper_modified.py
@@ -171,6 +171,10 @@
elif all(isinstance(v, GraphProto) for v in value):
attr.graphs.extend(value)
attr.type = AttributeProto.GRAPHS
+ elif isinstance(value, np.matrix):
+ return make_attribute(
+ key, np.asarray(value).ravel(), dtype=dtype, domain=domain,
+ doc_string=doc_string)
else:
raise ValueError(
"You passed in an iterable attribute but I cannot figure out "
|
{"golden_diff": "diff --git a/skl2onnx/proto/onnx_helper_modified.py b/skl2onnx/proto/onnx_helper_modified.py\n--- a/skl2onnx/proto/onnx_helper_modified.py\n+++ b/skl2onnx/proto/onnx_helper_modified.py\n@@ -171,6 +171,10 @@\n elif all(isinstance(v, GraphProto) for v in value):\n attr.graphs.extend(value)\n attr.type = AttributeProto.GRAPHS\n+ elif isinstance(value, np.matrix):\n+ return make_attribute(\n+ key, np.asarray(value).ravel(), dtype=dtype, domain=domain,\n+ doc_string=doc_string)\n else:\n raise ValueError(\n \"You passed in an iterable attribute but I cannot figure out \"\n", "issue": "saving model works with binary classifier, fails with multiclass classifier\nI am using Sklearn's SVC for a classifier, with TfidfVectorizer as the feature embedding method. Following #478 I am using `skl2onnx` version 1.7.1. My code is here:\r\n\r\n```\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.svm import SVC\r\nimport numpy as np\r\n\r\nfrom skl2onnx import convert_sklearn\r\nfrom skl2onnx.common.data_types import FloatTensorType\r\n\r\n\r\n#import json\r\n#with open('fake_data.json', 'r') as f:\r\n# dataset = json.load(f)\r\n#docs = [doc for (doc, _) in dataset]\r\n#labels = [label for (_, label) in dataset]\r\n\r\ndata = [\r\n [\"schedule a meeting\", 0],\r\n [\"schedule a sync with the team\", 0],\r\n [\"slot in a meeting\", 0],\r\n [\"call ron\", 1],\r\n [\"make a phone call\", 1],\r\n [\"call in on the phone\", 2] # changing this from 2 to 1 will allow code to successfully run\r\n]\r\ndocs = [doc for (doc, _) in data]\r\nlabels = [label for (_, label) in data]\r\n\r\n\r\nvectorizer = TfidfVectorizer()\r\nvectorizer.fit_transform(docs)\r\nembeddings = vectorizer.transform(docs)\r\ndim = embeddings.shape[1]\r\n#embeddings = np.vstack(embeddings)\r\n\r\nclf = SVC()\r\nclf.fit(embeddings, labels)\r\n\r\ninitial_type = [('float_input', FloatTensorType([1, dim]))]\r\nonnx_model = convert_sklearn(clf, initial_types=initial_type) # this is line 37, where the crash occurs\r\nwith open('model.onnx', 'wb') as f:\r\n f.write(onnx_model.SerializeToString())\r\n```\r\n\r\nWhen I run the above, I get:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\common\\_container.py\", line 536, in add_node\r\n node = make_node(op_type, inputs, outputs, name=name,\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\proto\\onnx_helper_modified.py\", line 66, in make_node\r\n node.attribute.extend(\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\google\\protobuf\\internal\\containers.py\", line 410, in extend\r\n for message in elem_seq:\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\proto\\onnx_helper_modified.py\", line 67, in <genexpr>\r\n make_attribute(key, value, dtype=_dtype, domain=domain)\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\proto\\onnx_helper_modified.py\", line 175, in make_attribute\r\n raise ValueError(\r\nValueError: You passed in an iterable attribute but I cannot figure out its applicable type, key='coefficients', type=<class 'numpy.matrix'>, dtype=float32, types=[<class 'numpy.matrix'>, <class 'numpy.matrix'>].\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most 
recent call last):\r\n File \".\\dust.py\", line 37, in <module>\r\n onnx_model = convert_sklearn(clf, initial_types=initial_type)\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\convert.py\", line 160, in convert_sklearn\r\n onnx_model = convert_topology(topology, name, doc_string, target_opset,\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\common\\_topology.py\", line 1069, in convert_topology\r\n conv(scope, operator, container)\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\common\\_registration.py\", line 29, in __call__\r\n return self._fct(*args)\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\operator_converters\\support_vector_machines.py\", line 214, in convert_sklearn_svm_classifier\r\n container.add_node(\r\n File \"C:\\Users\\Stefan Larson\\AppData\\Local\\Programs\\Python\\Python38\\lib\\site-packages\\skl2onnx\\common\\_container.py\", line 539, in add_node\r\n raise ValueError(\"Unable to create node '{}' with name='{}'.\"\r\nValueError: Unable to create node 'SVMClassifier' with name='SVMc'.\r\n```\r\n\r\nThe code crashes at the `convert_sklearn` function call. However, when I alter the dataset to have only two classes ('1', and '0'), the code runs successfully.\n", "before_files": [{"content": "# Modified file from\n# https://github.com/onnx/onnx/blob/master/onnx/helper.py.\nimport collections\nimport numbers\n\nfrom onnx import (\n TensorProto, AttributeProto,\n NodeProto, GraphProto\n)\nfrom onnx.helper import ( # noqa\n make_tensor, make_model, make_graph, _to_bytes_or_false,\n make_tensor_value_info, ValueInfoProto\n)\n\ntry:\n from onnx import SparseTensorProto\n from onnx.helper import make_sparse_tensor # noqa\nexcept ImportError:\n # onnx is too old.\n SparseTensorProto = None\n\nfrom onnx.numpy_helper import from_array # noqa\nfrom typing import (\n Text, Sequence, Any, Optional,\n List, cast\n)\nimport numpy as np # type: ignore\n\n\ndef make_node(\n op_type, # type: Text\n inputs, # type: Sequence[Text]\n outputs, # type: Sequence[Text]\n name=None, # type: Optional[Text]\n doc_string=None, # type: Optional[Text]\n domain=None, # type: Optional[Text]\n _dtype=None, # type: [np.float32, np.float64]\n **kwargs # type: Any\n ): # type: (...) -> NodeProto\n \"\"\"Construct a NodeProto.\n\n Arguments:\n op_type (string): The name of the operator to construct\n inputs (list of string): list of input names\n outputs (list of string): list of output names\n name (string, default None): optional unique identifier for NodeProto\n doc_string (string, default None): optional documentation\n string for NodeProto\n dtype: dtype for double used to infer\n domain (string, default None): optional domain for NodeProto.\n If it's None, we will just use default domain (which is empty)\n **kwargs (dict): the attributes of the node. 
The acceptable values\n are documented in :func:`make_attribute`.\n \"\"\"\n node = NodeProto()\n node.op_type = op_type\n node.input.extend(inputs)\n node.output.extend(outputs)\n if name:\n node.name = name\n if doc_string:\n node.doc_string = doc_string\n if domain is not None:\n node.domain = domain\n if kwargs:\n node.attribute.extend(\n make_attribute(key, value, dtype=_dtype, domain=domain)\n for key, value in sorted(kwargs.items()))\n return node\n\n\ndef make_attribute(\n key, # type: Text\n value, # type: Any\n dtype=None, # type: [np.float32, np.float64]\n domain='', # type: Text\n doc_string=None # type: Optional[Text]\n ): # type: (...) -> AttributeProto\n \"\"\"Makes an AttributeProto based on the value type.\"\"\"\n attr = AttributeProto()\n attr.name = key\n if doc_string:\n attr.doc_string = doc_string\n\n is_iterable = isinstance(value, collections.abc.Iterable)\n bytes_or_false = _to_bytes_or_false(value)\n\n use_float64 = dtype == np.float64 and domain not in ('', 'ai.onnx.ml')\n\n if isinstance(value, np.float32):\n attr.f = value\n attr.type = AttributeProto.FLOAT\n elif isinstance(value, (float, np.float64)):\n if use_float64:\n attr.type = AttributeProto.TENSOR\n attr.t.CopyFrom(\n make_tensor(\n key, TensorProto.DOUBLE, (1, ), [value]))\n else:\n attr.f = value\n attr.type = AttributeProto.FLOAT\n elif isinstance(value, np.int32):\n attr.i = value\n attr.type = AttributeProto.INT\n elif isinstance(value, np.int64):\n attr.i = value\n attr.type = AttributeProto.INT\n elif isinstance(value, numbers.Integral):\n attr.i = value\n attr.type = AttributeProto.INT\n # string\n elif bytes_or_false is not False:\n assert isinstance(bytes_or_false, bytes)\n attr.s = bytes_or_false\n attr.type = AttributeProto.STRING\n elif isinstance(value, TensorProto):\n attr.t.CopyFrom(value)\n attr.type = AttributeProto.TENSOR\n elif (SparseTensorProto is not None and\n isinstance(value, SparseTensorProto)):\n attr.sparse_tensor.CopyFrom(value)\n attr.type = AttributeProto.SPARSE_TENSOR\n elif isinstance(value, GraphProto):\n attr.g.CopyFrom(value)\n attr.type = AttributeProto.GRAPH\n # third, iterable cases\n elif is_iterable:\n byte_array = [_to_bytes_or_false(v) for v in value]\n if all(isinstance(v, np.float32) for v in value):\n attr.floats.extend(value)\n attr.type = AttributeProto.FLOATS\n elif all(isinstance(v, np.float64) for v in value):\n if use_float64:\n attr.type = AttributeProto.TENSOR\n attr.t.CopyFrom(\n make_tensor(\n key, TensorProto.DOUBLE, (len(value), ), value))\n else:\n attr.floats.extend(value)\n attr.type = AttributeProto.FLOATS\n elif all(isinstance(v, float) for v in value):\n if use_float64:\n attr.type = AttributeProto.TENSOR\n attr.t.CopyFrom(\n make_tensor(\n key, TensorProto.DOUBLE, (len(value), ), value))\n else:\n attr.floats.extend(value)\n attr.type = AttributeProto.FLOATS\n elif all(isinstance(v, np.int32) for v in value):\n attr.ints.extend(int(v) for v in value)\n attr.type = AttributeProto.INTS\n elif all(isinstance(v, np.int64) for v in value):\n attr.ints.extend(int(v) for v in value)\n attr.type = AttributeProto.INTS\n elif all(isinstance(v, numbers.Integral) for v in value):\n # Turn np.int32/64 into Python built-in int.\n attr.ints.extend(int(v) for v in value)\n attr.type = AttributeProto.INTS\n elif all(map(lambda bytes_or_false: bytes_or_false is not False,\n byte_array)):\n attr.strings.extend(cast(List[bytes], byte_array))\n attr.type = AttributeProto.STRINGS\n elif all(isinstance(v, TensorProto) for v in value):\n 
attr.tensors.extend(value)\n attr.type = AttributeProto.TENSORS\n elif (SparseTensorProto is not None and\n all(isinstance(v, SparseTensorProto) for v in value)):\n attr.sparse_tensors.extend(value)\n attr.type = AttributeProto.SPARSE_TENSORS\n elif all(isinstance(v, GraphProto) for v in value):\n attr.graphs.extend(value)\n attr.type = AttributeProto.GRAPHS\n else:\n raise ValueError(\n \"You passed in an iterable attribute but I cannot figure out \"\n \"its applicable type, key='{}', type={}, dtype={}, \"\n \"types={}.\".format(\n key, type(value), dtype,\n [type(_) for _, __ in zip(value, range(0, 5))]))\n else:\n raise ValueError(\n \"Value '{}' is not valid attribute data type for attribute \"\n \"'{}'.\".format(value, key))\n return attr\n\n\ndef get_attribute_value(attr): # type: (AttributeProto) -> Any\n if attr.type == AttributeProto.FLOAT:\n return attr.f\n elif attr.type == AttributeProto.INT:\n return attr.i\n elif attr.type == AttributeProto.STRING:\n return attr.s\n elif attr.type == AttributeProto.TENSOR:\n return attr.t\n elif attr.type == AttributeProto.GRAPH:\n return attr.g\n elif attr.type == AttributeProto.FLOATS:\n return list(attr.floats)\n elif attr.type == AttributeProto.INTS:\n return list(attr.ints)\n elif attr.type == AttributeProto.STRINGS:\n return list(attr.strings)\n elif attr.type == AttributeProto.TENSORS:\n return list(attr.tensors)\n elif attr.type == AttributeProto.GRAPHS:\n return list(attr.graphs)\n else:\n raise ValueError(\"Unsupported ONNX attribute: {}\".format(attr))\n", "path": "skl2onnx/proto/onnx_helper_modified.py"}]}
| 4,028 | 165 |
gh_patches_debug_18105
|
rasdani/github-patches
|
git_diff
|
tobymao__sqlglot-2523
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Transpiling built-in functions is not accurate.
**Before you file an issue**
- updated to 19.0.3
**Fully reproducible code snippet**
I am transpiling Oracle SQL to Postgres. There are multiple functions that fail when executed.
Here is an example.
`sql_in_oracle = "to_char(123)"`
`sqlglot.transpile(sql_in_oracle, read='oracle', write='postgres')`
Output: `['to_char(123)']`
Expected: `['123::text']` or whatever is valid in Postgres
Postgres supports `to_char(expr, format)`, but not `to_char(expr)`.
I have many functions that cannot be tranpiled correctly in my query list, such as,
Truncate datetime to day:
TRUNC(datetime) -- Valid Oracle
Output: TRUNC(datetime) -- Invalid Postgres
Expected: DATE_TRUNC('day', datetime) -- Valid Postgres
**Official Documentation**
[sqlines](https://sqlines.com/oracle-to-postgresql) has an online tool I tried once. On its website, it lists many migration conversions for different databases. It might be a helpful resource for you to check out. Thanks.
</issue>
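A sketch (not the project's actual fix) of how the first example could be handled on the Postgres side, following the `TRANSFORMS` style visible in the dialect code below; it assumes Oracle's `TO_CHAR` parses to `exp.ToChar` with its optional format string stored under the `format` arg:

```python
from sqlglot import exp
from sqlglot.dialects.postgres import Postgres


def _to_char_sql(self, e):
    # Postgres has no single-argument TO_CHAR, so fall back to a plain text cast.
    if not e.args.get("format"):
        return f"CAST({self.sql(e, 'this')} AS TEXT)"
    return self.function_fallback_sql(e)


Postgres.Generator.TRANSFORMS[exp.ToChar] = _to_char_sql
```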
<code>
[start of sqlglot/dialects/oracle.py]
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import seq_get
8 from sqlglot.tokens import TokenType
9
10 if t.TYPE_CHECKING:
11 from sqlglot._typing import E
12
13
14 def _parse_xml_table(self: Oracle.Parser) -> exp.XMLTable:
15 this = self._parse_string()
16
17 passing = None
18 columns = None
19
20 if self._match_text_seq("PASSING"):
21 # The BY VALUE keywords are optional and are provided for semantic clarity
22 self._match_text_seq("BY", "VALUE")
23 passing = self._parse_csv(self._parse_column)
24
25 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
26
27 if self._match_text_seq("COLUMNS"):
28 columns = self._parse_csv(self._parse_field_def)
29
30 return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)
31
32
33 class Oracle(Dialect):
34 ALIAS_POST_TABLESAMPLE = True
35 LOCKING_READS_SUPPORTED = True
36
37 # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm
38 RESOLVES_IDENTIFIERS_AS_UPPERCASE = True
39 ALTER_TABLE_ADD_COLUMN_KEYWORD = False
40
41 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
42 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
43 TIME_MAPPING = {
44 "AM": "%p", # Meridian indicator with or without periods
45 "A.M.": "%p", # Meridian indicator with or without periods
46 "PM": "%p", # Meridian indicator with or without periods
47 "P.M.": "%p", # Meridian indicator with or without periods
48 "D": "%u", # Day of week (1-7)
49 "DAY": "%A", # name of day
50 "DD": "%d", # day of month (1-31)
51 "DDD": "%j", # day of year (1-366)
52 "DY": "%a", # abbreviated name of day
53 "HH": "%I", # Hour of day (1-12)
54 "HH12": "%I", # alias for HH
55 "HH24": "%H", # Hour of day (0-23)
56 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
57 "MI": "%M", # Minute (0-59)
58 "MM": "%m", # Month (01-12; January = 01)
59 "MON": "%b", # Abbreviated name of month
60 "MONTH": "%B", # Name of month
61 "SS": "%S", # Second (0-59)
62 "WW": "%W", # Week of year (1-53)
63 "YY": "%y", # 15
64 "YYYY": "%Y", # 2015
65 }
66
67 class Parser(parser.Parser):
68 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
69
70 FUNCTIONS = {
71 **parser.Parser.FUNCTIONS,
72 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
73 }
74
75 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
76 **parser.Parser.FUNCTION_PARSERS,
77 "JSON_ARRAY": lambda self: self._parse_json_array(
78 exp.JSONArray,
79 expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),
80 ),
81 "JSON_ARRAYAGG": lambda self: self._parse_json_array(
82 exp.JSONArrayAgg,
83 this=self._parse_format_json(self._parse_bitwise()),
84 order=self._parse_order(),
85 ),
86 "XMLTABLE": _parse_xml_table,
87 }
88
89 TYPE_LITERAL_PARSERS = {
90 exp.DataType.Type.DATE: lambda self, this, _: self.expression(
91 exp.DateStrToDate, this=this
92 )
93 }
94
95 # SELECT UNIQUE .. is old-style Oracle syntax for SELECT DISTINCT ..
96 # Reference: https://stackoverflow.com/a/336455
97 DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}
98
99 def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
100 return self.expression(
101 expr_type,
102 null_handling=self._parse_on_handling("NULL", "NULL", "ABSENT"),
103 return_type=self._match_text_seq("RETURNING") and self._parse_type(),
104 strict=self._match_text_seq("STRICT"),
105 **kwargs,
106 )
107
108 def _parse_column(self) -> t.Optional[exp.Expression]:
109 column = super()._parse_column()
110 if column:
111 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
112 return column
113
114 def _parse_hint(self) -> t.Optional[exp.Hint]:
115 if self._match(TokenType.HINT):
116 start = self._curr
117 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
118 self._advance()
119
120 if not self._curr:
121 self.raise_error("Expected */ after HINT")
122
123 end = self._tokens[self._index - 3]
124 return exp.Hint(expressions=[self._find_sql(start, end)])
125
126 return None
127
128 class Generator(generator.Generator):
129 LOCKING_READS_SUPPORTED = True
130 JOIN_HINTS = False
131 TABLE_HINTS = False
132 COLUMN_JOIN_MARKS_SUPPORTED = True
133 DATA_TYPE_SPECIFIERS_ALLOWED = True
134 ALTER_TABLE_ADD_COLUMN_KEYWORD = False
135
136 LIMIT_FETCH = "FETCH"
137
138 TYPE_MAPPING = {
139 **generator.Generator.TYPE_MAPPING,
140 exp.DataType.Type.TINYINT: "NUMBER",
141 exp.DataType.Type.SMALLINT: "NUMBER",
142 exp.DataType.Type.INT: "NUMBER",
143 exp.DataType.Type.BIGINT: "NUMBER",
144 exp.DataType.Type.DECIMAL: "NUMBER",
145 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
146 exp.DataType.Type.VARCHAR: "VARCHAR2",
147 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
148 exp.DataType.Type.NCHAR: "NCHAR",
149 exp.DataType.Type.TEXT: "CLOB",
150 exp.DataType.Type.BINARY: "BLOB",
151 exp.DataType.Type.VARBINARY: "BLOB",
152 }
153
154 TRANSFORMS = {
155 **generator.Generator.TRANSFORMS,
156 exp.DateStrToDate: lambda self, e: self.func(
157 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
158 ),
159 exp.Group: transforms.preprocess([transforms.unalias_group]),
160 exp.ILike: no_ilike_sql,
161 exp.Select: transforms.preprocess(
162 [
163 transforms.eliminate_distinct_on,
164 transforms.eliminate_qualify,
165 ]
166 ),
167 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
168 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
169 exp.Substring: rename_func("SUBSTR"),
170 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
171 exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=" "),
172 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
173 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
174 exp.Trim: trim_sql,
175 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
176 }
177
178 PROPERTIES_LOCATION = {
179 **generator.Generator.PROPERTIES_LOCATION,
180 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
181 }
182
183 def offset_sql(self, expression: exp.Offset) -> str:
184 return f"{super().offset_sql(expression)} ROWS"
185
186 def xmltable_sql(self, expression: exp.XMLTable) -> str:
187 this = self.sql(expression, "this")
188 passing = self.expressions(expression, key="passing")
189 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
190 columns = self.expressions(expression, key="columns")
191 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
192 by_ref = (
193 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
194 )
195 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
196
197 def add_column_sql(self, expression: exp.AlterTable) -> str:
198 actions = self.expressions(expression, key="actions", flat=True)
199 if len(expression.args.get("actions", [])) > 1:
200 return f"ADD ({actions})"
201 return f"ADD {actions}"
202
203 class Tokenizer(tokens.Tokenizer):
204 VAR_SINGLE_TOKENS = {"@", "$", "#"}
205
206 KEYWORDS = {
207 **tokens.Tokenizer.KEYWORDS,
208 "(+)": TokenType.JOIN_MARKER,
209 "BINARY_DOUBLE": TokenType.DOUBLE,
210 "BINARY_FLOAT": TokenType.FLOAT,
211 "COLUMNS": TokenType.COLUMN,
212 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
213 "MINUS": TokenType.EXCEPT,
214 "NVARCHAR2": TokenType.NVARCHAR,
215 "SAMPLE": TokenType.TABLE_SAMPLE,
216 "START": TokenType.BEGIN,
217 "TOP": TokenType.TOP,
218 "VARCHAR2": TokenType.VARCHAR,
219 }
220
[end of sqlglot/dialects/oracle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -3,7 +3,13 @@
import typing as t
from sqlglot import exp, generator, parser, tokens, transforms
-from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
+from sqlglot.dialects.dialect import (
+ Dialect,
+ format_time_lambda,
+ no_ilike_sql,
+ rename_func,
+ trim_sql,
+)
from sqlglot.helper import seq_get
from sqlglot.tokens import TokenType
@@ -70,6 +76,7 @@
FUNCTIONS = {
**parser.Parser.FUNCTIONS,
"SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
+ "TO_CHAR": format_time_lambda(exp.TimeToStr, "oracle", default=True),
}
FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
|
{"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -3,7 +3,13 @@\n import typing as t\n \n from sqlglot import exp, generator, parser, tokens, transforms\n-from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\n+from sqlglot.dialects.dialect import (\n+ Dialect,\n+ format_time_lambda,\n+ no_ilike_sql,\n+ rename_func,\n+ trim_sql,\n+)\n from sqlglot.helper import seq_get\n from sqlglot.tokens import TokenType\n \n@@ -70,6 +76,7 @@\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n+ \"TO_CHAR\": format_time_lambda(exp.TimeToStr, \"oracle\", default=True),\n }\n \n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n", "issue": "[Bug] Transpiling built-in functions is not accurate.\n**Before you file an issue**\r\n- updated to 19.0.3\r\n\r\n**Fully reproducible code snippet**\r\nI am transpiling Oracle SQL to Postgres. There are multiple functions that fail when executed.\r\nHere is an example.\r\n`sql_in_oracle = \"to_char(123)\"`\r\n`sqlglot.transpile(sql_in_oracle, read='oracle', write='postgres')`\r\n\r\nOutput: `['to_char(123)']`\r\nExpected: `['123::text']` or whatever is valid in Postgres\r\nPostgres supports `to_char(expr, format)`, but not `to_char(expr)`.\r\n\r\nI have many functions that cannot be tranpiled correctly in my query list, such as,\r\n\r\nTruncate\u00a0datetime\u00a0to day:\r\nTRUNC(datetime) -- Valid Oracle\r\nOutput: TRUNC(datetime) -- Invalid Postgres\r\nExpected: DATE_TRUNC('day',\u00a0datetime) -- Valid postgres\r\n\r\n\r\n\r\n**Official Documentation**\r\n[sqlines](https://sqlines.com/oracle-to-postgresql) has an online tool I tried once. On its website, it lists many migration conversions for different databases. It might be a helpful resource for you to check out. 
Thanks.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\nif t.TYPE_CHECKING:\n from sqlglot._typing import E\n\n\ndef _parse_xml_table(self: Oracle.Parser) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(self._parse_field_def)\n\n return self.expression(exp.XMLTable, this=this, passing=passing, columns=columns, by_ref=by_ref)\n\n\nclass Oracle(Dialect):\n ALIAS_POST_TABLESAMPLE = True\n LOCKING_READS_SUPPORTED = True\n\n # See section 8: https://docs.oracle.com/cd/A97630_01/server.920/a96540/sql_elements9a.htm\n RESOLVES_IDENTIFIERS_AS_UPPERCASE = True\n ALTER_TABLE_ADD_COLUMN_KEYWORD = False\n\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n TIME_MAPPING = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS,\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"JSON_ARRAY\": lambda self: self._parse_json_array(\n exp.JSONArray,\n expressions=self._parse_csv(lambda: self._parse_format_json(self._parse_bitwise())),\n ),\n \"JSON_ARRAYAGG\": lambda self: self._parse_json_array(\n exp.JSONArrayAgg,\n this=self._parse_format_json(self._parse_bitwise()),\n order=self._parse_order(),\n ),\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n # SELECT UNIQUE .. 
is old-style Oracle syntax for SELECT DISTINCT ..\n # Reference: https://stackoverflow.com/a/336455\n DISTINCT_TOKENS = {TokenType.DISTINCT, TokenType.UNIQUE}\n\n def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:\n return self.expression(\n expr_type,\n null_handling=self._parse_on_handling(\"NULL\", \"NULL\", \"ABSENT\"),\n return_type=self._match_text_seq(\"RETURNING\") and self._parse_type(),\n strict=self._match_text_seq(\"STRICT\"),\n **kwargs,\n )\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Hint]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n COLUMN_JOIN_MARKS_SUPPORTED = True\n DATA_TYPE_SPECIFIERS_ALLOWED = True\n ALTER_TABLE_ADD_COLUMN_KEYWORD = False\n\n LIMIT_FETCH = \"FETCH\"\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING,\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.NCHAR: \"NCHAR\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS,\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.ILike: no_ilike_sql,\n exp.Select: transforms.preprocess(\n [\n transforms.eliminate_distinct_on,\n transforms.eliminate_qualify,\n ]\n ),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TableSample: lambda self, e: self.tablesample_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION,\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if 
expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n def add_column_sql(self, expression: exp.AlterTable) -> str:\n actions = self.expressions(expression, key=\"actions\", flat=True)\n if len(expression.args.get(\"actions\", [])) > 1:\n return f\"ADD ({actions})\"\n return f\"ADD {actions}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\", \"$\", \"#\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"BINARY_DOUBLE\": TokenType.DOUBLE,\n \"BINARY_FLOAT\": TokenType.FLOAT,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"SAMPLE\": TokenType.TABLE_SAMPLE,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]}
| 3,585 | 252 |
gh_patches_debug_9103
|
rasdani/github-patches
|
git_diff
|
opsdroid__opsdroid-30
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Copy message on respond
When a message responds, it updates its `text` value and passes itself to the connector. Because the message object is passed by reference in Python, the next rule to parse the message goes on to parse the response text instead of the original.
The message respond method should create a shallow copy of itself to pass to the connector, instead of updating itself directly.
</issue>
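The aliasing behaviour described above, and the shallow-copy fix applied in the patch further down, can be illustrated with a trimmed-down stand-in for the message class. The two-argument constructor and the `DummyConnector` helper are simplifications for this sketch only (the real class also carries `user` and `room`), not opsdroid's actual API:

```python
from copy import copy


class DummyConnector:
    """Stand-in connector that records the text it is asked to send."""

    def __init__(self):
        self.sent = []

    def respond(self, message):
        self.sent.append(message.text)


class Message:
    """Simplified message; the real class also stores user and room."""

    def __init__(self, text, connector):
        self.text = text
        self.connector = connector

    def respond(self, text):
        # Copy first so the original message object stays untouched;
        # mutating self.text directly is exactly the aliasing bug above.
        response = copy(self)
        response.text = text
        self.connector.respond(response)


connector = DummyConnector()
msg = Message("hi bot", connector)
msg.respond("hello human")

assert connector.sent == ["hello human"]
assert msg.text == "hi bot"  # later rules still see the user's original text
```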
<code>
[start of opsdroid/message.py]
1 """Class to encapsulate a message."""
2
3
4 class Message:
5 # pylint: disable=too-few-public-methods
6 """A message object."""
7
8 def __init__(self, text, user, room, connector):
9 """Create object with minimum properties."""
10 self.text = text
11 self.user = user
12 self.room = room
13 self.connector = connector
14 self.regex = None
15
16 def respond(self, text):
17 """Respond to this message using the connector it was created by."""
18 self.text = text
19 self.connector.respond(self)
20
[end of opsdroid/message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/opsdroid/message.py b/opsdroid/message.py
--- a/opsdroid/message.py
+++ b/opsdroid/message.py
@@ -1,5 +1,7 @@
"""Class to encapsulate a message."""
+from copy import copy
+
class Message:
# pylint: disable=too-few-public-methods
@@ -15,5 +17,6 @@
def respond(self, text):
"""Respond to this message using the connector it was created by."""
- self.text = text
- self.connector.respond(self)
+ response = copy(self)
+ response.text = text
+ self.connector.respond(response)
|
{"golden_diff": "diff --git a/opsdroid/message.py b/opsdroid/message.py\n--- a/opsdroid/message.py\n+++ b/opsdroid/message.py\n@@ -1,5 +1,7 @@\n \"\"\"Class to encapsulate a message.\"\"\"\n \n+from copy import copy\n+\n \n class Message:\n # pylint: disable=too-few-public-methods\n@@ -15,5 +17,6 @@\n \n def respond(self, text):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n- self.text = text\n- self.connector.respond(self)\n+ response = copy(self)\n+ response.text = text\n+ self.connector.respond(response)\n", "issue": "Copy message on respond\nWhen a message responds it updates it's `text` value and passes itself to the connector. Due to pointers in Python the next rule to parse the message goes on to parse the response text.\n\nThe message respond method should create a shallow copy of itself to pass to the connector, instead of updating itself directly.\n\n", "before_files": [{"content": "\"\"\"Class to encapsulate a message.\"\"\"\n\n\nclass Message:\n # pylint: disable=too-few-public-methods\n \"\"\"A message object.\"\"\"\n\n def __init__(self, text, user, room, connector):\n \"\"\"Create object with minimum properties.\"\"\"\n self.text = text\n self.user = user\n self.room = room\n self.connector = connector\n self.regex = None\n\n def respond(self, text):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n self.text = text\n self.connector.respond(self)\n", "path": "opsdroid/message.py"}]}
| 754 | 148 |
gh_patches_debug_3200
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-3717
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2.0: AttributeError on editing user through admin
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): `next`
* Python version:
* Database platform, version:
* Middleware(s):
### Steps to Reproduce
In the Nautobot admin, navigate to a specific user to edit it.
<!-- What did you expect to happen? -->
### Expected Behavior
User edit page to load
<!-- What happened instead? -->
### Observed Behavior
```
AttributeError at /admin/users/user/9fc58549-2485-4f88-a4e5-ec4986fe644e/change/
'QuerySet' object has no attribute 'nocache'
```
</issue>
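For context on the error above: `nocache()` is not part of Django's stock `QuerySet` API; it is typically monkey-patched onto querysets by a third-party caching layer such as django-cacheops, so once that layer is removed the chained call raises `AttributeError`. The patch further down simply drops the call. If the call had to remain optional instead, a guarded variant along these lines would work (a sketch only, mirroring the inline's `get_queryset` shown below):

```python
def get_queryset(self, request):
    queryset = super().get_queryset(request).prefetch_related("objectpermission__object_types")
    # `.nocache()` exists only when a caching layer (e.g. django-cacheops)
    # patches it onto QuerySet, so guard the call for plain Django setups.
    if hasattr(queryset, "nocache"):
        queryset = queryset.nocache()
    return queryset
```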
<code>
[start of nautobot/users/admin.py]
1 from django import forms
2 from django.contrib import admin
3 from django.contrib.auth import get_user_model
4 from django.contrib.auth.admin import UserAdmin as UserAdmin_
5 from django.contrib.auth.models import Group
6 from django.contrib.contenttypes.models import ContentType
7 from django.core.exceptions import FieldError, ValidationError
8 from django.db import models
9
10 from nautobot.core.admin import NautobotModelAdmin
11 from nautobot.extras.admin import order_content_types
12 from nautobot.users.models import AdminGroup, ObjectPermission, Token, User
13
14
15 #
16 # Inline models
17 #
18
19
20 class ObjectPermissionInline(admin.TabularInline):
21 exclude = None
22 extra = 3
23 readonly_fields = ["object_types", "actions", "constraints"]
24 verbose_name = "Permission"
25 verbose_name_plural = "Permissions"
26
27 def get_queryset(self, request):
28 return super().get_queryset(request).prefetch_related("objectpermission__object_types").nocache()
29
30 @staticmethod
31 def object_types(instance):
32 # Don't call .values_list() here because we want to reference the pre-fetched object_types
33 return ", ".join([ot.name for ot in instance.objectpermission.object_types.all()])
34
35 @staticmethod
36 def actions(instance):
37 return ", ".join(instance.objectpermission.actions)
38
39 @staticmethod
40 def constraints(instance):
41 return instance.objectpermission.constraints
42
43
44 class GroupObjectPermissionInline(ObjectPermissionInline):
45 model = AdminGroup.object_permissions.through
46
47
48 class UserObjectPermissionInline(ObjectPermissionInline):
49 model = get_user_model().object_permissions.through
50
51
52 #
53 # Users & groups
54 #
55
56 # Unregister the built-in GroupAdmin class so that we can use our custom admin class below
57 admin.site.unregister(Group)
58
59
60 @admin.register(AdminGroup)
61 class GroupAdmin(NautobotModelAdmin):
62 fields = ("name",)
63 list_display = ("name", "user_count")
64 ordering = ("name",)
65 search_fields = ("name",)
66 inlines = [GroupObjectPermissionInline]
67
68 @staticmethod
69 def user_count(obj):
70 return obj.user_set.count()
71
72
73 @admin.register(User)
74 class UserAdmin(UserAdmin_):
75 list_display = [
76 "username",
77 "email",
78 "first_name",
79 "last_name",
80 "is_superuser",
81 "is_staff",
82 "is_active",
83 ]
84 fieldsets = (
85 (
86 None,
87 {"fields": ("username", "password", "first_name", "last_name", "email")},
88 ),
89 ("Groups", {"fields": ("groups",)}),
90 (
91 "Status",
92 {
93 "fields": ("is_active", "is_staff", "is_superuser"),
94 },
95 ),
96 ("Important dates", {"fields": ("last_login", "date_joined")}),
97 ("User Preferences", {"fields": ("config_data",)}),
98 )
99 filter_horizontal = ("groups",)
100 formfield_overrides = NautobotModelAdmin.formfield_overrides
101 readonly_fields = ("config_data",)
102
103 def get_inlines(self, request, obj):
104 if obj is not None:
105 return (UserObjectPermissionInline,)
106 return ()
107
108
109 #
110 # REST API tokens
111 #
112
113
114 class TokenAdminForm(forms.ModelForm):
115 key = forms.CharField(
116 required=False,
117 help_text="If no key is provided, one will be generated automatically.",
118 )
119
120 class Meta:
121 fields = ["user", "key", "write_enabled", "expires", "description"]
122 model = Token
123
124
125 @admin.register(Token)
126 class TokenAdmin(NautobotModelAdmin):
127 form = TokenAdminForm
128 list_display = ["key", "user", "created", "expires", "write_enabled", "description"]
129
130
131 #
132 # Permissions
133 #
134
135
136 class ObjectPermissionForm(forms.ModelForm):
137 can_view = forms.BooleanField(required=False)
138 can_add = forms.BooleanField(required=False)
139 can_change = forms.BooleanField(required=False)
140 can_delete = forms.BooleanField(required=False)
141
142 class Meta:
143 model = ObjectPermission
144 exclude = []
145 help_texts = {
146 "actions": "Actions granted in addition to those listed above",
147 "constraints": "JSON expression of a queryset filter that will return only permitted objects. Leave null "
148 "to match all objects of this type. A list of multiple objects will result in a logical OR "
149 "operation.",
150 }
151 labels = {"actions": "Additional actions"}
152 widgets = {"constraints": forms.Textarea(attrs={"class": "vLargeTextField"})}
153
154 def __init__(self, *args, **kwargs):
155 super().__init__(*args, **kwargs)
156
157 # Make the actions field optional since the admin form uses it only for non-CRUD actions
158 self.fields["actions"].required = False
159
160 # Format ContentType choices
161 order_content_types(self.fields["object_types"])
162 self.fields["object_types"].choices.insert(0, ("", "---------"))
163
164 # Order group and user fields
165 self.fields["groups"].queryset = self.fields["groups"].queryset.order_by("name")
166 self.fields["users"].queryset = self.fields["users"].queryset.order_by("username")
167
168 # Check the appropriate checkboxes when editing an existing ObjectPermission
169 if self.instance.present_in_database:
170 for action in ["view", "add", "change", "delete"]:
171 if action in self.instance.actions:
172 self.fields[f"can_{action}"].initial = True
173 self.instance.actions.remove(action)
174
175 def clean(self):
176 super().clean()
177
178 object_types = self.cleaned_data.get("object_types")
179 constraints = self.cleaned_data.get("constraints")
180
181 # Append any of the selected CRUD checkboxes to the actions list
182 if not self.cleaned_data.get("actions"):
183 self.cleaned_data["actions"] = []
184 for action in ["view", "add", "change", "delete"]:
185 if self.cleaned_data[f"can_{action}"] and action not in self.cleaned_data["actions"]:
186 self.cleaned_data["actions"].append(action)
187
188 # At least one action must be specified
189 if not self.cleaned_data["actions"]:
190 raise ValidationError("At least one action must be selected.")
191
192 # Validate the specified model constraints by attempting to execute a query. We don't care whether the query
193 # returns anything; we just want to make sure the specified constraints are valid.
194 if object_types and constraints:
195 # Normalize the constraints to a list of dicts
196 if not isinstance(constraints, list):
197 constraints = [constraints]
198 for ct in object_types:
199 model = ct.model_class()
200 try:
201 model.objects.filter(*[models.Q(**c) for c in constraints]).exists()
202 except FieldError as e:
203 raise ValidationError({"constraints": f"Invalid filter for {model}: {e}"})
204
205
206 class ActionListFilter(admin.SimpleListFilter):
207 title = "action"
208 parameter_name = "action"
209
210 def lookups(self, request, model_admin):
211 options = set()
212 for action_list in ObjectPermission.objects.values_list("actions", flat=True).distinct():
213 options.update(action_list)
214 return [(action, action) for action in sorted(options)]
215
216 def queryset(self, request, queryset):
217 if self.value():
218 return queryset.filter(actions=[self.value()])
219 return None
220
221
222 class ObjectTypeListFilter(admin.SimpleListFilter):
223 title = "object type"
224 parameter_name = "object_type"
225
226 def lookups(self, request, model_admin):
227 object_types = ObjectPermission.objects.values_list("object_types__pk", flat=True).distinct()
228 content_types = ContentType.objects.filter(pk__in=object_types).order_by("app_label", "model")
229 return [(ct.pk, ct) for ct in content_types]
230
231 def queryset(self, request, queryset):
232 if self.value():
233 return queryset.filter(object_types=self.value())
234 return None
235
236
237 @admin.register(ObjectPermission)
238 class ObjectPermissionAdmin(NautobotModelAdmin):
239 actions = ("enable", "disable")
240 fieldsets = (
241 (None, {"fields": ("name", "description", "enabled")}),
242 (
243 "Actions",
244 {
245 "fields": (
246 ("can_view", "can_add", "can_change", "can_delete"),
247 "actions",
248 )
249 },
250 ),
251 ("Objects", {"fields": ("object_types",)}),
252 ("Assignment", {"fields": ("groups", "users")}),
253 ("Constraints", {"fields": ("constraints",), "classes": ("monospace",)}),
254 )
255 filter_horizontal = ("object_types", "groups", "users")
256 form = ObjectPermissionForm
257 list_display = [
258 "name",
259 "enabled",
260 "list_models",
261 "list_users",
262 "list_groups",
263 "actions",
264 "constraints",
265 "description",
266 ]
267 list_filter = ["enabled", ActionListFilter, ObjectTypeListFilter, "groups", "users"]
268 search_fields = ["actions", "constraints", "description", "name"]
269
270 def get_queryset(self, request):
271 return super().get_queryset(request).prefetch_related("object_types", "users", "groups")
272
273 def list_models(self, obj):
274 return ", ".join([f"{ct}" for ct in obj.object_types.all()])
275
276 list_models.short_description = "Models"
277
278 def list_users(self, obj):
279 return ", ".join([u.username for u in obj.users.all()])
280
281 list_users.short_description = "Users"
282
283 def list_groups(self, obj):
284 return ", ".join([g.name for g in obj.groups.all()])
285
286 list_groups.short_description = "Groups"
287
288 #
289 # Admin actions
290 #
291
292 def enable(self, request, queryset):
293 updated = queryset.update(enabled=True)
294 self.message_user(request, f"Enabled {updated} permissions")
295
296 def disable(self, request, queryset):
297 updated = queryset.update(enabled=False)
298 self.message_user(request, f"Disabled {updated} permissions")
299
[end of nautobot/users/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/users/admin.py b/nautobot/users/admin.py
--- a/nautobot/users/admin.py
+++ b/nautobot/users/admin.py
@@ -25,7 +25,7 @@
verbose_name_plural = "Permissions"
def get_queryset(self, request):
- return super().get_queryset(request).prefetch_related("objectpermission__object_types").nocache()
+ return super().get_queryset(request).prefetch_related("objectpermission__object_types")
@staticmethod
def object_types(instance):
|
{"golden_diff": "diff --git a/nautobot/users/admin.py b/nautobot/users/admin.py\n--- a/nautobot/users/admin.py\n+++ b/nautobot/users/admin.py\n@@ -25,7 +25,7 @@\n verbose_name_plural = \"Permissions\"\n \n def get_queryset(self, request):\n- return super().get_queryset(request).prefetch_related(\"objectpermission__object_types\").nocache()\n+ return super().get_queryset(request).prefetch_related(\"objectpermission__object_types\")\n \n @staticmethod\n def object_types(instance):\n", "issue": "2.0: AttributeError on editing user through admin\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): `next`\r\n* Python version:\r\n* Database platform, version:\r\n* Middleware(s):\r\n\r\n\r\n### Steps to Reproduce\r\nIn the Nautobot admin, navigate to a specific user to edit it.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nUser edit page to load\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\n\r\n```\r\nAttributeError at /admin/users/user/9fc58549-2485-4f88-a4e5-ec4986fe644e/change/\r\n\r\n'QuerySet' object has no attribute 'nocache'\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.contrib import admin\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.admin import UserAdmin as UserAdmin_\nfrom django.contrib.auth.models import Group\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import FieldError, ValidationError\nfrom django.db import models\n\nfrom nautobot.core.admin import NautobotModelAdmin\nfrom nautobot.extras.admin import order_content_types\nfrom nautobot.users.models import AdminGroup, ObjectPermission, Token, User\n\n\n#\n# Inline models\n#\n\n\nclass ObjectPermissionInline(admin.TabularInline):\n exclude = None\n extra = 3\n readonly_fields = [\"object_types\", \"actions\", \"constraints\"]\n verbose_name = \"Permission\"\n verbose_name_plural = \"Permissions\"\n\n def get_queryset(self, request):\n return super().get_queryset(request).prefetch_related(\"objectpermission__object_types\").nocache()\n\n @staticmethod\n def object_types(instance):\n # Don't call .values_list() here because we want to reference the pre-fetched object_types\n return \", \".join([ot.name for ot in instance.objectpermission.object_types.all()])\n\n @staticmethod\n def actions(instance):\n return \", \".join(instance.objectpermission.actions)\n\n @staticmethod\n def constraints(instance):\n return instance.objectpermission.constraints\n\n\nclass GroupObjectPermissionInline(ObjectPermissionInline):\n model = AdminGroup.object_permissions.through\n\n\nclass UserObjectPermissionInline(ObjectPermissionInline):\n model = get_user_model().object_permissions.through\n\n\n#\n# Users & groups\n#\n\n# Unregister the built-in GroupAdmin class so that we can use our custom admin class below\nadmin.site.unregister(Group)\n\n\[email protected](AdminGroup)\nclass GroupAdmin(NautobotModelAdmin):\n fields = 
(\"name\",)\n list_display = (\"name\", \"user_count\")\n ordering = (\"name\",)\n search_fields = (\"name\",)\n inlines = [GroupObjectPermissionInline]\n\n @staticmethod\n def user_count(obj):\n return obj.user_set.count()\n\n\[email protected](User)\nclass UserAdmin(UserAdmin_):\n list_display = [\n \"username\",\n \"email\",\n \"first_name\",\n \"last_name\",\n \"is_superuser\",\n \"is_staff\",\n \"is_active\",\n ]\n fieldsets = (\n (\n None,\n {\"fields\": (\"username\", \"password\", \"first_name\", \"last_name\", \"email\")},\n ),\n (\"Groups\", {\"fields\": (\"groups\",)}),\n (\n \"Status\",\n {\n \"fields\": (\"is_active\", \"is_staff\", \"is_superuser\"),\n },\n ),\n (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n (\"User Preferences\", {\"fields\": (\"config_data\",)}),\n )\n filter_horizontal = (\"groups\",)\n formfield_overrides = NautobotModelAdmin.formfield_overrides\n readonly_fields = (\"config_data\",)\n\n def get_inlines(self, request, obj):\n if obj is not None:\n return (UserObjectPermissionInline,)\n return ()\n\n\n#\n# REST API tokens\n#\n\n\nclass TokenAdminForm(forms.ModelForm):\n key = forms.CharField(\n required=False,\n help_text=\"If no key is provided, one will be generated automatically.\",\n )\n\n class Meta:\n fields = [\"user\", \"key\", \"write_enabled\", \"expires\", \"description\"]\n model = Token\n\n\[email protected](Token)\nclass TokenAdmin(NautobotModelAdmin):\n form = TokenAdminForm\n list_display = [\"key\", \"user\", \"created\", \"expires\", \"write_enabled\", \"description\"]\n\n\n#\n# Permissions\n#\n\n\nclass ObjectPermissionForm(forms.ModelForm):\n can_view = forms.BooleanField(required=False)\n can_add = forms.BooleanField(required=False)\n can_change = forms.BooleanField(required=False)\n can_delete = forms.BooleanField(required=False)\n\n class Meta:\n model = ObjectPermission\n exclude = []\n help_texts = {\n \"actions\": \"Actions granted in addition to those listed above\",\n \"constraints\": \"JSON expression of a queryset filter that will return only permitted objects. Leave null \"\n \"to match all objects of this type. 
A list of multiple objects will result in a logical OR \"\n \"operation.\",\n }\n labels = {\"actions\": \"Additional actions\"}\n widgets = {\"constraints\": forms.Textarea(attrs={\"class\": \"vLargeTextField\"})}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Make the actions field optional since the admin form uses it only for non-CRUD actions\n self.fields[\"actions\"].required = False\n\n # Format ContentType choices\n order_content_types(self.fields[\"object_types\"])\n self.fields[\"object_types\"].choices.insert(0, (\"\", \"---------\"))\n\n # Order group and user fields\n self.fields[\"groups\"].queryset = self.fields[\"groups\"].queryset.order_by(\"name\")\n self.fields[\"users\"].queryset = self.fields[\"users\"].queryset.order_by(\"username\")\n\n # Check the appropriate checkboxes when editing an existing ObjectPermission\n if self.instance.present_in_database:\n for action in [\"view\", \"add\", \"change\", \"delete\"]:\n if action in self.instance.actions:\n self.fields[f\"can_{action}\"].initial = True\n self.instance.actions.remove(action)\n\n def clean(self):\n super().clean()\n\n object_types = self.cleaned_data.get(\"object_types\")\n constraints = self.cleaned_data.get(\"constraints\")\n\n # Append any of the selected CRUD checkboxes to the actions list\n if not self.cleaned_data.get(\"actions\"):\n self.cleaned_data[\"actions\"] = []\n for action in [\"view\", \"add\", \"change\", \"delete\"]:\n if self.cleaned_data[f\"can_{action}\"] and action not in self.cleaned_data[\"actions\"]:\n self.cleaned_data[\"actions\"].append(action)\n\n # At least one action must be specified\n if not self.cleaned_data[\"actions\"]:\n raise ValidationError(\"At least one action must be selected.\")\n\n # Validate the specified model constraints by attempting to execute a query. 
We don't care whether the query\n # returns anything; we just want to make sure the specified constraints are valid.\n if object_types and constraints:\n # Normalize the constraints to a list of dicts\n if not isinstance(constraints, list):\n constraints = [constraints]\n for ct in object_types:\n model = ct.model_class()\n try:\n model.objects.filter(*[models.Q(**c) for c in constraints]).exists()\n except FieldError as e:\n raise ValidationError({\"constraints\": f\"Invalid filter for {model}: {e}\"})\n\n\nclass ActionListFilter(admin.SimpleListFilter):\n title = \"action\"\n parameter_name = \"action\"\n\n def lookups(self, request, model_admin):\n options = set()\n for action_list in ObjectPermission.objects.values_list(\"actions\", flat=True).distinct():\n options.update(action_list)\n return [(action, action) for action in sorted(options)]\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(actions=[self.value()])\n return None\n\n\nclass ObjectTypeListFilter(admin.SimpleListFilter):\n title = \"object type\"\n parameter_name = \"object_type\"\n\n def lookups(self, request, model_admin):\n object_types = ObjectPermission.objects.values_list(\"object_types__pk\", flat=True).distinct()\n content_types = ContentType.objects.filter(pk__in=object_types).order_by(\"app_label\", \"model\")\n return [(ct.pk, ct) for ct in content_types]\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.filter(object_types=self.value())\n return None\n\n\[email protected](ObjectPermission)\nclass ObjectPermissionAdmin(NautobotModelAdmin):\n actions = (\"enable\", \"disable\")\n fieldsets = (\n (None, {\"fields\": (\"name\", \"description\", \"enabled\")}),\n (\n \"Actions\",\n {\n \"fields\": (\n (\"can_view\", \"can_add\", \"can_change\", \"can_delete\"),\n \"actions\",\n )\n },\n ),\n (\"Objects\", {\"fields\": (\"object_types\",)}),\n (\"Assignment\", {\"fields\": (\"groups\", \"users\")}),\n (\"Constraints\", {\"fields\": (\"constraints\",), \"classes\": (\"monospace\",)}),\n )\n filter_horizontal = (\"object_types\", \"groups\", \"users\")\n form = ObjectPermissionForm\n list_display = [\n \"name\",\n \"enabled\",\n \"list_models\",\n \"list_users\",\n \"list_groups\",\n \"actions\",\n \"constraints\",\n \"description\",\n ]\n list_filter = [\"enabled\", ActionListFilter, ObjectTypeListFilter, \"groups\", \"users\"]\n search_fields = [\"actions\", \"constraints\", \"description\", \"name\"]\n\n def get_queryset(self, request):\n return super().get_queryset(request).prefetch_related(\"object_types\", \"users\", \"groups\")\n\n def list_models(self, obj):\n return \", \".join([f\"{ct}\" for ct in obj.object_types.all()])\n\n list_models.short_description = \"Models\"\n\n def list_users(self, obj):\n return \", \".join([u.username for u in obj.users.all()])\n\n list_users.short_description = \"Users\"\n\n def list_groups(self, obj):\n return \", \".join([g.name for g in obj.groups.all()])\n\n list_groups.short_description = \"Groups\"\n\n #\n # Admin actions\n #\n\n def enable(self, request, queryset):\n updated = queryset.update(enabled=True)\n self.message_user(request, f\"Enabled {updated} permissions\")\n\n def disable(self, request, queryset):\n updated = queryset.update(enabled=False)\n self.message_user(request, f\"Disabled {updated} permissions\")\n", "path": "nautobot/users/admin.py"}]}
| 3,717 | 117 |
gh_patches_debug_25188
|
rasdani/github-patches
|
git_diff
|
helmholtz-analytics__heat-115
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add unit tests for stride_tricks/broadcast_shape
</issue>
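A minimal sketch of such tests, written against the `broadcast_shape` implementation shown below, might look like this. The import path `heat.core.stride_tricks` is assumed from the file location and the test class name is hypothetical; the expected shapes follow directly from NumPy-style broadcasting rules:

```python
import unittest

from heat.core import stride_tricks


class TestBroadcastShape(unittest.TestCase):
    def test_broadcast_shape(self):
        # compatible shapes broadcast to the elementwise maximum
        self.assertEqual(stride_tricks.broadcast_shape((5, 4), (4,)), (5, 4))
        self.assertEqual(stride_tricks.broadcast_shape((1, 100, 1), (10, 1, 5)), (10, 100, 5))
        self.assertEqual(stride_tricks.broadcast_shape((8, 1, 6, 1), (7, 1, 5)), (8, 7, 6, 5))

    def test_broadcast_shape_raises(self):
        # incompatible trailing dimensions must raise
        with self.assertRaises(ValueError):
            stride_tricks.broadcast_shape((2, 1), (8, 4, 3))


if __name__ == '__main__':
    unittest.main()
```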
<code>
[start of heat/core/stride_tricks.py]
1 import itertools
2
3
4 def broadcast_shape(shape_a, shape_b):
5 """
6 Infers, if possible, the broadcast output shape of two operands a and b. Inspired by stackoverflow post:
7 https://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape
8
9 Parameters
10 ----------
11 shape_a : tuple of ints
12 shape of operand a
13 shape_b : tuple of ints
14 shape of operand b
15
16 Returns
17 -------
18 broadcast_shape : tuple of ints
19 the broadcast shape
20
21 Raises
22 -------
23 ValueError
24 If the two shapes cannot be broadcast.
25 """
26 #TODO: test me
27 it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)
28 resulting_shape = max(len(shape_a), len(shape_b)) * [None]
29 for i, (a, b) in enumerate(it):
30 if a == 1 or b == 1 or a == b:
31 resulting_shape[i] = max(a, b)
32 else:
33 raise ValueError('operands could not be broadcast, input shapes {} {}'.format(shape_a, shape_b))
34
35 return tuple(resulting_shape[::-1])
36
37
38 def sanitize_axis(shape, axis):
39 """
40 Checks conformity of an axis with respect to a given shape. The axis will be converted to its positive equivalent
41 and is checked to be within bounds
42
43 Parameters
44 ----------
45 shape : tuple of ints
46 shape of an array
47 axis : ints
48 the axis to be sanitized
49
50 Returns
51 -------
52 sane_axis : int
53 the sane axis
54
55 Raises
56 -------
57 ValueError
58 if the axis cannot be sanitized, i.e. out of bounds.
59 TypeError
60 if the the axis is not integral.
61 """
62 #TODO: test me
63
64 if axis is not None:
65 if isinstance(axis, tuple):
66 raise NotImplementedError('Not implemented for axis: tuple of ints')
67 if not isinstance(axis, int):
68 raise TypeError('axis must be None or int, but was {}'.format(type(axis)))
69
70 if axis is None or 0 <= axis < len(shape):
71 return axis
72 elif axis < 0:
73 axis += len(shape)
74
75 if axis < 0 or axis >= len(shape):
76 raise ValueError('axis axis {} is out of bounds for shape {}'.format(axis, shape))
77
78 return axis
79
80
81 def sanitize_shape(shape):
82 """
83 Verifies and normalizes the given shape.
84
85 Parameters
86 ----------
87 shape : int or sequence of ints
88 Shape of an array.
89
90 Returns
91 -------
92 sane_shape : tuple of ints
93 The sanitized shape.
94
95 Raises
96 -------
97 ValueError
98 If the shape contains illegal values, e.g. negative numbers.
99 TypeError
100 If the given shape is neither and int or a sequence of ints.
101
102 Examples
103 --------
104 >>> sanitize_shape(3)
105 (3,)
106
107 >>> sanitize_shape([1, 2, 3])
108 (1, 2, 3,)
109
110 >>> sanitize_shape(1.0)
111 TypeError
112 """
113 shape = (shape,) if not hasattr(shape, '__iter__') else tuple(shape)
114
115 for dimension in shape:
116 if not isinstance(dimension, int):
117 raise TypeError('expected sequence object with length >= 0 or a single integer')
118 if dimension <= 0:
119 raise ValueError('negative dimensions are not allowed')
120
121 return shape
122
[end of heat/core/stride_tricks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/heat/core/stride_tricks.py b/heat/core/stride_tricks.py
--- a/heat/core/stride_tricks.py
+++ b/heat/core/stride_tricks.py
@@ -22,8 +22,22 @@
-------
ValueError
If the two shapes cannot be broadcast.
+
+ Examples
+ -------
+ >>> broadcast_shape((5,4),(4,))
+ (5,4)
+
+ >>> broadcast_shape((1,100,1),(10,1,5))
+ (10,100,5)
+
+ >>> broadcast_shape((8,1,6,1),(7,1,5,))
+ (8,7,6,5))
+
+ >>> broadcast_shape((2,1),(8,4,3))
+ ValueError
"""
- #TODO: test me
+
it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)
resulting_shape = max(len(shape_a), len(shape_b)) * [None]
for i, (a, b) in enumerate(it):
@@ -58,9 +72,23 @@
if the axis cannot be sanitized, i.e. out of bounds.
TypeError
if the the axis is not integral.
+
+ Examples
+ -------
+ >>> sanitize_axis((5,4,4),1)
+ 1
+
+ >>> sanitize_axis((5,4,4),-1)
+ 2
+
+ >>> sanitize_axis((5, 4), (1,))
+ NotImplementedError
+
+ >>> sanitize_axis((5, 4), 1.0)
+ TypeError
+
"""
- #TODO: test me
-
+
if axis is not None:
if isinstance(axis, tuple):
raise NotImplementedError('Not implemented for axis: tuple of ints')
|
{"golden_diff": "diff --git a/heat/core/stride_tricks.py b/heat/core/stride_tricks.py\n--- a/heat/core/stride_tricks.py\n+++ b/heat/core/stride_tricks.py\n@@ -22,8 +22,22 @@\n -------\n ValueError\n If the two shapes cannot be broadcast.\n+\n+ Examples\n+ -------\n+ >>> broadcast_shape((5,4),(4,))\n+ (5,4)\n+\n+ >>> broadcast_shape((1,100,1),(10,1,5))\n+ (10,100,5)\n+\n+ >>> broadcast_shape((8,1,6,1),(7,1,5,))\n+ (8,7,6,5))\n+\n+ >>> broadcast_shape((2,1),(8,4,3))\n+ ValueError\n \"\"\"\n- #TODO: test me\n+\n it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)\n resulting_shape = max(len(shape_a), len(shape_b)) * [None]\n for i, (a, b) in enumerate(it):\n@@ -58,9 +72,23 @@\n if the axis cannot be sanitized, i.e. out of bounds.\n TypeError\n if the the axis is not integral.\n+\n+ Examples\n+ -------\n+ >>> sanitize_axis((5,4,4),1)\n+ 1\n+\n+ >>> sanitize_axis((5,4,4),-1)\n+ 2\n+\n+ >>> sanitize_axis((5, 4), (1,))\n+ NotImplementedError\n+\n+ >>> sanitize_axis((5, 4), 1.0)\n+ TypeError\n+\n \"\"\"\n- #TODO: test me\n- \n+\n if axis is not None:\n if isinstance(axis, tuple):\n raise NotImplementedError('Not implemented for axis: tuple of ints')\n", "issue": "Add unit tests for stride_tricks/broadcast_shape\n\n", "before_files": [{"content": "import itertools\n\n\ndef broadcast_shape(shape_a, shape_b):\n \"\"\"\n Infers, if possible, the broadcast output shape of two operands a and b. Inspired by stackoverflow post:\n https://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape\n\n Parameters\n ----------\n shape_a : tuple of ints\n shape of operand a\n shape_b : tuple of ints\n shape of operand b\n\n Returns\n -------\n broadcast_shape : tuple of ints\n the broadcast shape\n\n Raises\n -------\n ValueError\n If the two shapes cannot be broadcast.\n \"\"\"\n #TODO: test me\n it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)\n resulting_shape = max(len(shape_a), len(shape_b)) * [None]\n for i, (a, b) in enumerate(it):\n if a == 1 or b == 1 or a == b:\n resulting_shape[i] = max(a, b)\n else:\n raise ValueError('operands could not be broadcast, input shapes {} {}'.format(shape_a, shape_b))\n\n return tuple(resulting_shape[::-1])\n\n\ndef sanitize_axis(shape, axis):\n \"\"\"\n Checks conformity of an axis with respect to a given shape. The axis will be converted to its positive equivalent\n and is checked to be within bounds\n\n Parameters\n ----------\n shape : tuple of ints\n shape of an array\n axis : ints\n the axis to be sanitized\n\n Returns\n -------\n sane_axis : int\n the sane axis\n\n Raises\n -------\n ValueError\n if the axis cannot be sanitized, i.e. out of bounds.\n TypeError\n if the the axis is not integral.\n \"\"\"\n #TODO: test me\n \n if axis is not None:\n if isinstance(axis, tuple):\n raise NotImplementedError('Not implemented for axis: tuple of ints')\n if not isinstance(axis, int):\n raise TypeError('axis must be None or int, but was {}'.format(type(axis)))\n\n if axis is None or 0 <= axis < len(shape):\n return axis\n elif axis < 0:\n axis += len(shape)\n\n if axis < 0 or axis >= len(shape):\n raise ValueError('axis axis {} is out of bounds for shape {}'.format(axis, shape))\n\n return axis\n\n\ndef sanitize_shape(shape):\n \"\"\"\n Verifies and normalizes the given shape.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of an array.\n\n Returns\n -------\n sane_shape : tuple of ints\n The sanitized shape.\n\n Raises\n -------\n ValueError\n If the shape contains illegal values, e.g. 
negative numbers.\n TypeError\n If the given shape is neither and int or a sequence of ints.\n\n Examples\n --------\n >>> sanitize_shape(3)\n (3,)\n\n >>> sanitize_shape([1, 2, 3])\n (1, 2, 3,)\n\n >>> sanitize_shape(1.0)\n TypeError\n \"\"\"\n shape = (shape,) if not hasattr(shape, '__iter__') else tuple(shape)\n\n for dimension in shape:\n if not isinstance(dimension, int):\n raise TypeError('expected sequence object with length >= 0 or a single integer')\n if dimension <= 0:\n raise ValueError('negative dimensions are not allowed')\n\n return shape\n", "path": "heat/core/stride_tricks.py"}]}
| 1,558 | 417 |
gh_patches_debug_34376
|
rasdani/github-patches
|
git_diff
|
ethereum__consensus-specs-1065
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add linter to CI
Add a new job to CI that uses `flake8` or something similar to lint the pyspec after the Python code is dumped to `spec.py`.
This will likely need a bit of configuration (`maxline=120`, etc.) and some cleaning up.
</issue>
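One self-contained way to express the lint check is through flake8's documented legacy Python API; the spec file path and the ignored codes below are placeholders, and only the 120-character line limit comes from the issue text:

```python
# Hypothetical lint step: the spec.py path and ignore list are guesses;
# only max_line_length=120 is taken from the issue.
from flake8.api import legacy as flake8

style_guide = flake8.get_style_guide(
    max_line_length=120,
    ignore=["E252", "W504"],
)
report = style_guide.check_files(["./test_libs/pyspec/eth2spec/phase0/spec.py"])
assert report.total_errors == 0, "flake8 found style violations in spec.py"
```

In a CI configuration this would typically run as its own job after the spec is built, so style regressions fail fast without touching the existing test jobs.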
<code>
[start of scripts/phase0/function_puller.py]
1 import sys
2 from typing import List
3
4
5 def get_spec(file_name: str) -> List[str]:
6 code_lines = []
7 pulling_from = None
8 current_name = None
9 current_typedef = None
10 type_defs = []
11 for linenum, line in enumerate(open(sys.argv[1]).readlines()):
12 line = line.rstrip()
13 if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
14 current_name = line[line[:-1].rfind('`') + 1: -1]
15 if line[:9] == '```python':
16 assert pulling_from is None
17 pulling_from = linenum + 1
18 elif line[:3] == '```':
19 if pulling_from is None:
20 pulling_from = linenum
21 else:
22 if current_typedef is not None:
23 assert code_lines[-1] == '}'
24 code_lines[-1] = '})'
25 current_typedef[-1] = '})'
26 type_defs.append((current_name, current_typedef))
27 pulling_from = None
28 current_typedef = None
29 else:
30 if pulling_from == linenum and line == '{':
31 code_lines.append('%s = SSZType({' % current_name)
32 current_typedef = ['global_vars["%s"] = SSZType({' % current_name]
33 elif pulling_from is not None:
34 # Add some whitespace between functions
35 if line[:3] == 'def':
36 code_lines.append('')
37 code_lines.append('')
38 code_lines.append(line)
39 # Remember type def lines
40 if current_typedef is not None:
41 current_typedef.append(line)
42 elif pulling_from is None and len(line) > 0 and line[0] == '|':
43 row = line[1:].split('|')
44 if len(row) >= 2:
45 for i in range(2):
46 row[i] = row[i].strip().strip('`')
47 if '`' in row[i]:
48 row[i] = row[i][:row[i].find('`')]
49 eligible = True
50 if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
51 eligible = False
52 for c in row[0]:
53 if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
54 eligible = False
55 if eligible:
56 code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')))
57 # Build type-def re-initialization
58 code_lines.append('')
59 code_lines.append('def init_SSZ_types():')
60 code_lines.append(' global_vars = globals()')
61 for ssz_type_name, ssz_type in type_defs:
62 code_lines.append('')
63 for type_line in ssz_type:
64 code_lines.append(' ' + type_line)
65 code_lines.append('\n')
66 code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']')
67 code_lines.append('\n')
68 code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:')
69 code_lines.append(' return globals()[name]')
70 code_lines.append('')
71 return code_lines
72
[end of scripts/phase0/function_puller.py]
[start of scripts/phase0/build_spec.py]
1 import sys
2 import function_puller
3
4
5 def build_phase0_spec(sourcefile, outfile):
6 code_lines = []
7 code_lines.append("""
8 from typing import (
9 Any,
10 Dict,
11 List,
12 NewType,
13 Tuple,
14 )
15 from eth2spec.utils.minimal_ssz import *
16 from eth2spec.utils.bls_stub import *
17
18 # stub, will get overwritten by real var
19 SLOTS_PER_EPOCH = 64
20
21 Slot = NewType('Slot', int) # uint64
22 Epoch = NewType('Epoch', int) # uint64
23 Shard = NewType('Shard', int) # uint64
24 ValidatorIndex = NewType('ValidatorIndex', int) # uint64
25 Gwei = NewType('Gwei', int) # uint64
26 Bytes32 = NewType('Bytes32', bytes) # bytes32
27 BLSPubkey = NewType('BLSPubkey', bytes) # bytes48
28 BLSSignature = NewType('BLSSignature', bytes) # bytes96
29 Store = None
30 """)
31
32 code_lines += function_puller.get_spec(sourcefile)
33
34 code_lines.append("""
35 # Monkey patch validator compute committee code
36 _compute_committee = compute_committee
37 committee_cache = {}
38
39
40 def compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:
41 param_hash = (hash_tree_root(indices), seed, index, count)
42
43 if param_hash in committee_cache:
44 return committee_cache[param_hash]
45 else:
46 ret = _compute_committee(indices, seed, index, count)
47 committee_cache[param_hash] = ret
48 return ret
49
50
51 # Monkey patch hash cache
52 _hash = hash
53 hash_cache = {}
54
55
56 def hash(x):
57 if x in hash_cache:
58 return hash_cache[x]
59 else:
60 ret = _hash(x)
61 hash_cache[x] = ret
62 return ret
63
64 # Access to overwrite spec constants based on configuration
65 def apply_constants_preset(preset: Dict[str, Any]):
66 global_vars = globals()
67 for k, v in preset.items():
68 global_vars[k] = v
69
70 # Deal with derived constants
71 global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
72
73 # Initialize SSZ types again, to account for changed lengths
74 init_SSZ_types()
75 """)
76
77 with open(outfile, 'w') as out:
78 out.write("\n".join(code_lines))
79
80
81 if __name__ == '__main__':
82 if len(sys.argv) < 3:
83 print("Usage: <source phase0> <output phase0 pyspec>")
84 build_phase0_spec(sys.argv[1], sys.argv[2])
85
86
[end of scripts/phase0/build_spec.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py
--- a/scripts/phase0/build_spec.py
+++ b/scripts/phase0/build_spec.py
@@ -12,8 +12,18 @@
NewType,
Tuple,
)
-from eth2spec.utils.minimal_ssz import *
-from eth2spec.utils.bls_stub import *
+from eth2spec.utils.minimal_ssz import (
+ SSZType,
+ hash_tree_root,
+ signing_root,
+)
+from eth2spec.utils.bls_stub import (
+ bls_aggregate_pubkeys,
+ bls_verify,
+ bls_verify_multiple,
+)
+from eth2spec.utils.hash_function import hash
+
# stub, will get overwritten by real var
SLOTS_PER_EPOCH = 64
@@ -61,6 +71,7 @@
hash_cache[x] = ret
return ret
+
# Access to overwrite spec constants based on configuration
def apply_constants_preset(preset: Dict[str, Any]):
global_vars = globals()
diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py
--- a/scripts/phase0/function_puller.py
+++ b/scripts/phase0/function_puller.py
@@ -55,15 +55,19 @@
if eligible:
code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')))
# Build type-def re-initialization
- code_lines.append('')
+ code_lines.append('\n')
code_lines.append('def init_SSZ_types():')
code_lines.append(' global_vars = globals()')
for ssz_type_name, ssz_type in type_defs:
code_lines.append('')
for type_line in ssz_type:
- code_lines.append(' ' + type_line)
+ if len(type_line) > 0:
+ code_lines.append(' ' + type_line)
code_lines.append('\n')
- code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']')
+ code_lines.append('ssz_types = [\n')
+ for (ssz_type_name, _) in type_defs:
+ code_lines.append(f' {ssz_type_name},\n')
+ code_lines.append(']')
code_lines.append('\n')
code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:')
code_lines.append(' return globals()[name]')
|
{"golden_diff": "diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py\n--- a/scripts/phase0/build_spec.py\n+++ b/scripts/phase0/build_spec.py\n@@ -12,8 +12,18 @@\n NewType,\n Tuple,\n )\n-from eth2spec.utils.minimal_ssz import *\n-from eth2spec.utils.bls_stub import *\n+from eth2spec.utils.minimal_ssz import (\n+ SSZType,\n+ hash_tree_root,\n+ signing_root,\n+)\n+from eth2spec.utils.bls_stub import (\n+ bls_aggregate_pubkeys,\n+ bls_verify,\n+ bls_verify_multiple,\n+)\n+from eth2spec.utils.hash_function import hash\n+\n \n # stub, will get overwritten by real var\n SLOTS_PER_EPOCH = 64\n@@ -61,6 +71,7 @@\n hash_cache[x] = ret\n return ret\n \n+\n # Access to overwrite spec constants based on configuration\n def apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\ndiff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py\n--- a/scripts/phase0/function_puller.py\n+++ b/scripts/phase0/function_puller.py\n@@ -55,15 +55,19 @@\n if eligible:\n code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')))\n # Build type-def re-initialization\n- code_lines.append('')\n+ code_lines.append('\\n')\n code_lines.append('def init_SSZ_types():')\n code_lines.append(' global_vars = globals()')\n for ssz_type_name, ssz_type in type_defs:\n code_lines.append('')\n for type_line in ssz_type:\n- code_lines.append(' ' + type_line)\n+ if len(type_line) > 0:\n+ code_lines.append(' ' + type_line)\n code_lines.append('\\n')\n- code_lines.append('ssz_types = [' + ', '.join([f'\\'{ssz_type_name}\\'' for (ssz_type_name, _) in type_defs]) + ']')\n+ code_lines.append('ssz_types = [\\n')\n+ for (ssz_type_name, _) in type_defs:\n+ code_lines.append(f' {ssz_type_name},\\n')\n+ code_lines.append(']')\n code_lines.append('\\n')\n code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:')\n code_lines.append(' return globals()[name]')\n", "issue": "add linter to CI\nAdd a new job to CI that uses `flake8` or something similar to lint the the pyspec after the python code is dumped to `spec.py`.\r\n\r\nThis will likely need a bit of configuration (`maxline=120`, etc) and some cleaning up.\n", "before_files": [{"content": "import sys\nfrom typing import List\n\n\ndef get_spec(file_name: str) -> List[str]:\n code_lines = []\n pulling_from = None\n current_name = None\n current_typedef = None\n type_defs = []\n for linenum, line in enumerate(open(sys.argv[1]).readlines()):\n line = line.rstrip()\n if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':\n current_name = line[line[:-1].rfind('`') + 1: -1]\n if line[:9] == '```python':\n assert pulling_from is None\n pulling_from = linenum + 1\n elif line[:3] == '```':\n if pulling_from is None:\n pulling_from = linenum\n else:\n if current_typedef is not None:\n assert code_lines[-1] == '}'\n code_lines[-1] = '})'\n current_typedef[-1] = '})'\n type_defs.append((current_name, current_typedef))\n pulling_from = None\n current_typedef = None\n else:\n if pulling_from == linenum and line == '{':\n code_lines.append('%s = SSZType({' % current_name)\n current_typedef = ['global_vars[\"%s\"] = SSZType({' % current_name]\n elif pulling_from is not None:\n # Add some whitespace between functions\n if line[:3] == 'def':\n code_lines.append('')\n code_lines.append('')\n code_lines.append(line)\n # Remember type def lines\n if current_typedef is not None:\n current_typedef.append(line)\n elif pulling_from is None and len(line) > 0 and line[0] == 
'|':\n row = line[1:].split('|')\n if len(row) >= 2:\n for i in range(2):\n row[i] = row[i].strip().strip('`')\n if '`' in row[i]:\n row[i] = row[i][:row[i].find('`')]\n eligible = True\n if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':\n eligible = False\n for c in row[0]:\n if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':\n eligible = False\n if eligible:\n code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')))\n # Build type-def re-initialization\n code_lines.append('')\n code_lines.append('def init_SSZ_types():')\n code_lines.append(' global_vars = globals()')\n for ssz_type_name, ssz_type in type_defs:\n code_lines.append('')\n for type_line in ssz_type:\n code_lines.append(' ' + type_line)\n code_lines.append('\\n')\n code_lines.append('ssz_types = [' + ', '.join([f'\\'{ssz_type_name}\\'' for (ssz_type_name, _) in type_defs]) + ']')\n code_lines.append('\\n')\n code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType:')\n code_lines.append(' return globals()[name]')\n code_lines.append('')\n return code_lines\n", "path": "scripts/phase0/function_puller.py"}, {"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.bls_stub import *\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n", "path": "scripts/phase0/build_spec.py"}]}
| 2,274 | 614 |
gh_patches_debug_5756
|
rasdani/github-patches
|
git_diff
|
privacyidea__privacyidea-3411
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
logging documentation
Check the log configuration.
The yaml config seems to be wrong.
Is the indentation of `root:` correct?
https://privacyidea.readthedocs.io/en/master/installation/system/logging.html#debug-log
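For reference, a minimal sketch of the equivalent `dictConfig` structure (the handler and formatter names here are made up, not taken from the privacyIDEA documentation): `root` is a top-level key, so in the YAML example `root:` should sit at the same indentation level as `loggers:`, not nested under it.

```python
# Illustrative only: shows where "root" belongs in a dictConfig-style setup.
import logging.config

LOG_CONFIG = {
    "version": 1,
    "formatters": {
        "detail": {"format": "[%(asctime)s][%(levelname)s] %(message)s"},
    },
    "handlers": {
        "stream": {"class": "logging.StreamHandler", "formatter": "detail"},
    },
    "loggers": {
        "privacyidea": {"handlers": ["stream"], "level": "DEBUG"},
    },
    # Top-level sibling of "loggers"; in YAML, "root:" is therefore not indented.
    "root": {"handlers": ["stream"], "level": "WARNING"},
}

logging.config.dictConfig(LOG_CONFIG)
```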
</issue>
<code>
[start of privacyidea/app.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2014-11-15 Cornelius Kölbel, [email protected]
4 # Initial creation
5 #
6 # (c) Cornelius Kölbel
7 # Info: http://www.privacyidea.org
8 #
9 # This code is free software; you can redistribute it and/or
10 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
11 # License as published by the Free Software Foundation; either
12 # version 3 of the License, or any later version.
13 #
14 # This code is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
18 #
19 # You should have received a copy of the GNU Affero General Public
20 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
21 #
22 import os
23 import os.path
24 import logging
25 import logging.config
26 import sys
27 import yaml
28 from flask import Flask, request, Response
29 from flask_babel import Babel
30 from flask_migrate import Migrate
31 from flaskext.versioned import Versioned
32
33 # we need this import to add the before/after request function to the blueprints
34 import privacyidea.api.before_after
35 from privacyidea.api.validate import validate_blueprint
36 from privacyidea.api.token import token_blueprint
37 from privacyidea.api.system import system_blueprint
38 from privacyidea.api.resolver import resolver_blueprint
39 from privacyidea.api.realm import realm_blueprint
40 from privacyidea.api.realm import defaultrealm_blueprint
41 from privacyidea.api.policy import policy_blueprint
42 from privacyidea.api.user import user_blueprint
43 from privacyidea.api.audit import audit_blueprint
44 from privacyidea.api.application import application_blueprint
45 from privacyidea.api.caconnector import caconnector_blueprint
46 from privacyidea.api.register import register_blueprint
47 from privacyidea.api.auth import jwtauth
48 from privacyidea.webui.login import login_blueprint, get_accepted_language
49 from privacyidea.webui.certificate import cert_blueprint
50 from privacyidea.api.machineresolver import machineresolver_blueprint
51 from privacyidea.api.machine import machine_blueprint
52 from privacyidea.api.ttype import ttype_blueprint
53 from privacyidea.api.smtpserver import smtpserver_blueprint
54 from privacyidea.api.radiusserver import radiusserver_blueprint
55 from privacyidea.api.periodictask import periodictask_blueprint
56 from privacyidea.api.privacyideaserver import privacyideaserver_blueprint
57 from privacyidea.api.recover import recover_blueprint
58 from privacyidea.api.event import eventhandling_blueprint
59 from privacyidea.api.smsgateway import smsgateway_blueprint
60 from privacyidea.api.clienttype import client_blueprint
61 from privacyidea.api.subscriptions import subscriptions_blueprint
62 from privacyidea.api.monitoring import monitoring_blueprint
63 from privacyidea.api.tokengroup import tokengroup_blueprint
64 from privacyidea.lib import queue
65 from privacyidea.lib.log import DEFAULT_LOGGING_CONFIG
66 from privacyidea.config import config
67 from privacyidea.models import db
68 from privacyidea.lib.crypto import init_hsm
69
70
71 ENV_KEY = "PRIVACYIDEA_CONFIGFILE"
72
73
74 class PiResponseClass(Response):
75 """Custom Response class overwriting the flask.Response.
76 To avoid caching problems with the json property in the Response class,
77 the property is overwritten using a non-caching approach.
78 """
79 @property
80 def json(self):
81 """This will contain the parsed JSON data if the mimetype indicates
82 JSON (:mimetype:`application/json`, see :meth:`is_json`), otherwise it
83 will be ``None``.
84 Caching of the json data is disabled.
85 """
86 return self.get_json(cache=False)
87
88 default_mimetype = 'application/json'
89
90
91 def create_app(config_name="development",
92 config_file='/etc/privacyidea/pi.cfg',
93 silent=False, init_hsm=False):
94 """
95 First the configuration from the config.py is loaded depending on the
96 config type like "production" or "development" or "testing".
97
98 Then the environment variable PRIVACYIDEA_CONFIGFILE is checked for a
99 config file, that contains additional settings, that will overwrite the
100 default settings from config.py
101
102 :param config_name: The config name like "production" or "testing"
103 :type config_name: basestring
104 :param config_file: The name of a config file to read configuration from
105 :type config_file: basestring
106 :param silent: If set to True the additional information are not printed
107 to stdout
108 :type silent: bool
109 :param init_hsm: Whether the HSM should be initialized on app startup
110 :type init_hsm: bool
111 :return: The flask application
112 :rtype: App object
113 """
114 if not silent:
115 print("The configuration name is: {0!s}".format(config_name))
116 if os.environ.get(ENV_KEY):
117 config_file = os.environ[ENV_KEY]
118 if not silent:
119 print("Additional configuration will be read "
120 "from the file {0!s}".format(config_file))
121 app = Flask(__name__, static_folder="static",
122 template_folder="static/templates")
123 if config_name:
124 app.config.from_object(config[config_name])
125
126 # Set up flask-versioned
127 versioned = Versioned(app, format='%(path)s?v=%(version)s')
128
129 try:
130 # Try to load the given config_file.
131 # If it does not exist, just ignore it.
132 app.config.from_pyfile(config_file, silent=True)
133 except IOError:
134 sys.stderr.write("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
135 sys.stderr.write(" WARNING: privacyidea create_app has no access\n")
136 sys.stderr.write(" to {0!s}!\n".format(config_file))
137 sys.stderr.write("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
138
139 # Try to load the file, that was specified in the environment variable
140 # PRIVACYIDEA_CONFIG_FILE
141 # If this file does not exist, we create an error!
142 app.config.from_envvar(ENV_KEY, silent=True)
143
144 # We allow to set different static folders
145 app.static_folder = app.config.get("PI_STATIC_FOLDER", "static/")
146 app.template_folder = app.config.get("PI_TEMPLATE_FOLDER", "static/templates/")
147
148 app.register_blueprint(validate_blueprint, url_prefix='/validate')
149 app.register_blueprint(token_blueprint, url_prefix='/token')
150 app.register_blueprint(system_blueprint, url_prefix='/system')
151 app.register_blueprint(resolver_blueprint, url_prefix='/resolver')
152 app.register_blueprint(realm_blueprint, url_prefix='/realm')
153 app.register_blueprint(defaultrealm_blueprint, url_prefix='/defaultrealm')
154 app.register_blueprint(policy_blueprint, url_prefix='/policy')
155 app.register_blueprint(login_blueprint, url_prefix='/')
156 app.register_blueprint(jwtauth, url_prefix='/auth')
157 app.register_blueprint(user_blueprint, url_prefix='/user')
158 app.register_blueprint(audit_blueprint, url_prefix='/audit')
159 app.register_blueprint(machineresolver_blueprint,
160 url_prefix='/machineresolver')
161 app.register_blueprint(machine_blueprint, url_prefix='/machine')
162 app.register_blueprint(application_blueprint, url_prefix='/application')
163 app.register_blueprint(caconnector_blueprint, url_prefix='/caconnector')
164 app.register_blueprint(cert_blueprint, url_prefix='/certificate')
165 app.register_blueprint(ttype_blueprint, url_prefix='/ttype')
166 app.register_blueprint(register_blueprint, url_prefix='/register')
167 app.register_blueprint(smtpserver_blueprint, url_prefix='/smtpserver')
168 app.register_blueprint(recover_blueprint, url_prefix='/recover')
169 app.register_blueprint(radiusserver_blueprint, url_prefix='/radiusserver')
170 app.register_blueprint(periodictask_blueprint, url_prefix='/periodictask')
171 app.register_blueprint(privacyideaserver_blueprint,
172 url_prefix='/privacyideaserver')
173 app.register_blueprint(eventhandling_blueprint, url_prefix='/event')
174 app.register_blueprint(smsgateway_blueprint, url_prefix='/smsgateway')
175 app.register_blueprint(client_blueprint, url_prefix='/client')
176 app.register_blueprint(subscriptions_blueprint, url_prefix='/subscriptions')
177 app.register_blueprint(monitoring_blueprint, url_prefix='/monitoring')
178 app.register_blueprint(tokengroup_blueprint, url_prefix='/tokengroup')
179 db.init_app(app)
180 migrate = Migrate(app, db)
181
182 app.response_class = PiResponseClass
183
184 # Setup logging
185 log_read_func = {
186 'yaml': lambda x: logging.config.dictConfig(yaml.safe_load(open(x, 'r').read())),
187 'cfg': lambda x: logging.config.fileConfig(x)
188 }
189 have_config = False
190 log_exx = None
191 log_config_file = app.config.get("PI_LOGCONFIG", "/etc/privacyidea/logging.cfg")
192 if os.path.isfile(log_config_file):
193 for cnf_type in ['cfg', 'yaml']:
194 try:
195 log_read_func[cnf_type](log_config_file)
196 if not silent:
197 print('Read Logging settings from {0!s}'.format(log_config_file))
198 have_config = True
199 break
200 except Exception as exx:
201 log_exx = exx
202 pass
203 if not have_config:
204 if log_exx:
205 sys.stderr.write("Could not use PI_LOGCONFIG: " + str(log_exx) + "\n")
206 if not silent:
207 sys.stderr.write("Using PI_LOGLEVEL and PI_LOGFILE.\n")
208 level = app.config.get("PI_LOGLEVEL", logging.INFO)
209 # If there is another logfile in pi.cfg we use this.
210 logfile = app.config.get("PI_LOGFILE", '/var/log/privacyidea/privacyidea.log')
211 if not silent:
212 sys.stderr.write("Using PI_LOGLEVEL {0!s}.\n".format(level))
213 sys.stderr.write("Using PI_LOGFILE {0!s}.\n".format(logfile))
214 DEFAULT_LOGGING_CONFIG["handlers"]["file"]["filename"] = logfile
215 DEFAULT_LOGGING_CONFIG["handlers"]["file"]["level"] = level
216 DEFAULT_LOGGING_CONFIG["loggers"]["privacyidea"]["level"] = level
217 logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
218
219 babel = Babel(app)
220
221 @babel.localeselector
222 def get_locale():
223 return get_accepted_language(request)
224
225 queue.register_app(app)
226
227 if init_hsm:
228 with app.app_context():
229 init_hsm()
230
231 logging.debug(u"Reading application from the static folder {0!s} and "
232 u"the template folder {1!s}".format(app.static_folder, app.template_folder))
233
234 return app
235
[end of privacyidea/app.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/privacyidea/app.py b/privacyidea/app.py
--- a/privacyidea/app.py
+++ b/privacyidea/app.py
@@ -228,7 +228,8 @@
with app.app_context():
init_hsm()
- logging.debug(u"Reading application from the static folder {0!s} and "
- u"the template folder {1!s}".format(app.static_folder, app.template_folder))
+ logging.getLogger(__name__).debug(u"Reading application from the static "
+ u"folder {0!s} and the template folder "
+ u"{1!s}".format(app.static_folder, app.template_folder))
return app
|
{"golden_diff": "diff --git a/privacyidea/app.py b/privacyidea/app.py\n--- a/privacyidea/app.py\n+++ b/privacyidea/app.py\n@@ -228,7 +228,8 @@\n with app.app_context():\n init_hsm()\n \n- logging.debug(u\"Reading application from the static folder {0!s} and \"\n- u\"the template folder {1!s}\".format(app.static_folder, app.template_folder))\n+ logging.getLogger(__name__).debug(u\"Reading application from the static \"\n+ u\"folder {0!s} and the template folder \"\n+ u\"{1!s}\".format(app.static_folder, app.template_folder))\n \n return app\n", "issue": "logging documentation\nCheck the log configuration.\r\nThe yaml config seems to be wrong. \r\nIs the indentation of `root:` correct?\r\n\r\nhttps://privacyidea.readthedocs.io/en/master/installation/system/logging.html#debug-log\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2014-11-15 Cornelius K\u00f6lbel, [email protected]\n# Initial creation\n#\n# (c) Cornelius K\u00f6lbel\n# Info: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nimport os\nimport os.path\nimport logging\nimport logging.config\nimport sys\nimport yaml\nfrom flask import Flask, request, Response\nfrom flask_babel import Babel\nfrom flask_migrate import Migrate\nfrom flaskext.versioned import Versioned\n\n# we need this import to add the before/after request function to the blueprints\nimport privacyidea.api.before_after\nfrom privacyidea.api.validate import validate_blueprint\nfrom privacyidea.api.token import token_blueprint\nfrom privacyidea.api.system import system_blueprint\nfrom privacyidea.api.resolver import resolver_blueprint\nfrom privacyidea.api.realm import realm_blueprint\nfrom privacyidea.api.realm import defaultrealm_blueprint\nfrom privacyidea.api.policy import policy_blueprint\nfrom privacyidea.api.user import user_blueprint\nfrom privacyidea.api.audit import audit_blueprint\nfrom privacyidea.api.application import application_blueprint\nfrom privacyidea.api.caconnector import caconnector_blueprint\nfrom privacyidea.api.register import register_blueprint\nfrom privacyidea.api.auth import jwtauth\nfrom privacyidea.webui.login import login_blueprint, get_accepted_language\nfrom privacyidea.webui.certificate import cert_blueprint\nfrom privacyidea.api.machineresolver import machineresolver_blueprint\nfrom privacyidea.api.machine import machine_blueprint\nfrom privacyidea.api.ttype import ttype_blueprint\nfrom privacyidea.api.smtpserver import smtpserver_blueprint\nfrom privacyidea.api.radiusserver import radiusserver_blueprint\nfrom privacyidea.api.periodictask import periodictask_blueprint\nfrom privacyidea.api.privacyideaserver import privacyideaserver_blueprint\nfrom privacyidea.api.recover import recover_blueprint\nfrom privacyidea.api.event import eventhandling_blueprint\nfrom privacyidea.api.smsgateway import smsgateway_blueprint\nfrom privacyidea.api.clienttype import client_blueprint\nfrom 
privacyidea.api.subscriptions import subscriptions_blueprint\nfrom privacyidea.api.monitoring import monitoring_blueprint\nfrom privacyidea.api.tokengroup import tokengroup_blueprint\nfrom privacyidea.lib import queue\nfrom privacyidea.lib.log import DEFAULT_LOGGING_CONFIG\nfrom privacyidea.config import config\nfrom privacyidea.models import db\nfrom privacyidea.lib.crypto import init_hsm\n\n\nENV_KEY = \"PRIVACYIDEA_CONFIGFILE\"\n\n\nclass PiResponseClass(Response):\n \"\"\"Custom Response class overwriting the flask.Response.\n To avoid caching problems with the json property in the Response class,\n the property is overwritten using a non-caching approach.\n \"\"\"\n @property\n def json(self):\n \"\"\"This will contain the parsed JSON data if the mimetype indicates\n JSON (:mimetype:`application/json`, see :meth:`is_json`), otherwise it\n will be ``None``.\n Caching of the json data is disabled.\n \"\"\"\n return self.get_json(cache=False)\n\n default_mimetype = 'application/json'\n\n\ndef create_app(config_name=\"development\",\n config_file='/etc/privacyidea/pi.cfg',\n silent=False, init_hsm=False):\n \"\"\"\n First the configuration from the config.py is loaded depending on the\n config type like \"production\" or \"development\" or \"testing\".\n\n Then the environment variable PRIVACYIDEA_CONFIGFILE is checked for a\n config file, that contains additional settings, that will overwrite the\n default settings from config.py\n\n :param config_name: The config name like \"production\" or \"testing\"\n :type config_name: basestring\n :param config_file: The name of a config file to read configuration from\n :type config_file: basestring\n :param silent: If set to True the additional information are not printed\n to stdout\n :type silent: bool\n :param init_hsm: Whether the HSM should be initialized on app startup\n :type init_hsm: bool\n :return: The flask application\n :rtype: App object\n \"\"\"\n if not silent:\n print(\"The configuration name is: {0!s}\".format(config_name))\n if os.environ.get(ENV_KEY):\n config_file = os.environ[ENV_KEY]\n if not silent:\n print(\"Additional configuration will be read \"\n \"from the file {0!s}\".format(config_file))\n app = Flask(__name__, static_folder=\"static\",\n template_folder=\"static/templates\")\n if config_name:\n app.config.from_object(config[config_name])\n\n # Set up flask-versioned\n versioned = Versioned(app, format='%(path)s?v=%(version)s')\n\n try:\n # Try to load the given config_file.\n # If it does not exist, just ignore it.\n app.config.from_pyfile(config_file, silent=True)\n except IOError:\n sys.stderr.write(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\")\n sys.stderr.write(\" WARNING: privacyidea create_app has no access\\n\")\n sys.stderr.write(\" to {0!s}!\\n\".format(config_file))\n sys.stderr.write(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\")\n\n # Try to load the file, that was specified in the environment variable\n # PRIVACYIDEA_CONFIG_FILE\n # If this file does not exist, we create an error!\n app.config.from_envvar(ENV_KEY, silent=True)\n\n # We allow to set different static folders\n app.static_folder = app.config.get(\"PI_STATIC_FOLDER\", \"static/\")\n app.template_folder = app.config.get(\"PI_TEMPLATE_FOLDER\", \"static/templates/\")\n\n app.register_blueprint(validate_blueprint, url_prefix='/validate')\n app.register_blueprint(token_blueprint, url_prefix='/token')\n app.register_blueprint(system_blueprint, url_prefix='/system')\n app.register_blueprint(resolver_blueprint, 
url_prefix='/resolver')\n app.register_blueprint(realm_blueprint, url_prefix='/realm')\n app.register_blueprint(defaultrealm_blueprint, url_prefix='/defaultrealm')\n app.register_blueprint(policy_blueprint, url_prefix='/policy')\n app.register_blueprint(login_blueprint, url_prefix='/')\n app.register_blueprint(jwtauth, url_prefix='/auth')\n app.register_blueprint(user_blueprint, url_prefix='/user')\n app.register_blueprint(audit_blueprint, url_prefix='/audit')\n app.register_blueprint(machineresolver_blueprint,\n url_prefix='/machineresolver')\n app.register_blueprint(machine_blueprint, url_prefix='/machine')\n app.register_blueprint(application_blueprint, url_prefix='/application')\n app.register_blueprint(caconnector_blueprint, url_prefix='/caconnector')\n app.register_blueprint(cert_blueprint, url_prefix='/certificate')\n app.register_blueprint(ttype_blueprint, url_prefix='/ttype')\n app.register_blueprint(register_blueprint, url_prefix='/register')\n app.register_blueprint(smtpserver_blueprint, url_prefix='/smtpserver')\n app.register_blueprint(recover_blueprint, url_prefix='/recover')\n app.register_blueprint(radiusserver_blueprint, url_prefix='/radiusserver')\n app.register_blueprint(periodictask_blueprint, url_prefix='/periodictask')\n app.register_blueprint(privacyideaserver_blueprint,\n url_prefix='/privacyideaserver')\n app.register_blueprint(eventhandling_blueprint, url_prefix='/event')\n app.register_blueprint(smsgateway_blueprint, url_prefix='/smsgateway')\n app.register_blueprint(client_blueprint, url_prefix='/client')\n app.register_blueprint(subscriptions_blueprint, url_prefix='/subscriptions')\n app.register_blueprint(monitoring_blueprint, url_prefix='/monitoring')\n app.register_blueprint(tokengroup_blueprint, url_prefix='/tokengroup')\n db.init_app(app)\n migrate = Migrate(app, db)\n\n app.response_class = PiResponseClass\n\n # Setup logging\n log_read_func = {\n 'yaml': lambda x: logging.config.dictConfig(yaml.safe_load(open(x, 'r').read())),\n 'cfg': lambda x: logging.config.fileConfig(x)\n }\n have_config = False\n log_exx = None\n log_config_file = app.config.get(\"PI_LOGCONFIG\", \"/etc/privacyidea/logging.cfg\")\n if os.path.isfile(log_config_file):\n for cnf_type in ['cfg', 'yaml']:\n try:\n log_read_func[cnf_type](log_config_file)\n if not silent:\n print('Read Logging settings from {0!s}'.format(log_config_file))\n have_config = True\n break\n except Exception as exx:\n log_exx = exx\n pass\n if not have_config:\n if log_exx:\n sys.stderr.write(\"Could not use PI_LOGCONFIG: \" + str(log_exx) + \"\\n\")\n if not silent:\n sys.stderr.write(\"Using PI_LOGLEVEL and PI_LOGFILE.\\n\")\n level = app.config.get(\"PI_LOGLEVEL\", logging.INFO)\n # If there is another logfile in pi.cfg we use this.\n logfile = app.config.get(\"PI_LOGFILE\", '/var/log/privacyidea/privacyidea.log')\n if not silent:\n sys.stderr.write(\"Using PI_LOGLEVEL {0!s}.\\n\".format(level))\n sys.stderr.write(\"Using PI_LOGFILE {0!s}.\\n\".format(logfile))\n DEFAULT_LOGGING_CONFIG[\"handlers\"][\"file\"][\"filename\"] = logfile\n DEFAULT_LOGGING_CONFIG[\"handlers\"][\"file\"][\"level\"] = level\n DEFAULT_LOGGING_CONFIG[\"loggers\"][\"privacyidea\"][\"level\"] = level\n logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)\n\n babel = Babel(app)\n\n @babel.localeselector\n def get_locale():\n return get_accepted_language(request)\n\n queue.register_app(app)\n\n if init_hsm:\n with app.app_context():\n init_hsm()\n\n logging.debug(u\"Reading application from the static folder {0!s} and \"\n u\"the template 
folder {1!s}\".format(app.static_folder, app.template_folder))\n\n return app\n", "path": "privacyidea/app.py"}]}
| 3,404 | 151 |
gh_patches_debug_18398
|
rasdani/github-patches
|
git_diff
|
ultrabug__py3status-1544
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mail: name collision in dict with mailbox named 'mail'
If you have a mailbox with
```
'name' = 'mail'
```
the unread mails in this mailbox are counted twice and the count for the mailboxes that came before it is reset. This is caused by line [208](https://github.com/ultrabug/py3status/blob/109e60a360db6deebe719a67afa07ade4e03640a/py3status/modules/mail.py#L208), where `mail_data['mail']` is set to `count_mail`, and by line [213](https://github.com/ultrabug/py3status/blob/109e60a360db6deebe719a67afa07ade4e03640a/py3status/modules/mail.py#L213), where it is added again.
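A minimal sketch of the collision (made-up counts, not the module's actual code): because the account name reuses the aggregate key `'mail'`, the running total is overwritten and then counted a second time.

```python
# Hypothetical numbers: a previous account already contributed 5 unread messages,
# then an account named 'mail' reports 3.
mail_data = {"mail": 5}         # aggregate total so far
account = {"name": "mail"}      # account name collides with the aggregate key
count_mail = 3

mail_data[account["name"]] = count_mail  # line 208: overwrites the total (5 -> 3)
mail_data["mail"] += count_mail          # line 213: counts it again (3 -> 6)

print(mail_data["mail"])  # 6, but the expected total is 8
```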
</issue>
<code>
[start of py3status/modules/mail.py]
1 # -*- coding: utf-8 -*-
2 """
3 Display number of messages in various mailbox formats.
4 This module supports Maildir, mbox, MH, Babyl, MMDF, and IMAP.
5
6 Configuration parameters:
7 accounts: specify a dict consisting of mailbox types and a list of dicts
8 consisting of mailbox settings and/or paths to use (default {})
9 cache_timeout: refresh interval for this module (default 60)
10 format: display format for this module
11 (default '\?not_zero Mail {mail}|No Mail')
12 thresholds: specify color thresholds to use (default [])
13
14 Format placeholders:
15 {mail} number of messages
16 {maildir} number of Maildir messages
17 {mbox} number of mbox messages
18 {mh} number of MH messages
19 {babyl} number of Babyl messages
20 {mmdf} number of MMDF messages
21 {imap} number of IMAP messages
22
23 We can divide mailbox, eg `{maildir}`, into numbered placeholders based
24 on number of mailbox accounts, eg `{maildir_1}`, and if we add `name` to
25 a mailbox account, we can use `{name}` placeholder instead, eg `{home}`.
26
27 Color thresholds:
28 xxx: print a color based on the value of `xxx` placeholder
29
30 Examples:
31 ```
32 # add multiple accounts
33 mail { #
34 accounts = { # {mail}
35 'maildir': [ # ├── {maildir}
36 {'path': '~/.mutt'}, # │ ├── {maildir_1}
37 {'path': '~/Mail'}, # │ └── {maildir_2}
38 ], # │
39 'mbox': [ # ├── {mbox}
40 {'path': '~/home.mbox'}, # │ ├── {mbox_1}
41 { # │ ├── {mbox_2}
42 'name': 'local', # <----│----│----└── {local}
43 'path': '~/mbox' # │ │
44 }, # │ │
45 { # │ └── {mbox_3}
46 'name': 'debian', # <----│---------└── {debian}
47 'path': '/var/mail/$USER' # │
48 'urgent': False, # <----│---- disable urgent
49 }, # │
50 ], # │
51 'mh': [ # ├── {mh}
52 {'path': '~/mh_mail'}, # │ └── {mh_1}
53 ], # │
54 'babyl': [ # ├── {babyl}
55 {'path': '~/babyl_mail'}, # │ └── {babyl_1}
56 ], # │
57 'mmdf': [ # ├── {mmdf}
58 {'path': '~/mmdf_mail'}, # │ └── {mmdf_1}
59 ] # │
60 'imap': [ # ├── {imap}
61 { # │ ├── {imap_1}
62 'name': 'home', # <----│----│----└── {home}
63 'user': 'lasers', # │ │
64 'password': 'kiss_my_butt!', # │ │
65 'server': 'imap.gmail.com', # │ │
66 'port': 993, # │ │
67 }, # │ │
68 { # │ └── {imap_2}
69 'name': 'work', # <----│---------└── {work}
70 'user': 'tobes', # │
71 'password': 'i_love_python', #
72 'server': 'imap.yahoo.com', #
73 # <---- no port, use port 993
74 'urgent': False, # <---- disable urgent
75 } #
76 ]
77 }
78 allow_urgent = False <---- disable urgent for all accounts
79 }
80
81 # add colors, disable urgent
82 mail {
83 format = '[\?color=mail&show Mail] {mail}'
84 thresholds = [(1, 'good'), (5, 'degraded'), (15, 'bad')]
85 allow_urgent = False
86 }
87
88 # identify the mailboxes, remove what you don't need
89 mail {
90 format = '[\?color=mail '
91 format += '[\?if=imap&color=#00ff00 IMAP ]'
92 format += '[\?if=maildir&color=#ffff00 MAILDIR ]'
93 format += '[\?if=mbox&color=#ff0000 MBOX ]'
94 format += '[\?if=babyl&color=#ffa500 BABYL ]'
95 format += '[\?if=mmdf&color=#00bfff MMDF ]'
96 format += '[\?if=mh&color=#ee82ee MH ]'
97 format += ']'
98 format += '[\?not_zero&color Mail {mail}|No Mail]'
99 }
100
101 # individual colorized mailboxes, remove what you don't need
102 mail {
103 format = '[\?if=imap&color=#00ff00 IMAP] {imap} '
104 format += '[\?if=maildir&color=#ffff00 MAILDIR] {maildir} '
105 format += '[\?if=mbox&color=#ff0000 MBOX] {mbox} '
106 format += '[\?if=babyl&color=#ffa500 BABYL] {babyl} '
107 format += '[\?if=mmdf&color=#00bfff MMDF] {mmdf} '
108 format += '[\?if=mh&color=#ee82ee MH] {mh}'
109 allow_urgent = False
110 }
111 ```
112
113 @author lasers
114
115 SAMPLE OUTPUT
116 {'full_text': 'Mail 15', 'urgent': True}
117
118 identified
119 [
120 {'full_text': 'IMAP ', 'color': '#00ff00'},
121 {'full_text': 'MAILDIR ', 'color': '#ffff00'},
122 {'full_text': 'MBOX ', 'color': '#ff0000'},
123 {'full_text': 'Mail 15'},
124 ]
125
126 individualized
127 [
128 {'full_text': 'IMAP ', 'color': '#00ff00'}, {'full_text': 'Mail 10 '},
129 {'full_text': 'MAILDIR ', 'color': '#ffff00'}, {'full_text': 'Mail 2 '},
130 {'full_text': 'MBOX ', 'color': '#ff0000'}, {'full_text': 'Mail 3'},
131 ]
132
133 no_mail
134 {'full_text': 'No Mail'}
135 """
136
137 import mailbox
138 from imaplib import IMAP4_SSL
139 from os.path import exists, expanduser, expandvars
140 STRING_MISSING = 'missing {} {}'
141
142
143 class Py3status:
144 """
145 """
146 # available configuration parameters
147 accounts = {}
148 cache_timeout = 60
149 format = '\?not_zero Mail {mail}|No Mail'
150 thresholds = []
151
152 def post_config_hook(self):
153 if not self.accounts:
154 raise Exception('missing accounts')
155
156 self.mailboxes = {}
157 mailboxes = ['Maildir', 'mbox', 'mh', 'Babyl', 'MMDF', 'IMAP']
158 for mail, accounts in self.accounts.items():
159 if mail not in [x.lower() for x in mailboxes]:
160 continue
161 self.mailboxes[mail] = []
162 for account in accounts:
163 account.setdefault('urgent', True)
164 if mail == 'imap':
165 for v in ['user', 'password', 'server']:
166 if v not in account:
167 raise Exception(STRING_MISSING.format(mail, v))
168 account.setdefault('port', 993)
169 self.mailboxes[mail].append(account)
170 else:
171 for box in mailboxes[:-1]:
172 if mail == box.lower():
173 if 'path' not in account:
174 raise Exception(
175 STRING_MISSING.format(mail, 'path')
176 )
177 path = expandvars(expanduser(account['path']))
178 if not exists(path):
179 path = 'path: {}'.format(path)
180 raise Exception(
181 STRING_MISSING.format(mail, path)
182 )
183 account['box'] = box
184 account['path'] = path
185 self.mailboxes[mail].append(account)
186 break
187
188 self.thresholds_init = self.py3.get_color_names_list(self.format)
189
190 def mail(self):
191 mail_data = {'mail': 0, 'urgent': False}
192 for k, v in self.mailboxes.items():
193 mail_data[k] = 0
194 for i, account in enumerate(v, 1):
195 if k == 'imap':
196 inbox = IMAP4_SSL(account['server'], account['port'])
197 inbox.login(account['user'], account['password'])
198 inbox.select(readonly=True)
199 imap_data = inbox.search(None, '(UNSEEN)')
200 count_mail = len(imap_data[1][0].split())
201 inbox.close()
202 inbox.logout()
203 else:
204 inbox = getattr(mailbox, account['box'])(
205 account['path'], create=False)
206 count_mail = len(inbox)
207 inbox.close()
208 if 'name' in account:
209 mail_data[account['name']] = count_mail
210 if account['urgent'] and count_mail:
211 mail_data['urgent'] = True
212 mail_data['%s_%s' % (k, i)] = count_mail
213 mail_data['mail'] += count_mail
214 mail_data[k] += count_mail
215
216 for x in self.thresholds_init:
217 if x in mail_data:
218 self.py3.threshold_get_color(mail_data[x], x)
219
220 response = {
221 'cached_until': self.py3.time_in(self.cache_timeout),
222 'full_text': self.py3.safe_format(self.format, mail_data)
223 }
224 if mail_data['urgent']:
225 response['urgent'] = True
226 return response
227
228
229 if __name__ == "__main__":
230 """
231 Run module in test mode.
232 """
233 from py3status.module_test import module_test
234 module_test(Py3status)
235
[end of py3status/modules/mail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py3status/modules/mail.py b/py3status/modules/mail.py
--- a/py3status/modules/mail.py
+++ b/py3status/modules/mail.py
@@ -138,6 +138,8 @@
from imaplib import IMAP4_SSL
from os.path import exists, expanduser, expandvars
STRING_MISSING = 'missing {} {}'
+STRING_WRONG_NAME = 'Accountname ({}) collides with "Maildir", "mbox", "mh", \
+"Babyl", "MMDF", "mail" or "IMAP".'
class Py3status:
@@ -160,6 +162,9 @@
continue
self.mailboxes[mail] = []
for account in accounts:
+ if account['name'] in [x.lower() for x in mailboxes] \
+ + ['mail']:
+ raise Exception(STRING_WRONG_NAME.format(account['name']))
account.setdefault('urgent', True)
if mail == 'imap':
for v in ['user', 'password', 'server']:
|
{"golden_diff": "diff --git a/py3status/modules/mail.py b/py3status/modules/mail.py\n--- a/py3status/modules/mail.py\n+++ b/py3status/modules/mail.py\n@@ -138,6 +138,8 @@\n from imaplib import IMAP4_SSL\n from os.path import exists, expanduser, expandvars\n STRING_MISSING = 'missing {} {}'\n+STRING_WRONG_NAME = 'Accountname ({}) collides with \"Maildir\", \"mbox\", \"mh\", \\\n+\"Babyl\", \"MMDF\", \"mail\" or \"IMAP\".'\n \n \n class Py3status:\n@@ -160,6 +162,9 @@\n continue\n self.mailboxes[mail] = []\n for account in accounts:\n+ if account['name'] in [x.lower() for x in mailboxes] \\\n+ + ['mail']:\n+ raise Exception(STRING_WRONG_NAME.format(account['name']))\n account.setdefault('urgent', True)\n if mail == 'imap':\n for v in ['user', 'password', 'server']:\n", "issue": "mail: namecollision in dict with mailbox named 'mail'\nIf you have a mailbox with \r\n```\r\n'name' = 'mail'\r\n```\r\nthe unread mails in this mailbox are counted twice and will reset the count for the mailboxes that have come before it. This is caused by line [208](https://github.com/ultrabug/py3status/blob/109e60a360db6deebe719a67afa07ade4e03640a/py3status/modules/mail.py#L208) as `mail_data['mail']` is set to count_mail and at Line [213](https://github.com/ultrabug/py3status/blob/109e60a360db6deebe719a67afa07ade4e03640a/py3status/modules/mail.py#L213)\r\n it is added again.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay number of messages in various mailbox formats.\nThis module supports Maildir, mbox, MH, Babyl, MMDF, and IMAP.\n\nConfiguration parameters:\n accounts: specify a dict consisting of mailbox types and a list of dicts\n consisting of mailbox settings and/or paths to use (default {})\n cache_timeout: refresh interval for this module (default 60)\n format: display format for this module\n (default '\\?not_zero Mail {mail}|No Mail')\n thresholds: specify color thresholds to use (default [])\n\nFormat placeholders:\n {mail} number of messages\n {maildir} number of Maildir messages\n {mbox} number of mbox messages\n {mh} number of MH messages\n {babyl} number of Babyl messages\n {mmdf} number of MMDF messages\n {imap} number of IMAP messages\n\n We can divide mailbox, eg `{maildir}`, into numbered placeholders based\n on number of mailbox accounts, eg `{maildir_1}`, and if we add `name` to\n a mailbox account, we can use `{name}` placeholder instead, eg `{home}`.\n\nColor thresholds:\n xxx: print a color based on the value of `xxx` placeholder\n\nExamples:\n```\n# add multiple accounts\nmail { #\n accounts = { # {mail}\n 'maildir': [ # \u251c\u2500\u2500 {maildir}\n {'path': '~/.mutt'}, # \u2502 \u251c\u2500\u2500 {maildir_1}\n {'path': '~/Mail'}, # \u2502 \u2514\u2500\u2500 {maildir_2}\n ], # \u2502\n 'mbox': [ # \u251c\u2500\u2500 {mbox}\n {'path': '~/home.mbox'}, # \u2502 \u251c\u2500\u2500 {mbox_1}\n { # \u2502 \u251c\u2500\u2500 {mbox_2}\n 'name': 'local', # <----\u2502----\u2502----\u2514\u2500\u2500 {local}\n 'path': '~/mbox' # \u2502 \u2502\n }, # \u2502 \u2502\n { # \u2502 \u2514\u2500\u2500 {mbox_3}\n 'name': 'debian', # <----\u2502---------\u2514\u2500\u2500 {debian}\n 'path': '/var/mail/$USER' # \u2502\n 'urgent': False, # <----\u2502---- disable urgent\n }, # \u2502\n ], # \u2502\n 'mh': [ # \u251c\u2500\u2500 {mh}\n {'path': '~/mh_mail'}, # \u2502 \u2514\u2500\u2500 {mh_1}\n ], # \u2502\n 'babyl': [ # \u251c\u2500\u2500 {babyl}\n {'path': '~/babyl_mail'}, # \u2502 \u2514\u2500\u2500 {babyl_1}\n ], # \u2502\n 'mmdf': [ # \u251c\u2500\u2500 {mmdf}\n {'path': 
'~/mmdf_mail'}, # \u2502 \u2514\u2500\u2500 {mmdf_1}\n ] # \u2502\n 'imap': [ # \u251c\u2500\u2500 {imap}\n { # \u2502 \u251c\u2500\u2500 {imap_1}\n 'name': 'home', # <----\u2502----\u2502----\u2514\u2500\u2500 {home}\n 'user': 'lasers', # \u2502 \u2502\n 'password': 'kiss_my_butt!', # \u2502 \u2502\n 'server': 'imap.gmail.com', # \u2502 \u2502\u00a0\n 'port': 993, # \u2502 \u2502\n }, # \u2502 \u2502\n { # \u2502 \u2514\u2500\u2500 {imap_2}\n 'name': 'work', # <----\u2502---------\u2514\u2500\u2500 {work}\n 'user': 'tobes', # \u2502\n 'password': 'i_love_python', #\n 'server': 'imap.yahoo.com', #\n # <---- no port, use port 993\n 'urgent': False, # <---- disable urgent\n } #\n ]\n }\n allow_urgent = False <---- disable urgent for all accounts\n}\n\n# add colors, disable urgent\nmail {\n format = '[\\?color=mail&show Mail] {mail}'\n thresholds = [(1, 'good'), (5, 'degraded'), (15, 'bad')]\n allow_urgent = False\n}\n\n# identify the mailboxes, remove what you don't need\nmail {\n format = '[\\?color=mail '\n format += '[\\?if=imap&color=#00ff00 IMAP ]'\n format += '[\\?if=maildir&color=#ffff00 MAILDIR ]'\n format += '[\\?if=mbox&color=#ff0000 MBOX ]'\n format += '[\\?if=babyl&color=#ffa500 BABYL ]'\n format += '[\\?if=mmdf&color=#00bfff MMDF ]'\n format += '[\\?if=mh&color=#ee82ee MH ]'\n format += ']'\n format += '[\\?not_zero&color Mail {mail}|No Mail]'\n}\n\n# individual colorized mailboxes, remove what you don't need\nmail {\n format = '[\\?if=imap&color=#00ff00 IMAP] {imap} '\n format += '[\\?if=maildir&color=#ffff00 MAILDIR] {maildir} '\n format += '[\\?if=mbox&color=#ff0000 MBOX] {mbox} '\n format += '[\\?if=babyl&color=#ffa500 BABYL] {babyl} '\n format += '[\\?if=mmdf&color=#00bfff MMDF] {mmdf} '\n format += '[\\?if=mh&color=#ee82ee MH] {mh}'\n allow_urgent = False\n}\n```\n\n@author lasers\n\nSAMPLE OUTPUT\n{'full_text': 'Mail 15', 'urgent': True}\n\nidentified\n[\n {'full_text': 'IMAP ', 'color': '#00ff00'},\n {'full_text': 'MAILDIR ', 'color': '#ffff00'},\n {'full_text': 'MBOX ', 'color': '#ff0000'},\n {'full_text': 'Mail 15'},\n]\n\nindividualized\n[\n {'full_text': 'IMAP ', 'color': '#00ff00'}, {'full_text': 'Mail 10 '},\n {'full_text': 'MAILDIR ', 'color': '#ffff00'}, {'full_text': 'Mail 2 '},\n {'full_text': 'MBOX ', 'color': '#ff0000'}, {'full_text': 'Mail 3'},\n]\n\nno_mail\n{'full_text': 'No Mail'}\n\"\"\"\n\nimport mailbox\nfrom imaplib import IMAP4_SSL\nfrom os.path import exists, expanduser, expandvars\nSTRING_MISSING = 'missing {} {}'\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n accounts = {}\n cache_timeout = 60\n format = '\\?not_zero Mail {mail}|No Mail'\n thresholds = []\n\n def post_config_hook(self):\n if not self.accounts:\n raise Exception('missing accounts')\n\n self.mailboxes = {}\n mailboxes = ['Maildir', 'mbox', 'mh', 'Babyl', 'MMDF', 'IMAP']\n for mail, accounts in self.accounts.items():\n if mail not in [x.lower() for x in mailboxes]:\n continue\n self.mailboxes[mail] = []\n for account in accounts:\n account.setdefault('urgent', True)\n if mail == 'imap':\n for v in ['user', 'password', 'server']:\n if v not in account:\n raise Exception(STRING_MISSING.format(mail, v))\n account.setdefault('port', 993)\n self.mailboxes[mail].append(account)\n else:\n for box in mailboxes[:-1]:\n if mail == box.lower():\n if 'path' not in account:\n raise Exception(\n STRING_MISSING.format(mail, 'path')\n )\n path = expandvars(expanduser(account['path']))\n if not exists(path):\n path = 'path: {}'.format(path)\n raise Exception(\n 
STRING_MISSING.format(mail, path)\n )\n account['box'] = box\n account['path'] = path\n self.mailboxes[mail].append(account)\n break\n\n self.thresholds_init = self.py3.get_color_names_list(self.format)\n\n def mail(self):\n mail_data = {'mail': 0, 'urgent': False}\n for k, v in self.mailboxes.items():\n mail_data[k] = 0\n for i, account in enumerate(v, 1):\n if k == 'imap':\n inbox = IMAP4_SSL(account['server'], account['port'])\n inbox.login(account['user'], account['password'])\n inbox.select(readonly=True)\n imap_data = inbox.search(None, '(UNSEEN)')\n count_mail = len(imap_data[1][0].split())\n inbox.close()\n inbox.logout()\n else:\n inbox = getattr(mailbox, account['box'])(\n account['path'], create=False)\n count_mail = len(inbox)\n inbox.close()\n if 'name' in account:\n mail_data[account['name']] = count_mail\n if account['urgent'] and count_mail:\n mail_data['urgent'] = True\n mail_data['%s_%s' % (k, i)] = count_mail\n mail_data['mail'] += count_mail\n mail_data[k] += count_mail\n\n for x in self.thresholds_init:\n if x in mail_data:\n self.py3.threshold_get_color(mail_data[x], x)\n\n response = {\n 'cached_until': self.py3.time_in(self.cache_timeout),\n 'full_text': self.py3.safe_format(self.format, mail_data)\n }\n if mail_data['urgent']:\n response['urgent'] = True\n return response\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/mail.py"}]}
| 3,588 | 226 |
gh_patches_debug_6425
|
rasdani/github-patches
|
git_diff
|
helmholtz-analytics__heat-736
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Heat software development status "Beta"
**Related**
--
**Feature functionality**
The software development status in PyPI is listed as "3 - Alpha". We are currently considering Heat as Beta, so this should be reflected in the status, which I propose to set to "4 - Beta".
**Additional context**
--
</issue>
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import codecs
3
4
5 with codecs.open("README.md", "r", "utf-8") as handle:
6 long_description = handle.read()
7
8 __version__ = None # appeases flake, assignment in exec() below
9 with open("./heat/core/version.py") as handle:
10 exec(handle.read())
11
12 setup(
13 name="heat",
14 packages=find_packages(exclude=("*tests*", "*benchmarks*")),
15 data_files=["README.md", "LICENSE"],
16 version=__version__,
17 description="A framework for high-performance data analytics and machine learning.",
18 long_description=long_description,
19 long_description_content_type="text/markdown",
20 author="Helmholtz Association",
21 author_email="[email protected]",
22 url="https://github.com/helmholtz-analytics/heat",
23 keywords=["data", "analytics", "tensors", "distributed", "gpu"],
24 python_requires="~=3.6",
25 classifiers=[
26 "Development Status :: 3 - Alpha",
27 "Programming Language :: Python :: 3.6",
28 "Programming Language :: Python :: 3.7",
29 "Programming Language :: Python :: 3.8",
30 "License :: OSI Approved :: MIT License",
31 "Intended Audience :: Science/Research",
32 "Topic :: Scientific/Engineering",
33 ],
34 install_requires=[
35 "mpi4py>=3.0.0",
36 "numpy>=1.13.0",
37 "torch>=1.7.0",
38 "scipy>=0.14.0",
39 "pillow>=6.0.0",
40 "torchvision>=0.5.0",
41 ],
42 extras_require={
43 "hdf5": ["h5py>=2.8.0"],
44 "netcdf": ["netCDF4>=1.4.0"],
45 "dev": ["pre-commit>=1.18.3"],
46 },
47 )
48
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
keywords=["data", "analytics", "tensors", "distributed", "gpu"],
python_requires="~=3.6",
classifiers=[
- "Development Status :: 3 - Alpha",
+ "Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n python_requires=\"~=3.6\",\n classifiers=[\n- \"Development Status :: 3 - Alpha\",\n+ \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "issue": "Heat software development status \"Beta\"\n**Related**\r\n--\r\n\r\n**Feature functionality**\r\nThe software development status in PyPI is listed as \"3 - Alpha\". We are currently considering Heat as Beta, so this should be reflected in the status, which I propose to set to \"4 - Beta\".\r\n\r\n**Additional context**\r\n--\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport codecs\n\n\nwith codecs.open(\"README.md\", \"r\", \"utf-8\") as handle:\n long_description = handle.read()\n\n__version__ = None # appeases flake, assignment in exec() below\nwith open(\"./heat/core/version.py\") as handle:\n exec(handle.read())\n\nsetup(\n name=\"heat\",\n packages=find_packages(exclude=(\"*tests*\", \"*benchmarks*\")),\n data_files=[\"README.md\", \"LICENSE\"],\n version=__version__,\n description=\"A framework for high-performance data analytics and machine learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Helmholtz Association\",\n author_email=\"[email protected]\",\n url=\"https://github.com/helmholtz-analytics/heat\",\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n python_requires=\"~=3.6\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n ],\n install_requires=[\n \"mpi4py>=3.0.0\",\n \"numpy>=1.13.0\",\n \"torch>=1.7.0\",\n \"scipy>=0.14.0\",\n \"pillow>=6.0.0\",\n \"torchvision>=0.5.0\",\n ],\n extras_require={\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.4.0\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n },\n)\n", "path": "setup.py"}]}
| 1,099 | 120 |
gh_patches_debug_17791
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-119
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Extend the whiteboard feature to consider all possible whiteboard words
https://github.com/mozilla/bugbug/blob/1784a10346d9dedcb2e18e076f3d482e39be93a1/bugbug/bug_features.py#L83
</issue>
<code>
[start of bugbug/bug_features.py]
1 # -*- coding: utf-8 -*-
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 # You can obtain one at http://mozilla.org/MPL/2.0/.
5
6 import re
7 from datetime import datetime
8 from datetime import timezone
9
10 import pandas as pd
11 from libmozdata import versions
12 from sklearn.base import BaseEstimator
13 from sklearn.base import TransformerMixin
14
15 from bugbug import bug_snapshot
16 from bugbug import repository
17
18
19 def field(bug, field):
20 if field in bug and bug[field] != '---':
21 return bug[field]
22
23 return None
24
25
26 class has_str(object):
27 def __call__(self, bug):
28 return field(bug, 'cf_has_str')
29
30
31 class has_regression_range(object):
32 def __call__(self, bug):
33 return field(bug, 'cf_has_regression_range')
34
35
36 class has_crash_signature(object):
37 def __call__(self, bug):
38 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''
39
40
41 class keywords(object):
42 def __init__(self, to_ignore=set()):
43 self.to_ignore = to_ignore
44
45 def __call__(self, bug):
46 keywords = []
47 subkeywords = []
48 for keyword in bug['keywords']:
49 if keyword in self.to_ignore:
50 continue
51
52 keywords.append(keyword)
53
54 if keyword.startswith('sec-'):
55 subkeywords.append('sec-')
56 elif keyword.startswith('csectype-'):
57 subkeywords.append('csectype-')
58 return keywords + subkeywords
59
60
61 class severity(object):
62 def __call__(self, bug):
63 return field(bug, 'severity')
64
65
66 class is_coverity_issue(object):
67 def __call__(self, bug):
68 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None
69
70
71 class has_url(object):
72 def __call__(self, bug):
73 return bug['url'] != ''
74
75
76 class has_w3c_url(object):
77 def __call__(self, bug):
78 return 'w3c' in bug['url']
79
80
81 class has_github_url(object):
82 def __call__(self, bug):
83 return 'github' in bug['url']
84
85
86 class whiteboard(object):
87 def __call__(self, bug):
88 ret = []
89
90 # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only
91
92 for elem in ['memshrink', '[ux]']:
93 if elem in bug['whiteboard'].lower():
94 ret.append(elem)
95
96 return ret
97
98
99 class patches(object):
100 def __call__(self, bug):
101 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])
102
103
104 class landings(object):
105 def __call__(self, bug):
106 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])
107
108
109 class title(object):
110 def __call__(self, bug):
111 ret = []
112
113 keywords = [
114 'fail',
115 ]
116 for keyword in keywords:
117 if keyword in bug['summary'].lower():
118 ret.append(keyword)
119
120 return ret
121
122
123 class product(object):
124 def __call__(self, bug):
125 return bug['product']
126
127
128 class component(object):
129 def __call__(self, bug):
130 return bug['component']
131
132
133 class is_mozillian(object):
134 def __call__(self, bug):
135 return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])
136
137
138 class delta_request_merge(object):
139 def __call__(self, bug):
140 for history in bug['history']:
141 for change in history['changes']:
142 if change['added'].startswith('approval-mozilla'):
143 uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
144 timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime
145 return timedelta.days + timedelta.seconds / (24 * 60 * 60)
146
147 return None
148
149
150 class commit_added(object):
151 def __call__(self, bug):
152 return sum(commit['added'] for commit in bug['commits'])
153
154
155 class commit_deleted(object):
156 def __call__(self, bug):
157 return sum(commit['deleted'] for commit in bug['commits'])
158
159
160 class commit_types(object):
161 def __call__(self, bug):
162 return sum((commit['types'] for commit in bug['commits']), [])
163
164
165 def cleanup_url(text):
166 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text)
167 return re.sub(r'http\S+', '__URL__', text)
168
169
170 def cleanup_fileref(text):
171 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text)
172
173
174 def cleanup_responses(text):
175 return re.sub('>[^\n]+', ' ', text)
176
177
178 def cleanup_hex(text):
179 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text)
180
181
182 def cleanup_dll(text):
183 return re.sub(r'\w+(\.dll|\.so|\.dylib)\b', '__DLL_NAME__', text)
184
185
186 def cleanup_synonyms(text):
187 synonyms = [
188 ('safemode', ['safemode', 'safe mode']),
189 ('str', ['str', 'steps to reproduce', 'repro steps']),
190 ('uaf', ['uaf', 'use after free', 'use-after-free']),
191 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),
192 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),
193 ('spec', ['spec', 'specification']),
194 ]
195
196 for synonym_group, synonym_list in synonyms:
197 text = re.sub('|'.join(fr'\b{synonym}\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)
198
199 return text
200
201
202 def cleanup_crash(text):
203 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text)
204
205
206 class BugExtractor(BaseEstimator, TransformerMixin):
207 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):
208 self.feature_extractors = feature_extractors
209 self.cleanup_functions = cleanup_functions
210 self.rollback = rollback
211 self.rollback_when = rollback_when
212 self.commit_map = repository.get_commit_map() if commit_data else None
213
214 def fit(self, x, y=None):
215 return self
216
217 def transform(self, bugs):
218 results = []
219
220 for bug in bugs:
221 bug_id = bug['id']
222
223 if self.rollback:
224 bug = bug_snapshot.rollback(bug, self.rollback_when)
225
226 data = {}
227
228 if self.commit_map is not None:
229 if bug_id in self.commit_map:
230 bug['commits'] = self.commit_map[bug_id]
231 else:
232 bug['commits'] = []
233
234 for f in self.feature_extractors:
235 res = f(bug)
236
237 if res is None:
238 continue
239
240 if isinstance(res, list):
241 for item in res:
242 data[f.__class__.__name__ + '-' + item] = 'True'
243 continue
244
245 if isinstance(res, bool):
246 res = str(res)
247
248 data[f.__class__.__name__] = res
249
250 # TODO: Try simply using all possible fields instead of extracting features manually.
251
252 for cleanup_function in self.cleanup_functions:
253 bug['summary'] = cleanup_function(bug['summary'])
254 for c in bug['comments']:
255 c['text'] = cleanup_function(c['text'])
256
257 result = {
258 'data': data,
259 'title': bug['summary'],
260 'first_comment': bug['comments'][0]['text'],
261 'comments': ' '.join([c['text'] for c in bug['comments']]),
262 }
263
264 results.append(result)
265
266 return pd.DataFrame(results)
267
[end of bugbug/bug_features.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py
--- a/bugbug/bug_features.py
+++ b/bugbug/bug_features.py
@@ -85,15 +85,26 @@
class whiteboard(object):
def __call__(self, bug):
- ret = []
- # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only
+ # Split by '['
+ paren_splits = bug['whiteboard'].lower().split('[')
- for elem in ['memshrink', '[ux]']:
- if elem in bug['whiteboard'].lower():
- ret.append(elem)
+ # Split splits by space if they weren't in [ and ].
+ splits = []
+ for paren_split in paren_splits:
+ if ']' in paren_split:
+ paren_split = paren_split.split(']')
+ splits += paren_split
+ else:
+ splits += paren_split.split(' ')
- return ret
+ # Remove empty splits and strip
+ splits = [split.strip() for split in splits if split.strip() != '']
+
+ # For splits which contain ':', return both the whole string and the string before ':'.
+ splits += [split.split(':', 1)[0] for split in splits if ':' in split]
+
+ return splits
class patches(object):
|
{"golden_diff": "diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py\n--- a/bugbug/bug_features.py\n+++ b/bugbug/bug_features.py\n@@ -85,15 +85,26 @@\n \n class whiteboard(object):\n def __call__(self, bug):\n- ret = []\n \n- # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only\n+ # Split by '['\n+ paren_splits = bug['whiteboard'].lower().split('[')\n \n- for elem in ['memshrink', '[ux]']:\n- if elem in bug['whiteboard'].lower():\n- ret.append(elem)\n+ # Split splits by space if they weren't in [ and ].\n+ splits = []\n+ for paren_split in paren_splits:\n+ if ']' in paren_split:\n+ paren_split = paren_split.split(']')\n+ splits += paren_split\n+ else:\n+ splits += paren_split.split(' ')\n \n- return ret\n+ # Remove empty splits and strip\n+ splits = [split.strip() for split in splits if split.strip() != '']\n+\n+ # For splits which contain ':', return both the whole string and the string before ':'.\n+ splits += [split.split(':', 1)[0] for split in splits if ':' in split]\n+\n+ return splits\n \n \n class patches(object):\n", "issue": "Extend the whiteboard feature to consider all possible whiteboard words\nhttps://github.com/mozilla/bugbug/blob/1784a10346d9dedcb2e18e076f3d482e39be93a1/bugbug/bug_features.py#L83\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom datetime import datetime\nfrom datetime import timezone\n\nimport pandas as pd\nfrom libmozdata import versions\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\nfrom bugbug import repository\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n ret = []\n\n # TODO: Add any [XXX:YYY] that appears in the whiteboard as [XXX: only\n\n for elem in ['memshrink', '[ux]']:\n if elem in bug['whiteboard'].lower():\n ret.append(elem)\n\n return ret\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 
for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\nclass delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\n for change in history['changes']:\n if change['added'].startswith('approval-mozilla'):\n uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)\n timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime\n return timedelta.days + timedelta.seconds / (24 * 60 * 60)\n\n return None\n\n\nclass commit_added(object):\n def __call__(self, bug):\n return sum(commit['added'] for commit in bug['commits'])\n\n\nclass commit_deleted(object):\n def __call__(self, bug):\n return sum(commit['deleted'] for commit in bug['commits'])\n\n\nclass commit_types(object):\n def __call__(self, bug):\n return sum((commit['types'] for commit in bug['commits']), [])\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+(\\.dll|\\.so|\\.dylib)\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ('spec', ['spec', 'specification']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(fr'\\b{synonym}\\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_map = repository.get_commit_map() if commit_data else None\n\n def fit(self, x, y=None):\n return self\n\n def 
transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n if self.commit_map is not None:\n if bug_id in self.commit_map:\n bug['commits'] = self.commit_map[bug_id]\n else:\n bug['commits'] = []\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}]}
| 3,252 | 312 |
gh_patches_debug_467
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__codeforlife-portal-442
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
New run on local fails because of latest pillow version
Needs to be set to 2.9
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 from setuptools import find_packages, setup
3 import versioneer
4
5 setup(name='codeforlife-portal',
6 cmdclass=versioneer.get_cmdclass(),
7 version=versioneer.get_version(),
8 packages=find_packages(),
9 include_package_data=True,
10 install_requires=[
11 'django==1.8.2',
12 'django-appconf==1.0.1',
13 'django-countries==3.4.1',
14 'djangorestframework==3.1.3',
15 'django-jquery==1.9.1',
16 'django-autoconfig==0.3.6',
17 'django-pipeline==1.5.4',
18
19 'pyyaml==3.10',
20 'rapid-router >= 1.0.0.post.dev1',
21 'six==1.9.0',
22 'docutils==0.12',
23 'django-recaptcha-field==1.0b2',
24 'reportlab==3.2.0',
25 'postcodes==0.1',
26 'django-formtools==1.0',
27 'django-two-factor-auth==1.2.0',
28 'urllib3==1.10.4',
29 'requests==2.7.0',
30
31 'django-cms==3.1.2',
32
33 'django-classy-tags==0.6.1',
34 'django-treebeard==3.0',
35 'django-sekizai==0.8.2',
36 'djangocms-admin-style==0.2.8',
37
38 'djangocms-text-ckeditor==2.6.0',
39 'djangocms-link==1.6.2',
40 'djangocms-snippet==1.5',
41 'djangocms-style==1.5',
42 'djangocms-column==1.5',
43 'djangocms-grid==1.2',
44 'djangocms-oembed==0.5',
45 'djangocms-table==1.2',
46 'djangocms-file==0.1',
47 'djangocms_flash==0.2.0',
48 'djangocms_googlemap==0.3',
49 'djangocms_inherit==0.1',
50 'djangocms_picture==0.1',
51 'djangocms_teaser==0.1',
52 'djangocms_video==0.1',
53 'django-online-status==0.1.0',
54
55
56 'Pillow>=2.9.0',
57 'django-reversion==1.9.3',
58 'sqlparse',
59 'libsass',
60 ],
61 tests_require=[
62 'django-setuptest',
63 'django-selenium-clean==0.2.1',
64 'responses==0.4.0',
65 'selenium==2.48.0',
66 ],
67 test_suite='setuptest.setuptest.SetupTestSuite',
68 zip_safe=False,
69 )
70
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@
'django-online-status==0.1.0',
- 'Pillow>=2.9.0',
+ 'Pillow==2.9.0',
'django-reversion==1.9.3',
'sqlparse',
'libsass',
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,7 @@\n 'django-online-status==0.1.0',\n \n \n- 'Pillow>=2.9.0',\n+ 'Pillow==2.9.0',\n 'django-reversion==1.9.3',\n 'sqlparse',\n 'libsass',\n", "issue": "New run on local fails because of latest pillow version\nNeeds to be set to 2.9\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\nimport versioneer\n\nsetup(name='codeforlife-portal',\n cmdclass=versioneer.get_cmdclass(),\n version=versioneer.get_version(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'django==1.8.2',\n 'django-appconf==1.0.1',\n 'django-countries==3.4.1',\n 'djangorestframework==3.1.3',\n 'django-jquery==1.9.1',\n 'django-autoconfig==0.3.6',\n 'django-pipeline==1.5.4',\n\n 'pyyaml==3.10',\n 'rapid-router >= 1.0.0.post.dev1',\n 'six==1.9.0',\n 'docutils==0.12',\n 'django-recaptcha-field==1.0b2',\n 'reportlab==3.2.0',\n 'postcodes==0.1',\n 'django-formtools==1.0',\n 'django-two-factor-auth==1.2.0',\n 'urllib3==1.10.4',\n 'requests==2.7.0',\n\n 'django-cms==3.1.2',\n\n 'django-classy-tags==0.6.1',\n 'django-treebeard==3.0',\n 'django-sekizai==0.8.2',\n 'djangocms-admin-style==0.2.8',\n\n 'djangocms-text-ckeditor==2.6.0',\n 'djangocms-link==1.6.2',\n 'djangocms-snippet==1.5',\n 'djangocms-style==1.5',\n 'djangocms-column==1.5',\n 'djangocms-grid==1.2',\n 'djangocms-oembed==0.5',\n 'djangocms-table==1.2',\n 'djangocms-file==0.1',\n 'djangocms_flash==0.2.0',\n 'djangocms_googlemap==0.3',\n 'djangocms_inherit==0.1',\n 'djangocms_picture==0.1',\n 'djangocms_teaser==0.1',\n 'djangocms_video==0.1',\n 'django-online-status==0.1.0',\n\n\n 'Pillow>=2.9.0',\n 'django-reversion==1.9.3',\n 'sqlparse',\n 'libsass',\n ],\n tests_require=[\n 'django-setuptest',\n 'django-selenium-clean==0.2.1',\n 'responses==0.4.0',\n 'selenium==2.48.0',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n zip_safe=False,\n )\n", "path": "setup.py"}]}
| 1,331 | 91 |
gh_patches_debug_18540
|
rasdani/github-patches
|
git_diff
|
Mailu__Mailu-2111
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Discussion of 1.9 roadmap
As proposed in the last project meeting (#1582 (comment)), we are experimenting with a discussion driven model for determining the roadmap of the next release. This follows the existing practice of the succesful community run project Gitea (e.g go-gitea/gitea#14477).
During last meeting someone made the remark that with everything in the pipeline we already have sufficient work for a 1.9 release. I agree. We have this special situation due to the 1.8rc where we had a release candidate for over 7 months. This caused many PRs to be merged, but not included in 1.8. These are now waiting to be made available via a 1.9 release. This also caused a lot of already submitted PRs not be reviewed yet. We wanted to focus on getting 1.8 out of the door.
There are new PRs waiting for review for
- Updating the interface to AdminLTE3 #1800. This is not only a security update, but also adds a lot of functionality such as
- a language selector
- more modern look&feel of interface elements
- filtering and column ordering for tables.
- Multiple PRs for increasing the overall security of mailu #1922, #1916, #1902.
- For these PRs we (I?) will also introduce a brand new security documentation page where you can find all documentation on what security measures are in place, how it works and how it can be tweaked.
- updated polish translations #1751 and completely new Hebrew translation #1873.
We have already merged PRs on master for
- Completely new CLI configuration import/export which really exports/imports the complete mailu configuration database. With this it is possible to migrate the mailu config to another mailu instance. You can also use it to easily add new configuration (e.g. import users with specific settings) https://mailu.io/master/cli.html#config-export
- single sign on for webmail. Webmail uses the admin web interface for authenticating users
- various security related enhancements
- All other already merged PRs which were not included in 1.8. See all newsfragments for the details https://github.com/Mailu/Mailu/tree/master/towncrier/newsfragments
IMHO All those features together are sufficient to release a new version.
I suggest we review all open PRs and then check what issues really need to be included as well for 1.9 and include these on the roadmap. Examples are:
- I think a good example is the new SSO #1929. When you are redirected to the login page, it does not show that the login is for the webmail. You have the feeling that you are redirected to the wrong page. See the demo site for an example (https://test.mailu.io/webmail/). Of course this is not production worthy.
- Incorrect documentation
- E.g.the documentation on translations #1869. We have no weblate instance anymore for web-based translations. The documentation must be updated as it is incorrect now. You can only do translations manually with a tool such as poedit and send a PR for getting your new translation files merged .
- documentation on development environment for the admin interface #1577. The faulty documentation should be removed or directly updated with the correct steps.
For small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included.
What are your thoughts? Please share your feedback.
Regardless the above wall of text, feel free to mention any feature/issue you would like included in 1.9.
</issue>
<code>
[start of docs/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4
5 import os
6
7 extensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme']
8 templates_path = ['_templates']
9 source_suffix = '.rst'
10 master_doc = 'index'
11 project = 'Mailu'
12 copyright = '2018, Mailu authors'
13 author = 'Mailu authors'
14 version = release = os.environ.get('VERSION', 'master')
15 language = None
16 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml']
17 pygments_style = 'sphinx'
18 todo_include_todos = False
19 html_theme = 'sphinx_rtd_theme'
20 html_title = 'Mailu, Docker based mail server'
21 html_static_path = []
22 htmlhelp_basename = 'Mailudoc'
23
24 # Custom sidebar templates, must be a dictionary that maps document names
25 # to template names.
26 html_sidebars = {
27 '**': [
28 'relations.html',
29 'searchbox.html',
30 ]
31 }
32
33 # Theme options
34 html_context = {
35 'display_github': True,
36 'github_user': 'mailu',
37 'github_repo': 'mailu',
38 'github_version': version,
39 'stable_version': '1.8',
40 'versions': [
41 ('1.5', '/1.5/'),
42 ('1.6', '/1.6/'),
43 ('1.7', '/1.7/'),
44 ('1.8', '/1.8/'),
45 ('master', '/master/')
46 ],
47 'conf_py_path': '/docs/'
48 }
49
[end of docs/conf.py]
[start of core/admin/mailu/configuration.py]
1 import os
2
3 from datetime import timedelta
4 from socrate import system
5 import ipaddress
6
7 DEFAULT_CONFIG = {
8 # Specific to the admin UI
9 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',
10 'BABEL_DEFAULT_LOCALE': 'en',
11 'BABEL_DEFAULT_TIMEZONE': 'UTC',
12 'BOOTSTRAP_SERVE_LOCAL': True,
13 'RATELIMIT_STORAGE_URL': '',
14 'QUOTA_STORAGE_URL': '',
15 'DEBUG': False,
16 'DOMAIN_REGISTRATION': False,
17 'TEMPLATES_AUTO_RELOAD': True,
18 'MEMORY_SESSIONS': False,
19 # Database settings
20 'DB_FLAVOR': None,
21 'DB_USER': 'mailu',
22 'DB_PW': None,
23 'DB_HOST': 'database',
24 'DB_NAME': 'mailu',
25 'SQLITE_DATABASE_FILE':'data/main.db',
26 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',
27 'SQLALCHEMY_TRACK_MODIFICATIONS': False,
28 # Statistics management
29 'INSTANCE_ID_PATH': '/data/instance',
30 'STATS_ENDPOINT': '18.{}.stats.mailu.io',
31 # Common configuration variables
32 'SECRET_KEY': 'changeMe',
33 'DOMAIN': 'mailu.io',
34 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',
35 'POSTMASTER': 'postmaster',
36 'WILDCARD_SENDERS': '',
37 'TLS_FLAVOR': 'cert',
38 'INBOUND_TLS_ENFORCE': False,
39 'DEFER_ON_TLS_ERROR': True,
40 'AUTH_RATELIMIT_IP': '60/hour',
41 'AUTH_RATELIMIT_IP_V4_MASK': 24,
42 'AUTH_RATELIMIT_IP_V6_MASK': 56,
43 'AUTH_RATELIMIT_USER': '100/day',
44 'AUTH_RATELIMIT_EXEMPTION': '',
45 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,
46 'DISABLE_STATISTICS': False,
47 # Mail settings
48 'DMARC_RUA': None,
49 'DMARC_RUF': None,
50 'WELCOME': False,
51 'WELCOME_SUBJECT': 'Dummy welcome topic',
52 'WELCOME_BODY': 'Dummy welcome body',
53 'DKIM_SELECTOR': 'dkim',
54 'DKIM_PATH': '/dkim/{domain}.{selector}.key',
55 'DEFAULT_QUOTA': 1000000000,
56 'MESSAGE_RATELIMIT': '200/day',
57 'MESSAGE_RATELIMIT_EXEMPTION': '',
58 'RECIPIENT_DELIMITER': '',
59 # Web settings
60 'SITENAME': 'Mailu',
61 'WEBSITE': 'https://mailu.io',
62 'ADMIN' : 'none',
63 'WEB_ADMIN': '/admin',
64 'WEB_WEBMAIL': '/webmail',
65 'WEBMAIL': 'none',
66 'RECAPTCHA_PUBLIC_KEY': '',
67 'RECAPTCHA_PRIVATE_KEY': '',
68 'LOGO_URL': None,
69 'LOGO_BACKGROUND': None,
70 # Advanced settings
71 'LOG_LEVEL': 'WARNING',
72 'SESSION_KEY_BITS': 128,
73 'SESSION_TIMEOUT': 3600,
74 'PERMANENT_SESSION_LIFETIME': 30*24*3600,
75 'SESSION_COOKIE_SECURE': True,
76 'CREDENTIAL_ROUNDS': 12,
77 'TZ': 'Etc/UTC',
78 # Host settings
79 'HOST_IMAP': 'imap',
80 'HOST_LMTP': 'imap:2525',
81 'HOST_POP3': 'imap',
82 'HOST_SMTP': 'smtp',
83 'HOST_AUTHSMTP': 'smtp',
84 'HOST_ADMIN': 'admin',
85 'HOST_WEBMAIL': 'webmail',
86 'HOST_WEBDAV': 'webdav:5232',
87 'HOST_REDIS': 'redis',
88 'HOST_FRONT': 'front',
89 'SUBNET': '192.168.203.0/24',
90 'SUBNET6': None,
91 'POD_ADDRESS_RANGE': None
92 }
93
94 class ConfigManager:
95 """ Naive configuration manager that uses environment only
96 """
97
98 DB_TEMPLATES = {
99 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',
100 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',
101 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'
102 }
103
104 def __init__(self):
105 self.config = dict()
106
107 def get_host_address(self, name):
108 # if MYSERVICE_ADDRESS is defined, use this
109 if f'{name}_ADDRESS' in os.environ:
110 return os.environ.get(f'{name}_ADDRESS')
111 # otherwise use the host name and resolve it
112 return system.resolve_address(self.config[f'HOST_{name}'])
113
114 def resolve_hosts(self):
115 for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:
116 self.config[f'{key}_ADDRESS'] = self.get_host_address(key)
117 if self.config['WEBMAIL'] != 'none':
118 self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')
119
120 def __get_env(self, key, value):
121 key_file = key + "_FILE"
122 if key_file in os.environ:
123 with open(os.environ.get(key_file)) as file:
124 value_from_file = file.read()
125 return value_from_file.strip()
126 else:
127 return os.environ.get(key, value)
128
129 def __coerce_value(self, value):
130 if isinstance(value, str) and value.lower() in ('true','yes'):
131 return True
132 elif isinstance(value, str) and value.lower() in ('false', 'no'):
133 return False
134 return value
135
136 def init_app(self, app):
137 # get current app config
138 self.config.update(app.config)
139 # get environment variables
140 self.config.update({
141 key: self.__coerce_value(self.__get_env(key, value))
142 for key, value in DEFAULT_CONFIG.items()
143 })
144 self.resolve_hosts()
145
146 # automatically set the sqlalchemy string
147 if self.config['DB_FLAVOR']:
148 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]
149 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)
150
151 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2'
152 self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/1'
153 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3'
154 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'
155 self.config['SESSION_COOKIE_HTTPONLY'] = True
156 self.config['SESSION_PERMANENT'] = True
157 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])
158 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])
159 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])
160 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])
161 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]
162 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)
163 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])
164 self.config['HOSTNAMES'] = ','.join(hostnames)
165 self.config['HOSTNAME'] = hostnames[0]
166
167 # update the app config
168 app.config.update(self.config)
169
170
[end of core/admin/mailu/configuration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py
--- a/core/admin/mailu/configuration.py
+++ b/core/admin/mailu/configuration.py
@@ -27,7 +27,7 @@
'SQLALCHEMY_TRACK_MODIFICATIONS': False,
# Statistics management
'INSTANCE_ID_PATH': '/data/instance',
- 'STATS_ENDPOINT': '18.{}.stats.mailu.io',
+ 'STATS_ENDPOINT': '19.{}.stats.mailu.io',
# Common configuration variables
'SECRET_KEY': 'changeMe',
'DOMAIN': 'mailu.io',
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -36,12 +36,10 @@
'github_user': 'mailu',
'github_repo': 'mailu',
'github_version': version,
- 'stable_version': '1.8',
+ 'stable_version': '1.9',
'versions': [
- ('1.5', '/1.5/'),
- ('1.6', '/1.6/'),
- ('1.7', '/1.7/'),
('1.8', '/1.8/'),
+ ('1.9', '/1.9/'),
('master', '/master/')
],
'conf_py_path': '/docs/'
|
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -27,7 +27,7 @@\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n- 'STATS_ENDPOINT': '18.{}.stats.mailu.io',\n+ 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\ndiff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -36,12 +36,10 @@\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n- 'stable_version': '1.8',\n+ 'stable_version': '1.9',\n 'versions': [\n- ('1.5', '/1.5/'),\n- ('1.6', '/1.6/'),\n- ('1.7', '/1.7/'),\n ('1.8', '/1.8/'),\n+ ('1.9', '/1.9/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n", "issue": "Discussion of 1.9 roadmap\nAs proposed in the last project meeting (#1582 (comment)), we are experimenting with a discussion driven model for determining the roadmap of the next release. This follows the existing practice of the succesful community run project Gitea (e.g go-gitea/gitea#14477).\r\n\r\nDuring last meeting someone made the remark that with everything in the pipeline we already have sufficient work for a 1.9 release. I agree. We have this special situation due to the 1.8rc where we had a release candidate for over 7 months. This caused many PRs to be merged, but not included in 1.8. These are now waiting to be made available via a 1.9 release. This also caused a lot of already submitted PRs not be reviewed yet. We wanted to focus on getting 1.8 out of the door.\r\n\r\nThere are new PRs waiting for review for\r\n- Updating the interface to AdminLTE3 #1800. This is not only a security update, but also adds a lot of functionality such as\r\n - a language selector\r\n - more modern look&feel of interface elements\r\n - filtering and column ordering for tables.\r\n- Multiple PRs for increasing the overall security of mailu #1922, #1916, #1902. \r\n - For these PRs we (I?) will also introduce a brand new security documentation page where you can find all documentation on what security measures are in place, how it works and how it can be tweaked. \r\n- updated polish translations #1751 and completely new Hebrew translation #1873.\r\n\r\nWe have already merged PRs on master for\r\n- Completely new CLI configuration import/export which really exports/imports the complete mailu configuration database. With this it is possible to migrate the mailu config to another mailu instance. You can also use it to easily add new configuration (e.g. import users with specific settings) https://mailu.io/master/cli.html#config-export\r\n- single sign on for webmail. Webmail uses the admin web interface for authenticating users\r\n- various security related enhancements\r\n- All other already merged PRs which were not included in 1.8. See all newsfragments for the details https://github.com/Mailu/Mailu/tree/master/towncrier/newsfragments\r\n\r\nIMHO All those features together are sufficient to release a new version.\r\n\r\n\r\nI suggest we review all open PRs and then check what issues really need to be included as well for 1.9 and include these on the roadmap. Examples are:\r\n- I think a good example is the new SSO #1929. When you are redirected to the login page, it does not show that the login is for the webmail. You have the feeling that you are redirected to the wrong page. 
See the demo site for an example (https://test.mailu.io/webmail/). Of course this is not production worthy. \r\n- Incorrect documentation\r\n - E.g.the documentation on translations #1869. We have no weblate instance anymore for web-based translations. The documentation must be updated as it is incorrect now. You can only do translations manually with a tool such as poedit and send a PR for getting your new translation files merged .\r\n - documentation on development environment for the admin interface #1577. The faulty documentation should be removed or directly updated with the correct steps.\r\n\r\nFor small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included. \r\n\r\nWhat are your thoughts? Please share your feedback.\r\nRegardless the above wall of text, feel free to mention any feature/issue you would like included in 1.9.\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n\nimport os\n\nextensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = 'Mailu'\ncopyright = '2018, Mailu authors'\nauthor = 'Mailu authors'\nversion = release = os.environ.get('VERSION', 'master')\nlanguage = None\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml']\npygments_style = 'sphinx'\ntodo_include_todos = False\nhtml_theme = 'sphinx_rtd_theme'\nhtml_title = 'Mailu, Docker based mail server'\nhtml_static_path = []\nhtmlhelp_basename = 'Mailudoc'\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\nhtml_sidebars = {\n '**': [\n 'relations.html', \n 'searchbox.html',\n ]\n}\n\n# Theme options\nhtml_context = {\n 'display_github': True,\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n 'stable_version': '1.8',\n 'versions': [\n ('1.5', '/1.5/'),\n ('1.6', '/1.6/'),\n ('1.7', '/1.7/'),\n ('1.8', '/1.8/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n}\n", "path": "docs/conf.py"}, {"content": "import os\n\nfrom datetime import timedelta\nfrom socrate import system\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'QUOTA_STORAGE_URL': '',\n 'DEBUG': False,\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE':'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '18.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '60/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 56,\n 'AUTH_RATELIMIT_USER': '100/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 
'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN' : 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': True,\n 'CREDENTIAL_ROUNDS': 12,\n 'TZ': 'Etc/UTC',\n # Host settings\n 'HOST_IMAP': 'imap',\n 'HOST_LMTP': 'imap:2525',\n 'HOST_POP3': 'imap',\n 'HOST_SMTP': 'smtp',\n 'HOST_AUTHSMTP': 'smtp',\n 'HOST_ADMIN': 'admin',\n 'HOST_WEBMAIL': 'webmail',\n 'HOST_WEBDAV': 'webdav:5232',\n 'HOST_REDIS': 'redis',\n 'HOST_FRONT': 'front',\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n 'POD_ADDRESS_RANGE': None\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}'\n }\n\n def __init__(self):\n self.config = dict()\n\n def get_host_address(self, name):\n # if MYSERVICE_ADDRESS is defined, use this\n if f'{name}_ADDRESS' in os.environ:\n return os.environ.get(f'{name}_ADDRESS')\n # otherwise use the host name and resolve it\n return system.resolve_address(self.config[f'HOST_{name}'])\n\n def resolve_hosts(self):\n for key in ['IMAP', 'POP3', 'AUTHSMTP', 'SMTP', 'REDIS']:\n self.config[f'{key}_ADDRESS'] = self.get_host_address(key)\n if self.config['WEBMAIL'] != 'none':\n self.config['WEBMAIL_ADDRESS'] = self.get_host_address('WEBMAIL')\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n self.resolve_hosts()\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n self.config['QUOTA_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/1'\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] 
= int(self.config['SESSION_TIMEOUT'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n\n # update the app config\n app.config.update(self.config)\n\n", "path": "core/admin/mailu/configuration.py"}]}
| 3,916 | 304 |
gh_patches_debug_24362
|
rasdani/github-patches
|
git_diff
|
liqd__a4-opin-496
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Markdown messes with Gender Mainstreaming
When writing “Initiator*innen […] Entscheidungsträger*innen” in a comment, the text between the `*` is set in italics, because of the markdown formatting, I assume. Is there anything we can do about that? If I remember it correctly, some version of markdown only allows underscores for emphasis and double “*” for setting something in bold. Should we maybe use that version?

Example here: https://opin-stage.liqd.net/de/projects/opin-alleinstellungsmerkmale-fur-produktseite/
Markdown in comments in consistent with rest
We decided against Markdown in most of the other input fields and used CKEditor instead, but comments still use markdown. But this is not document anywhere. So these are our options:
1. support markdown in comments, but also advertise it to the user
2. support only new lines and nor further formatting in comments (like _italic_, **bold**, ~~strike~~)
3. add ckeditor to comment edit field and allow some basic html in comments
</issue>
<code>
[start of euth/comments/templatetags/react_comments.py]
1 import json
2
3 from django import template, utils
4 from django.contrib.contenttypes.models import ContentType
5 from django.utils.safestring import mark_safe
6
7
8 from ..models import Comment
9 from ..serializers import ThreadSerializer
10
11 register = template.Library()
12
13
14 @register.simple_tag(takes_context=True)
15 def react_comments(context, obj):
16 request = context['request']
17
18 serializer = ThreadSerializer(
19 obj.comments.all(), many=True, context={'request': request})
20 comments = serializer.data
21
22 user = request.user
23 is_authenticated = user.is_authenticated()
24 is_moderator = user.is_superuser or user in obj.project.moderators.all()
25 user_name = user.username
26
27 contenttype = ContentType.objects.get_for_model(obj)
28 permission = '{ct.app_label}.comment_{ct.model}'.format(ct=contenttype)
29 has_comment_permission = user.has_perm(permission, obj)
30
31 comments_contenttype = ContentType.objects.get_for_model(Comment)
32 pk = obj.pk
33
34 language = utils.translation.get_language()
35
36 mountpoint = 'comments_for_{contenttype}_{pk}'.format(
37 contenttype=contenttype.pk,
38 pk=pk
39 )
40 attributes = {
41 'comments': comments,
42 'comments_contenttype': comments_contenttype.pk,
43 'subjectType': contenttype.pk,
44 'subjectId': pk,
45 'isAuthenticated': is_authenticated,
46 'isModerator': is_moderator,
47 'user_name': user_name,
48 'language': language,
49 'isReadOnly': not has_comment_permission,
50 }
51
52 return mark_safe((
53 '<div id={mountpoint}></div><script>window.opin.renderComment('
54 '{mountpoint}, {attributes})</script>').format(
55 attributes=json.dumps(attributes),
56 mountpoint=json.dumps(mountpoint)
57 )
58 )
59
[end of euth/comments/templatetags/react_comments.py]
[start of euth/comments/models.py]
1 from django.conf import settings
2 from django.contrib.contenttypes.fields import (GenericForeignKey,
3 GenericRelation)
4 from django.contrib.contenttypes.models import ContentType
5 from django.db import models
6 from django.utils.translation import ugettext_lazy as _
7
8 from euth.contrib.base_models import UserGeneratedContentModel
9 from euth.contrib.generics import models_to_limit
10 from euth.ratings import models as rating_models
11
12
13 class Comment(UserGeneratedContentModel):
14
15 content_type = models.ForeignKey(
16 ContentType,
17 on_delete=models.CASCADE,
18 limit_choices_to=models_to_limit(settings.COMMENTABLES)
19 )
20 object_pk = models.PositiveIntegerField()
21 content_object = GenericForeignKey(
22 ct_field="content_type", fk_field="object_pk")
23 comment = models.TextField(max_length=1024)
24 is_removed = models.BooleanField(default=False)
25 is_censored = models.BooleanField(default=False)
26 ratings = GenericRelation(rating_models.Rating,
27 related_query_name='comment',
28 object_id_field='object_pk')
29 child_comments = GenericRelation('self',
30 related_query_name='parent_comment',
31 object_id_field='object_pk')
32
33 class Meta:
34 verbose_name = _("Comment")
35 verbose_name_plural = _("Comments")
36 ordering = ('created',)
37
38 def __str__(self):
39 if len(self.comment) > 50:
40 return "comment: {} ...".format(self.comment[:50])
41 else:
42 return "comment: {}".format(self.comment)
43
44 def save(self, *args, **kwargs):
45 """
46 Change the text of the comment if
47 the comment was marked removed or censored
48 """
49
50 if self.is_removed:
51 self.comment = 'deleted by creator'
52 if self.is_censored:
53 self.comment = 'deleted by moderator'
54 return super(Comment, self).save(*args, **kwargs)
55
56 def get_absolute_url(self):
57 if hasattr(self.content_object, 'get_absolute_url'):
58 return self.content_object.get_absolute_url()
59 elif hasattr(self.project, 'get_absolute_url'):
60 return self.project.get_absolute_url()
61 else:
62 return None
63
64 @property
65 def notification_content(self):
66 return self.comment
67
68 @property
69 def project(self):
70 co = self.content_object
71 if isinstance(co, self.__class__):
72 co = co.content_object
73 return co.project
74
[end of euth/comments/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/euth/comments/models.py b/euth/comments/models.py
--- a/euth/comments/models.py
+++ b/euth/comments/models.py
@@ -5,6 +5,7 @@
from django.db import models
from django.utils.translation import ugettext_lazy as _
+from contrib.transforms import html_transforms
from euth.contrib.base_models import UserGeneratedContentModel
from euth.contrib.generics import models_to_limit
from euth.ratings import models as rating_models
@@ -47,6 +48,9 @@
the comment was marked removed or censored
"""
+ self.comment = html_transforms.clean_html_all(
+ self.comment)
+
if self.is_removed:
self.comment = 'deleted by creator'
if self.is_censored:
diff --git a/euth/comments/templatetags/react_comments.py b/euth/comments/templatetags/react_comments.py
--- a/euth/comments/templatetags/react_comments.py
+++ b/euth/comments/templatetags/react_comments.py
@@ -16,7 +16,8 @@
request = context['request']
serializer = ThreadSerializer(
- obj.comments.all(), many=True, context={'request': request})
+ obj.comments.all().order_by('-created'),
+ many=True, context={'request': request})
comments = serializer.data
user = request.user
|
{"golden_diff": "diff --git a/euth/comments/models.py b/euth/comments/models.py\n--- a/euth/comments/models.py\n+++ b/euth/comments/models.py\n@@ -5,6 +5,7 @@\n from django.db import models\n from django.utils.translation import ugettext_lazy as _\n \n+from contrib.transforms import html_transforms\n from euth.contrib.base_models import UserGeneratedContentModel\n from euth.contrib.generics import models_to_limit\n from euth.ratings import models as rating_models\n@@ -47,6 +48,9 @@\n the comment was marked removed or censored\n \"\"\"\n \n+ self.comment = html_transforms.clean_html_all(\n+ self.comment)\n+\n if self.is_removed:\n self.comment = 'deleted by creator'\n if self.is_censored:\ndiff --git a/euth/comments/templatetags/react_comments.py b/euth/comments/templatetags/react_comments.py\n--- a/euth/comments/templatetags/react_comments.py\n+++ b/euth/comments/templatetags/react_comments.py\n@@ -16,7 +16,8 @@\n request = context['request']\n \n serializer = ThreadSerializer(\n- obj.comments.all(), many=True, context={'request': request})\n+ obj.comments.all().order_by('-created'),\n+ many=True, context={'request': request})\n comments = serializer.data\n \n user = request.user\n", "issue": "Markdown messes with Gender Mainstreaming\nWhen writing \u201cInitiator*innen [\u2026] Entscheidungstr\u00e4ger*innen\u201d in a comment, the text between the `*` is set in italics, because of the markdown formatting, I assume. Is there anything we can do about that? If I remember it correctly, some version of markdown only allows underscores for emphasis and double \u201c*\u201d for setting something in bold. Should we maybe use that version?\r\n\r\n\r\n\r\nExample here: https://opin-stage.liqd.net/de/projects/opin-alleinstellungsmerkmale-fur-produktseite/\nMarkdown in comments in consistent with rest\nWe decided against Markdown in most of the other input fields and used CKEditor instead, but comments still use markdown. But this is not document anywhere. So these are our options:\n1. support markdown in comments, but also advertise it to the user\n2. support only new lines and nor further formatting in comments (like _italic_, **bold**, ~~strike~~)\n3. 
add ckeditor to comment edit field and allow some basic html in comments\n\n", "before_files": [{"content": "import json\n\nfrom django import template, utils\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\n\n\nfrom ..models import Comment\nfrom ..serializers import ThreadSerializer\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef react_comments(context, obj):\n request = context['request']\n\n serializer = ThreadSerializer(\n obj.comments.all(), many=True, context={'request': request})\n comments = serializer.data\n\n user = request.user\n is_authenticated = user.is_authenticated()\n is_moderator = user.is_superuser or user in obj.project.moderators.all()\n user_name = user.username\n\n contenttype = ContentType.objects.get_for_model(obj)\n permission = '{ct.app_label}.comment_{ct.model}'.format(ct=contenttype)\n has_comment_permission = user.has_perm(permission, obj)\n\n comments_contenttype = ContentType.objects.get_for_model(Comment)\n pk = obj.pk\n\n language = utils.translation.get_language()\n\n mountpoint = 'comments_for_{contenttype}_{pk}'.format(\n contenttype=contenttype.pk,\n pk=pk\n )\n attributes = {\n 'comments': comments,\n 'comments_contenttype': comments_contenttype.pk,\n 'subjectType': contenttype.pk,\n 'subjectId': pk,\n 'isAuthenticated': is_authenticated,\n 'isModerator': is_moderator,\n 'user_name': user_name,\n 'language': language,\n 'isReadOnly': not has_comment_permission,\n }\n\n return mark_safe((\n '<div id={mountpoint}></div><script>window.opin.renderComment('\n '{mountpoint}, {attributes})</script>').format(\n attributes=json.dumps(attributes),\n mountpoint=json.dumps(mountpoint)\n )\n )\n", "path": "euth/comments/templatetags/react_comments.py"}, {"content": "from django.conf import settings\nfrom django.contrib.contenttypes.fields import (GenericForeignKey,\n GenericRelation)\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.contrib.base_models import UserGeneratedContentModel\nfrom euth.contrib.generics import models_to_limit\nfrom euth.ratings import models as rating_models\n\n\nclass Comment(UserGeneratedContentModel):\n\n content_type = models.ForeignKey(\n ContentType,\n on_delete=models.CASCADE,\n limit_choices_to=models_to_limit(settings.COMMENTABLES)\n )\n object_pk = models.PositiveIntegerField()\n content_object = GenericForeignKey(\n ct_field=\"content_type\", fk_field=\"object_pk\")\n comment = models.TextField(max_length=1024)\n is_removed = models.BooleanField(default=False)\n is_censored = models.BooleanField(default=False)\n ratings = GenericRelation(rating_models.Rating,\n related_query_name='comment',\n object_id_field='object_pk')\n child_comments = GenericRelation('self',\n related_query_name='parent_comment',\n object_id_field='object_pk')\n\n class Meta:\n verbose_name = _(\"Comment\")\n verbose_name_plural = _(\"Comments\")\n ordering = ('created',)\n\n def __str__(self):\n if len(self.comment) > 50:\n return \"comment: {} ...\".format(self.comment[:50])\n else:\n return \"comment: {}\".format(self.comment)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Change the text of the comment if\n the comment was marked removed or censored\n \"\"\"\n\n if self.is_removed:\n self.comment = 'deleted by creator'\n if self.is_censored:\n self.comment = 'deleted by moderator'\n return super(Comment, self).save(*args, **kwargs)\n\n def 
get_absolute_url(self):\n if hasattr(self.content_object, 'get_absolute_url'):\n return self.content_object.get_absolute_url()\n elif hasattr(self.project, 'get_absolute_url'):\n return self.project.get_absolute_url()\n else:\n return None\n\n @property\n def notification_content(self):\n return self.comment\n\n @property\n def project(self):\n co = self.content_object\n if isinstance(co, self.__class__):\n co = co.content_object\n return co.project\n", "path": "euth/comments/models.py"}]}
| 1,999 | 295 |
gh_patches_debug_1853
|
rasdani/github-patches
|
git_diff
|
microsoft__playwright-python-145
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DEBUG outputs won't get forwarded
</issue>
<code>
[start of playwright/main.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import subprocess
17 import sys
18 from typing import Any
19
20 from greenlet import greenlet
21
22 from playwright.async_api import Playwright as AsyncPlaywright
23 from playwright.connection import Connection
24 from playwright.helper import Error
25 from playwright.object_factory import create_remote_object
26 from playwright.path_utils import get_file_dirname
27 from playwright.playwright import Playwright
28 from playwright.sync_api import Playwright as SyncPlaywright
29 from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber
30
31
32 def compute_driver_name() -> str:
33 platform = sys.platform
34 if platform == "darwin":
35 result = "driver-macos"
36 elif platform == "linux":
37 result = "driver-linux"
38 elif platform == "win32":
39 result = "driver-win.exe"
40 return result
41
42
43 async def run_driver_async() -> Connection:
44 package_path = get_file_dirname()
45 driver_name = compute_driver_name()
46 driver_executable = package_path / "drivers" / driver_name
47
48 proc = await asyncio.create_subprocess_exec(
49 str(driver_executable),
50 stdin=asyncio.subprocess.PIPE,
51 stdout=asyncio.subprocess.PIPE,
52 stderr=asyncio.subprocess.PIPE,
53 limit=32768,
54 )
55 assert proc.stdout
56 assert proc.stdin
57 connection = Connection(
58 proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()
59 )
60 return connection
61
62
63 def run_driver() -> Connection:
64 loop = asyncio.get_event_loop()
65 if loop.is_running():
66 raise Error("Can only run one Playwright at a time.")
67 return loop.run_until_complete(run_driver_async())
68
69
70 class SyncPlaywrightContextManager:
71 def __init__(self) -> None:
72 self._connection = run_driver()
73 self._playwright: SyncPlaywright
74
75 def __enter__(self) -> SyncPlaywright:
76 g_self = greenlet.getcurrent()
77
78 def callback_wrapper(playwright_impl: Playwright) -> None:
79 self._playwright = SyncPlaywright(playwright_impl)
80 g_self.switch()
81
82 self._connection.call_on_object_with_known_name("Playwright", callback_wrapper)
83 set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))
84 dispatcher_fiber().switch()
85 return self._playwright
86
87 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
88 self._connection.stop_sync()
89
90
91 class AsyncPlaywrightContextManager:
92 def __init__(self) -> None:
93 self._connection: Connection
94
95 async def __aenter__(self) -> AsyncPlaywright:
96 self._connection = await run_driver_async()
97 self._connection.run_async()
98 return AsyncPlaywright(
99 await self._connection.wait_for_object_with_known_name("Playwright")
100 )
101
102 async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
103 self._connection.stop_async()
104
105
106 if sys.platform == "win32":
107 # Use ProactorEventLoop in 3.7, which is default in 3.8
108 loop = asyncio.ProactorEventLoop()
109 asyncio.set_event_loop(loop)
110
111
112 def main() -> None:
113 if "install" not in sys.argv:
114 print('Run "python -m playwright install" to complete installation')
115 return
116 package_path = get_file_dirname()
117 driver_name = compute_driver_name()
118 driver_executable = package_path / "drivers" / driver_name
119 print("Installing the browsers...")
120 subprocess.check_call(f"{driver_executable} install", shell=True)
121
122 print("Playwright is now ready for use")
123
[end of playwright/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/playwright/main.py b/playwright/main.py
--- a/playwright/main.py
+++ b/playwright/main.py
@@ -49,7 +49,7 @@
str(driver_executable),
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
+ stderr=sys.stderr,
limit=32768,
)
assert proc.stdout
|
{"golden_diff": "diff --git a/playwright/main.py b/playwright/main.py\n--- a/playwright/main.py\n+++ b/playwright/main.py\n@@ -49,7 +49,7 @@\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n- stderr=asyncio.subprocess.PIPE,\n+ stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n", "issue": "DEBUG outputs won't get forwarded\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await 
self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}]}
| 1,715 | 96 |
gh_patches_debug_1864
|
rasdani/github-patches
|
git_diff
|
python__python-docs-es-1201
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
readthedocs: 'extensions' is not defined
For some reason, we have run into https://github.com/UPC/ravada/issues/890 in the readthedocs CI, and the builds currently fail with the following error:
```
% python -m sphinx -T -j auto -E -b html -d _build/doctrees -D language=es . _build/html
Running Sphinx v2.2.0
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
execfile_(filename, namespace)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
exec(code, _globals)
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
from conf import *
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
if extensions:
NameError: name 'extensions' is not defined
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/cmd/build.py", line 272, in build_main
app = Sphinx(args.sourcedir, args.confdir, args.outputdir,
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/application.py", line 210, in __init__
self.config = Config.read(self.confdir, confoverrides or {}, self.tags)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 196, in read
namespace = eval_config_file(filename, tags)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 371, in eval_config_file
raise ConfigError(msg % traceback.format_exc())
sphinx.errors.ConfigError: There is a programmable error in your configuration file:
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
execfile_(filename, namespace)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
exec(code, _globals)
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
from conf import *
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
if extensions:
NameError: name 'extensions' is not defined
Configuration error:
There is a programmable error in your configuration file:
Traceback (most recent call last):
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py", line 361, in eval_config_file
execfile_(filename, namespace)
File "/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py", line 81, in execfile_
exec(code, _globals)
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 22, in <module>
from conf import *
File "/home/cmaureir/repos/python-docs-es-admin/conf.py", line 72, in <module>
if extensions:
NameError: name 'extensions' is not defined
```
Locally, `extensions` is defined, but for some reason it is not defined in the readthedocs CI.
</issue>
<code>
[start of conf.py]
1 # Sphinx configuration file.
2 #
3 # - import original configurations from cpython/Doc/conf.py
4 # - append the path considering the cpython submodule is at ./cpython
5 # - create the symbolic links under ./cpython/locale/es/LC_MESSAGES
6 # - make the build to work under Read the Docs
7 #
8 # The git submodule was created using this Stack Overflow answer
9 # to fetch only the commit that I needed and avoid clonning the whole history
10 # https://stackoverflow.com/a/27445058
11 #
12 # This can be built locally using `sphinx-build` by running
13 #
14 # $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html
15
16 import sys, os, time
17 sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))
18 sys.path.append(os.path.abspath('cpython/Doc/includes'))
19
20 # Import all the Sphinx settings from cpython
21 sys.path.append(os.path.abspath('cpython/Doc'))
22 from conf import *
23
24 # Call patchlevel with the proper path to get the version from
25 # instead of hardcoding it
26 import patchlevel
27 version, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))
28
29 project = 'Python en Español'
30 copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
31
32 html_theme_path = ['cpython/Doc/tools']
33 templates_path = ['cpython/Doc/tools/templates']
34 html_static_path = ['cpython/Doc/tools/static']
35
36 os.system('mkdir -p cpython/locales/es/')
37 os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
38
39 html_short_title = f'Documentación {release}'
40 html_title = f'Documentación de Python en Español -- {release}'
41
42 exclude_patterns = [
43 # This file is not included and it not marked as :orphan:
44 'distutils/_setuptools_disclaimer.rst',
45 'README.rst',
46 ]
47
48 if not os.environ.get('SPHINX_GETTEXT') == 'True':
49 # Override all the files from ``.overrides`` directory
50 from pathlib import Path
51 overrides_paths = Path('.overrides')
52
53 for path in overrides_paths.glob('**/*.*'):
54 if path.name == 'README.rst' and path.parent == '.overrides':
55 continue
56 destroot = str(path.parent).replace('.overrides', '').lstrip('/')
57 outputdir = Path('cpython/Doc') / destroot / path.name
58 os.system(f'ln -nfs `pwd`/{path.parent}/{path.name} {outputdir}')
59
60 gettext_compact = False
61 locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir
62
63
64 # NOTE: Read the Docs does not support "multi document output".
65 # So, we put all the documentation as a single file for now.
66 _stdauthor = r'Guido van Rossum\\and the Python development team'
67 latex_documents = [
68 ('contents', 'python-docs-es.tex', u'Documentación de Python en Español',
69 _stdauthor, 'manual'),
70 ]
71
72 extensions.extend([
73 'sphinx_tabs.tabs',
74 'sphinxemoji.sphinxemoji',
75 ])
76
77
78 def setup(app):
79
80 def add_contributing_banner(app, doctree):
81 """
82 Insert a banner at the top of the index.
83
84 This way, we can easily communicate people to help with the translation,
85 pointing them to different resources.
86 """
87
88 if app.builder.format != 'html':
89 # Do not include the banner when building with other formats
90 # (this is useful when using -b gettext)
91 return
92
93 from docutils import nodes, core
94
95 message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! ' \
96 f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \
97 'Ayuda a acercar Python a más personas de habla hispana.'
98
99 paragraph = core.publish_doctree(message)[0]
100 banner = nodes.warning(ids=['contributing-banner'])
101 banner.append(paragraph)
102
103 for document in doctree.traverse(nodes.document):
104 document.insert(0, banner)
105
106 # Change the sourcedir programmatically because Read the Docs always call it with `.`
107 app.srcdir = 'cpython/Doc'
108
109 app.connect('doctree-read', add_contributing_banner)
110
111 # Import the sphinx-autorun manually to avoid this warning
112 # TODO: Remove this code and use just ``extensions.append('sphinx_autorun')`` when
113 # that issue gets fixed
114 # See https://github.com/WhyNotHugo/sphinx-autorun/issues/17
115
116 # WARNING: the sphinx_autorun extension does not declare if it is safe for
117 # parallel reading, assuming it isn't - please ask the extension author to
118 # check and make it explicit
119 # WARNING: doing serial read
120 from sphinx_autorun import RunBlock, AutoRun
121 app.add_directive('runblock', RunBlock)
122 app.connect('builder-inited', AutoRun.builder_init)
123 app.add_config_value('autorun_languages', AutoRun.config, 'env')
124 return {
125 'version': '0.1',
126 'parallel_read_safe': True,
127 'parallel_write_safe': True,
128 }
129
[end of conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -69,10 +69,16 @@
_stdauthor, 'manual'),
]
-extensions.extend([
- 'sphinx_tabs.tabs',
- 'sphinxemoji.sphinxemoji',
-])
+try:
+ extensions.extend([
+ 'sphinx_tabs.tabs',
+ 'sphinxemoji.sphinxemoji',
+ ])
+except NameError:
+ extensions = [
+ 'sphinx_tabs.tabs',
+ 'sphinxemoji.sphinxemoji',
+ ]
def setup(app):
|
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -69,10 +69,16 @@\n _stdauthor, 'manual'),\n ]\n \n-extensions.extend([\n- 'sphinx_tabs.tabs',\n- 'sphinxemoji.sphinxemoji',\n-])\n+try:\n+ extensions.extend([\n+ 'sphinx_tabs.tabs',\n+ 'sphinxemoji.sphinxemoji',\n+ ])\n+except NameError:\n+ extensions = [\n+ 'sphinx_tabs.tabs',\n+ 'sphinxemoji.sphinxemoji',\n+ ]\n \n \n def setup(app):\n", "issue": "readthedocs: 'extensions' is not defined\nPor alguna raz\u00f3n, hemos encontrado https://github.com/UPC/ravada/issues/890 en la CI de readthedocs, y actualmente los builds tienen el siguiente error:\r\n\r\n```\r\n% python -m sphinx -T -j auto -E -b html -d _build/doctrees -D language=es . _build/html\r\nRunning Sphinx v2.2.0\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py\", line 361, in eval_config_file\r\n execfile_(filename, namespace)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py\", line 81, in execfile_\r\n exec(code, _globals)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/conf.py\", line 22, in <module>\r\n from conf import *\r\n File \"/home/cmaureir/repos/python-docs-es-admin/conf.py\", line 72, in <module>\r\n if extensions:\r\nNameError: name 'extensions' is not defined\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/cmd/build.py\", line 272, in build_main\r\n app = Sphinx(args.sourcedir, args.confdir, args.outputdir,\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/application.py\", line 210, in __init__\r\n self.config = Config.read(self.confdir, confoverrides or {}, self.tags)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py\", line 196, in read\r\n namespace = eval_config_file(filename, tags)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py\", line 371, in eval_config_file\r\n raise ConfigError(msg % traceback.format_exc())\r\nsphinx.errors.ConfigError: There is a programmable error in your configuration file:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py\", line 361, in eval_config_file\r\n execfile_(filename, namespace)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py\", line 81, in execfile_\r\n exec(code, _globals)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/conf.py\", line 22, in <module>\r\n from conf import *\r\n File \"/home/cmaureir/repos/python-docs-es-admin/conf.py\", line 72, in <module>\r\n if extensions:\r\nNameError: name 'extensions' is not defined\r\n\r\n\r\nConfiguration error:\r\nThere is a programmable error in your configuration file:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/config.py\", line 361, in eval_config_file\r\n execfile_(filename, namespace)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/venv/lib/python3.9/site-packages/sphinx/util/pycompat.py\", line 81, in execfile_\r\n exec(code, _globals)\r\n File \"/home/cmaureir/repos/python-docs-es-admin/conf.py\", line 22, in <module>\r\n 
from conf import *\r\n File \"/home/cmaureir/repos/python-docs-es-admin/conf.py\", line 72, in <module>\r\n if extensions:\r\nNameError: name 'extensions' is not defined\r\n```\r\n\r\nLocalmente `extensions` est\u00e1 definido, pero por alguna raz\u00f3n no en el CI de readthedocs.\n", "before_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\n# Call patchlevel with the proper path to get the version from\n# instead of hardcoding it\nimport patchlevel\nversion, release = patchlevel.get_header_version_info(os.path.abspath('cpython/Doc'))\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\nhtml_short_title = f'Documentaci\u00f3n {release}'\nhtml_title = f'Documentaci\u00f3n de Python en Espa\u00f1ol -- {release}'\n\nexclude_patterns = [\n # This file is not included and it not marked as :orphan:\n 'distutils/_setuptools_disclaimer.rst',\n 'README.rst',\n]\n\nif not os.environ.get('SPHINX_GETTEXT') == 'True':\n # Override all the files from ``.overrides`` directory\n from pathlib import Path\n overrides_paths = Path('.overrides')\n\n for path in overrides_paths.glob('**/*.*'):\n if path.name == 'README.rst' and path.parent == '.overrides':\n continue\n destroot = str(path.parent).replace('.overrides', '').lstrip('/')\n outputdir = Path('cpython/Doc') / destroot / path.name\n os.system(f'ln -nfs `pwd`/{path.parent}/{path.name} {outputdir}')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\n\n# NOTE: Read the Docs does not support \"multi document output\".\n# So, we put all the documentation as a single file for now.\n_stdauthor = r'Guido van Rossum\\\\and the Python development team'\nlatex_documents = [\n ('contents', 'python-docs-es.tex', u'Documentaci\u00f3n de Python en Espa\u00f1ol',\n _stdauthor, 'manual'),\n]\n\nextensions.extend([\n 'sphinx_tabs.tabs',\n 'sphinxemoji.sphinxemoji',\n])\n\n\ndef setup(app):\n\n def add_contributing_banner(app, doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n\n if app.builder.format != 'html':\n # Do not include the banner when building with other formats\n # (this is useful when using -b gettext)\n return\n\n from docutils import nodes, core\n\n message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de 
Python al Espa\u00f1ol! ' \\\n f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_. ' \\\n 'Ayuda a acercar Python a m\u00e1s personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n\n # Import the sphinx-autorun manually to avoid this warning\n # TODO: Remove this code and use just ``extensions.append('sphinx_autorun')`` when\n # that issue gets fixed\n # See https://github.com/WhyNotHugo/sphinx-autorun/issues/17\n\n # WARNING: the sphinx_autorun extension does not declare if it is safe for\n # parallel reading, assuming it isn't - please ask the extension author to\n # check and make it explicit\n # WARNING: doing serial read\n from sphinx_autorun import RunBlock, AutoRun\n app.add_directive('runblock', RunBlock)\n app.connect('builder-inited', AutoRun.builder_init)\n app.add_config_value('autorun_languages', AutoRun.config, 'env')\n return {\n 'version': '0.1',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "conf.py"}]}
| 2,888 | 133 |
gh_patches_debug_28978
|
rasdani/github-patches
|
git_diff
|
mne-tools__mne-bids-243
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plot_anat should use BIDS subjects_dir
I'm a bit confused by the example of convert_mri_and_trans.py. I think here:
https://github.com/mne-tools/mne-bids/blob/master/examples/convert_mri_and_trans.py#L139-L150
It should actually read in the nifti file that is written instead of the old mgz file. What do you think @sappelhoff ?
</issue>
<code>
[start of examples/convert_mri_and_trans.py]
1 """
2 ==========================================================================
3 Save and load T1-weighted MRI scan along with anatomical landmarks in BIDS
4 ==========================================================================
5
6 When working with MEEG data in the domain of source localization, we usually
7 have to deal with aligning several coordinate systems, such as the coordinate
8 systems of ...
9
10 - the head of a study participant
11 - the recording device (in the case of MEG)
12 - the anatomical MRI scan of a study participant
13
14 The process of aligning these frames is also called coregistration, and is
15 performed with the help of a transformation matrix, called ``trans`` in MNE.
16
17 In this tutorial, we show how ``MNE-BIDS`` can be used to save a T1 weighted
18 MRI scan in BIDS format, and to encode all information of the ``trans`` object
19 in a BIDS compatible way.
20
21 Finally, we will automatically reproduce our ``trans`` object from a BIDS
22 directory.
23
24 See the documentation pages in the MNE docs for more information on
25 `source alignment and coordinate frames <mne_source_coords_>`_
26
27 .. note:: For this example you will need to install ``matplotlib`` and
28 ``nilearn`` on top of your usual ``mne-bids`` installation.
29
30 """
31 # Authors: Stefan Appelhoff <[email protected]>
32 # License: BSD (3-clause)
33
34 ###############################################################################
35 # We are importing everything we need for this example:
36
37 import os.path as op
38 import shutil as sh
39
40 import numpy as np
41 import matplotlib.pyplot as plt
42 from nilearn.plotting import plot_anat
43 import mne
44 from mne.datasets import sample
45 from mne.source_space import head_to_mri
46
47 from mne_bids import (write_raw_bids, make_bids_basename, write_anat,
48 get_head_mri_trans)
49 from mne_bids.utils import print_dir_tree
50
51 ###############################################################################
52 # We will be using the `MNE sample data <mne_sample_data_>`_ and write a basic
53 # BIDS dataset. For more information, you can checkout the respective
54 # :ref:`example <ex-convert-mne-sample>`.
55
56 data_path = sample.data_path()
57 event_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,
58 'Visual/Right': 4, 'Smiley': 5, 'Button': 32}
59 raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
60 events_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')
61 output_path = op.abspath(op.join(data_path, '..', 'MNE-sample-data-bids'))
62 if op.exists(output_path):
63 sh.rmtree(output_path)
64 raw = mne.io.read_raw_fif(raw_fname)
65 sub = '01'
66 ses = '01'
67 task = 'audiovisual'
68 run = '01'
69 bids_basename = make_bids_basename(subject=sub, session=ses, task=task,
70 run=run)
71 write_raw_bids(raw, bids_basename, output_path, events_data=events_data,
72 event_id=event_id, overwrite=True)
73
74 # Print the directory tree
75 print_dir_tree(output_path)
76
77 ###############################################################################
78 # Now let's assume that we have also collected some T1 weighted MRI data for
79 # our subject. And furthermore, that we have already aligned our coordinate
80 # frames (using e.g., the `coregistration GUI`_) and obtained a transformation
81 # matrix :code:`trans`.
82
83 # Get the path to our MRI scan
84 t1_mgh_fname = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
85
86 # Load the transformation matrix and show what it looks like
87 trans_fname = op.join(data_path, 'MEG', 'sample',
88 'sample_audvis_raw-trans.fif')
89 trans = mne.read_trans(trans_fname)
90 print(trans)
91
92 ###############################################################################
93 # We can save the MRI to our existing BIDS directory and at the same time
94 # create a JSON sidecar file that contains metadata, we will later use to
95 # retrieve our transformation matrix :code:`trans`.
96
97 # We use the write_anat function
98 write_anat(bids_root=output_path, # point to the BIDS dir we wrote earlier
99 subject=sub,
100 t1w=t1_mgh_fname, # path to the MRI scan
101 session=ses,
102 raw=raw, # the raw MEG data file connected to the MRI
103 trans=trans, # our transformation matrix
104 verbose=True # this will print out the sidecar file
105 )
106
107 # Let's have another look at our BIDS directory
108 print_dir_tree(output_path)
109
110 ###############################################################################
111 # Our BIDS dataset is now ready to be shared. We can easily estimate the
112 # transformation matrix using ``MNE-BIDS`` and the BIDS dataset.
113
114 bids_fname = bids_basename + '_meg.fif'
115
116 # reproduce our trans
117 estim_trans = get_head_mri_trans(bids_fname=bids_fname, # name of the MEG file
118 bids_root=output_path # root of our BIDS dir
119 )
120
121 ###############################################################################
122 # Finally, let's use the T1 weighted MRI image and plot the anatomical
123 # landmarks Nasion, LPA, and RPA (=left and right preauricular points) onto
124 # the brain image. For that, we can extract the location of Nasion, LPA, and
125 # RPA from the MEG file, apply our transformation matrix :code:`trans`, and
126 # plot the results.
127
128 # Get Landmarks from MEG file, 0, 1, and 2 correspond to LPA, NAS, RPA
129 # and the 'r' key will provide us with the xyz coordinates
130 pos = np.asarray((raw.info['dig'][0]['r'],
131 raw.info['dig'][1]['r'],
132 raw.info['dig'][2]['r']))
133
134
135 # We use a function from MNE-Python to convert MEG coordinates to MRI space
136 # for the conversion we use our estimated transformation matrix and the
137 # MEG coordinates extracted from the raw file. `subjects` and `subjects_dir`
138 # are used internally, to point to the T1-weighted MRI file: `t1_mgh_fname`
139 mri_pos = head_to_mri(pos=pos,
140 subject='sample',
141 mri_head_t=estim_trans,
142 subjects_dir=op.join(data_path, 'subjects')
143 )
144
145 # Plot it
146 fig, axs = plt.subplots(3, 1)
147 for point_idx, label in enumerate(('LPA', 'NAS', 'RPA')):
148 plot_anat(t1_mgh_fname, axes=axs[point_idx],
149 cut_coords=mri_pos[point_idx, :],
150 title=label)
151
152
153 ###############################################################################
154 # .. LINKS
155 #
156 # .. _coregistration GUI:
157 # https://martinos.org/mne/stable/auto_tutorials/source-modeling/plot_source_alignment.html#defining-the-headmri-trans-using-the-gui # noqa: E501
158 # .. _mne_source_coords:
159 # https://www.martinos.org/mne/stable/auto_tutorials/source-modeling/plot_source_alignment.html # noqa: E501
160 # .. _mne_sample_data:
161 # https://martinos.org/mne/stable/manual/sample_dataset.html
162 #
163
[end of examples/convert_mri_and_trans.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/convert_mri_and_trans.py b/examples/convert_mri_and_trans.py
--- a/examples/convert_mri_and_trans.py
+++ b/examples/convert_mri_and_trans.py
@@ -95,14 +95,14 @@
# retrieve our transformation matrix :code:`trans`.
# We use the write_anat function
-write_anat(bids_root=output_path, # point to the BIDS dir we wrote earlier
- subject=sub,
- t1w=t1_mgh_fname, # path to the MRI scan
- session=ses,
- raw=raw, # the raw MEG data file connected to the MRI
- trans=trans, # our transformation matrix
- verbose=True # this will print out the sidecar file
- )
+anat_dir = write_anat(bids_root=output_path, # the BIDS dir we wrote earlier
+ subject=sub,
+ t1w=t1_mgh_fname, # path to the MRI scan
+ session=ses,
+ raw=raw, # the raw MEG data file connected to the MRI
+ trans=trans, # our transformation matrix
+ verbose=True # this will print out the sidecar file
+ )
# Let's have another look at our BIDS directory
print_dir_tree(output_path)
@@ -142,10 +142,13 @@
subjects_dir=op.join(data_path, 'subjects')
)
+# Our MRI written to BIDS, we got `anat_dir` from our `write_anat` function
+t1_nii_fname = op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz')
+
# Plot it
fig, axs = plt.subplots(3, 1)
for point_idx, label in enumerate(('LPA', 'NAS', 'RPA')):
- plot_anat(t1_mgh_fname, axes=axs[point_idx],
+ plot_anat(t1_nii_fname, axes=axs[point_idx],
cut_coords=mri_pos[point_idx, :],
title=label)
|
{"golden_diff": "diff --git a/examples/convert_mri_and_trans.py b/examples/convert_mri_and_trans.py\n--- a/examples/convert_mri_and_trans.py\n+++ b/examples/convert_mri_and_trans.py\n@@ -95,14 +95,14 @@\n # retrieve our transformation matrix :code:`trans`.\n \n # We use the write_anat function\n-write_anat(bids_root=output_path, # point to the BIDS dir we wrote earlier\n- subject=sub,\n- t1w=t1_mgh_fname, # path to the MRI scan\n- session=ses,\n- raw=raw, # the raw MEG data file connected to the MRI\n- trans=trans, # our transformation matrix\n- verbose=True # this will print out the sidecar file\n- )\n+anat_dir = write_anat(bids_root=output_path, # the BIDS dir we wrote earlier\n+ subject=sub,\n+ t1w=t1_mgh_fname, # path to the MRI scan\n+ session=ses,\n+ raw=raw, # the raw MEG data file connected to the MRI\n+ trans=trans, # our transformation matrix\n+ verbose=True # this will print out the sidecar file\n+ )\n \n # Let's have another look at our BIDS directory\n print_dir_tree(output_path)\n@@ -142,10 +142,13 @@\n subjects_dir=op.join(data_path, 'subjects')\n )\n \n+# Our MRI written to BIDS, we got `anat_dir` from our `write_anat` function\n+t1_nii_fname = op.join(anat_dir, 'sub-01_ses-01_T1w.nii.gz')\n+\n # Plot it\n fig, axs = plt.subplots(3, 1)\n for point_idx, label in enumerate(('LPA', 'NAS', 'RPA')):\n- plot_anat(t1_mgh_fname, axes=axs[point_idx],\n+ plot_anat(t1_nii_fname, axes=axs[point_idx],\n cut_coords=mri_pos[point_idx, :],\n title=label)\n", "issue": "plot_anat should use BIDS subjects_dir\nI'm a bit confused by the example of convert_mri_and_trans.py. I think here:\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/master/examples/convert_mri_and_trans.py#L139-L150\r\n\r\nIt should actually read in the nifti file that is written instead of the old mgz file. What do you think @sappelhoff ?\n", "before_files": [{"content": "\"\"\"\n==========================================================================\nSave and load T1-weighted MRI scan along with anatomical landmarks in BIDS\n==========================================================================\n\nWhen working with MEEG data in the domain of source localization, we usually\nhave to deal with aligning several coordinate systems, such as the coordinate\nsystems of ...\n\n- the head of a study participant\n- the recording device (in the case of MEG)\n- the anatomical MRI scan of a study participant\n\nThe process of aligning these frames is also called coregistration, and is\nperformed with the help of a transformation matrix, called ``trans`` in MNE.\n\nIn this tutorial, we show how ``MNE-BIDS`` can be used to save a T1 weighted\nMRI scan in BIDS format, and to encode all information of the ``trans`` object\nin a BIDS compatible way.\n\nFinally, we will automatically reproduce our ``trans`` object from a BIDS\ndirectory.\n\nSee the documentation pages in the MNE docs for more information on\n`source alignment and coordinate frames <mne_source_coords_>`_\n\n.. 
note:: For this example you will need to install ``matplotlib`` and\n ``nilearn`` on top of your usual ``mne-bids`` installation.\n\n\"\"\"\n# Authors: Stefan Appelhoff <[email protected]>\n# License: BSD (3-clause)\n\n###############################################################################\n# We are importing everything we need for this example:\n\nimport os.path as op\nimport shutil as sh\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom nilearn.plotting import plot_anat\nimport mne\nfrom mne.datasets import sample\nfrom mne.source_space import head_to_mri\n\nfrom mne_bids import (write_raw_bids, make_bids_basename, write_anat,\n get_head_mri_trans)\nfrom mne_bids.utils import print_dir_tree\n\n###############################################################################\n# We will be using the `MNE sample data <mne_sample_data_>`_ and write a basic\n# BIDS dataset. For more information, you can checkout the respective\n# :ref:`example <ex-convert-mne-sample>`.\n\ndata_path = sample.data_path()\nevent_id = {'Auditory/Left': 1, 'Auditory/Right': 2, 'Visual/Left': 3,\n 'Visual/Right': 4, 'Smiley': 5, 'Button': 32}\nraw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')\nevents_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw-eve.fif')\noutput_path = op.abspath(op.join(data_path, '..', 'MNE-sample-data-bids'))\nif op.exists(output_path):\n sh.rmtree(output_path)\nraw = mne.io.read_raw_fif(raw_fname)\nsub = '01'\nses = '01'\ntask = 'audiovisual'\nrun = '01'\nbids_basename = make_bids_basename(subject=sub, session=ses, task=task,\n run=run)\nwrite_raw_bids(raw, bids_basename, output_path, events_data=events_data,\n event_id=event_id, overwrite=True)\n\n# Print the directory tree\nprint_dir_tree(output_path)\n\n###############################################################################\n# Now let's assume that we have also collected some T1 weighted MRI data for\n# our subject. And furthermore, that we have already aligned our coordinate\n# frames (using e.g., the `coregistration GUI`_) and obtained a transformation\n# matrix :code:`trans`.\n\n# Get the path to our MRI scan\nt1_mgh_fname = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')\n\n# Load the transformation matrix and show what it looks like\ntrans_fname = op.join(data_path, 'MEG', 'sample',\n 'sample_audvis_raw-trans.fif')\ntrans = mne.read_trans(trans_fname)\nprint(trans)\n\n###############################################################################\n# We can save the MRI to our existing BIDS directory and at the same time\n# create a JSON sidecar file that contains metadata, we will later use to\n# retrieve our transformation matrix :code:`trans`.\n\n# We use the write_anat function\nwrite_anat(bids_root=output_path, # point to the BIDS dir we wrote earlier\n subject=sub,\n t1w=t1_mgh_fname, # path to the MRI scan\n session=ses,\n raw=raw, # the raw MEG data file connected to the MRI\n trans=trans, # our transformation matrix\n verbose=True # this will print out the sidecar file\n )\n\n# Let's have another look at our BIDS directory\nprint_dir_tree(output_path)\n\n###############################################################################\n# Our BIDS dataset is now ready to be shared. 
We can easily estimate the\n# transformation matrix using ``MNE-BIDS`` and the BIDS dataset.\n\nbids_fname = bids_basename + '_meg.fif'\n\n# reproduce our trans\nestim_trans = get_head_mri_trans(bids_fname=bids_fname, # name of the MEG file\n bids_root=output_path # root of our BIDS dir\n )\n\n###############################################################################\n# Finally, let's use the T1 weighted MRI image and plot the anatomical\n# landmarks Nasion, LPA, and RPA (=left and right preauricular points) onto\n# the brain image. For that, we can extract the location of Nasion, LPA, and\n# RPA from the MEG file, apply our transformation matrix :code:`trans`, and\n# plot the results.\n\n# Get Landmarks from MEG file, 0, 1, and 2 correspond to LPA, NAS, RPA\n# and the 'r' key will provide us with the xyz coordinates\npos = np.asarray((raw.info['dig'][0]['r'],\n raw.info['dig'][1]['r'],\n raw.info['dig'][2]['r']))\n\n\n# We use a function from MNE-Python to convert MEG coordinates to MRI space\n# for the conversion we use our estimated transformation matrix and the\n# MEG coordinates extracted from the raw file. `subjects` and `subjects_dir`\n# are used internally, to point to the T1-weighted MRI file: `t1_mgh_fname`\nmri_pos = head_to_mri(pos=pos,\n subject='sample',\n mri_head_t=estim_trans,\n subjects_dir=op.join(data_path, 'subjects')\n )\n\n# Plot it\nfig, axs = plt.subplots(3, 1)\nfor point_idx, label in enumerate(('LPA', 'NAS', 'RPA')):\n plot_anat(t1_mgh_fname, axes=axs[point_idx],\n cut_coords=mri_pos[point_idx, :],\n title=label)\n\n\n###############################################################################\n# .. LINKS\n#\n# .. _coregistration GUI:\n# https://martinos.org/mne/stable/auto_tutorials/source-modeling/plot_source_alignment.html#defining-the-headmri-trans-using-the-gui # noqa: E501\n# .. _mne_source_coords:\n# https://www.martinos.org/mne/stable/auto_tutorials/source-modeling/plot_source_alignment.html # noqa: E501\n# .. _mne_sample_data:\n# https://martinos.org/mne/stable/manual/sample_dataset.html\n#\n", "path": "examples/convert_mri_and_trans.py"}]}
| 2,604 | 470 |
gh_patches_debug_4778
|
rasdani/github-patches
|
git_diff
|
pypa__setuptools-2800
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please restore --owner= and --group= options to setuptools' sdist command
Tar archives record the Unix ownership (user and group) of each file they contain. This can cause undesirable variation between source tarballs generated from the same VCS checkout but in different build environments, so `distutils.command.sdist.sdist` has options `--owner=[username]` and `--group=[groupname]` to override the recorded ownership. However, `setuptools.command.sdist.sdist` supersedes the `user_options` array of its parent class and makes most of the options inaccessible, including `--owner` and `--group`.
Please restore these options. The simplest change to achieve this would be to copy the `--owner` and `--group` array entries from `distutils.command.sdist.sdist.user_options` to `setuptools.command.sdist.sdist.user_options`. I have written a monkeypatch in my `setup.py` that does exactly this, so I can confirm that no other changes to setuptools' code are required. However, a more future-proof change would be to have setuptools.command.sdist.sdist copy its parent class's `user_options` array and *remove* only those entries that are definitely not wanted.
</issue>
<code>
[start of setuptools/command/sdist.py]
1 from distutils import log
2 import distutils.command.sdist as orig
3 import os
4 import sys
5 import io
6 import contextlib
7
8 from .py36compat import sdist_add_defaults
9
10 import pkg_resources
11
12 _default_revctrl = list
13
14
15 def walk_revctrl(dirname=''):
16 """Find all files under revision control"""
17 for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
18 for item in ep.load()(dirname):
19 yield item
20
21
22 class sdist(sdist_add_defaults, orig.sdist):
23 """Smart sdist that finds anything supported by revision control"""
24
25 user_options = [
26 ('formats=', None,
27 "formats for source distribution (comma-separated list)"),
28 ('keep-temp', 'k',
29 "keep the distribution tree around after creating " +
30 "archive file(s)"),
31 ('dist-dir=', 'd',
32 "directory to put the source distribution archive(s) in "
33 "[default: dist]"),
34 ]
35
36 negative_opt = {}
37
38 README_EXTENSIONS = ['', '.rst', '.txt', '.md']
39 READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
40
41 def run(self):
42 self.run_command('egg_info')
43 ei_cmd = self.get_finalized_command('egg_info')
44 self.filelist = ei_cmd.filelist
45 self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
46 self.check_readme()
47
48 # Run sub commands
49 for cmd_name in self.get_sub_commands():
50 self.run_command(cmd_name)
51
52 self.make_distribution()
53
54 dist_files = getattr(self.distribution, 'dist_files', [])
55 for file in self.archive_files:
56 data = ('sdist', '', file)
57 if data not in dist_files:
58 dist_files.append(data)
59
60 def initialize_options(self):
61 orig.sdist.initialize_options(self)
62
63 self._default_to_gztar()
64
65 def _default_to_gztar(self):
66 # only needed on Python prior to 3.6.
67 if sys.version_info >= (3, 6, 0, 'beta', 1):
68 return
69 self.formats = ['gztar']
70
71 def make_distribution(self):
72 """
73 Workaround for #516
74 """
75 with self._remove_os_link():
76 orig.sdist.make_distribution(self)
77
78 @staticmethod
79 @contextlib.contextmanager
80 def _remove_os_link():
81 """
82 In a context, remove and restore os.link if it exists
83 """
84
85 class NoValue:
86 pass
87
88 orig_val = getattr(os, 'link', NoValue)
89 try:
90 del os.link
91 except Exception:
92 pass
93 try:
94 yield
95 finally:
96 if orig_val is not NoValue:
97 setattr(os, 'link', orig_val)
98
99 def _add_defaults_optional(self):
100 super()._add_defaults_optional()
101 if os.path.isfile('pyproject.toml'):
102 self.filelist.append('pyproject.toml')
103
104 def _add_defaults_python(self):
105 """getting python files"""
106 if self.distribution.has_pure_modules():
107 build_py = self.get_finalized_command('build_py')
108 self.filelist.extend(build_py.get_source_files())
109 self._add_data_files(self._safe_data_files(build_py))
110
111 def _safe_data_files(self, build_py):
112 """
113 Extracting data_files from build_py is known to cause
114 infinite recursion errors when `include_package_data`
115 is enabled, so suppress it in that case.
116 """
117 if self.distribution.include_package_data:
118 return ()
119 return build_py.data_files
120
121 def _add_data_files(self, data_files):
122 """
123 Add data files as found in build_py.data_files.
124 """
125 self.filelist.extend(
126 os.path.join(src_dir, name)
127 for _, src_dir, _, filenames in data_files
128 for name in filenames
129 )
130
131 def _add_defaults_data_files(self):
132 try:
133 super()._add_defaults_data_files()
134 except TypeError:
135 log.warn("data_files contains unexpected objects")
136
137 def check_readme(self):
138 for f in self.READMES:
139 if os.path.exists(f):
140 return
141 else:
142 self.warn(
143 "standard file not found: should have one of " +
144 ', '.join(self.READMES)
145 )
146
147 def make_release_tree(self, base_dir, files):
148 orig.sdist.make_release_tree(self, base_dir, files)
149
150 # Save any egg_info command line options used to create this sdist
151 dest = os.path.join(base_dir, 'setup.cfg')
152 if hasattr(os, 'link') and os.path.exists(dest):
153 # unlink and re-copy, since it might be hard-linked, and
154 # we don't want to change the source version
155 os.unlink(dest)
156 self.copy_file('setup.cfg', dest)
157
158 self.get_finalized_command('egg_info').save_version_info(dest)
159
160 def _manifest_is_not_generated(self):
161 # check for special comment used in 2.7.1 and higher
162 if not os.path.isfile(self.manifest):
163 return False
164
165 with io.open(self.manifest, 'rb') as fp:
166 first_line = fp.readline()
167 return (first_line !=
168 '# file GENERATED by distutils, do NOT edit\n'.encode())
169
170 def read_manifest(self):
171 """Read the manifest file (named by 'self.manifest') and use it to
172 fill in 'self.filelist', the list of files to include in the source
173 distribution.
174 """
175 log.info("reading manifest file '%s'", self.manifest)
176 manifest = open(self.manifest, 'rb')
177 for line in manifest:
178 # The manifest must contain UTF-8. See #303.
179 try:
180 line = line.decode('UTF-8')
181 except UnicodeDecodeError:
182 log.warn("%r not UTF-8 decodable -- skipping" % line)
183 continue
184 # ignore comments and blank lines
185 line = line.strip()
186 if line.startswith('#') or not line:
187 continue
188 self.filelist.append(line)
189 manifest.close()
190
[end of setuptools/command/sdist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setuptools/command/sdist.py b/setuptools/command/sdist.py
--- a/setuptools/command/sdist.py
+++ b/setuptools/command/sdist.py
@@ -31,6 +31,10 @@
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file [default: current group]"),
]
negative_opt = {}
|
{"golden_diff": "diff --git a/setuptools/command/sdist.py b/setuptools/command/sdist.py\n--- a/setuptools/command/sdist.py\n+++ b/setuptools/command/sdist.py\n@@ -31,6 +31,10 @@\n ('dist-dir=', 'd',\n \"directory to put the source distribution archive(s) in \"\n \"[default: dist]\"),\n+ ('owner=', 'u',\n+ \"Owner name used when creating a tar file [default: current user]\"),\n+ ('group=', 'g',\n+ \"Group name used when creating a tar file [default: current group]\"),\n ]\n \n negative_opt = {}\n", "issue": "Please restore --owner= and --group= options to setuptools' sdist command\nTar archives record the Unix ownership (user and group) of each file they contain. This can cause undesirable variation between source tarballs generated from the same VCS checkout but in different build environments, so `distutils.command.sdist.sdist` has options `--owner=[username]` and `--group=[groupname]` to override the recorded ownership. However, `setuptools.command.sdist.sdist` supersedes the `user_options` array of its parent class and makes most of the options inaccessible, including `--owner` and `--group`.\r\n\r\nPlease restore these options. The simplest change to achieve this would be to copy the `--owner` and `--group` array entries from `distutils.command.sdist.sdist.user_options` to `setuptools.command.sdist.sdist.user_options`. I have written a monkeypatch in my `setup.py` that does exactly this, so I can confirm that no other changes to setuptools' code are required. However, a more future-proof change would be to have setuptools.command.sdist.sdist copy its parent class's `user_options` array and *remove* only those entries that are definitely not wanted.\nPlease restore --owner= and --group= options to setuptools' sdist command\nTar archives record the Unix ownership (user and group) of each file they contain. This can cause undesirable variation between source tarballs generated from the same VCS checkout but in different build environments, so `distutils.command.sdist.sdist` has options `--owner=[username]` and `--group=[groupname]` to override the recorded ownership. However, `setuptools.command.sdist.sdist` supersedes the `user_options` array of its parent class and makes most of the options inaccessible, including `--owner` and `--group`.\r\n\r\nPlease restore these options. The simplest change to achieve this would be to copy the `--owner` and `--group` array entries from `distutils.command.sdist.sdist.user_options` to `setuptools.command.sdist.sdist.user_options`. I have written a monkeypatch in my `setup.py` that does exactly this, so I can confirm that no other changes to setuptools' code are required. 
However, a more future-proof change would be to have setuptools.command.sdist.sdist copy its parent class's `user_options` array and *remove* only those entries that are definitely not wanted.\n", "before_files": [{"content": "from distutils import log\nimport distutils.command.sdist as orig\nimport os\nimport sys\nimport io\nimport contextlib\n\nfrom .py36compat import sdist_add_defaults\n\nimport pkg_resources\n\n_default_revctrl = list\n\n\ndef walk_revctrl(dirname=''):\n \"\"\"Find all files under revision control\"\"\"\n for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):\n for item in ep.load()(dirname):\n yield item\n\n\nclass sdist(sdist_add_defaults, orig.sdist):\n \"\"\"Smart sdist that finds anything supported by revision control\"\"\"\n\n user_options = [\n ('formats=', None,\n \"formats for source distribution (comma-separated list)\"),\n ('keep-temp', 'k',\n \"keep the distribution tree around after creating \" +\n \"archive file(s)\"),\n ('dist-dir=', 'd',\n \"directory to put the source distribution archive(s) in \"\n \"[default: dist]\"),\n ]\n\n negative_opt = {}\n\n README_EXTENSIONS = ['', '.rst', '.txt', '.md']\n READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)\n\n def run(self):\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n self.filelist = ei_cmd.filelist\n self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))\n self.check_readme()\n\n # Run sub commands\n for cmd_name in self.get_sub_commands():\n self.run_command(cmd_name)\n\n self.make_distribution()\n\n dist_files = getattr(self.distribution, 'dist_files', [])\n for file in self.archive_files:\n data = ('sdist', '', file)\n if data not in dist_files:\n dist_files.append(data)\n\n def initialize_options(self):\n orig.sdist.initialize_options(self)\n\n self._default_to_gztar()\n\n def _default_to_gztar(self):\n # only needed on Python prior to 3.6.\n if sys.version_info >= (3, 6, 0, 'beta', 1):\n return\n self.formats = ['gztar']\n\n def make_distribution(self):\n \"\"\"\n Workaround for #516\n \"\"\"\n with self._remove_os_link():\n orig.sdist.make_distribution(self)\n\n @staticmethod\n @contextlib.contextmanager\n def _remove_os_link():\n \"\"\"\n In a context, remove and restore os.link if it exists\n \"\"\"\n\n class NoValue:\n pass\n\n orig_val = getattr(os, 'link', NoValue)\n try:\n del os.link\n except Exception:\n pass\n try:\n yield\n finally:\n if orig_val is not NoValue:\n setattr(os, 'link', orig_val)\n\n def _add_defaults_optional(self):\n super()._add_defaults_optional()\n if os.path.isfile('pyproject.toml'):\n self.filelist.append('pyproject.toml')\n\n def _add_defaults_python(self):\n \"\"\"getting python files\"\"\"\n if self.distribution.has_pure_modules():\n build_py = self.get_finalized_command('build_py')\n self.filelist.extend(build_py.get_source_files())\n self._add_data_files(self._safe_data_files(build_py))\n\n def _safe_data_files(self, build_py):\n \"\"\"\n Extracting data_files from build_py is known to cause\n infinite recursion errors when `include_package_data`\n is enabled, so suppress it in that case.\n \"\"\"\n if self.distribution.include_package_data:\n return ()\n return build_py.data_files\n\n def _add_data_files(self, data_files):\n \"\"\"\n Add data files as found in build_py.data_files.\n \"\"\"\n self.filelist.extend(\n os.path.join(src_dir, name)\n for _, src_dir, _, filenames in data_files\n for name in filenames\n )\n\n def _add_defaults_data_files(self):\n try:\n 
super()._add_defaults_data_files()\n except TypeError:\n log.warn(\"data_files contains unexpected objects\")\n\n def check_readme(self):\n for f in self.READMES:\n if os.path.exists(f):\n return\n else:\n self.warn(\n \"standard file not found: should have one of \" +\n ', '.join(self.READMES)\n )\n\n def make_release_tree(self, base_dir, files):\n orig.sdist.make_release_tree(self, base_dir, files)\n\n # Save any egg_info command line options used to create this sdist\n dest = os.path.join(base_dir, 'setup.cfg')\n if hasattr(os, 'link') and os.path.exists(dest):\n # unlink and re-copy, since it might be hard-linked, and\n # we don't want to change the source version\n os.unlink(dest)\n self.copy_file('setup.cfg', dest)\n\n self.get_finalized_command('egg_info').save_version_info(dest)\n\n def _manifest_is_not_generated(self):\n # check for special comment used in 2.7.1 and higher\n if not os.path.isfile(self.manifest):\n return False\n\n with io.open(self.manifest, 'rb') as fp:\n first_line = fp.readline()\n return (first_line !=\n '# file GENERATED by distutils, do NOT edit\\n'.encode())\n\n def read_manifest(self):\n \"\"\"Read the manifest file (named by 'self.manifest') and use it to\n fill in 'self.filelist', the list of files to include in the source\n distribution.\n \"\"\"\n log.info(\"reading manifest file '%s'\", self.manifest)\n manifest = open(self.manifest, 'rb')\n for line in manifest:\n # The manifest must contain UTF-8. See #303.\n try:\n line = line.decode('UTF-8')\n except UnicodeDecodeError:\n log.warn(\"%r not UTF-8 decodable -- skipping\" % line)\n continue\n # ignore comments and blank lines\n line = line.strip()\n if line.startswith('#') or not line:\n continue\n self.filelist.append(line)\n manifest.close()\n", "path": "setuptools/command/sdist.py"}]}
| 2,839 | 137 |
gh_patches_debug_22833
|
rasdani/github-patches
|
git_diff
|
biolab__orange3-text-383
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Twitter: truncated text
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.5.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.17.dev
##### Expected behavior
Twitter retrieves the entire text of tweets.
##### Actual behavior
Twitter truncates text.
##### Steps to reproduce the behavior
Retrieve any sample of tweets.
##### Additional info (worksheets, data, screenshots, ...)
<img width="637" alt="screen shot 2018-09-25 at 14 30 13" src="https://user-images.githubusercontent.com/12524972/46014492-c58e4a00-c0cf-11e8-94f7-f8a12e36cf57.png">
</issue>
<code>
[start of orangecontrib/text/twitter.py]
1 from collections import OrderedDict, Iterable
2
3 import tweepy
4
5 from Orange import data
6 from orangecontrib.text import Corpus
7 from orangecontrib.text.language_codes import code2lang
8
9 __all__ = ['Credentials', 'TwitterAPI']
10
11
12 def coordinates_geoJSON(json):
13 if json:
14 return json.get('coordinates', [None, None])
15 return [None, None]
16
17
18 class Credentials:
19 """ Twitter API credentials. """
20
21 def __init__(self, consumer_key, consumer_secret):
22 self.consumer_key = consumer_key
23 self.consumer_secret = consumer_secret
24 self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
25 self._valid = None
26
27 @property
28 def valid(self):
29 if self._valid is None:
30 self.check()
31 return self._valid
32
33 def check(self):
34 try:
35 self.auth.get_authorization_url()
36 self._valid = True
37 except tweepy.TweepError:
38 self._valid = False
39 return self._valid
40
41 def __getstate__(self):
42 odict = self.__dict__.copy()
43 odict['_valid'] = None
44 odict.pop('auth')
45 return odict
46
47 def __setstate__(self, odict):
48 self.__dict__.update(odict)
49 self.auth = tweepy.OAuthHandler(self.consumer_key,
50 self.consumer_secret)
51
52 def __eq__(self, other):
53 return isinstance(other, Credentials) \
54 and self.consumer_key == other.consumer_key \
55 and self.consumer_secret == other.consumer_secret
56
57
58 class TwitterAPI:
59 """ Fetch tweets from the Tweeter API.
60
61 Notes:
62 Results across multiple searches are aggregated. To remove tweets form
63 previous searches and only return results from the last search either
64 call `reset` method before searching or provide `collecting=False`
65 argument to search method.
66 """
67 attributes = []
68 class_vars = [
69 (data.DiscreteVariable('Author'), lambda doc: '@' + doc.author.screen_name),
70 ]
71
72 tv = data.TimeVariable('Date')
73 metas = [
74 (data.StringVariable('Content'), lambda doc: doc.text),
75 (tv, lambda doc: TwitterAPI.tv.parse(doc.created_at.isoformat())),
76 (data.DiscreteVariable('Language'), lambda doc: doc.lang),
77 (data.DiscreteVariable('Location'), lambda doc: getattr(doc.place, 'country_code', None)),
78 (data.ContinuousVariable('Number of Likes', number_of_decimals=0),
79 lambda doc: doc.favorite_count),
80 (data.ContinuousVariable('Number of Retweets', number_of_decimals=0),
81 lambda doc: doc.retweet_count),
82 (data.DiscreteVariable('In Reply To'),
83 lambda doc: '@' + doc.in_reply_to_screen_name if doc.in_reply_to_screen_name else ''),
84 (data.DiscreteVariable('Author Name'), lambda doc: doc.author.name),
85 (data.StringVariable('Author Description'), lambda doc: doc.author.description),
86 (data.ContinuousVariable('Author Statuses Count', number_of_decimals=0),
87 lambda doc: doc.author.statuses_count),
88 (data.ContinuousVariable('Author Favourites Count', number_of_decimals=0),
89 lambda doc: doc.author.favourites_count),
90 (data.ContinuousVariable('Author Friends Count', number_of_decimals=0),
91 lambda doc: doc.author.friends_count),
92 (data.ContinuousVariable('Author Followers Count', number_of_decimals=0),
93 lambda doc: doc.author.followers_count),
94 (data.ContinuousVariable('Author Listed Count', number_of_decimals=0),
95 lambda doc: doc.author.listed_count),
96 (data.DiscreteVariable('Author Verified'), lambda doc: str(doc.author.verified)),
97 (data.ContinuousVariable('Longitude'),
98 lambda doc: coordinates_geoJSON(doc.coordinates)[0]),
99 (data.ContinuousVariable('Latitude'),
100 lambda doc: coordinates_geoJSON(doc.coordinates)[1]),
101 ]
102
103 text_features = [metas[0][0]] # Content
104 string_attributes = [m for m, _ in metas
105 if isinstance(m, data.StringVariable)]
106
107 def __init__(self, credentials,
108 on_progress=None, should_break=None,
109 on_error=None, on_rate_limit=None):
110 self.key = credentials
111 self.api = tweepy.API(credentials.auth)
112 self.container = OrderedDict()
113 self.search_history = []
114
115 # Callbacks:
116 self.on_error = on_error
117 self.on_rate_limit = on_rate_limit
118 self.on_progress = on_progress or (lambda *args: args)
119 self.should_break = should_break or (lambda *args: False)
120
121 @property
122 def tweets(self):
123 return self.container.values()
124
125 def search_content(self, content, *, max_tweets=0,
126 lang=None, allow_retweets=True,
127 collecting=False):
128 """ Search by content.
129
130 Args:
131 content (list of str): A list of key words to search for.
132 max_tweets (int): If greater than zero limits the number of
133 downloaded tweets.
134 lang (str): A language's code (either ISO 639-1 or ISO 639-3
135 formats).
136 allow_retweets(bool): Whether to download retweets.
137 collecting (bool): Whether to collect results across multiple
138 search calls.
139
140 Returns:
141 Corpus
142 """
143 if not collecting:
144 self.reset()
145
146 if max_tweets == 0:
147 max_tweets = float('Inf')
148
149 def build_query():
150 nonlocal content
151 if not content:
152 q = 'from: '
153 else:
154 if not isinstance(content, list):
155 content = [content]
156 q = ' OR '.join(['"{}"'.format(q) for q in content])
157 if not allow_retweets:
158 q += ' -filter:retweets'
159 return q
160
161 query = build_query()
162 cursor = tweepy.Cursor(self.api.search, q=query, lang=lang)
163 corpus, count = self.fetch(cursor, max_tweets)
164 self.append_history('Content', content, lang if lang else 'Any',
165 str(allow_retweets), count)
166 return corpus
167
168 def search_authors(self, authors, *, max_tweets=0, collecting=False):
169 """ Search by authors.
170
171 Args:
172 authors (list of str): A list of authors to search for.
173 max_tweets (int): If greater than zero limits the number of
174 downloaded tweets.
175 collecting (bool): Whether to collect results across multiple
176 search calls.
177
178 Returns:
179 Corpus
180 """
181 if not collecting:
182 self.reset()
183
184 if max_tweets == 0: # set to max allowed for progress
185 max_tweets = 3200
186
187 if not isinstance(authors, list):
188 authors = [authors]
189
190 cursors = [tweepy.Cursor(self.api.user_timeline, screen_name=a)
191 for a in authors]
192 corpus, count = self.fetch(cursors, max_tweets)
193 self.append_history('Author', authors, None, None, count)
194 return corpus
195
196 def fetch(self, cursors, max_tweets):
197 if not isinstance(cursors, list):
198 cursors = [cursors]
199
200 count = 0
201 try:
202 for i, cursor in enumerate(cursors):
203 for j, tweet in enumerate(cursor.items(max_tweets), start=1):
204 if self.should_break():
205 break
206 if tweet.id not in self.container:
207 count += 1
208 self.container[tweet.id] = tweet
209 if j % 20 == 0:
210 self.on_progress(len(self.container),
211 (i*max_tweets + j)/
212 (len(cursors)*max_tweets))
213 if self.should_break():
214 break
215 except tweepy.TweepError as e:
216 if e.response.status_code == 429 and self.on_rate_limit:
217 self.on_rate_limit()
218 elif self.on_error:
219 self.on_error(str(e))
220 return None, 0
221 return self.create_corpus(), count
222
223 def create_corpus(self):
224 return Corpus.from_documents(self.tweets, 'Twitter', self.attributes,
225 self.class_vars, self.metas,
226 title_indices=[-1])
227
228 def reset(self):
229 """ Removes all downloaded tweets. """
230 self.search_history = []
231 self.container = OrderedDict()
232
233 def append_history(self, mode, query, lang, allow_retweets, n_tweets):
234 query = ', '.join(query) if isinstance(query, Iterable) else query
235 if lang in code2lang.keys():
236 lang = code2lang[lang]
237 self.search_history.append((
238 ('Query', query),
239 ('Search by', mode),
240 ('Language', lang),
241 ('Allow retweets', allow_retweets),
242 ('Tweets count', n_tweets),
243 ))
244
245 def report(self):
246 return self.search_history
247
[end of orangecontrib/text/twitter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/orangecontrib/text/twitter.py b/orangecontrib/text/twitter.py
--- a/orangecontrib/text/twitter.py
+++ b/orangecontrib/text/twitter.py
@@ -71,7 +71,7 @@
tv = data.TimeVariable('Date')
metas = [
- (data.StringVariable('Content'), lambda doc: doc.text),
+ (data.StringVariable('Content'), lambda doc: doc.full_text),
(tv, lambda doc: TwitterAPI.tv.parse(doc.created_at.isoformat())),
(data.DiscreteVariable('Language'), lambda doc: doc.lang),
(data.DiscreteVariable('Location'), lambda doc: getattr(doc.place, 'country_code', None)),
@@ -159,7 +159,8 @@
return q
query = build_query()
- cursor = tweepy.Cursor(self.api.search, q=query, lang=lang)
+ cursor = tweepy.Cursor(self.api.search, q=query, lang=lang,
+ tweet_mode='extended')
corpus, count = self.fetch(cursor, max_tweets)
self.append_history('Content', content, lang if lang else 'Any',
str(allow_retweets), count)
|
{"golden_diff": "diff --git a/orangecontrib/text/twitter.py b/orangecontrib/text/twitter.py\n--- a/orangecontrib/text/twitter.py\n+++ b/orangecontrib/text/twitter.py\n@@ -71,7 +71,7 @@\n \n tv = data.TimeVariable('Date')\n metas = [\n- (data.StringVariable('Content'), lambda doc: doc.text),\n+ (data.StringVariable('Content'), lambda doc: doc.full_text),\n (tv, lambda doc: TwitterAPI.tv.parse(doc.created_at.isoformat())),\n (data.DiscreteVariable('Language'), lambda doc: doc.lang),\n (data.DiscreteVariable('Location'), lambda doc: getattr(doc.place, 'country_code', None)),\n@@ -159,7 +159,8 @@\n return q\n \n query = build_query()\n- cursor = tweepy.Cursor(self.api.search, q=query, lang=lang)\n+ cursor = tweepy.Cursor(self.api.search, q=query, lang=lang,\n+ tweet_mode='extended')\n corpus, count = self.fetch(cursor, max_tweets)\n self.append_history('Content', content, lang if lang else 'Any',\n str(allow_retweets), count)\n", "issue": "Twitter: truncated text\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.5.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.17.dev\r\n\r\n##### Expected behavior\r\nTwitter retrieves the entire text of tweets.\r\n\r\n\r\n##### Actual behavior\r\nTwitter truncates text.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nRetrieve any sample of tweets.\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n<img width=\"637\" alt=\"screen shot 2018-09-25 at 14 30 13\" src=\"https://user-images.githubusercontent.com/12524972/46014492-c58e4a00-c0cf-11e8-94f7-f8a12e36cf57.png\">\r\n\r\n\r\n\n", "before_files": [{"content": "from collections import OrderedDict, Iterable\n\nimport tweepy\n\nfrom Orange import data\nfrom orangecontrib.text import Corpus\nfrom orangecontrib.text.language_codes import code2lang\n\n__all__ = ['Credentials', 'TwitterAPI']\n\n\ndef coordinates_geoJSON(json):\n if json:\n return json.get('coordinates', [None, None])\n return [None, None]\n\n\nclass Credentials:\n \"\"\" Twitter API credentials. \"\"\"\n\n def __init__(self, consumer_key, consumer_secret):\n self.consumer_key = consumer_key\n self.consumer_secret = consumer_secret\n self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n self._valid = None\n\n @property\n def valid(self):\n if self._valid is None:\n self.check()\n return self._valid\n\n def check(self):\n try:\n self.auth.get_authorization_url()\n self._valid = True\n except tweepy.TweepError:\n self._valid = False\n return self._valid\n\n def __getstate__(self):\n odict = self.__dict__.copy()\n odict['_valid'] = None\n odict.pop('auth')\n return odict\n\n def __setstate__(self, odict):\n self.__dict__.update(odict)\n self.auth = tweepy.OAuthHandler(self.consumer_key,\n self.consumer_secret)\n\n def __eq__(self, other):\n return isinstance(other, Credentials) \\\n and self.consumer_key == other.consumer_key \\\n and self.consumer_secret == other.consumer_secret\n\n\nclass TwitterAPI:\n \"\"\" Fetch tweets from the Tweeter API.\n\n Notes:\n Results across multiple searches are aggregated. 
To remove tweets form\n previous searches and only return results from the last search either\n call `reset` method before searching or provide `collecting=False`\n argument to search method.\n \"\"\"\n attributes = []\n class_vars = [\n (data.DiscreteVariable('Author'), lambda doc: '@' + doc.author.screen_name),\n ]\n\n tv = data.TimeVariable('Date')\n metas = [\n (data.StringVariable('Content'), lambda doc: doc.text),\n (tv, lambda doc: TwitterAPI.tv.parse(doc.created_at.isoformat())),\n (data.DiscreteVariable('Language'), lambda doc: doc.lang),\n (data.DiscreteVariable('Location'), lambda doc: getattr(doc.place, 'country_code', None)),\n (data.ContinuousVariable('Number of Likes', number_of_decimals=0),\n lambda doc: doc.favorite_count),\n (data.ContinuousVariable('Number of Retweets', number_of_decimals=0),\n lambda doc: doc.retweet_count),\n (data.DiscreteVariable('In Reply To'),\n lambda doc: '@' + doc.in_reply_to_screen_name if doc.in_reply_to_screen_name else ''),\n (data.DiscreteVariable('Author Name'), lambda doc: doc.author.name),\n (data.StringVariable('Author Description'), lambda doc: doc.author.description),\n (data.ContinuousVariable('Author Statuses Count', number_of_decimals=0),\n lambda doc: doc.author.statuses_count),\n (data.ContinuousVariable('Author Favourites Count', number_of_decimals=0),\n lambda doc: doc.author.favourites_count),\n (data.ContinuousVariable('Author Friends Count', number_of_decimals=0),\n lambda doc: doc.author.friends_count),\n (data.ContinuousVariable('Author Followers Count', number_of_decimals=0),\n lambda doc: doc.author.followers_count),\n (data.ContinuousVariable('Author Listed Count', number_of_decimals=0),\n lambda doc: doc.author.listed_count),\n (data.DiscreteVariable('Author Verified'), lambda doc: str(doc.author.verified)),\n (data.ContinuousVariable('Longitude'),\n lambda doc: coordinates_geoJSON(doc.coordinates)[0]),\n (data.ContinuousVariable('Latitude'),\n lambda doc: coordinates_geoJSON(doc.coordinates)[1]),\n ]\n\n text_features = [metas[0][0]] # Content\n string_attributes = [m for m, _ in metas\n if isinstance(m, data.StringVariable)]\n\n def __init__(self, credentials,\n on_progress=None, should_break=None,\n on_error=None, on_rate_limit=None):\n self.key = credentials\n self.api = tweepy.API(credentials.auth)\n self.container = OrderedDict()\n self.search_history = []\n\n # Callbacks:\n self.on_error = on_error\n self.on_rate_limit = on_rate_limit\n self.on_progress = on_progress or (lambda *args: args)\n self.should_break = should_break or (lambda *args: False)\n\n @property\n def tweets(self):\n return self.container.values()\n\n def search_content(self, content, *, max_tweets=0,\n lang=None, allow_retweets=True,\n collecting=False):\n \"\"\" Search by content.\n\n Args:\n content (list of str): A list of key words to search for.\n max_tweets (int): If greater than zero limits the number of\n downloaded tweets.\n lang (str): A language's code (either ISO 639-1 or ISO 639-3\n formats).\n allow_retweets(bool): Whether to download retweets.\n collecting (bool): Whether to collect results across multiple\n search calls.\n\n Returns:\n Corpus\n \"\"\"\n if not collecting:\n self.reset()\n\n if max_tweets == 0:\n max_tweets = float('Inf')\n\n def build_query():\n nonlocal content\n if not content:\n q = 'from: '\n else:\n if not isinstance(content, list):\n content = [content]\n q = ' OR '.join(['\"{}\"'.format(q) for q in content])\n if not allow_retweets:\n q += ' -filter:retweets'\n return q\n\n query = build_query()\n cursor 
= tweepy.Cursor(self.api.search, q=query, lang=lang)\n corpus, count = self.fetch(cursor, max_tweets)\n self.append_history('Content', content, lang if lang else 'Any',\n str(allow_retweets), count)\n return corpus\n\n def search_authors(self, authors, *, max_tweets=0, collecting=False):\n \"\"\" Search by authors.\n\n Args:\n authors (list of str): A list of authors to search for.\n max_tweets (int): If greater than zero limits the number of\n downloaded tweets.\n collecting (bool): Whether to collect results across multiple\n search calls.\n\n Returns:\n Corpus\n \"\"\"\n if not collecting:\n self.reset()\n\n if max_tweets == 0: # set to max allowed for progress\n max_tweets = 3200\n\n if not isinstance(authors, list):\n authors = [authors]\n\n cursors = [tweepy.Cursor(self.api.user_timeline, screen_name=a)\n for a in authors]\n corpus, count = self.fetch(cursors, max_tweets)\n self.append_history('Author', authors, None, None, count)\n return corpus\n\n def fetch(self, cursors, max_tweets):\n if not isinstance(cursors, list):\n cursors = [cursors]\n\n count = 0\n try:\n for i, cursor in enumerate(cursors):\n for j, tweet in enumerate(cursor.items(max_tweets), start=1):\n if self.should_break():\n break\n if tweet.id not in self.container:\n count += 1\n self.container[tweet.id] = tweet\n if j % 20 == 0:\n self.on_progress(len(self.container),\n (i*max_tweets + j)/\n (len(cursors)*max_tweets))\n if self.should_break():\n break\n except tweepy.TweepError as e:\n if e.response.status_code == 429 and self.on_rate_limit:\n self.on_rate_limit()\n elif self.on_error:\n self.on_error(str(e))\n return None, 0\n return self.create_corpus(), count\n\n def create_corpus(self):\n return Corpus.from_documents(self.tweets, 'Twitter', self.attributes,\n self.class_vars, self.metas,\n title_indices=[-1])\n\n def reset(self):\n \"\"\" Removes all downloaded tweets. \"\"\"\n self.search_history = []\n self.container = OrderedDict()\n\n def append_history(self, mode, query, lang, allow_retweets, n_tweets):\n query = ', '.join(query) if isinstance(query, Iterable) else query\n if lang in code2lang.keys():\n lang = code2lang[lang]\n self.search_history.append((\n ('Query', query),\n ('Search by', mode),\n ('Language', lang),\n ('Allow retweets', allow_retweets),\n ('Tweets count', n_tweets),\n ))\n\n def report(self):\n return self.search_history\n", "path": "orangecontrib/text/twitter.py"}]}
| 3,315 | 252 |
gh_patches_debug_14471
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1333
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[FEAT] Support sklearn 0.23.2
Currently we only support sklearn versions >= 1.0. We need to try to support 0.23.2 as well, since it is widely used by many environments and frameworks.
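
One pattern that would presumably help, and that the codebase already uses when building the OrdinalEncoder preprocessing step, is to route estimator keyword arguments through a helper that drops any argument the installed sklearn version does not accept. A rough sketch of such a helper is below; the real `run_available_kwargs` in `deepchecks.utils.function` may be implemented differently, and which constructor arguments are missing in 0.23.2 is an assumption.

```python
import inspect

def run_available_kwargs(func, **kwargs):
    """Call `func` with only the keyword arguments its signature accepts.

    Sketch of the idea: constructor arguments added after sklearn 0.23.2
    are silently dropped instead of raising TypeError on older versions.
    """
    accepted = inspect.signature(func).parameters
    filtered = {key: value for key, value in kwargs.items() if key in accepted}
    return func(**filtered)
```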
</issue>
<code>
[start of deepchecks/core/check_utils/whole_dataset_drift_utils.py]
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing common WholeDatasetDriftCheck (domain classifier drift) utils."""
12
13 from typing import List
14 import warnings
15
16 import numpy as np
17 import pandas as pd
18 from plotly.subplots import make_subplots
19
20 from sklearn.pipeline import Pipeline
21 from sklearn.compose import ColumnTransformer
22
23 with warnings.catch_warnings():
24 warnings.simplefilter('ignore')
25 from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import
26
27 from sklearn.ensemble import HistGradientBoostingClassifier
28 from sklearn.metrics import roc_auc_score
29 from sklearn.preprocessing import OrdinalEncoder
30 from sklearn.model_selection import train_test_split
31 import plotly.graph_objects as go
32
33 from deepchecks.tabular import Dataset
34 from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces
35 from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder
36 from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none
37 from deepchecks.utils.function import run_available_kwargs
38 from deepchecks.utils.strings import format_percent
39 from deepchecks.utils.typing import Hashable
40
41
42 def run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,
43 numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,
44 random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,
45 max_num_categories_for_display: int, show_categories_by: str,
46 min_meaningful_drift_score: float):
47 """Calculate whole dataset drift."""
48 domain_classifier = generate_model(numerical_features, cat_features, random_state)
49
50 train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)
51 test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)
52
53 # create new dataset, with label denoting whether sample belongs to test dataset
54 domain_class_df = pd.concat([train_sample_df, test_sample_df])
55 domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))
56
57 x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,
58 stratify=domain_class_labels,
59 random_state=random_state,
60 test_size=test_size)
61
62 domain_classifier = domain_classifier.fit(x_train, y_train)
63
64 y_test.name = 'belongs_to_test'
65 domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),
66 cat_features=cat_features, label='belongs_to_test')
67
68 # calculate feature importance of domain_classifier, containing the information which features separate
69 # the dataset best.
70 fi, importance_type = calculate_feature_importance_or_none(
71 domain_classifier,
72 domain_test_dataset,
73 force_permutation=True,
74 permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}
75 )
76
77 fi = fi.sort_values(ascending=False) if fi is not None else None
78
79 domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])
80 drift_score = auc_to_drift_score(domain_classifier_auc)
81
82 values_dict = {
83 'domain_classifier_auc': domain_classifier_auc,
84 'domain_classifier_drift_score': drift_score,
85 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},
86 }
87
88 feature_importance_note = f"""
89 <span>
90 The percents of explained dataset difference are the importance values for the feature calculated
91 using `{importance_type}`.
92 </span><br><br>
93 """
94
95 if fi is not None and drift_score > min_meaningful_drift_score:
96 top_fi = fi.head(n_top_columns)
97 top_fi = top_fi.loc[top_fi > min_feature_importance]
98 else:
99 top_fi = None
100
101 if top_fi is not None and len(top_fi):
102 score = values_dict['domain_classifier_drift_score']
103
104 displays = [feature_importance_note, build_drift_plot(score),
105 '<h3>Main features contributing to drift</h3>',
106 N_TOP_MESSAGE % n_top_columns]
107 displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,
108 max_num_categories_for_display, show_categories_by)
109 for feature in top_fi.index]
110 else:
111 displays = None
112
113 return values_dict, displays
114
115
116 def generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],
117 random_state: int = 42) -> Pipeline:
118 """Generate the unfitted Domain Classifier model."""
119 categorical_transformer = Pipeline(
120 steps=[('rare', RareCategoryEncoder(254)),
121 ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
122 unknown_value=np.nan,
123 dtype=np.float64))]
124 )
125
126 preprocessor = ColumnTransformer(
127 transformers=[
128 ('num', 'passthrough', numerical_columns),
129 ('cat', categorical_transformer, categorical_columns),
130 ]
131 )
132
133 return Pipeline(
134 steps=[('preprocessing', preprocessor),
135 ('model', HistGradientBoostingClassifier(
136 max_depth=2, max_iter=10, random_state=random_state,
137 categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)
138 ))])
139
140
141 def auc_to_drift_score(auc: float) -> float:
142 """Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.
143
144 Parameters
145 ----------
146 auc : float
147 auc of the Domain Classifier
148 """
149 return max(2 * auc - 1, 0)
150
151
152 def build_drift_plot(score):
153 """Build traffic light drift plot."""
154 bar_traces, x_axis, y_axis = drift_score_bar_traces(score)
155 x_axis['title'] = 'Drift score'
156 drift_plot = go.Figure(layout=dict(
157 title='Drift Score - Whole Dataset Total',
158 xaxis=x_axis,
159 yaxis=y_axis,
160 width=700,
161 height=200
162
163 ))
164
165 drift_plot.add_traces(bar_traces)
166 return drift_plot
167
168
169 def display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,
170 max_num_categories, show_categories_by):
171 """Display a distribution comparison plot for the given columns."""
172 column_name = train_column.name
173
174 title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} of dataset difference'
175 dist_traces, xaxis_layout, yaxis_layout = \
176 feature_distribution_traces(train_column.dropna(),
177 test_column.dropna(),
178 column_name,
179 is_categorical=column_name in cat_features,
180 max_num_categories=max_num_categories,
181 show_categories_by=show_categories_by)
182
183 all_categories = list(set(train_column).union(set(test_column)))
184 add_footnote = column_name in cat_features and len(all_categories) > max_num_categories
185
186 if add_footnote:
187 fig = make_subplots(rows=2, cols=1, vertical_spacing=0.6, shared_yaxes=False, shared_xaxes=False,
188 row_heights=[0.8, 0.2],
189 subplot_titles=[title])
190
191 fig.add_traces(dist_traces)
192
193 param_to_print_dict = {
194 'train_largest': 'largest categories (by train)',
195 'test_largest': 'largest categories (by test)',
196 'largest_difference': 'largest difference between categories'
197 }
198 train_data_percents = dist_traces[0].y.sum()
199 test_data_percents = dist_traces[1].y.sum()
200
201 fig.add_annotation(
202 x=0, y=-0.2, showarrow=False, xref='paper', yref='paper', xanchor='left',
203 text=f'* Showing the top {max_num_categories} {param_to_print_dict[show_categories_by]} out of '
204 f'total {len(all_categories)} categories.'
205 f'<br>Shown data is {format_percent(train_data_percents)} of train data and '
206 f'{format_percent(test_data_percents)} of test data.'
207 )
208
209 else:
210 fig = go.Figure()
211 fig.add_traces(dist_traces)
212
213 layout = go.Layout(
214 title=title,
215 xaxis=xaxis_layout,
216 yaxis=yaxis_layout,
217 legend=dict(
218 title='Dataset',
219 yanchor='top',
220 y=0.9,
221 xanchor='left'),
222 width=700,
223 height=300
224 )
225
226 fig.update_layout(layout)
227
228 return fig
229
[end of deepchecks/core/check_utils/whole_dataset_drift_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deepchecks/core/check_utils/whole_dataset_drift_utils.py b/deepchecks/core/check_utils/whole_dataset_drift_utils.py
--- a/deepchecks/core/check_utils/whole_dataset_drift_utils.py
+++ b/deepchecks/core/check_utils/whole_dataset_drift_utils.py
@@ -132,10 +132,11 @@
return Pipeline(
steps=[('preprocessing', preprocessor),
- ('model', HistGradientBoostingClassifier(
- max_depth=2, max_iter=10, random_state=random_state,
- categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)
- ))])
+ ('model', run_available_kwargs(HistGradientBoostingClassifier,
+ max_depth=2, max_iter=10, random_state=random_state,
+ categorical_features=[False] * len(numerical_columns)
+ + [True] * len(categorical_columns)
+ ))])
def auc_to_drift_score(auc: float) -> float:
|
{"golden_diff": "diff --git a/deepchecks/core/check_utils/whole_dataset_drift_utils.py b/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n--- a/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n+++ b/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n@@ -132,10 +132,11 @@\n \n return Pipeline(\n steps=[('preprocessing', preprocessor),\n- ('model', HistGradientBoostingClassifier(\n- max_depth=2, max_iter=10, random_state=random_state,\n- categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)\n- ))])\n+ ('model', run_available_kwargs(HistGradientBoostingClassifier,\n+ max_depth=2, max_iter=10, random_state=random_state,\n+ categorical_features=[False] * len(numerical_columns)\n+ + [True] * len(categorical_columns)\n+ ))])\n \n \n def auc_to_drift_score(auc: float) -> float:\n", "issue": "[FEAT] Support sklearn 0.23.2\nCurrently we only support sklearn versions >= 1.0. We need to try to support the 0.23.2 as it is widely used by many environments and frameworks.\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing common WholeDatasetDriftCheck (domain classifier drift) utils.\"\"\"\n\nfrom typing import List\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom plotly.subplots import make_subplots\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import\n\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.model_selection import train_test_split\nimport plotly.graph_objects as go\n\nfrom deepchecks.tabular import Dataset\nfrom deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces\nfrom deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder\nfrom deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none\nfrom deepchecks.utils.function import run_available_kwargs\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\ndef run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,\n numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,\n random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,\n max_num_categories_for_display: int, show_categories_by: str,\n min_meaningful_drift_score: float):\n \"\"\"Calculate whole dataset drift.\"\"\"\n domain_classifier = generate_model(numerical_features, cat_features, random_state)\n\n train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)\n test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)\n\n # create new dataset, with label denoting whether sample belongs to test dataset\n 
domain_class_df = pd.concat([train_sample_df, test_sample_df])\n domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))\n\n x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,\n stratify=domain_class_labels,\n random_state=random_state,\n test_size=test_size)\n\n domain_classifier = domain_classifier.fit(x_train, y_train)\n\n y_test.name = 'belongs_to_test'\n domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),\n cat_features=cat_features, label='belongs_to_test')\n\n # calculate feature importance of domain_classifier, containing the information which features separate\n # the dataset best.\n fi, importance_type = calculate_feature_importance_or_none(\n domain_classifier,\n domain_test_dataset,\n force_permutation=True,\n permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}\n )\n\n fi = fi.sort_values(ascending=False) if fi is not None else None\n\n domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])\n drift_score = auc_to_drift_score(domain_classifier_auc)\n\n values_dict = {\n 'domain_classifier_auc': domain_classifier_auc,\n 'domain_classifier_drift_score': drift_score,\n 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},\n }\n\n feature_importance_note = f\"\"\"\n <span>\n The percents of explained dataset difference are the importance values for the feature calculated\n using `{importance_type}`.\n </span><br><br>\n \"\"\"\n\n if fi is not None and drift_score > min_meaningful_drift_score:\n top_fi = fi.head(n_top_columns)\n top_fi = top_fi.loc[top_fi > min_feature_importance]\n else:\n top_fi = None\n\n if top_fi is not None and len(top_fi):\n score = values_dict['domain_classifier_drift_score']\n\n displays = [feature_importance_note, build_drift_plot(score),\n '<h3>Main features contributing to drift</h3>',\n N_TOP_MESSAGE % n_top_columns]\n displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,\n max_num_categories_for_display, show_categories_by)\n for feature in top_fi.index]\n else:\n displays = None\n\n return values_dict, displays\n\n\ndef generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],\n random_state: int = 42) -> Pipeline:\n \"\"\"Generate the unfitted Domain Classifier model.\"\"\"\n categorical_transformer = Pipeline(\n steps=[('rare', RareCategoryEncoder(254)),\n ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n unknown_value=np.nan,\n dtype=np.float64))]\n )\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', 'passthrough', numerical_columns),\n ('cat', categorical_transformer, categorical_columns),\n ]\n )\n\n return Pipeline(\n steps=[('preprocessing', preprocessor),\n ('model', HistGradientBoostingClassifier(\n max_depth=2, max_iter=10, random_state=random_state,\n categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)\n ))])\n\n\ndef auc_to_drift_score(auc: float) -> float:\n \"\"\"Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.\n\n Parameters\n ----------\n auc : float\n auc of the Domain Classifier\n \"\"\"\n return max(2 * auc - 1, 0)\n\n\ndef build_drift_plot(score):\n \"\"\"Build traffic light drift plot.\"\"\"\n bar_traces, x_axis, y_axis = drift_score_bar_traces(score)\n x_axis['title'] = 'Drift score'\n drift_plot = 
go.Figure(layout=dict(\n title='Drift Score - Whole Dataset Total',\n xaxis=x_axis,\n yaxis=y_axis,\n width=700,\n height=200\n\n ))\n\n drift_plot.add_traces(bar_traces)\n return drift_plot\n\n\ndef display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,\n max_num_categories, show_categories_by):\n \"\"\"Display a distribution comparison plot for the given columns.\"\"\"\n column_name = train_column.name\n\n title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} of dataset difference'\n dist_traces, xaxis_layout, yaxis_layout = \\\n feature_distribution_traces(train_column.dropna(),\n test_column.dropna(),\n column_name,\n is_categorical=column_name in cat_features,\n max_num_categories=max_num_categories,\n show_categories_by=show_categories_by)\n\n all_categories = list(set(train_column).union(set(test_column)))\n add_footnote = column_name in cat_features and len(all_categories) > max_num_categories\n\n if add_footnote:\n fig = make_subplots(rows=2, cols=1, vertical_spacing=0.6, shared_yaxes=False, shared_xaxes=False,\n row_heights=[0.8, 0.2],\n subplot_titles=[title])\n\n fig.add_traces(dist_traces)\n\n param_to_print_dict = {\n 'train_largest': 'largest categories (by train)',\n 'test_largest': 'largest categories (by test)',\n 'largest_difference': 'largest difference between categories'\n }\n train_data_percents = dist_traces[0].y.sum()\n test_data_percents = dist_traces[1].y.sum()\n\n fig.add_annotation(\n x=0, y=-0.2, showarrow=False, xref='paper', yref='paper', xanchor='left',\n text=f'* Showing the top {max_num_categories} {param_to_print_dict[show_categories_by]} out of '\n f'total {len(all_categories)} categories.'\n f'<br>Shown data is {format_percent(train_data_percents)} of train data and '\n f'{format_percent(test_data_percents)} of test data.'\n )\n\n else:\n fig = go.Figure()\n fig.add_traces(dist_traces)\n\n layout = go.Layout(\n title=title,\n xaxis=xaxis_layout,\n yaxis=yaxis_layout,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.9,\n xanchor='left'),\n width=700,\n height=300\n )\n\n fig.update_layout(layout)\n\n return fig\n", "path": "deepchecks/core/check_utils/whole_dataset_drift_utils.py"}]}
| 3,174 | 230 |
gh_patches_debug_5576
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-1989
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Update.to_dict includes Update._effective_message
### Steps to reproduce
1. Send a message to the bot.
2.
```python
import json
from telegram import Bot
update = Bot('TOKEN').get_updates()[-1]
update.effective_chat
update.effective_user
update.effective_message
print(json.dumps(update.to_dict(), indent=2))
```
### Expected behaviour
```
{
"update_id": 601206924,
"message": {
"message_id": 3030,
"date": 1591742976,
"chat": {
"id": 447493381,
"type": "private",
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439"
},
"text": "1",
"entities": [],
"caption_entities": [],
"photo": [],
"new_chat_members": [],
"new_chat_photo": [],
"delete_chat_photo": false,
"group_chat_created": false,
"supergroup_chat_created": false,
"channel_chat_created": false,
"from": {
"id": 447493381,
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439",
"is_bot": false,
"language_code": "en"
}
}
}
```
### Actual behaviour
```
{
"update_id": 601206924,
"message": {
"message_id": 3030,
"date": 1591742976,
"chat": {
"id": 447493381,
"type": "private",
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439"
},
"text": "1",
"entities": [],
"caption_entities": [],
"photo": [],
"new_chat_members": [],
"new_chat_photo": [],
"delete_chat_photo": false,
"group_chat_created": false,
"supergroup_chat_created": false,
"channel_chat_created": false,
"from": {
"id": 447493381,
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439",
"is_bot": false,
"language_code": "en"
}
},
"_effective_user": {
"id": 447493381,
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439",
"is_bot": false,
"language_code": "en"
},
"_effective_chat": {
"id": 447493381,
"type": "private",
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439"
},
"_effective_message": {
"message_id": 3030,
"date": 1591742976,
"chat": {
"id": 447493381,
"type": "private",
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439"
},
"text": "1",
"entities": [],
"caption_entities": [],
"photo": [],
"new_chat_members": [],
"new_chat_photo": [],
"delete_chat_photo": false,
"group_chat_created": false,
"supergroup_chat_created": false,
"channel_chat_created": false,
"from": {
"id": 447493381,
"first_name": "\u041d\u0438\u043a\u043e\u043b\u0430\u0439",
"is_bot": false,
"language_code": "en"
}
}
}
```
This also applies to effective_* attributes of other classes, e.g. `Message.effective_attachment`
### Configuration
**Operating System:** windows 10
**Version of Python, python-telegram-bot & dependencies:**
```
python-telegram-bot 12.7
certifi 2020.04.05.1
future 0.18.2
Python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]
```
### Proposed solution
Make `TelegramObject.to_dict` ignore all attributes starting with an underscore rather than a fixed list.
https://github.com/python-telegram-bot/python-telegram-bot/blob/a42b68933c3628b9b98c83927661eddb61d1571a/telegram/base.py#L56-L67
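
A rough sketch of that change (illustrative only; the `bot` skip and the `from_user` renaming are kept from the current implementation):

```python
def to_dict(self):
    data = {}

    # Skip the bot reference and anything private (leading underscore),
    # instead of maintaining a hard-coded list of attribute names.
    for key, value in self.__dict__.items():
        if key == 'bot' or key.startswith('_'):
            continue
        if value is not None:
            data[key] = value.to_dict() if hasattr(value, 'to_dict') else value

    if data.get('from_user'):
        data['from'] = data.pop('from_user')
    return data
```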
</issue>
<code>
[start of telegram/base.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2020
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """Base class for Telegram Objects."""
20
21 try:
22 import ujson as json
23 except ImportError:
24 import json
25
26
27 class TelegramObject(object):
28 """Base class for most telegram objects."""
29
30 _id_attrs = ()
31
32 def __str__(self):
33 return str(self.to_dict())
34
35 def __getitem__(self, item):
36 return self.__dict__[item]
37
38 @classmethod
39 def de_json(cls, data, bot):
40 if not data:
41 return None
42
43 data = data.copy()
44
45 return data
46
47 def to_json(self):
48 """
49 Returns:
50 :obj:`str`
51
52 """
53
54 return json.dumps(self.to_dict())
55
56 def to_dict(self):
57 data = dict()
58
59 for key in iter(self.__dict__):
60 if key in ('bot',
61 '_id_attrs',
62 '_credentials',
63 '_decrypted_credentials',
64 '_decrypted_data',
65 '_decrypted_secret'):
66 continue
67
68 value = self.__dict__[key]
69 if value is not None:
70 if hasattr(value, 'to_dict'):
71 data[key] = value.to_dict()
72 else:
73 data[key] = value
74
75 if data.get('from_user'):
76 data['from'] = data.pop('from_user', None)
77 return data
78
79 def __eq__(self, other):
80 if isinstance(other, self.__class__):
81 return self._id_attrs == other._id_attrs
82 return super(TelegramObject, self).__eq__(other) # pylint: disable=no-member
83
84 def __hash__(self):
85 if self._id_attrs:
86 return hash((self.__class__, self._id_attrs)) # pylint: disable=no-member
87 return super(TelegramObject, self).__hash__()
88
[end of telegram/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/telegram/base.py b/telegram/base.py
--- a/telegram/base.py
+++ b/telegram/base.py
@@ -57,12 +57,7 @@
data = dict()
for key in iter(self.__dict__):
- if key in ('bot',
- '_id_attrs',
- '_credentials',
- '_decrypted_credentials',
- '_decrypted_data',
- '_decrypted_secret'):
+ if key == 'bot' or key.startswith('_'):
continue
value = self.__dict__[key]
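With that filter in place, any attribute cached under a leading underscore stays out of the serialised output. A quick sanity check (not part of the patch; `Demo` is a made-up subclass):
```python
from telegram import TelegramObject

class Demo(TelegramObject):
    def __init__(self):
        self.foo = 1               # public attribute, should be serialised
        self._cached = 'internal'  # private attribute, should be skipped

print(Demo().to_dict())
# before the patch: {'foo': 1, '_cached': 'internal'}
# after the patch:  {'foo': 1}
```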
|
{"golden_diff": "diff --git a/telegram/base.py b/telegram/base.py\n--- a/telegram/base.py\n+++ b/telegram/base.py\n@@ -57,12 +57,7 @@\n data = dict()\n \n for key in iter(self.__dict__):\n- if key in ('bot',\n- '_id_attrs',\n- '_credentials',\n- '_decrypted_credentials',\n- '_decrypted_data',\n- '_decrypted_secret'):\n+ if key == 'bot' or key.startswith('_'):\n continue\n \n value = self.__dict__[key]\n", "issue": "[BUG] Upate.to_dict includes Update._effective_message\n### Steps to reproduce\r\n1. Send a message to the bot.\r\n\r\n2.\r\n```python\r\nimport json\r\nfrom telegram import Bot\r\n\r\nupdate = Bot('TOKEN').get_updates()[-1]\r\nupdate.effective_chat\r\nupdate.effective_user\r\nupdate.effective_message\r\nprint(json.dumps(update.to_dict(), indent=2))\r\n```\r\n### Expected behaviour\r\n```\r\n{\r\n \"update_id\": 601206924,\r\n \"message\": {\r\n \"message_id\": 3030,\r\n \"date\": 1591742976,\r\n \"chat\": {\r\n \"id\": 447493381,\r\n \"type\": \"private\",\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\"\r\n },\r\n \"text\": \"1\",\r\n \"entities\": [],\r\n \"caption_entities\": [],\r\n \"photo\": [],\r\n \"new_chat_members\": [],\r\n \"new_chat_photo\": [],\r\n \"delete_chat_photo\": false,\r\n \"group_chat_created\": false,\r\n \"supergroup_chat_created\": false,\r\n \"channel_chat_created\": false,\r\n \"from\": {\r\n \"id\": 447493381,\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\",\r\n \"is_bot\": false,\r\n \"language_code\": \"en\"\r\n }\r\n }\r\n}\r\n```\r\n\r\n### Actual behaviour\r\n```\r\n{\r\n \"update_id\": 601206924,\r\n \"message\": {\r\n \"message_id\": 3030,\r\n \"date\": 1591742976,\r\n \"chat\": {\r\n \"id\": 447493381,\r\n \"type\": \"private\",\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\"\r\n },\r\n \"text\": \"1\",\r\n \"entities\": [],\r\n \"caption_entities\": [],\r\n \"photo\": [],\r\n \"new_chat_members\": [],\r\n \"new_chat_photo\": [],\r\n \"delete_chat_photo\": false,\r\n \"group_chat_created\": false,\r\n \"supergroup_chat_created\": false,\r\n \"channel_chat_created\": false,\r\n \"from\": {\r\n \"id\": 447493381,\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\",\r\n \"is_bot\": false,\r\n \"language_code\": \"en\"\r\n }\r\n },\r\n \"_effective_user\": {\r\n \"id\": 447493381,\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\",\r\n \"is_bot\": false,\r\n \"language_code\": \"en\"\r\n },\r\n \"_effective_chat\": {\r\n \"id\": 447493381,\r\n \"type\": \"private\",\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\"\r\n },\r\n \"_effective_message\": {\r\n \"message_id\": 3030,\r\n \"date\": 1591742976,\r\n \"chat\": {\r\n \"id\": 447493381,\r\n \"type\": \"private\",\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\"\r\n },\r\n \"text\": \"1\",\r\n \"entities\": [],\r\n \"caption_entities\": [],\r\n \"photo\": [],\r\n \"new_chat_members\": [],\r\n \"new_chat_photo\": [],\r\n \"delete_chat_photo\": false,\r\n \"group_chat_created\": false,\r\n \"supergroup_chat_created\": false,\r\n \"channel_chat_created\": false,\r\n \"from\": {\r\n \"id\": 447493381,\r\n \"first_name\": \"\\u041d\\u0438\\u043a\\u043e\\u043b\\u0430\\u0439\",\r\n \"is_bot\": false,\r\n \"language_code\": \"en\"\r\n }\r\n }\r\n}\r\n```\r\n\r\nThis also applies to effective_* attributes of other classes, e.g. 
`Message.effective_attachment`\r\n\r\n### Configuration\r\n**Operating System:** windows 10\r\n\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n```\r\npython-telegram-bot 12.7\r\ncertifi 2020.04.05.1\r\nfuture 0.18.2\r\nPython 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:37:02) [MSC v.1924 64 bit (AMD64)]\r\n```\r\n\r\n### Proposed solution\r\nMake `TelegramObject.to_dict` ignore all attributes starting with an underscore rather than a fixed list.\r\nhttps://github.com/python-telegram-bot/python-telegram-bot/blob/a42b68933c3628b9b98c83927661eddb61d1571a/telegram/base.py#L56-L67\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"Base class for Telegram Objects.\"\"\"\n\ntry:\n import ujson as json\nexcept ImportError:\n import json\n\n\nclass TelegramObject(object):\n \"\"\"Base class for most telegram objects.\"\"\"\n\n _id_attrs = ()\n\n def __str__(self):\n return str(self.to_dict())\n\n def __getitem__(self, item):\n return self.__dict__[item]\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n data = data.copy()\n\n return data\n\n def to_json(self):\n \"\"\"\n Returns:\n :obj:`str`\n\n \"\"\"\n\n return json.dumps(self.to_dict())\n\n def to_dict(self):\n data = dict()\n\n for key in iter(self.__dict__):\n if key in ('bot',\n '_id_attrs',\n '_credentials',\n '_decrypted_credentials',\n '_decrypted_data',\n '_decrypted_secret'):\n continue\n\n value = self.__dict__[key]\n if value is not None:\n if hasattr(value, 'to_dict'):\n data[key] = value.to_dict()\n else:\n data[key] = value\n\n if data.get('from_user'):\n data['from'] = data.pop('from_user', None)\n return data\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self._id_attrs == other._id_attrs\n return super(TelegramObject, self).__eq__(other) # pylint: disable=no-member\n\n def __hash__(self):\n if self._id_attrs:\n return hash((self.__class__, self._id_attrs)) # pylint: disable=no-member\n return super(TelegramObject, self).__hash__()\n", "path": "telegram/base.py"}]}
| 2,558 | 122 |
gh_patches_debug_20086
|
rasdani/github-patches
|
git_diff
|
python-telegram-bot__python-telegram-bot-3911
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add rich equality comparison to `WriteAccessAllowed`
The comparison should be based on the `web_app_name` attribute only.
See https://github.com/python-telegram-bot/python-telegram-bot/pull/3898#discussion_r1337582872
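A minimal sketch of the change, reusing the `_id_attrs` mechanism that already drives `TelegramObject.__eq__`/`__hash__` (placement inside `__init__` shown for illustration):
```python
def __init__(
    self, web_app_name: Optional[str] = None, *, api_kwargs: Optional[JSONDict] = None
):
    super().__init__(api_kwargs=api_kwargs)
    self.web_app_name: Optional[str] = web_app_name

    # Equality and hashing should consider web_app_name only.
    self._id_attrs = (self.web_app_name,)

    self._freeze()
```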
</issue>
<code>
[start of telegram/_writeaccessallowed.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2023
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains objects related to the write access allowed service message."""
20 from typing import Optional
21
22 from telegram._telegramobject import TelegramObject
23 from telegram._utils.types import JSONDict
24
25
26 class WriteAccessAllowed(TelegramObject):
27 """
28 This object represents a service message about a user allowing a bot to write messages after
29 adding the bot to the attachment menu or launching a Web App from a link.
30
31 .. versionadded:: 20.0
32
33 Args:
34 web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.
35
36 .. versionadded:: 20.3
37
38 Attributes:
39 web_app_name (:obj:`str`): Optional. Name of the Web App which was launched from a link.
40
41 .. versionadded:: 20.3
42
43 """
44
45 __slots__ = ("web_app_name",)
46
47 def __init__(
48 self, web_app_name: Optional[str] = None, *, api_kwargs: Optional[JSONDict] = None
49 ):
50 super().__init__(api_kwargs=api_kwargs)
51 self.web_app_name: Optional[str] = web_app_name
52
53 self._freeze()
54
[end of telegram/_writeaccessallowed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/telegram/_writeaccessallowed.py b/telegram/_writeaccessallowed.py
--- a/telegram/_writeaccessallowed.py
+++ b/telegram/_writeaccessallowed.py
@@ -28,7 +28,12 @@
This object represents a service message about a user allowing a bot to write messages after
adding the bot to the attachment menu or launching a Web App from a link.
+ Objects of this class are comparable in terms of equality. Two objects of this class are
+ considered equal, if their :attr:`web_app_name` is equal.
+
.. versionadded:: 20.0
+ .. versionchanged:: NEXT.VERSION
+ Added custom equality comparison for objects of this class.
Args:
web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.
@@ -50,4 +55,6 @@
super().__init__(api_kwargs=api_kwargs)
self.web_app_name: Optional[str] = web_app_name
+ self._id_attrs = (self.web_app_name,)
+
self._freeze()
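With the patch applied, equality behaves as requested (a quick usage check, not part of the diff; the names are illustrative):
```python
from telegram import WriteAccessAllowed

a = WriteAccessAllowed(web_app_name="demo_app")
b = WriteAccessAllowed(web_app_name="demo_app")

assert a == b                                         # same web_app_name -> equal
assert a != WriteAccessAllowed(web_app_name="other")  # different name -> not equal
assert len({a, b}) == 1                               # hash is consistent with equality
```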
|
{"golden_diff": "diff --git a/telegram/_writeaccessallowed.py b/telegram/_writeaccessallowed.py\n--- a/telegram/_writeaccessallowed.py\n+++ b/telegram/_writeaccessallowed.py\n@@ -28,7 +28,12 @@\n This object represents a service message about a user allowing a bot to write messages after\n adding the bot to the attachment menu or launching a Web App from a link.\n \n+ Objects of this class are comparable in terms of equality. Two objects of this class are\n+ considered equal, if their :attr:`web_app_name` is equal.\n+\n .. versionadded:: 20.0\n+ .. versionchanged:: NEXT.VERSION\n+ Added custom equality comparison for objects of this class.\n \n Args:\n web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.\n@@ -50,4 +55,6 @@\n super().__init__(api_kwargs=api_kwargs)\n self.web_app_name: Optional[str] = web_app_name\n \n+ self._id_attrs = (self.web_app_name,)\n+\n self._freeze()\n", "issue": "Add rich equality comparison to `WriteAccessAllowed`\nThe comparison should be based on the `web_app_name` attribute only.\r\n\r\nSee https://github.com/python-telegram-bot/python-telegram-bot/pull/3898#discussion_r1337582872\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2023\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains objects related to the write access allowed service message.\"\"\"\nfrom typing import Optional\n\nfrom telegram._telegramobject import TelegramObject\nfrom telegram._utils.types import JSONDict\n\n\nclass WriteAccessAllowed(TelegramObject):\n \"\"\"\n This object represents a service message about a user allowing a bot to write messages after\n adding the bot to the attachment menu or launching a Web App from a link.\n\n .. versionadded:: 20.0\n\n Args:\n web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.\n\n .. versionadded:: 20.3\n\n Attributes:\n web_app_name (:obj:`str`): Optional. Name of the Web App which was launched from a link.\n\n .. versionadded:: 20.3\n\n \"\"\"\n\n __slots__ = (\"web_app_name\",)\n\n def __init__(\n self, web_app_name: Optional[str] = None, *, api_kwargs: Optional[JSONDict] = None\n ):\n super().__init__(api_kwargs=api_kwargs)\n self.web_app_name: Optional[str] = web_app_name\n\n self._freeze()\n", "path": "telegram/_writeaccessallowed.py"}]}
| 1,148 | 242 |